Dataset columns: repo_name (string, length 5 to 92), path (string, length 4 to 221), copies (string, 19 classes), size (string, length 4 to 6), content (string, length 766 to 896k), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51 to 99.9), line_max (int64, 32 to 997), alpha_frac (float64, 0.25 to 0.96), autogenerated (bool, 1 class), ratio (float64, 1.5 to 13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class).

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
EDUlib/edx-platform | common/lib/xmodule/xmodule/modulestore/exceptions.py | 1 | 3789 |
"""
Exceptions thrown by KeyStore objects
"""
class ItemNotFoundError(Exception):
pass
class ItemWriteConflictError(Exception):
pass
class MultipleCourseBlocksFound(Exception):
"""
    Raise this exception when iterating over the course blocks returns multiple course blocks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class MultipleLibraryBlocksFound(Exception):
"""
    Raise this exception when iterating over the library blocks returns multiple library blocks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class InsufficientSpecificationError(Exception):
pass
class OverSpecificationError(Exception):
pass
class InvalidLocationError(Exception):
pass
class NoPathToItem(Exception):
pass
class ReferentialIntegrityError(Exception):
"""
An incorrect pointer to an object exists. For example, 2 parents point to the same child, an
xblock points to a nonexistent child (which probably raises ItemNotFoundError instead depending
on context).
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class DuplicateItemError(Exception):
"""
Attempted to create an item which already exists.
"""
def __init__(self, element_id, store=None, collection=None):
super().__init__()
self.element_id = element_id
self.store = store
self.collection = collection
def __str__(self, *args, **kwargs):
"""
Print info about what's duplicated
"""
return "{store}[{collection}] already has {element_id} ({exception})".format(
store=self.store,
collection=self.collection,
element_id=self.element_id,
exception=Exception.__str__(self, *args, **kwargs),
)
class VersionConflictError(Exception):
"""
The caller asked for either draft or published head and gave a version which conflicted with it.
"""
def __init__(self, requestedLocation, currentHeadVersionGuid):
super().__init__('Requested {}, but current head is {}'.format(
requestedLocation,
currentHeadVersionGuid
))
class DuplicateCourseError(Exception):
"""
An attempt to create a course whose id duplicates an existing course's
"""
def __init__(self, course_id, existing_entry):
"""
existing_entry will have the who, when, and other properties of the existing entry
"""
super().__init__(
f'Cannot create course {course_id}, which duplicates {existing_entry}'
)
self.course_id = course_id
self.existing_entry = existing_entry
class InvalidBranchSetting(Exception):
"""
Raised when the process' branch setting did not match the required setting for the attempted operation on a store.
"""
def __init__(self, expected_setting, actual_setting):
super().__init__(f"Invalid branch: expected {expected_setting} but got {actual_setting}") # lint-amnesty, pylint: disable=line-too-long, super-with-arguments
self.expected_setting = expected_setting
self.actual_setting = actual_setting
class InvalidProctoringProvider(Exception):
"""
    Error with the selected proctoring provider, raised when the provider is unknown.
"""
def __init__(self, proctoring_provider, available_providers):
super().__init__()
self.proctoring_provider = proctoring_provider
self.available_providers = available_providers
def __str__(self, *args, **kwargs):
"""
Print details about error
"""
return f"The selected proctoring provider, {self.proctoring_provider}, is not a valid provider. " \
f"Please select from one of {self.available_providers}."
| agpl-3.0 | 3,610,771,503,910,648,300 | 29.071429 | 166 | 0.663236 | false | 4.484024 | false | false | false |
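A minimal caller-side sketch (not part of the edx-platform file above) showing how code that uses a modulestore typically handles these exceptions; the `store` object and its method signatures are hypothetical here.

# Hypothetical usage of the exceptions defined above.
from xmodule.modulestore.exceptions import DuplicateCourseError, ItemNotFoundError

def get_or_create_course(store, course_key, user_id):
    """Fetch a course, creating it if it does not exist yet (illustrative only)."""
    try:
        return store.get_course(course_key)  # may raise ItemNotFoundError
    except ItemNotFoundError:
        try:
            return store.create_course(course_key.org, course_key.course,
                                       course_key.run, user_id)
        except DuplicateCourseError:
            # Another process created the course in the meantime; read it instead.
            return store.get_course(course_key)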
stormi/tsunami | src/primaires/scripting/parser/nombre.py | 1 | 3620 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Nombre, détaillée plus bas."""
from fractions import Fraction
from .expression import Expression
from .delimiteurs import DELIMITEURS
class Nombre(Expression):
"""Expression Nombre.
Notez qu'un nombre peut être :
un entier
un flottant
une fraction
Tous ces nombres sont de toute façon convertis en fraction.
"""
nom = "nombre"
def __init__(self):
"""Constructeur de l'expression."""
Expression.__init__(self)
self.nombre = None
def __repr__(self):
return "nombre({})".format(self.nombre)
def __str__(self):
return "|blc|" + str(self.nombre) + "|ff|"
@classmethod
def parsable(cls, chaine):
"""Retourne True si la chaîne est parsable, False sinon."""
chaine = chaine.lstrip()
fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
if delimiteur in chaine]
fin = fins and min(fins) or None
chaine = chaine[:fin]
try:
nombre = Fraction(chaine)
except ValueError:
nombre = None
return nombre is not None
@classmethod
def parser(cls, chaine):
"""Parse la chaîne.
Retourne l'objet créé et la partie non interprétée de la chaîne.
"""
objet = Nombre()
chaine = chaine.lstrip()
fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
if delimiteur in chaine]
if fins:
fin = min(fins)
else:
fin = None
chaine_interpreter = chaine[:fin]
objet.nombre = Fraction(chaine_interpreter)
return objet, chaine[len(chaine_interpreter):]
def get_valeur(self, evt):
"""Retourne le nombre sous la forme d'un objet Fraction."""
return self.nombre
@property
def code_python(self):
"""Retourne le code Python associé."""
return repr(self.nombre)
| bsd-3-clause | -257,016,107,874,898,900 | 33.692308 | 79 | 0.660754 | false | 3.938865 | false | false | false |
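A small driver sketch (not from the tsunami repository) for the Nombre parser above; it assumes the `primaires.scripting` package is importable and that the space character is among DELIMITEURS.

# Hypothetical usage of the Nombre expression parser.
from primaires.scripting.parser.nombre import Nombre

chaine = "2/3 reste"
if Nombre.parsable(chaine):
    nombre, reste = Nombre.parser(chaine)
    print(nombre.get_valeur(None))  # -> 2/3 (a Fraction object)
    print(repr(reste))              # -> ' reste' (the uninterpreted part)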
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/relatedperson_tests.py | 1 | 7168 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import relatedperson
from .fhirdate import FHIRDate
class RelatedPersonTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("RelatedPerson", js["resourceType"])
return relatedperson.RelatedPerson(js)
def testRelatedPerson1(self):
inst = self.instantiate_from("relatedperson-example-f001-sarah.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson1(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson1(inst2)
def implRelatedPerson1(self, inst):
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.2.4.6.3")
self.assertEqual(inst.identifier[0].type.text, "BSN")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.name[0].family, "Abels")
self.assertEqual(inst.name[0].given[0], "Sarah")
self.assertEqual(inst.name[0].use, "usual")
self.assertEqual(inst.relationship.coding[0].code, "SIGOTHR")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "mobile")
self.assertEqual(inst.telecom[0].value, "0690383372")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "home")
self.assertEqual(inst.telecom[1].value, "[email protected]")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson2(self):
inst = self.instantiate_from("relatedperson-example-f002-ariadne.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson2(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson2(inst2)
def implRelatedPerson2(self, inst):
self.assertEqual(inst.birthDate.date, FHIRDate("1963").date)
self.assertEqual(inst.birthDate.as_json(), "1963")
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.name[0].text, "Ariadne Bor-Jansma")
self.assertEqual(inst.name[0].use, "usual")
self.assertEqual(inst.period.start.date, FHIRDate("1975").date)
self.assertEqual(inst.period.start.as_json(), "1975")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.relationship.coding[0].code, "SIGOTHR")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "home")
self.assertEqual(inst.telecom[0].value, "+31201234567")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson3(self):
inst = self.instantiate_from("relatedperson-example-peter.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson3(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson3(inst2)
def implRelatedPerson3(self, inst):
self.assertEqual(inst.address[0].city, "PleasantVille")
self.assertEqual(inst.address[0].line[0], "534 Erewhon St")
self.assertEqual(inst.address[0].postalCode, "3999")
self.assertEqual(inst.address[0].state, "Vic")
self.assertEqual(inst.address[0].use, "home")
self.assertEqual(inst.gender, "male")
self.assertEqual(inst.id, "peter")
self.assertEqual(inst.name[0].family, "Chalmers")
self.assertEqual(inst.name[0].given[0], "Peter")
self.assertEqual(inst.name[0].given[1], "James")
self.assertEqual(inst.name[0].use, "official")
self.assertEqual(inst.period.start.date, FHIRDate("2012-03-11").date)
self.assertEqual(inst.period.start.as_json(), "2012-03-11")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.photo[0].url, "Binary/f012")
self.assertEqual(inst.relationship.coding[0].code, "C")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v2/0131")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "(03) 5555 6473")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson4(self):
inst = self.instantiate_from("relatedperson-example.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson4(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson4(inst2)
def implRelatedPerson4(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].city, "Paris")
self.assertEqual(inst.address[0].country, "FRA")
self.assertEqual(inst.address[0].line[0], "43, Place du Marché Sainte Catherine")
self.assertEqual(inst.address[0].postalCode, "75004")
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "benedicte")
self.assertEqual(inst.identifier[0].system, "urn:oid:1.2.250.1.61")
self.assertEqual(inst.identifier[0].type.text, "INSEE")
self.assertEqual(inst.identifier[0].use, "usual")
self.assertEqual(inst.identifier[0].value, "272117510400399")
self.assertEqual(inst.name[0].family, "du Marché")
self.assertEqual(inst.name[0].given[0], "Bénédicte")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.photo[0].url, "Binary/f016")
self.assertEqual(inst.relationship.coding[0].code, "N")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v2/0131")
self.assertEqual(inst.relationship.coding[1].code, "WIFE")
self.assertEqual(inst.relationship.coding[1].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "+33 (237) 998327")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | 8,045,643,496,570,569,000 | 47.734694 | 95 | 0.664294 | false | 3.422838 | true | false | false |
peterayeni/django-smsgateway | smsgateway/south_migrations/0006_auto__add_field_queuedsms_using.py | 1 | 2515 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QueuedSMS.using'
db.add_column('smsgateway_queuedsms', 'using', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'QueuedSMS.using'
db.delete_column('smsgateway_queuedsms', 'using')
models = {
'smsgateway.queuedsms': {
'Meta': {'ordering': "('priority', 'created')", 'object_name': 'QueuedSMS'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'2'", 'max_length': '1'}),
'reliable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'signature': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'to': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'using': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'smsgateway.sms': {
'Meta': {'ordering': "('sent',)", 'object_name': 'SMS'},
'backend': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '32', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'direction': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'gateway': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gateway_ref': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operator': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'to': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['smsgateway']
| bsd-3-clause | 799,461,428,937,790,200 | 51.395833 | 160 | 0.561829 | false | 3.703976 | false | false | false |
Zokol/The-Great-Dalmuti | dalmut.py | 1 | 8497 |
import random
"""
THE GREAT DALMUTI
Heikki "Zokol" Juva 2015 - [email protected]
"""
## Exception raised when all players have skipped the round
class SkipException(Exception):
pass
class RestartRound(Exception):
pass
class Card:
def __init__(self, value):
self.value = value
def __repr__(self):
return "Card: " + str(self.value)
def __str__(self):
return str(self.value)
class Player:
def __init__(self, name):
self.name = name
self.hand = []
self.position = "TBD"
self.stats = {"Dalmut": [], "Trader": [], "Slave": []}
def __str__(self):
card_list = []
for card in self.hand:
card_list.append(card.__str__())
return str(self.name) + " " + self.position + " : " + ", ".join(card_list)
def sort_hand(self):
self.hand.sort(key=lambda card: card.value, reverse=True)
def receive_card(self, card):
self.hand.append(card)
self.sort_hand()
def take_card(self, id):
return self.hand.pop(id)
def take_highest_card(self):
self.sort_hand()
return self.take_card(0)
def take_lowest_card(self):
self.sort_hand()
return self.take_card(len(self.hand)-1)
def count_cards(self, order):
return len([card for card in self.hand if card.value == order])
# Return those cards that player has many and those that are as low number as possible
def take_best_cards(self, limit, count):
self.sort_hand()
best = [-1, -1] # First is the card order, second is the 'point-value'
if limit > self.hand[0].value:
highest_card = self.hand[0].value + 1
else:
highest_card = limit
#print(higest_card)
#print(self.count_cards(higest_card))
for i in reversed(range(highest_card)):
if count == -1:
points = self.count_cards(i) * i
if best[1] < points:
best[0] = i
best[1] = points
elif self.count_cards(i) == count:
best[0] = i
break
if best[0] == -1: raise SkipException # No cards -> skip
picked_cards = [card for card in self.hand if card.value == best[0]]
if count != -1: picked_cards = picked_cards[:count]
self.hand = [card for card in self.hand if card not in picked_cards]
self.sort_hand()
return picked_cards
def play_hand(self, table):
if len(table) > 0:
count = len(table[-1])
limit = table[-1][0].value
else:
count = -1
limit = 99
return self.take_best_cards(limit, count)
def empty_hand(self):
self.hand = []
class Stack:
def __init__(self, largest_number):
self.stack = []
for value in range(1, largest_number + 1):
for i in range(value):
self.stack.append(Card(value))
def __str__(self):
card_list = []
for card in self.stack:
card_list.append(card.__str__())
return ", ".join(card_list)
def __len__(self):
return len(self.stack)
def shuffle(self):
random.shuffle(self.stack)
def lift_top_card(self):
return self.stack.pop(0)
def add(self, card):
self.stack.append(card)
class Game:
def __init__(self, number_of_players, number_of_games, number_of_rounds):
self.table = []
self.players = []
for p in range(number_of_players):
self.players.append(Player("Player " + str(p)))
self.reset_stack()
# Determine initial position for players
# Each player lifts one card from stack
# Lowest card holder is the Great Dalmut
# Highest card holder is the slave
# Everyone in between are traders
self.initial_pos()
print("Intial position for players determined")
self.print_players()
# Main loop
#starting_player = self.players[0]
for i in range(number_of_games):
self.reset_stack()
self.play_game(self.players, number_of_rounds)
#self.order_players(starting_player)
print("Game over")
print("RESULTS:")
self.print_stats()
def reset_stack(self):
self.empty_players_hands()
# Create stack
self.stack = Stack(12) # Create stack with the highest number being 12
print("Number of cards:", len(self.stack))
print("Stack")
print(self.stack)
print("-----------------------")
print("")
# Shuffle stack
print("Stack shuffled")
self.stack.shuffle()
print(self.stack)
print("-----------------------")
print("")
def play_game(self, playing_order, number_of_rounds):
print("-----------------------")
print("")
print("Cards dealt")
self.deal_cards()
self.print_players()
print("-----------------------")
print("")
round_i = 0
while round_i < number_of_rounds:
round_i += 1
print("Play round", round_i)
#print(playing_order)
playing_order = self.play_round(playing_order)
#print(playing_order)
playing_order[0].stats["Dalmut"].append(round_i)
for player in playing_order[1: -1]:
player.stats["Trader"].append(round_i)
playing_order[-1].stats["Slave"].append(round_i)
print("Players card count:", self.count_player_cards(playing_order))
self.empty_table()
self.deal_cards()
print("Players card count:", self.count_player_cards(playing_order))
#if not new_order[0].hand: return new_order #XXX ????
self.table = []
self.print_players()
self.print_stats()
def print_players(self):
for p in self.players:
print(p)
def print_stats(self):
for p in self.players:
print (p.name, "Dalmut:", len(p.stats["Dalmut"]), "Trader:", len(p.stats["Trader"]), "Slave:", len(p.stats["Slave"]))
def print_table(self):
top_cards = self.table[-1]
print(str(len(top_cards)), "x", top_cards[0], "on the table")
def initial_pos(self):
for player in self.players:
if len(self.stack) > 0: player.receive_card(self.stack.lift_top_card())
else: print("Too small stack to deal, not enough cards for everyone")
self.players.sort(key = lambda player: player.hand[0].value)
for player in self.players:
player.position = "Trader"
player.stats["Trader"].append(0)
self.players[0].position = "Dalmut"
self.players[-1].position = "Slave"
self.players[0].stats["Dalmut"].append(0)
self.players[-1].stats["Slave"].append(0)
def deal_cards(self):
print("Number of cards in stack:", len(self.stack))
card_id = 0
while card_id < len(self.stack):
for player in self.players:
player.receive_card(self.stack.lift_top_card())
card_id += 1
def count_player_cards(self, players):
total = 0
for player in players:
total += len(player.hand)
return total
def empty_players_hands(self):
for player in self.players:
player.empty_hand()
def empty_table(self):
card_count = 0
for cards in self.table:
for card in cards:
card_count += len(cards)
self.stack.add(cards.pop(cards.index(card)))
print("Number of cards on table", card_count)
self.table = []
def play_round(self, players):
#starting_index = self.players.index(starting_player)
#transposed_players = self.players[starting_index:] + self.players[:starting_index]
new_order = []
skip_counter = 0
new_dalmut = False
while True:
try:
for player in players:
if skip_counter == len(players) - 1:
#return player
## Every other player skipped, transpose player-list to let current player to start the next round
starting_index = self.players.index(player)
transposed_players = self.players[starting_index:] + self.players[:starting_index]
players = transposed_players
skip_counter = 0
self.empty_table()
raise RestartRound
try:
#print(player)
## If someone runs out of cards, here we determine who gets which position for the next game
"""
print("Hand empty:", not player.hand)
print("Player finished:", player in new_order)
print("Is new dalmut found:", new_dalmut)
"""
if player in new_order:
pass
elif not player.hand and not new_dalmut:
#print("New Dalmut found!!")
new_order.append(player) # First player runs out of cards
new_dalmut = True
elif not player.hand and new_dalmut and len(players) - 1 > len(new_order):
new_order.append(player) # Player runs out of cards, who is not the first and not the last
elif not player.hand and len(players) - 1 == len(new_order): # Last player runs out of cards
new_order.append(player)
#print("NEW ORDER:", new_order)
return new_order
else:
self.table.append(player.play_hand(self.table)) ## Let the next playr to play the hand and place it on the table
self.print_table()
#skip_counter = 0
except SkipException:
print("Skip")
skip_counter += 1
except RestartRound:
print("Restarting round with new order")
pass
if __name__ == '__main__':
	game = Game(10, 3, 900)
| mit | -4,180,557,527,489,499,000 | 26.237179 | 120 | 0.645993 | false | 3.007788 | false | false | false
hpd/MitsubaForMaya | plug-ins/mitsuba/volumes/volume.py | 1 | 2498 |
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeName = "MitsubaVolume"
kPluginNodeClassify = "shader/volume"
kPluginNodeId = OpenMaya.MTypeId(0x87033)
class volume(OpenMayaMPx.MPxNode):
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
mSourcefile = OpenMaya.MObject()
mGridDims = OpenMaya.MObject()
mOutColor = OpenMaya.MObject()
def compute(self, plug, block):
if plug == volume.mOutColor:
resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
outColorHandle = block.outputValue( volume.mOutColor )
outColorHandle.setMFloatVector(resultColor)
outColorHandle.setClean()
else:
return OpenMaya.kUnknownParameter
def nodeCreator():
return volume()
def nodeInitializer():
nAttr = OpenMaya.MFnNumericAttribute()
try:
volume.mSourcefile = nAttr.createColor("sourceFile", "sf")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
#nAttr.setDefault(50,50,50)
volume.mGridDims = nAttr.create("gridDimensions", "gd", OpenMaya.MFnNumericData.k3Float)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
volume.mOutColor = nAttr.createColor("outColor", "oc")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
except:
sys.stderr.write("Failed to create attributes\n")
raise
try:
volume.addAttribute(volume.mSourcefile)
volume.addAttribute(volume.mGridDims)
volume.addAttribute(volume.mOutColor)
except:
sys.stderr.write("Failed to add attributes\n")
raise
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
raise
| mit | -3,909,570,042,202,730,500 | 29.463415 | 96 | 0.658927 | false | 3.380244 | false | false | false |
awwong1/CMPUT404LAB9_W2016 | iguana/iguana/urls.py | 1 | 1469 |
"""iguana URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework import routers
from rest_framework.authtoken import views as authtoken_views
from quickstart import views as quickstart_views
from client import views as client_views
router = routers.DefaultRouter()
router.register(r'users', quickstart_views.UserViewSet)
router.register(r'groups', quickstart_views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', authtoken_views.obtain_auth_token),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url(r'^', client_views.example_view, name="index")
]
| mit | 7,628,134,095,978,440,000 | 38.702703 | 83 | 0.724983 | false | 3.514354 | false | false | false |
dborzov/practicin | 67-binary-heap/solution.py | 1 | 1403 |
class Heap:
def __init__(self):
self.bh = []
def pop(self):
if len(self.bh)==0: raise StandardError('No more elements in the heap')
if len(self.bh)==1:
return self.bh.pop()
return_value, self.bh[0] = self.bh[0], self.bh[-1]
self.bh = self.bh[:len(self.bh)-1]
cur = 0
while True:
left, right = cur*2+1, cur*2+2
get_value = lambda x:self.bh[x] if x<len(self.bh) else None
top_element = max([left, right], key=get_value)
print "Stack:", self.bh
print "Left:{}, right:{}, top element:{}".format(left, right, top_element)
if (get_value(top_element) is None) or (self.bh[top_element] < self.bh[cur]):
return return_value
self.bh[cur], self.bh[top_element] = self.bh[top_element], self.bh[cur]
cur = top_element
def bubble_up(self,cur):
while cur!=0:
parent=(cur-1)//2
if self.bh[parent]>self.bh[cur]:
return
self.bh[parent], self.bh[cur] = self.bh[cur], self.bh[parent]
cur=parent
def add(self, new_value):
self.bh.append(new_value)
self.bubble_up(len(self.bh)-1)
print 'We added {}, and now stack is {}'.format(new_value, self.bh)
new_one = Heap()
new_one.add(3)
new_one.add(2)
new_one.add(12)
new_one.add(9)
print 'Pop: ', new_one.pop()
print 'Pop: ', new_one.pop()
print 'Pop: ', new_one.pop()
| mit | -2,426,201,212,558,269,400 | 30.177778 | 85 | 0.570207 | false | 2.840081 | false | false | false |
sajuptpm/neutron-ipam | neutron/tests/unit/bigswitch/test_capabilities.py | 1 | 2608 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author Kevin Benton
from contextlib import nested
import mock
from neutron.tests.unit.bigswitch import test_router_db
PLUGIN = 'neutron.plugins.bigswitch.plugin'
SERVERMANAGER = PLUGIN + '.servermanager'
SERVERPOOL = SERVERMANAGER + '.ServerPool'
SERVERRESTCALL = SERVERMANAGER + '.ServerProxy.rest_call'
class CapabilitiesTests(test_router_db.RouterDBTestCase):
def test_floating_ip_capability(self):
with nested(
mock.patch(SERVERRESTCALL,
return_value=(200, None, '["floatingip"]', None)),
mock.patch(SERVERPOOL + '.rest_create_floatingip',
return_value=(200, None, None, None)),
mock.patch(SERVERPOOL + '.rest_delete_floatingip',
return_value=(200, None, None, None))
) as (mock_rest, mock_create, mock_delete):
with self.floatingip_with_assoc() as fip:
pass
mock_create.assert_has_calls(
[mock.call(fip['floatingip']['tenant_id'], fip['floatingip'])]
)
mock_delete.assert_has_calls(
[mock.call(fip['floatingip']['tenant_id'],
fip['floatingip']['id'])]
)
def test_floating_ip_capability_neg(self):
with nested(
mock.patch(SERVERRESTCALL,
return_value=(200, None, '[""]', None)),
mock.patch(SERVERPOOL + '.rest_update_network',
return_value=(200, None, None, None))
) as (mock_rest, mock_netupdate):
with self.floatingip_with_assoc() as fip:
pass
updates = [call[0][2]['floatingips']
for call in mock_netupdate.call_args_list]
all_floats = [f['floating_ip_address']
for floats in updates for f in floats]
self.assertIn(fip['floatingip']['floating_ip_address'], all_floats)
| apache-2.0 | -2,216,588,186,026,200,800 | 39.123077 | 79 | 0.61273 | false | 4.126582 | true | false | false |
fastavro/fastavro | fastavro/_write_py.py | 1 | 22640 |
# cython: auto_cpdef=True
"""Python code for writing AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
import json
from io import BytesIO
from os import urandom, SEEK_SET
import bz2
import lzma
import zlib
from .const import NAMED_TYPES
from .io.binary_encoder import BinaryEncoder
from .io.json_encoder import AvroJSONEncoder
from .validation import _validate
from .read import HEADER_SCHEMA, SYNC_SIZE, MAGIC, reader
from .logical_writers import LOGICAL_WRITERS
from .schema import extract_record_type, extract_logical_type, parse_schema
from ._write_common import _is_appendable
def write_null(encoder, datum, schema, named_schemas, fname):
"""null is written as zero bytes"""
encoder.write_null()
def write_boolean(encoder, datum, schema, named_schemas, fname):
"""A boolean is written as a single byte whose value is either 0 (false) or
1 (true)."""
encoder.write_boolean(datum)
def write_int(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_int(datum)
def write_long(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_long(datum)
def write_float(encoder, datum, schema, named_schemas, fname):
"""A float is written as 4 bytes. The float is converted into a 32-bit
integer using a method equivalent to Java's floatToIntBits and then encoded
in little-endian format."""
encoder.write_float(datum)
def write_double(encoder, datum, schema, named_schemas, fname):
"""A double is written as 8 bytes. The double is converted into a 64-bit
integer using a method equivalent to Java's doubleToLongBits and then
encoded in little-endian format."""
encoder.write_double(datum)
def write_bytes(encoder, datum, schema, named_schemas, fname):
"""Bytes are encoded as a long followed by that many bytes of data."""
encoder.write_bytes(datum)
def write_utf8(encoder, datum, schema, named_schemas, fname):
"""A string is encoded as a long followed by that many bytes of UTF-8
encoded character data."""
encoder.write_utf8(datum)
def write_crc32(encoder, datum):
"""A 4-byte, big-endian CRC32 checksum"""
encoder.write_crc32(datum)
def write_fixed(encoder, datum, schema, named_schemas, fname):
"""Fixed instances are encoded using the number of bytes declared in the
schema."""
if len(datum) != schema["size"]:
raise ValueError(
f"data of length {len(datum)} does not match schema size: {schema}"
)
encoder.write_fixed(datum)
def write_enum(encoder, datum, schema, named_schemas, fname):
"""An enum is encoded by a int, representing the zero-based position of
the symbol in the schema."""
index = schema["symbols"].index(datum)
encoder.write_enum(index)
def write_array(encoder, datum, schema, named_schemas, fname):
"""Arrays are encoded as a series of blocks.
Each block consists of a long count value, followed by that many array
items. A block with count zero indicates the end of the array. Each item
is encoded per the array's item schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
encoder.write_array_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
dtype = schema["items"]
for item in datum:
write_data(encoder, item, dtype, named_schemas, fname)
encoder.end_item()
encoder.write_array_end()
def write_map(encoder, datum, schema, named_schemas, fname):
"""Maps are encoded as a series of blocks.
Each block consists of a long count value, followed by that many key/value
pairs. A block with count zero indicates the end of the map. Each item is
encoded per the map's value schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
encoder.write_map_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
vtype = schema["values"]
for key, val in datum.items():
encoder.write_utf8(key)
write_data(encoder, val, vtype, named_schemas, fname)
encoder.write_map_end()
def write_union(encoder, datum, schema, named_schemas, fname):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value. The value
is then encoded per the indicated schema within the union."""
best_match_index = -1
if isinstance(datum, tuple):
(name, datum) = datum
for index, candidate in enumerate(schema):
extracted_type = extract_record_type(candidate)
if extracted_type in NAMED_TYPES:
schema_name = candidate["name"]
else:
schema_name = extracted_type
if name == schema_name:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
msg = (
f"provided union type name {name} not found in schema "
+ f"{schema} {field}"
)
raise ValueError(msg)
index = best_match_index
else:
pytype = type(datum)
most_fields = -1
# All of Python's floating point values are doubles, so to
# avoid loss of precision, we should always prefer 'double'
# if we are forced to choose between float and double.
#
# If 'double' comes before 'float' in the union, then we'll immediately
# choose it, and don't need to worry. But if 'float' comes before
# 'double', we don't want to pick it.
#
# So, if we ever see 'float', we skim through the rest of the options,
# just to see if 'double' is a possibility, because we'd prefer it.
could_be_float = False
for index, candidate in enumerate(schema):
if could_be_float:
if extract_record_type(candidate) == "double":
best_match_index = index
break
else:
# Nothing except "double" is even worth considering.
continue
if _validate(datum, candidate, named_schemas, raise_errors=False):
record_type = extract_record_type(candidate)
if record_type == "record":
logical_type = extract_logical_type(candidate)
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, candidate)
candidate_fields = set(f["name"] for f in candidate["fields"])
datum_fields = set(datum)
fields = len(candidate_fields.intersection(datum_fields))
if fields > most_fields:
best_match_index = index
most_fields = fields
elif record_type == "float":
best_match_index = index
# Continue in the loop, because it's possible that there's
# another candidate which has record type 'double'
could_be_float = True
else:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
raise ValueError(
f"{repr(datum)} (type {pytype}) do not match {schema} {field}"
)
index = best_match_index
# write data
# TODO: There should be a way to give just the index
encoder.write_index(index, schema[index])
write_data(encoder, datum, schema[index], named_schemas, fname)
def write_record(encoder, datum, schema, named_schemas, fname):
"""A record is encoded by encoding the values of its fields in the order
that they are declared. In other words, a record is encoded as just the
concatenation of the encodings of its fields. Field values are encoded per
their schema."""
for field in schema["fields"]:
name = field["name"]
if name not in datum and "default" not in field and "null" not in field["type"]:
raise ValueError(f"no value and no default for {name}")
write_data(
encoder,
datum.get(name, field.get("default")),
field["type"],
named_schemas,
name,
)
WRITERS = {
"null": write_null,
"boolean": write_boolean,
"string": write_utf8,
"int": write_int,
"long": write_long,
"float": write_float,
"double": write_double,
"bytes": write_bytes,
"fixed": write_fixed,
"enum": write_enum,
"array": write_array,
"map": write_map,
"union": write_union,
"error_union": write_union,
"record": write_record,
"error": write_record,
}
def write_data(encoder, datum, schema, named_schemas, fname):
"""Write a datum of data to output stream.
Paramaters
----------
encoder: encoder
Type of encoder (e.g. binary or json)
datum: object
Data to write
schema: dict
Schemda to use
named_schemas: dict
Mapping of fullname to schema definition
"""
record_type = extract_record_type(schema)
logical_type = extract_logical_type(schema)
fn = WRITERS.get(record_type)
if fn:
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
try:
return fn(encoder, datum, schema, named_schemas, fname)
except TypeError as ex:
if fname:
raise TypeError(f"{ex} on field {fname}")
raise
else:
return write_data(encoder, datum, named_schemas[record_type], named_schemas, "")
def write_header(encoder, metadata, sync_marker):
header = {
"magic": MAGIC,
"meta": {key: value.encode() for key, value in metadata.items()},
"sync": sync_marker,
}
write_data(encoder, header, HEADER_SCHEMA, {}, "")
def null_write_block(encoder, block_bytes, compression_level):
"""Write block in "null" codec."""
encoder.write_long(len(block_bytes))
encoder._fo.write(block_bytes)
def deflate_write_block(encoder, block_bytes, compression_level):
"""Write block in "deflate" codec."""
# The first two characters and last character are zlib
# wrappers around deflate data.
if compression_level is not None:
data = zlib.compress(block_bytes, compression_level)[2:-1]
else:
data = zlib.compress(block_bytes)[2:-1]
encoder.write_long(len(data))
encoder._fo.write(data)
def bzip2_write_block(encoder, block_bytes, compression_level):
"""Write block in "bzip2" codec."""
data = bz2.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
def xz_write_block(encoder, block_bytes, compression_level):
"""Write block in "xz" codec."""
data = lzma.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
BLOCK_WRITERS = {
"null": null_write_block,
"deflate": deflate_write_block,
"bzip2": bzip2_write_block,
"xz": xz_write_block,
}
def _missing_codec_lib(codec, library):
def missing(encoder, block_bytes, compression_level):
raise ValueError(
f"{codec} codec is supported but you need to install {library}"
)
return missing
def snappy_write_block(encoder, block_bytes, compression_level):
"""Write block in "snappy" codec."""
data = snappy.compress(block_bytes)
encoder.write_long(len(data) + 4) # for CRC
encoder._fo.write(data)
encoder.write_crc32(block_bytes)
try:
import snappy
except ImportError:
BLOCK_WRITERS["snappy"] = _missing_codec_lib("snappy", "python-snappy")
else:
BLOCK_WRITERS["snappy"] = snappy_write_block
def zstandard_write_block(encoder, block_bytes, compression_level):
"""Write block in "zstandard" codec."""
data = zstd.ZstdCompressor().compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import zstandard as zstd
except ImportError:
BLOCK_WRITERS["zstandard"] = _missing_codec_lib("zstandard", "zstandard")
else:
BLOCK_WRITERS["zstandard"] = zstandard_write_block
def lz4_write_block(encoder, block_bytes, compression_level):
"""Write block in "lz4" codec."""
data = lz4.block.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import lz4.block
except ImportError:
BLOCK_WRITERS["lz4"] = _missing_codec_lib("lz4", "lz4")
else:
BLOCK_WRITERS["lz4"] = lz4_write_block
class GenericWriter:
def __init__(self, schema, metadata=None, validator=None):
self._named_schemas = {}
self.schema = parse_schema(schema, self._named_schemas)
self.validate_fn = _validate if validator is True else validator
self.metadata = metadata or {}
if isinstance(schema, dict):
schema = {
key: value
for key, value in schema.items()
if key not in ("__fastavro_parsed", "__named_schemas")
}
elif isinstance(schema, list):
schemas = []
for s in schema:
if isinstance(s, dict):
schemas.append(
{
key: value
for key, value in s.items()
if key
not in (
"__fastavro_parsed",
"__named_schemas",
)
}
)
else:
schemas.append(s)
schema = schemas
self.metadata["avro.schema"] = json.dumps(schema)
class Writer(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.metadata["avro.codec"] = codec
if isinstance(fo, BinaryEncoder):
self.encoder = fo
else:
self.encoder = BinaryEncoder(fo)
self.io = BinaryEncoder(BytesIO())
self.block_count = 0
self.sync_interval = sync_interval
self.compression_level = compression_level
if _is_appendable(self.encoder._fo):
# Seed to the beginning to read the header
self.encoder._fo.seek(0)
avro_reader = reader(self.encoder._fo)
header = avro_reader._header
file_writer_schema = parse_schema(avro_reader.writer_schema)
if self.schema != file_writer_schema:
raise ValueError(
f"Provided schema {self.schema} does not match "
+ f"file writer_schema {file_writer_schema}"
)
codec = avro_reader.metadata.get("avro.codec", "null")
self.sync_marker = header["sync"]
# Seek to the end of the file
self.encoder._fo.seek(0, 2)
self.block_writer = BLOCK_WRITERS[codec]
else:
self.sync_marker = sync_marker or urandom(SYNC_SIZE)
try:
self.block_writer = BLOCK_WRITERS[codec]
except KeyError:
raise ValueError(f"unrecognized codec: {codec}")
write_header(self.encoder, self.metadata, self.sync_marker)
def dump(self):
self.encoder.write_long(self.block_count)
self.block_writer(self.encoder, self.io._fo.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
self.io._fo.truncate(0)
self.io._fo.seek(0, SEEK_SET)
self.block_count = 0
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.io, record, self.schema, self._named_schemas, "")
self.block_count += 1
if self.io._fo.tell() >= self.sync_interval:
self.dump()
def write_block(self, block):
# Clear existing block if there are any records pending
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder.write_long(block.num_records)
self.block_writer(self.encoder, block.bytes_.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
def flush(self):
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder._fo.flush()
class JSONWriter(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.encoder = fo
self.encoder.configure(self.schema, self._named_schemas)
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.encoder, record, self.schema, self._named_schemas, "")
def flush(self):
self.encoder.flush()
def writer(
fo,
schema,
records,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
schema: dict
Writer schema
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
        Validator function. If None (the default) - no validation. If True,
        then fastavro.validation.validate will be used. If it's a function, it
        should have the same signature as fastavro.writer.validate and raise an
        exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
codec_compression_level: int, optional
Compression level to use with the specified codec (if the codec
supports it)
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
The `fo` argument is a file-like object so another common example usage
would use an `io.BytesIO` object like so::
from io import BytesIO
from fastavro import writer
fo = BytesIO()
writer(fo, schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
if isinstance(fo, AvroJSONEncoder):
writer_class = JSONWriter
else:
# Assume a binary IO if an encoder isn't given
writer_class = Writer
fo = BinaryEncoder(fo)
output = writer_class(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
codec_compression_level,
)
for record in records:
output.write(record)
output.flush()
def schemaless_writer(fo, schema, record):
"""Write a single record without the schema or header information
Parameters
----------
fo: file-like
Output file
schema: dict
Schema
record: dict
Record to write
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file', 'rb') as fp:
fastavro.schemaless_writer(fp, parsed_schema, record)
Note: The ``schemaless_writer`` can only write a single record.
"""
named_schemas = {}
schema = parse_schema(schema, named_schemas)
encoder = BinaryEncoder(fo)
write_data(encoder, record, schema, named_schemas, "")
encoder.flush()
| mit | 5,944,205,814,396,213,000 | 31.906977 | 88 | 0.60371 | false | 4.008499 | false | false | false |
songmonit/CTTMSONLINE_V8 | openerp/release.py | 1 | 2596 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
RELEASE_LEVELS = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL] = ['alpha', 'beta', 'candidate', 'final']
RELEASE_LEVELS_DISPLAY = {ALPHA: ALPHA,
BETA: BETA,
RELEASE_CANDIDATE: 'rc',
FINAL: ''}
# version_info format: (MAJOR, MINOR, MICRO, RELEASE_LEVEL, SERIAL)
# inspired by Python's own sys.version_info, in order to be
# properly comparable using normal operators, for example:
# (6,1,0,'beta',0) < (6,1,0,'candidate',1) < (6,1,0,'candidate',2)
# (6,1,0,'candidate',2) < (6,1,0,'final',0) < (6,1,2,'final',0)
version_info = (2, 8, 0, BETA, 0)
version = '.'.join(map(str, version_info[:2])) + RELEASE_LEVELS_DISPLAY[version_info[3]] + str(version_info[4] or '')
series = serie = major_version = '.'.join(map(str, version_info[:2]))
product_name = 'CTTMS'
description = 'CTTMS Server'
long_desc = '''CTTMS is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU Affero General Public License v3
Programming Language :: Python
"""
url = 'https://www.cttms.com'
author = 'OpenERP S.A.'
author_email = '[email protected]'
license = 'AGPL-3'
nt_service_name = "CTTMS-server-" + series
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,346,447,488,891,795,000 | 45.357143 | 117 | 0.645223 | false | 3.556164 | false | false | false |
orcmkit/ORCmKit | Python27/ORCSim/LiquidReceiver.py | 1 | 4981 |
from __future__ import division
from CoolProp.CoolProp import PropsSI
import pylab
from ACHPTools import Write2CSV
from matplotlib.pyplot import plot, show, figure, semilogy, xlim, ylim, title, xlabel, ylabel, legend
from math import pi,exp,log,sqrt,tan,cos,sin
from scipy.optimize import brentq
from scipy.constants import g
import numpy as np
from PHEX_ASME2015 import PHEHXClass
from LineSet import LineSetClass
class LiquidReceiverClass():
"Create Refrigerant buffer tank class"
def __init__(self,**kwargs):
#Load up the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
self.Condenser=PHEHXClass()
def Update(self,**kwargs):
#Update the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
def OutputList(self):
"""
Return a list of parameters for this component for further output
It is a list of tuples, and each tuple is formed of items with indices:
[0] Description of value
[1] Units of value
[2] The value itself
"""
return [
('Liquid Receiver Total Volume','m3',self.Volume_tank),
('Liquid Receiver Total Charge','Kg',self.Charge_Tank),
('Inlet Temperature','K',self.Tin),
('Outlet Temperature','K',self.Tout),
('Inlet Pressure','kPa',self.pin),
('Inlet Density', 'kg/m3',self.rho_in),
('Outlet Pressure','kPa',self.pout)
]
def Calculate(self):
"""
		The liquid receiver acts as a damper in the cycle, absorbing the mass flow rate
fluctuations. More concretely, a different explanation can be given.
When the liquid receiver gets subcooled or saturated liquid at its top, it can be assumed to be
in thermodynamic equilibrium at each time, because liquid and vapor have the same pressure when
they enter it (indeed, if the reservoir isn't full, the vapor contained in it must be saturated, as it is in
presence of liquid). In the inferior part of the tank, the mix of saturated and subcooled liquid (already
present) allows the working fluid to exit it in a subcooled liquid state. The saturation pressure and
temperature then reign then in the superior part of the reservoir. Thus, with this component, the
		charge fluctuations are literally absorbed, put to an equilibrium value, and the subcooling becomes
null (this fact can't be stated in the presence of non-condensable gases).
level = (h_v_sat - h)/(h_v_sat - h_l_sat)*(rho/rho_l_sat)
"""
# Density [kg/m^3]
self.rho_in=PropsSI('D','T',self.Tin, 'P', self.pin*1000+100, self.Ref)
#Static pressure (rho*g*h) between inlet and outlet of the tank"
self.pout=self.pin #+ (self.rho_in*g*self.h_ports)/1000
# print 'LiquidReceiver.pout', self.pout
self.Tout = self.Tin #no temperature gradient is observed in the reservoir.
self.hin = PropsSI('H','T',self.Tin,'P',self.pin*1000+100,self.Ref) #J/kg
"""
"Calculations"
"x_ex_tank=0" "due to the presence of non condensable gas (air, due to leakage) in the working fluid,
"the liquid at the exit of the tank is not saturated..."
#h_su_tank=h_ex_cd
#V_ex_tank = m_dot/rho_ex_tank "Check V_dot_su_pump at the beginning of the file!!"
"""
self.hout = PropsSI('H','T',self.Tout, 'P', self.pout*1000+100, self.Ref) #J/kg
#print 'LiquidReceiver.hout', self.hout
self.sout = PropsSI('S','T',self.Tout, 'P', self.pout*1000+100, self.Ref) #J/kg
#Calculate saturated values
#Charge of the tank [kg]
"""
The tank is characterized by an internal diameter and heigth (ID,h)
and by the maximum level of refrigerant inside
"""
self.Volume_tank = pi*self.ID**2/4.0*self.h_receiver
self.Charge_Tank = self.Volume_tank * self.rho_in
#self.Volume_ref = self.Charge_Tank/self.LiquidReceiver.rho_in
if __name__=='__main__':
pin_list=[527.374817]
Tin_list=[15.48]
zip(pin_list,Tin_list)
for pin,Tin in zip(pin_list,Tin_list):
kwds={
'Ref':'R134A',
'pin':pin,
'Tin':Tin+273.15,
'ID':0.3,
'h_receiver': 1,
'h_ports':0.5
}
LiquidReceiver=LiquidReceiverClass(**kwds)
LiquidReceiver.Calculate()
print 'Charge [kg]',LiquidReceiver.Charge_Tank
print 'pin [kPa]', LiquidReceiver.pin
print 'pout [kPa]',LiquidReceiver.pout
		print 'Receiver Volume [cm3]', LiquidReceiver.Volume_tank*1e6
| mit | 3,989,023,316,795,235,300 | 37.323077 | 116 | 0.598474 | false | 3.552782 | false | false | false
jar3k/django-model-options | model_options/mixins.py | 1 | 1737 |
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, IntegrityError, transaction
from .utils import detect_type
from .models import Option
class OptionsMixin(models.Model):
options = GenericRelation(Option)
class Meta:
abstract = True
def delete_option(self, key):
self.options.get(key=key).delete()
def get_option(self, key, default=None):
try:
option = self.options.get(key=key)
return detect_type(option.value)
except Option.DoesNotExist:
return default
def has_option(self, key):
return bool(self.options.filter(key=key).exists())
def set_option(self, key, value=True):
try:
with transaction.atomic():
self.options.create(key=key, value=value)
except IntegrityError:
option = self.options.get(key=key)
option.value = value
option.save()
class CachedOptionsMixin(object):
@property
def cache_key_prefix(self):
return "{}-{}".format(self._meta.app_label, self._meta.model_name)
def delete_option(self, key):
cache.delete(self._get_cache_key(key))
def get_option(self, key, default=None):
option = self._get_option(key)
return detect_type(option) if option else default
def has_option(self, key):
return bool(self._get_option(key))
def set_option(self, key, value=True):
cache.set(self._get_cache_key(key), value)
def _get_cache_key(self, key):
return "{}-{}".format(self.cache_key_prefix, key)
def _get_option(self, key):
return cache.get(self._get_cache_key(key))
| mit | -4,955,214,051,976,959,000 | 27.016129 | 74 | 0.633851 | false | 3.817582 | false | false | false |
ericholscher/djangoembed | oembed/views.py | 1 | 4618 | import re
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, get_resolver
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.template import defaultfilters
from django.utils import simplejson
from django.utils.encoding import smart_str
import oembed
from oembed.consumer import OEmbedConsumer
from oembed.exceptions import OEmbedException, OEmbedMissingEndpoint
from oembed.providers import DjangoProvider, HTTPProvider
resolver = get_resolver(None)
def json(request, *args, **kwargs):
"""
The oembed endpoint, or the url to which requests for metadata are passed.
Third parties will want to access this view with URLs for your site's
content and be returned OEmbed metadata.
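    Example request (the URL pattern, domain, and content URL below are hypothetical):
        GET /oembed/json/?url=http://example.com/blog/2010/05/01/some-post/&maxwidth=400
    The response is an application/json document with OEmbed metadata for that URL,
    or a JSONP response if a ?callback= parameter is supplied.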
"""
# coerce to dictionary
params = dict(request.GET.items())
callback = params.pop('callback', None)
url = params.pop('url', None)
if not url:
return HttpResponseBadRequest('Required parameter missing: URL')
try:
provider = oembed.site.provider_for_url(url)
if not provider.provides:
raise OEmbedMissingEndpoint()
except OEmbedMissingEndpoint:
raise Http404('No provider found for %s' % url)
query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v])
try:
resource = oembed.site.embed(url, **query)
    except OEmbedException as e:
raise Http404('Error embedding %s: %s' % (url, str(e)))
response = HttpResponse(mimetype='application/json')
json = resource.json
if callback:
response.write('%s(%s)' % (defaultfilters.force_escape(callback), json))
else:
response.write(json)
return response
def consume_json(request):
"""
Extract and return oembed content for given urls.
Required GET params:
urls - list of urls to consume
Optional GET params:
width - maxwidth attribute for oembed content
height - maxheight attribute for oembed content
template_dir - template_dir to use when rendering oembed
Returns:
list of dictionaries with oembed metadata and renderings, json encoded
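    Example request (the URL pattern and content URLs below are hypothetical):
        GET /oembed/consume/?urls=http://example.com/video/1&urls=http://example.com/photo/2&width=400
    The response maps each URL to its oembed lookup and rendered markup, e.g.
        {"http://example.com/video/1": {"oembeds": "...", "rendered": "<...>"}, ...}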
"""
client = OEmbedConsumer()
urls = request.GET.getlist('urls')
width = request.GET.get('width')
height = request.GET.get('height')
template_dir = request.GET.get('template_dir')
output = {}
for url in urls:
try:
provider = oembed.site.provider_for_url(url)
except OEmbedMissingEndpoint:
oembeds = None
rendered = None
else:
oembeds = url
rendered = client.parse_text(url, width, height, template_dir=template_dir)
output[url] = {
'oembeds': oembeds,
'rendered': rendered,
}
return HttpResponse(simplejson.dumps(output), mimetype='application/json')
def oembed_schema(request):
"""
A site profile detailing valid endpoints for a given domain. Allows for
better auto-discovery of embeddable content.
OEmbed-able content lives at a URL that maps to a provider.
"""
current_domain = Site.objects.get_current().domain
url_schemes = [] # a list of dictionaries for all the urls we can match
endpoint = reverse('oembed_json') # the public endpoint for our oembeds
providers = oembed.site.get_providers()
for provider in providers:
# first make sure this provider class is exposed at the public endpoint
if not provider.provides:
continue
match = None
if isinstance(provider, DjangoProvider):
            # django providers define their regex_list by using URL reversing
url_pattern = resolver.reverse_dict.get(provider._meta.named_view)
# this regex replacement is set to be non-greedy, which results
# in things like /news/*/*/*/*/ -- this is more explicit
if url_pattern:
regex = re.sub(r'%\(.+?\)s', '*', url_pattern[0][0][0])
match = 'http://%s/%s' % (current_domain, regex)
elif isinstance(provider, HTTPProvider):
match = provider.url_scheme
else:
match = provider.regex
if match:
url_schemes.append({
'type': provider.resource_type,
'matches': match,
'endpoint': endpoint
})
url_schemes.sort(key=lambda item: item['matches'])
response = HttpResponse(mimetype='application/json')
response.write(simplejson.dumps(url_schemes))
return response
| mit | -8,876,648,113,996,773,000 | 31.293706 | 87 | 0.64097 | false | 4.32397 | false | false | false |
schreiberx/sweet | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi/rexi_benchmarks.py | 1 | 8037 | #! /usr/bin/env python3
import os
import sys
import math
from itertools import product
# REXI
from mule_local.rexi.REXICoefficients import *
from mule_local.rexi.pcirexi.BeanREXI import BeanREXI
from mule_local.rexi.pcirexi.LRREXI import LRREXI
from mule_local.rexi.trexi.TREXI import *
from mule_local.rexi.cirexi.CIREXI import *
from mule_local.rexi.elrexi.ELREXI import *
from mule_local.rexi.brexi.BREXI import *
# EFloat
efloat_mode = "float"
def get_rexi_benchmarks(jg):
# Accumulator of all REXI methods
# rexi_method['rexi_method'] = 'file' # Choose REXI method which is typically 'file' for all file-based ones
# rexi_method['rexi_files_coefficients'] = None # List with approximations for different 'phi' functions
rexi_methods = []
#
# CI REXI
#
if True:
# REXI stuff
def fun_params_ci_N(ci_max_real, ci_max_imag):
if ci_max_imag >= 7:
return 128
else:
return 32
params_ci_max_imag = [30.0]
params_ci_max_real = [10.0]
#
# Scale the CI circle radius relative to this time step size
# We do this simply to get a consistent time stepping method
# Otherwise, CI would not behave consistently
# Yes, that's ugly, but simply how it goes :-)
#
params_ci_max_imag_scaling_relative_to_timestep_size = 480
# params_ci_max_imag_scaling_relative_to_timestep_size = None
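        # Worked example of the scaling applied below (numbers are illustrative only):
        # with jg.runtime.timestep_size = 240 and the scaling constant of 480,
        # ci_max_imag becomes 30.0 * 240 / 480 = 15.0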
params_ci_min_imag = 5.0
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for ci_max_imag, ci_max_real in product(params_ci_max_imag, params_ci_max_real):
if params_ci_max_imag_scaling_relative_to_timestep_size != None:
ci_max_imag *= (jg.runtime.timestep_size / params_ci_max_imag_scaling_relative_to_timestep_size)
# "phi0"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi0 = cirexi.setup(
function_name="phi0",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag,
lambda_max_real=ci_max_real
).toFloat()
# "phi1"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi1 = cirexi.setup(
function_name="phi1",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag,
lambda_max_real=ci_max_real
).toFloat()
# "phi2"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi2 = cirexi.setup(
function_name="phi2",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag, lambda_max_real=ci_max_real
).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# EL-REXI
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi0 = elrexi.setup(
function_name="phi0",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
# "phi1"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi1 = elrexi.setup(
function_name="phi1",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
# "phi2"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi2 = elrexi.setup(
function_name="phi2",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# LR-REXI (Rectangle contour with Gauss-Legendre Quadrature)
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi0 = lrrexi.setup(
function_name="phi0",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
# "phi1"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi1 = lrrexi.setup(
function_name="phi1",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
# "phi2"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi2 = lrrexi.setup(
function_name="phi2",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# Bean-REXI
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi0 = beanrexi.setup(
function_name="phi0",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
# "phi1"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi1 = beanrexi.setup(
function_name="phi1",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
# "phi2"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi2 = beanrexi.setup(
function_name="phi2",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
return rexi_methods
if __name__ == "__main__":
pass
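    # Minimal usage sketch, kept as comments; how the job-generator object <jg> is
    # constructed is an assumption - this function only relies on
    # jg.runtime.timestep_size being set:
    # jg.runtime.timestep_size = 480
    # for rexi_method in get_rexi_benchmarks(jg):
    #     print(rexi_method['rexi_method'], len(rexi_method['rexi_files_coefficients']))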
| mit | -5,738,522,908,586,182,000 | 32.348548 | 126 | 0.520717 | false | 3.506545 | false | false | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/mapnik_animated_map.py | 1 | 7430 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from opus_core.logger import logger
from opus_core.store.attribute_cache import AttributeCache
from opus_core.simulation_state import SimulationState
from opus_core.session_configuration import SessionConfiguration
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.mapnik_map import MapnikMap
class MapnikAnimation(MapnikMap):
def get_file_extension(self):
return 'gif'
def visualize(self,
indicators_to_visualize,
computed_indicators):
"""Create a map for the given indicator, save it to the cache
directory's 'indicators' sub-directory."""
#TODO: eliminate this example indicator stuff
example_indicator = computed_indicators[indicators_to_visualize[0]]
source_data = example_indicator.source_data
dataset_to_attribute_map = {}
package_order = source_data.get_package_order()
self._create_input_stores(years = source_data.years)
for name, computed_indicator in computed_indicators.items():
if name not in indicators_to_visualize: continue
if computed_indicator.source_data != source_data:
raise Exception('result templates in indicator batch must all be the same.')
dataset_name = computed_indicator.indicator.dataset_name
if dataset_name not in dataset_to_attribute_map:
dataset_to_attribute_map[dataset_name] = []
dataset_to_attribute_map[dataset_name].append(name)
viz_metadata = []
for dataset_name, indicator_names in dataset_to_attribute_map.items():
attributes = [(name,computed_indicators[name].get_computed_dataset_column_name())
for name in indicator_names]
for year in source_data.years:
SessionConfiguration(
new_instance = True,
package_order = package_order,
in_storage = AttributeCache())
SimulationState().set_cache_directory(source_data.cache_directory)
SimulationState().set_current_time(year)
dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
dataset.load_dataset()
if dataset.get_coordinate_system() is not None:
dataset.compute_variables(names = dataset.get_coordinate_system())
for indicator_name, computed_name in attributes:
indicator = computed_indicators[indicator_name]
table_data = self.input_stores[year].load_table(
table_name = dataset_name,
column_names = [computed_name])
if computed_name in table_data:
table_name = self.get_name(
dataset_name = dataset_name,
years = [year],
attribute_names = [indicator_name])
if self.scale:
min_value, max_value = self.scale
else:
min_value, max_value = (None, None)
file_path = os.path.join(self.storage_location,
'anim_' + table_name + '.' + MapnikMap.get_file_extension(self))
dataset.add_attribute(name = str(computed_name),
data = table_data[computed_name])
dataset.plot_map(
name = str(computed_name),
min_value = min_value,
max_value = max_value,
file = str(file_path),
my_title = str(indicator_name),
color_list = self.color_list,
range_list = self.range_list,
label_list = self.label_list,
is_animation = True,
year = year,
resolution = self.resolution,
page_dims = self.page_dims,
map_lower_left = self.map_lower_left,
map_upper_right = self.map_upper_right,
legend_lower_left = self.legend_lower_left,
legend_upper_right = self.legend_upper_right
#filter = where(table_data[computed_name] != -1)
#filter = 'urbansim.gridcell.is_fully_in_water'
)
#metadata = ([indicator_name], table_name, [year])
#viz_metadata.append(metadata)
else:
logger.log_warning('There is no computed indicator %s'%computed_name)
for indicator_name, computed_name in attributes:
self.create_animation(
dataset_name = dataset_name,
year_list = source_data.years,
indicator_name = str(indicator_name),
viz_metadata = viz_metadata
)
visualization_representations = []
for indicator_names, table_name, years in viz_metadata:
visualization_representations.append(
self._get_visualization_metadata(
computed_indicators = computed_indicators,
indicators_to_visualize = indicator_names,
table_name = table_name,
years = years)
)
return visualization_representations
# precondition: year_list must always have at least one element
# this function is called by the visualize function
def create_animation(self, dataset_name, year_list, indicator_name, viz_metadata):
map_file_list = []
for year in year_list:
map_file_list.append(os.path.join(self.storage_location,'anim_'+dataset_name+'_map_'+str(year)+'_'+indicator_name+'.'+MapnikMap.get_file_extension(self)))
table_name = dataset_name+'_animated_map_'+str(min(year_list))+'_'+indicator_name
animation_file_name = str(os.path.join(self.storage_location,table_name+'.'+self.get_file_extension()))
os.system('convert -delay 100 %s -loop 0 %s' % (' '.join(map_file_list), animation_file_name))
# delete intermediate png files
        for map_file in map_file_list:
            os.remove(map_file)
metadata = ([indicator_name], table_name, [min(year_list)])
viz_metadata.append(metadata)
if __name__ == '__main__':
try:
import mapnik
except:
logger.log_warning('could not import mapnik')
| agpl-3.0 | 8,432,913,646,352,047,000 | 46.025316 | 166 | 0.517766 | false | 4.790458 | false | false | false |
won0089/oppia | core/domain/skins_services.py | 1 | 3513 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides services for HTML skins for the reader view."""
__author__ = 'Sean Lip'
import copy
import inspect
from extensions.skins import skin_classes
class Registry(object):
"""Registry of all skins."""
# Dict mapping skin ids to their classes.
_skins_dict = {}
@classmethod
def _refresh_registry(cls):
cls._skins_dict.clear()
# Add new skin classes to the registry.
for name, clazz in inspect.getmembers(skin_classes, inspect.isclass):
if name.endswith('_test') or name == 'BaseSkin':
continue
ancestor_names = [
base_class.__name__ for base_class in inspect.getmro(clazz)]
if 'BaseSkin' not in ancestor_names:
continue
cls._skins_dict[clazz.skin_id] = clazz
@classmethod
def get_skin_by_id(cls, skin_id):
"""Get a skin class instance by id."""
if not cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id]
@classmethod
def get_all_skin_ids(cls):
"""Get a list of all skin ids."""
if not cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict.keys()
@classmethod
def get_all_skin_classes(cls):
"""Get a dict mapping skin ids to skin classes."""
if not cls._skins_dict:
cls._refresh_registry()
return copy.deepcopy(cls._skins_dict)
@classmethod
def get_all_specs(cls):
"""Get a dict mapping skin ids to their gadget panels properties."""
if not cls._skins_dict:
cls._refresh_registry()
specs_dict = {}
classes_dict = cls.get_all_skin_classes()
for skin_id in classes_dict:
specs_dict[skin_id] = classes_dict[skin_id].panels_properties
return specs_dict
@classmethod
def get_skin_templates(cls, skin_ids):
"""Returns the concatanated HTML for the given skins.
Raises an error if any of the skins is not found.
"""
cls._refresh_registry()
return '\n'.join([
cls._skins_dict[skin_id].get_html() for skin_id in skin_ids])
@classmethod
def get_skin_js_url(cls, skin_id):
"""Returns the URL to the directive JS code for a given skin.
Refreshes once if the skin id is not found; subsequently, throws an
error.
"""
if skin_id not in cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id].get_js_url()
@classmethod
def get_skin_tag(cls, skin_id):
"""Returns an HTML tag corresponding to the given skin.
Refreshes once if the skin id is not found; subsequently, throws an
error.
"""
if skin_id not in cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id].get_tag()
| apache-2.0 | -8,853,533,049,173,117,000 | 30.648649 | 77 | 0.619129 | false | 3.890365 | false | false | false |
georgebv/coastlib | coastlib/stats/extreme.py | 1 | 165750 | # coastlib, a coastal engineering Python library
# Copyright (C), 2019 Georgii Bocharov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pickle
import corner
import emcee
import matplotlib.pyplot as plt
import matplotlib.ticker
import mpmath
import numpy as np
import pandas as pd
import scipy.stats
import statsmodels.api as sm
import coastlib.math.derivatives
import coastlib.stats.distributions
# Helper function used to handle quantiles of empty arrays
def empty_quantile(array, *args, **kwargs):
if len(array) > 0:
return np.nanquantile(array, *args, **kwargs)
else:
return np.nan
class EVA:
"""
Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
Extracts extreme values. Provides assistance in threshold value selection for the POT method.
Estimates parameters of distributions for given data using Maximum Likelihood Estimate (MLE)
or estimates posterior distributions of parameters of distributions using Markov chain Monte Carlo (MCMC).
For given return periods gives estimates of return values and associated confidence intervals.
Generates various statistical plots such as return value plot and QQ/PP plots.
Provides multiple goodness-of-fit (GOF) statistics and tests.
Parameters
----------
dataframe : pd.DataFrame or pd.Series
Pandas Dataframe or Series object containing data to be analyzed.
Must have index array of type pd.DatetimeIndex.
column : str or int, optional
Name or index of column in <dataframe> with data to be analyzed.
By default is <None> and takes first (0'th index) column from <dataframe>.
block_size : float, optional
Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
Block size is used to estimate probabilities (return periods for observed data) for all methods
and to extract extreme events in the 'Block Maxima' method.
By default, it is one Gregorian year and results in return periods having units of years,
i.e. a 100-<block_size> event by default is a 100-year return period event.
Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Public Attributes
-----------------
self.__init__()
self.dataframe : pd.DataFrame
self.column : str
self.block_size : float
self.gap_length : float
self.number_of_blocks : float
self.dataframe_declustered : np.ndarray
self.get_extremes()
self.extremes_method : str
self.extremes_type : str
self.threshold : float
self.block_boundaries : np.ndarray
self.extremes : pd.DataFrame
self.extremes_rate : float
self.plotting_position : str
self.fit()
self.distribution_name : str
self.fit_method : str
self.fit_parameters : tuple
self.scipy_fit_options : dict
self.sampler : emcee.EnsembleSampler
self.mcmc_chain : np.ndarray
self.fixed_parameters : np.ndarray
self.generate_results()
self.results : pd.DataFrame
Private Attributes
------------------
self.__init__()
self.__status : dict
Public Methods
--------------
self.to_pickle
self.read_pickle
self.get_extremes
self.plot_extremes
self.plot_mean_residual_life
self.plot_parameter_stability
self.test_extremes
self.fit
self.plot_trace
self.plot_corner
self.plot_posterior
self.return_value
self.confidence_interval
self.generate_results
self.plot_summary
self.pdf
self.cdf
self.ppf
self.isf
self.plot_qq
self.goodness_of_fit
Private Methods
---------------
self.__init__
self.__get_blocks
self.__update
self.__repr__
self.__get_return_period
self.__run_mcmc
self._kernel_fit_parameters
self.__monte_carlo
self.__delta
self.__get_property
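    Examples
    --------
    A minimal usage sketch of methods defined in this class; the file name, column
    name and threshold value below are hypothetical and only illustrate the workflow.
    >>> import pandas as pd
    >>> df = pd.read_csv('water_levels.csv', index_col=0, parse_dates=True)
    >>> eva = EVA(dataframe=df, column='WL', block_size=365.2425, gap_length=24)
    >>> eva.plot_mean_residual_life(extremes_type='high')
    >>> eva.get_extremes(method='POT', threshold=1.5, r=24, extremes_type='high')
    >>> eva.plot_extremes()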
"""
def __init__(self, dataframe, column=None, block_size=365.2425, gap_length=24):
"""
Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
Calculates number of blocks with <block_size>, accounting for gaps if <gap_length> is given.
Parameters
----------
dataframe : pd.DataFrame or pd.Series
Pandas Dataframe or Series object containing data to be analyzed.
Must have index array of type pd.DatetimeIndex.
column : str or int, optional
Name or index of column in <dataframe> with data to be analyzed.
By default is <None> and takes first (0'th index) column from <dataframe>.
block_size : float, optional
Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
Block size is used to estimate probabilities (return periods for observed data) for all methods
and to extract extreme events in the 'Block Maxima' method.
By default, it is one Gregorian year and results in return periods having units of years,
i.e. a 100-<block_size> event by default is a 100-year return period event.
Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
"""
# Ensure passed <dataframe> is a pd.Dataframe object or can be converted to one
if isinstance(dataframe, pd.DataFrame):
self.dataframe = dataframe
elif isinstance(dataframe, pd.Series):
self.dataframe = dataframe.to_frame()
else:
raise TypeError(f'<dataframe> must be {pd.DataFrame} or {pd.Series}, {type(dataframe)} was passed')
# Ensure <dataframe> index is pd.DatetimeIndex object
if not isinstance(dataframe.index, pd.DatetimeIndex):
raise TypeError(f'<dataframe> index must be {pd.DatetimeIndex}, {type(dataframe.index)} was passed')
self.dataframe.sort_index(ascending=True, inplace=True)
# Ensure passed <column> represents a column within <dataframe>
if column is not None:
if isinstance(column, int):
if column < len(self.dataframe.columns):
self.column = self.dataframe.columns[column]
else:
raise ValueError(f'<column> with index {column} is not valid for '
f'dataframe with {len(self.dataframe.columns)} columns')
elif isinstance(column, str):
if column in self.dataframe.columns:
self.column = column
else:
raise ValueError(f'Column {column} is not valid for given dataframe.\n'
f'Valid columns are {self.dataframe.columns}')
else:
raise TypeError(f'Column must be {str} or {int}, {type(column)} was passed.')
else:
self.column = self.dataframe.columns[0]
# Ensure no nans are present in the <dataframe> <column>
nancount = np.sum(np.isnan(self.dataframe[self.column].values))
if nancount > 0:
raise ValueError(f'<dataframe> contains {nancount} NaN values in column {self.column}.'
f'\nNaN values must be removed or filled before performing analysis.')
# Ensure values in <dataframe> <column> are real numbers
if not np.all(np.isreal(self.dataframe[self.column].values)):
raise ValueError(f'Values in <dataframe> <column> must be real numbers,'
f' {self.dataframe[self.column].values.dtype} was passed')
# Calculate number of blocks of <block_size> in <dataframe>
self.block_size = block_size
self.gap_length = gap_length
self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
# Separate data into clusters using gap_length and plot each cluster independently
# This way distant clusters are not connected on the plot
if self.gap_length is not None:
cluster_values = [[self.dataframe[self.column].values.copy()[0]]]
cluster_indexes = [[self.dataframe.index.values.copy()[0]]]
for index, value in zip(self.dataframe.index, self.dataframe[self.column].values):
# New cluster encountered
if index - cluster_indexes[-1][-1] > np.timedelta64(pd.Timedelta(hours=self.gap_length)):
cluster_values.append([value])
cluster_indexes.append([index])
# Continuing within current cluster
else:
cluster_values[-1].append(value)
cluster_indexes[-1].append(index)
cluster_indexes = np.array(cluster_indexes)
cluster_values = np.array(cluster_values)
self.dataframe_declustered = np.array([cluster_indexes, cluster_values])
else:
self.dataframe_declustered = None
# Initialize internal status
# Internal status is used to delete calculation results when earlier methods are called
# e.g. removes fit data and results when extreme events are exctracted. This prevents conflicts and errors
self.__status = dict(
extremes=False,
fit=False,
results=False
)
# Extremes extraction
self.extremes_method = None
self.extremes_type = None
self.threshold = None
self.block_boundaries = None
self.extremes = None
self.extremes_rate = None
self.plotting_position = None
# Extremes fit
self.distribution_name = None
self.fit_method = None
self.fit_parameters = None
self.scipy_fit_options = None
self.sampler = None
self.mcmc_chain = None
self.fixed_parameters = None
# Results
self.results = None
def __get_blocks(self, gap_length):
"""
Calculates number of blocks of size <self.block_size> in <self.dataframe> <self.column>.
Parameters
----------
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Returns
-------
n : float
Number of blocks.
"""
# Calculate number of blocks with gaps accounted for
if gap_length is not None:
timedelta = np.timedelta64(pd.Timedelta(hours=gap_length))
# Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
new_index = self.dataframe.index.values.copy()
for i in np.arange(1, len(new_index)):
shift = new_index[i] - new_index[i-1]
if shift > timedelta:
# Add 1/10 of gap_length to avoid duplicate dates
new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=gap_length/10))
series_range = np.float64(new_index[-1] - new_index[0])
# Calculate number of blocks with gaps not accounted for
else:
series_range = np.float64((self.dataframe.index[-1] - self.dataframe.index[0]).value)
return series_range / 1e9 / 60 / 60 / 24 / self.block_size
def __update(self):
"""
Updates internal state of the EVA class instance object.
This method is used to delete calculation results when earlier methods are called.
For example, removes all data related to fit and results when extreme events are extracted.
"""
if not self.__status['extremes']:
self.extremes_method = None
self.extremes_type = None
self.threshold = None
self.block_boundaries = None
self.extremes = None
self.extremes_rate = None
self.plotting_position = None
if not self.__status['fit']:
self.distribution_name = None
self.fit_method = None
self.fit_parameters = None
self.scipy_fit_options = None
self.sampler = None
self.mcmc_chain = None
self.fixed_parameters = None
if not self.__status['results']:
self.results = None
def __repr__(self):
"""
Generates a string with a summary of the EVA class instance object state.
"""
series_range = (self.dataframe.index[-1] - self.dataframe.index[0]).value / 1e9 / 60 / 60 / 24
summary = str(
f'{" "*35}Extreme Value Analysis Summary\n'
f'{"="*100}\n'
f'Analyzed parameter{self.column:>29}{" "*6}Series length{series_range:29.2f} days\n'
f'Gap length{self.gap_length:31.2f} hours{" "*6}'
f'Adjusted series length{self.number_of_blocks*self.block_size:20.2f} days\n'
f'Block size{self.block_size:32.2f} days{" "*6}Number of blocks{self.number_of_blocks:31.2f}\n'
f'{"="*100}\n'
)
if self.__status['extremes']:
summary += str(
f'Number of extreme events{len(self.extremes):23}{" "*6}Extraction method{self.extremes_method:>30}\n'
f'Extreme event rate{self.extremes_rate:16.2f} events/block{" "*6}'
f'Plotting position{self.plotting_position:>30}\n'
f'Threshold{self.threshold:38.2f}{" "*6}Extreme values type{self.extremes_type:>28}\n'
f'{"="*100}\n'
)
else:
summary += str(
f'Number of extreme events{"N/A":>23}{" " * 6}Extraction method{"N/A":>30}\n'
f'Extreme event rate{"N/A":>16} events/block{" " * 6}'
f'Plotting position{"N/A":>30}\n'
f'Threshold{"N/A":>38}{" "*6}Extreme values type{"N/A":>28}\n'
f'{"=" * 100}\n'
)
if self.__status['fit']:
if self.fit_method == 'MCMC':
fit_parameters = self._kernel_fit_parameters(
burn_in=int(self.mcmc_chain.shape[1] / 2),
kernel_steps=100
)
summary += str(
f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Markov chain Monte Carlo":>37}\n'
f'MCMC fit parameters (approximate){str(np.round(fit_parameters, 3)):>14}\n'
f'{"=" * 100}'
)
elif self.fit_method == 'MLE':
summary += str(
f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Maximum Likelihood Estimate":>37}\n'
f'MLE fit parameters{str(np.round(self.fit_parameters, 3)):>29}\n'
f'{"=" * 100}'
)
else:
summary += str(
f'Distribution{"N/A":>35}{" " * 6}Fit method{"N/A":>37}\n'
f'Fit parameters{"N/A":>33}\n'
f'{"=" * 100}'
)
return summary
def to_pickle(self, path):
"""
Exports EVA object to a .pyc file. Preserves all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
            Path to pickle file: e.g. <path/to/pickle.pyc>.
"""
with open(path, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def read_pickle(path):
"""
Reads a .pyc file with EVA object. Loads all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
            Path to pickle file: e.g. <path/to/pickle.pyc>.
Returns
-------
file : EVA class instance object
Saved EVA object with all data and internal state preserved.
"""
with open(path, 'rb') as f:
file = pickle.load(f)
return file
def get_extremes(self, method='BM', plotting_position='Weibull', extremes_type='high', **kwargs):
"""
Extracts extreme values from <self.dataframe> <self.column> using the BM (Block Maxima)
or the POT (Peaks Over Threshold) methods. If method is POT, also declusters extreme values using
the runs method (aka minimum distance between independent events).
Parameters
----------
method : str, optional
Peak extraction method. 'POT' for Peaks Over Threshold and 'BM' for Block Maxima (default='BM').
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
kwargs
for method='POT'
threshold : float
Threshold for extreme value extraction.
Only values above (below, if <extremes_type='low'>) this threshold are extracted.
r : float, optional
Minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
Returns
-------
Creates a <self.extremes> dataframe with extreme values and return periods determined using
the given plotting position as p=(rank-alpha)/(N+1-alpha-beta) and T=1/(1-p).
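        Examples
        --------
        A short sketch of both extraction methods, assuming <eva> is an EVA instance
        (the threshold value is hypothetical):
        >>> eva.get_extremes(method='BM', plotting_position='Weibull', extremes_type='high')
        >>> eva.get_extremes(method='POT', threshold=1.5, r=24, adjust_threshold=True)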
"""
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
if extremes_type not in ['high', 'low']:
raise ValueError(f'<extremes_type> must be high or low, {extremes_type} was passed')
self.extremes_type = extremes_type
# Block Maxima method
if method == 'BM':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Set threshold to 0 for compatibility between BM and POT formulas
self.extremes_method = 'Block Maxima'
self.threshold = 0
# Generate new index with gaps eliminated
if self.gap_length is not None:
gap_delta = np.timedelta64(pd.Timedelta(hours=self.gap_length))
# Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
new_index = self.dataframe.index.values.copy()
for i in np.arange(1, len(new_index)):
shift = new_index[i] - new_index[i-1]
if shift > gap_delta:
# Add 1/10 of gap_length to avoid duplicate dates
new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=self.gap_length/10))
else:
new_index = self.dataframe.index.values.copy()
# Create local reindexed dataframe with <new_index> and <id> column to get original datetime later
local_dataframe = pd.DataFrame(
data=self.dataframe[self.column].values.copy(),
columns=[self.column], index=new_index
)
local_dataframe['id'] = np.arange(len(local_dataframe))
# Find boundaries of blocks of <self.block_size>
block_delta = np.timedelta64(pd.Timedelta(days=self.block_size))
block_boundaries = [(new_index[0], new_index[0] + block_delta)]
self.block_boundaries = [self.dataframe.index.values.copy()[0]]
while block_boundaries[-1][-1] < local_dataframe.index.values[-1]:
block_boundaries.append(
(block_boundaries[-1][-1], block_boundaries[-1][-1] + block_delta)
)
self.block_boundaries.append(
self.dataframe.index.values.copy()[
local_dataframe.truncate(before=block_boundaries[-1][0])['id'].values[0]
]
)
self.block_boundaries.append(self.block_boundaries[-1] + block_delta)
self.block_boundaries = np.array(self.block_boundaries)
block_boundaries = np.array(block_boundaries)
# Update number_of_blocks
self.number_of_blocks = len(self.block_boundaries) - 1
# Find extreme values within each block and associated datetime indexes from original dataframe
extreme_values, extreme_indexes = [], []
for i, block_boundary in enumerate(block_boundaries):
if i == len(block_boundaries) - 1:
local_data = local_dataframe[local_dataframe.index >= block_boundary[0]]
else:
local_data = local_dataframe[
(local_dataframe.index >= block_boundary[0]) & (local_dataframe.index < block_boundary[1])
]
if len(local_data) != 0:
if self.extremes_type == 'high':
extreme_values.append(local_data[self.column].values.copy().max())
else:
extreme_values.append(local_data[self.column].values.copy().min())
local_index = self.dataframe.index.values.copy()[
local_data[local_data[self.column].values == extreme_values[-1]]['id']
]
if np.isscalar(local_index):
extreme_indexes.append(local_index)
else:
extreme_indexes.append(local_index[0])
self.extremes = pd.DataFrame(data=extreme_values, columns=[self.column], index=extreme_indexes)
# Peaks Over Threshold method
elif method == 'POT':
self.threshold = kwargs.pop('threshold')
r = kwargs.pop('r', 24)
adjust_threshold = kwargs.pop('adjust_threshold', True)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
self.extremes_method = 'Peaks Over Threshold'
# Make sure correct number of blocks is used (overrides previously created BM values)
if isinstance(self.number_of_blocks, int):
self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
# Extract raw extremes
if self.extremes_type == 'high':
self.extremes = self.dataframe[self.dataframe[self.column] > self.threshold][self.column].to_frame()
else:
self.extremes = self.dataframe[self.dataframe[self.column] < self.threshold][self.column].to_frame()
# Decluster raw extremes using runs method
if r is not None:
r = np.timedelta64(pd.Timedelta(hours=r))
last_cluster_index = self.extremes.index.values.copy()[0]
peak_cluster_values = [self.extremes[self.column].values.copy()[0]]
peak_cluster_indexes = [self.extremes.index.values.copy()[0]]
for index, value in zip(self.extremes.index, self.extremes[self.column].values):
# New cluster encountered
if index - last_cluster_index > r:
peak_cluster_values.append(value)
peak_cluster_indexes.append(index)
# Continuing within current cluster
else:
# Update cluster peak
if self.extremes_type == 'high':
if value > peak_cluster_values[-1]:
peak_cluster_values[-1] = value
peak_cluster_indexes[-1] = index
else:
if value < peak_cluster_values[-1]:
peak_cluster_values[-1] = value
peak_cluster_indexes[-1] = index
# Index of previous cluster - lags behind <index> by 1
last_cluster_index = index
self.extremes = pd.DataFrame(
data=peak_cluster_values, index=peak_cluster_indexes, columns=[self.column]
)
# Update threshold to smallest/largest extreme value in order to fix the GPD location parameter at 0.
# GPD is very unstable with non-zero location.
if adjust_threshold:
if self.extremes_type == 'high':
self.threshold = self.extremes[self.column].values.min()
else:
self.threshold = self.extremes[self.column].values.max()
else:
raise ValueError(f'Method {method} not recognized')
self.extremes.index.name = self.dataframe.index.name
# Calculate rate of extreme events (events/block)
self.extremes_rate = len(self.extremes) / self.number_of_blocks
# Assign ranks to data with duplicate values having average of ranks they would have individually
self.plotting_position = plotting_position
self.extremes['Return Period'] = self.__get_return_period(plotting_position=self.plotting_position)
# Update internal status
self.__status = dict(
extremes=True,
fit=False,
results=False
)
self.__update()
def __get_return_period(self, plotting_position, return_cdf=False):
"""
Assigns return periods to extracted extreme events and updates the <self.extremes> index.
Parameters
----------
plotting_position : str
Plotting position. Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
return_cdf : bool, optional
If True, returns cdf of extracted extremes (default=False).
"""
# Assign ranks to data with duplicate values having average of ranks they would have individually
if self.extremes_type == 'high':
ranks = scipy.stats.rankdata(self.extremes[self.column].values, method='average')
else:
ranks = len(self.extremes) + 1 - scipy.stats.rankdata(self.extremes[self.column].values, method='average')
# Calculate return periods using a specified plotting position
# https://matplotlib.org/mpl-probscale/tutorial/closer_look_at_plot_pos.html
plotting_positions = {
'ECDF': (0, 1),
'Hazen': (0.5, 0.5),
'Weibull': (0, 0),
'Laplace': (-1, -1),
'Tukey': (1 / 3, 1 / 3),
'Blom': (3 / 8, 3 / 8),
'Median': (0.3175, 0.3175),
'Cunnane': (0.4, 0.4),
'Gringorten': (0.44, 0.44),
'Gumbel': (1, 1)
}
if plotting_position not in plotting_positions:
raise ValueError(f'Plotting position {plotting_position} not recognized')
alpha, beta = plotting_positions[plotting_position][0], plotting_positions[plotting_position][1]
cdf = (ranks - alpha) / (len(self.extremes) + 1 - alpha - beta)
if return_cdf:
return cdf
# Survival function - aka upper tail probability or probability of exceedance
sf = 1 - cdf
return 1 / sf / self.extremes_rate
def plot_extremes(self):
"""
Plots extracted extreme values on top of <self.dataframe> <self.column> observed time series.
Shows boundaries of blocks for the Block Maxima method and threshold level for the Peaks Over Threshold method.
Returns
-------
tuple(fig, ax)
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Run self.get_extremes() first')
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
self.extremes.index, self.extremes[self.column],
edgecolors='white', marker='s', facecolors='k', s=40, lw=1, zorder=15
)
if self.gap_length is None:
ax.plot(
self.dataframe.index, self.dataframe[self.column],
color='#3182bd', lw=.5, alpha=.8, zorder=5
)
else:
for x, y in zip(self.dataframe_declustered[0], self.dataframe_declustered[1]):
ax.plot(x, y, color='#3182bd', lw=.5, alpha=.8, zorder=5)
if self.extremes_method == 'Block Maxima':
for _block in self.block_boundaries:
ax.axvline(_block, color='k', ls='--', lw=1, zorder=10)
elif self.extremes_method == 'Peaks Over Threshold':
ax.axhline(self.threshold, color='k', ls='--', lw=1, zorder=10)
ax.set_title(f'Extreme Values Time Series, {self.extremes_method}')
if len(self.dataframe.index.name) > 0:
ax.set_xlabel(f'{self.dataframe.index.name}')
else:
ax.set_xlabel('Date')
ax.set_ylabel(f'{self.column}')
annot = ax.annotate(
'', xy=(self.extremes.index[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
self.extremes.index[0], self.extremes[self.column].values[0],
edgecolors='white', marker='s', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return period : {self.extremes["Return Period"].values[n]:.2f}\n'
f'Plotting position : {self.plotting_position}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
def plot_mean_residual_life(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
adjust_threshold=True, limit=10, plot=True):
"""
Plots means of residuals against thresholds.
Threshold should be chosen as the smallest threshold in a region where the mean residuals' plot
is approximately linear. Generalized Pareto Distribution is asymptotically valid in this region.
Parameters
----------
thresholds : array_like, optional
Array with threshold values for which the plot is generated.
Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
r : float, optional
POT method only: minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
limit : int, optional
Minimum number of exceedances (peaks) for which calculations are performed (default=10).
plot : bool, optional
Generates plot if True, returns data if False (default=True).
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False : tuple(thresholds, residuals, confidence_low, confidence_top)
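        Examples
        --------
        A sketch of how this plot is typically used for threshold selection, assuming
        <eva> is an EVA instance (the candidate threshold range is hypothetical):
        >>> import numpy as np
        >>> fig, ax = eva.plot_mean_residual_life(thresholds=np.linspace(1.0, 3.0, 50), r=24, alpha=.95)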
"""
if thresholds is None:
if extremes_type == 'high':
thresholds = np.linspace(
np.quantile(self.dataframe[self.column].values, .95),
self.dataframe[self.column].values.max(),
100
)
else:
thresholds = np.linspace(
self.dataframe[self.column].values.min(),
np.quantile(self.dataframe[self.column].values, .05),
100
)
if np.isscalar(thresholds):
raise ValueError('Thresholds must be an array. A scalar was provided')
thresholds = np.sort(thresholds)
if extremes_type == 'high':
thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
else:
thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
# Find mean residuals and 95% confidence interval for each threshold
residuals, confidence = [], []
true_thresholds = []
for u in thresholds:
self.get_extremes(
method='POT', threshold=u, r=r,
adjust_threshold=adjust_threshold, extremes_type=extremes_type
)
true_thresholds.append(self.threshold)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if extremes_type == 'low':
exceedances *= -1
if len(exceedances) > limit:
residuals.append(exceedances.mean())
                # Unbiased estimator of the variance of the sample mean, s^2/n
confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=exceedances.mean(),
scale=exceedances.std(ddof=1)/np.sqrt(len(exceedances))
)
)
else:
residuals.append(np.nan)
confidence.append((np.nan, np.nan))
residuals = np.array(residuals)
confidence = np.array(confidence)
# Remove non-unique values
if adjust_threshold:
thresholds, mask = np.unique(true_thresholds, return_index=True)
residuals = residuals[mask]
confidence = confidence[mask]
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
# Generate mean residual life plot
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.set_title('Mean Residual Life Plot')
ax.plot(thresholds, residuals, color='k', zorder=10, label='Mean residual life', lw=2)
ax.plot(thresholds, confidence.T[0], ls='--', color='k', lw=0.5, zorder=10)
ax.plot(thresholds, confidence.T[1], ls='--', color='k', lw=0.5, zorder=10)
ax.fill_between(
thresholds, confidence.T[0], confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval', zorder=5
)
ax.legend()
ax.set_xlabel('Threshold')
ax.set_ylabel('Mean Residual')
fig.tight_layout()
return fig, ax
else:
return thresholds, residuals, confidence.T[0], confidence.T[1]
def plot_parameter_stability(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
adjust_threshold=True, limit=10, plot=True, dx='1e-10', precision=100):
"""
        Plots shape and modified scale parameters of the Generalized Pareto Distribution (GPD) against thresholds.
        GPD is asymptotically valid in a region where these parameters are approximately constant (stable).
Parameters
----------
thresholds : array_like, optional
Array with threshold values for which the plot is generated.
Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
r : float, optional
Minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
limit : int, optional
Minimum number of exceedances (peaks) for which calculations are performed (default=10).
plot : bool, optional
Generates plot if True, returns data if False (default=True).
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10').
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False :
if alpha is None : tuple(thresholds, shapes, modified_scales)
if alpha is passed : tuple(thresholds, shapes, modified_scales, shapes_confidence, scales_confidence)
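        Examples
        --------
        A sketch mirroring the mean residual life workflow, assuming <eva> is an EVA
        instance (the candidate threshold range is hypothetical):
        >>> import numpy as np
        >>> fig, axes = eva.plot_parameter_stability(thresholds=np.linspace(1.0, 3.0, 50), r=24, alpha=.95)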
"""
if thresholds is None:
if extremes_type == 'high':
thresholds = np.linspace(
np.quantile(self.dataframe[self.column].values, .95),
self.dataframe[self.column].values.max(),
100
)
else:
thresholds = np.linspace(
self.dataframe[self.column].values.min(),
np.quantile(self.dataframe[self.column].values, .05),
100
)
if np.isscalar(thresholds):
raise ValueError('Thresholds must be an array. A scalar was provided')
thresholds = np.sort(thresholds)
if extremes_type == 'high':
thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
else:
thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
shapes, modified_scales = [], []
shapes_confidence, scales_confidence = [], []
true_thresholds = []
for u in thresholds:
self.get_extremes(
method='POT', threshold=u, r=r,
adjust_threshold=adjust_threshold, extremes_type=extremes_type
)
true_thresholds.append(self.threshold)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if extremes_type == 'low':
exceedances *= -1
if len(exceedances) > limit:
shape, loc, scale = scipy.stats.genpareto.fit(exceedances, floc=0)
shapes.append(shape)
# Define modified scale function (used as scalar function for delta method)
if extremes_type == 'high':
def mod_scale_function(*theta):
return theta[1] - theta[0] * true_thresholds[-1]
else:
def mod_scale_function(*theta):
return theta[1] + theta[0] * true_thresholds[-1]
modified_scales.append(mod_scale_function(shape, scale))
if alpha is not None:
with mpmath.workdps(precision):
# Define modified log_likehood function
def log_likelihood(*theta):
return mpmath.fsum(
[
mpmath.log(
coastlib.stats.distributions.genpareto.pdf(
x=_x, shape=theta[0], loc=0, scale=theta[1]
)
) for _x in exceedances
]
)
# Calculate delta (gradient) of scalar_function
if extremes_type == 'high':
delta_scalar = np.array(
[
[-true_thresholds[-1]],
[1]
]
)
else:
delta_scalar = np.array(
[
[true_thresholds[-1]],
[1]
]
)
# Calculate observed information matrix (negative hessian of log_likelihood)
observed_information = -coastlib.math.derivatives.hessian(
func=log_likelihood, n=2, coordinates=[shape, scale], dx=dx, precision=precision
).astype(np.float64)
covariance = np.linalg.inv(observed_information)
# Estimate modified scale parameter confidence interval using delta method
variance = np.dot(
np.dot(delta_scalar.T, covariance), delta_scalar
).flatten()[0]
scales_confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=modified_scales[-1], scale=np.sqrt(variance)
)
)
# Estimate shape parameter confidence interval directly from covariance matrix
shapes_confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=shape, scale=np.sqrt(covariance[0][0])
)
)
# Number of exceedances below the limit
else:
shapes.append(np.nan)
modified_scales.append(np.nan)
if alpha is not None:
shapes_confidence.append((np.nan, np.nan))
scales_confidence.append((np.nan, np.nan))
# Convert results to np.ndarray objects
shapes = np.array(shapes)
modified_scales = np.array(modified_scales)
if alpha is not None:
shapes_confidence = np.array(shapes_confidence)
scales_confidence = np.array(scales_confidence)
# Remove non-unique values
if adjust_threshold:
thresholds, mask = np.unique(true_thresholds, return_index=True)
shapes = shapes[mask]
modified_scales = modified_scales[mask]
if alpha is not None:
shapes_confidence = shapes_confidence[mask]
scales_confidence = scales_confidence[mask]
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
if plot:
with plt.style.context('bmh'):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8), sharex='all')
ax1.set_title('Parameter Stability Plot')
ax1.plot(thresholds, shapes, color='k', zorder=10, label='Shape parameter')
ax2.plot(thresholds, modified_scales, color='k', zorder=10, label='Modified scale parameter', lw=2)
if alpha is not None:
ax1.plot(thresholds, shapes_confidence.T[0], ls='--', color='k', lw=0.5)
ax1.plot(thresholds, shapes_confidence.T[1], ls='--', color='k', lw=0.5)
ax2.plot(thresholds, scales_confidence.T[0], ls='--', color='k', lw=0.5)
ax2.plot(thresholds, scales_confidence.T[1], ls='--', color='k', lw=0.5)
ax1.fill_between(
thresholds, shapes_confidence.T[0], shapes_confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
)
ax2.fill_between(
thresholds, scales_confidence.T[0], scales_confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
)
ax2.set_xlabel('Threshold')
ax1.set_ylabel('Shape parameter')
ax2.set_ylabel('Modified scale parameter')
ax1.legend()
ax2.legend()
fig.tight_layout()
return fig, (ax1, ax2)
else:
if alpha is None:
return thresholds, shapes, modified_scales
else:
return thresholds, shapes, modified_scales, shapes_confidence, scales_confidence
def test_extremes(self, method, **kwargs):
"""
        Provides multiple methods to test independence of extracted extreme values.
Parameters
----------
method : str
Method for testing extreme values' independence.
Accepted methods:
'autocorrelation' - generates an autocorrelation plot
http://www.statsmodels.org/stable/generated/
statsmodels.tsa.stattools.acf.html#statsmodels.tsa.stattools.acf
'lag plot' - generates a lag plot for a given lag
'runs test' - return runs test statistic
https://en.wikipedia.org/wiki/Wald%E2%80%93Wolfowitz_runs_test
kwargs
for autocorrelation:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
nlags : int, optional
Number of lags to return autocorrelation for (default for all possible lags).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
unbiased : bool, optional
If True, then denominators for autocovariance are n-k, otherwise n (default=False)
for lag plot:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
lag : int, optional
Lag value (default=1).
for runs test:
alpha : float, optional
Significance level (default=0.05).
Returns
-------
for autocorrelation:
if plot=True : tuple(fig, ax)
if plot=False : tuple(lags, acorr, ci_low, ci_top)
for lag plot:
if plot=True : tuple(fig, ax)
if plot=False : tuple(x, y)
for runs test:
str(test summary)
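        Examples
        --------
        Illustrative sketch only; assumes <eva> is an instance of this class with
        extreme values already extracted (the name and argument values are arbitrary):
            eva.test_extremes(method='autocorrelation', nlags=20, alpha=.95)
            eva.test_extremes(method='lag plot', lag=1)
            print(eva.test_extremes(method='runs test', alpha=.05))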
"""
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to test')
if method == 'autocorrelation':
plot = kwargs.pop('plot', True)
nlags = kwargs.pop('nlags', len(self.extremes) - 1)
alpha = kwargs.pop('alpha', .95)
unbiased = kwargs.pop('unbiased', False)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
acorr, ci = sm.tsa.stattools.acf(
x=self.extremes[self.column].values, alpha=1-alpha, nlags=nlags, unbiased=unbiased
)
ci_low, ci_top = ci.T[0] - acorr, ci.T[1] - acorr
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.vlines(np.arange(nlags+1), [0], acorr, lw=1, color='k', zorder=15)
points = ax.scatter(
np.arange(nlags+1), acorr, marker='o', s=40, lw=1,
facecolor='k', edgecolors='white', zorder=20, label='Autocorrelation value'
)
ax.plot(np.arange(nlags+1)[1:], ci_low[1:], color='k', lw=.5, ls='--', zorder=15)
ax.plot(np.arange(nlags+1)[1:], ci_top[1:], color='k', lw=.5, ls='--', zorder=15)
ax.fill_between(
np.arange(nlags+1)[1:], ci_low[1:], ci_top[1:],
color='k', alpha=.1, zorder=5, label=f'{alpha*100:.0f}% confidence interval'
)
ax.axhline(0, color='k', lw=1, ls='--', zorder=10)
ax.legend()
ax.set_title('Autocorrelation plot')
ax.set_xlabel('Lag')
ax.set_ylabel('Correlation coefficient')
annot = ax.annotate(
'', xy=(0, 0),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
0, 0,
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=25
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Lag : {np.arange(nlags+1)[n]:d}\n'
f'Correlation : {acorr[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return np.arange(nlags+1), acorr, ci_low, ci_top
elif method == 'lag plot':
plot = kwargs.pop('plot', True)
lag = kwargs.pop('lag', 1)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if lag == 0:
x = self.extremes[self.column].values
else:
x = self.extremes[self.column].values[:-lag]
y = self.extremes[self.column].values[lag:]
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
x, y, marker='o', facecolor='k', s=40, edgecolors='white', lw=1, zorder=5
)
ax.set_xlabel(f'{self.column} i')
ax.set_ylabel(f'{self.column} i+{lag}')
ax.set_title('Extreme Values Lag Plot')
annotation = ax.annotate(
"", xy=(0, 0), xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
np.nanmean(x), np.nanmean(y),
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annotation.set_visible(False)
def update_annotation(ind):
pos = points.get_offsets()[ind['ind'][0]]
annotation.xy = pos
point.set_offsets(pos)
text = "{}".format(" ".join(
[
                                f'{self.extremes.index[n]} : {x[n]:.2f}\n'
                                f'{self.extremes.index[n+lag]} : {y[n]:.2f}'
for n in ind['ind']
]))
annotation.set_text(text)
def hover(event):
vis = annotation.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annotation(ind)
annotation.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annotation.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return x, y
elif method == 'runs test':
alpha = kwargs.pop('alpha', .05)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Calculate number of runs of shifted series
s = self.extremes[self.column].values - np.quantile(self.extremes[self.column].values, .5)
n_plus = np.sum(s > 0)
n_minus = np.sum(s < 0)
n_runs = 1
for i in range(1, len(s)):
# Change of sign
if s[i] * s[i-1] < 0:
n_runs += 1
mean = 2 * n_plus * n_minus / len(s) + 1
variance = (mean - 1) * (mean - 2) / (len(s) - 1)
test_statistic = (n_runs-mean)/np.sqrt(variance)
return str(
f'Ho : data is random\n'
f'Ha : data is not random\n\n'
f'Test statistic : N = {test_statistic:.2f}\n'
                f'Significance level : alpha = {alpha}\n'
f'Critical value : Nalpha = {scipy.stats.norm.ppf(1 - alpha / 2):.2f}\n'
f'Reject Ho if |N| > Nalpha'
)
else:
            raise ValueError(f'Method {method} not recognized. Try: autocorrelation, lag plot, runs test')
def fit(self, distribution_name, fit_method='MLE', **kwargs):
"""
Depending on fit method, either creates a tuple with maximum likelihood estimate (MLE)
or an array with samples drawn from posterior distribution of parameters (MCMC).
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
fit_method : str, optional
Fit method - MLE (Maximum Likelihood Estimate, scipy)
or Markov chain Monte Carlo (MCMC, emcee) (default='MLE').
kwargs:
for MLE:
scipy_fit_options : dict, optional
Special scipy fit options like <fc>, <loc>, or <floc>.
For GPD scipy_fit_options=dict(floc=0) by default (fixed location parameter at 0).
This parameter is carried over to further calculations, such as confidence interval.
for MCMC:
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
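        Examples
        --------
        Illustrative sketch only; assumes <eva> is an instance of this class with
        extreme values already extracted (argument values are arbitrary):
            # Maximum likelihood fit of a GPD with location fixed at 0
            eva.fit(distribution_name='genpareto', fit_method='MLE', scipy_fit_options=dict(floc=0))
            # Bayesian (MCMC) fit using the default prior and likelihood functions
            eva.fit(distribution_name='genpareto', fit_method='MCMC', nsamples=1000, nwalkers=200)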
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to fit')
# Update internal status
self.__status = dict(
extremes=True,
fit=False,
results=False
)
self.__update()
if fit_method == 'MLE':
if distribution_name == 'genpareto':
self.scipy_fit_options = kwargs.pop('scipy_fit_options', dict(floc=0))
else:
self.scipy_fit_options = kwargs.pop('scipy_fit_options', {})
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Create local distribution object
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
self.fit_parameters = distribution_object.fit(exceedances, **self.scipy_fit_options)
elif fit_method == 'MCMC':
self.mcmc_chain = self.__run_mcmc(distribution_name, **kwargs)
else:
raise ValueError(f'Fit method {fit_method} not recognized')
# On successful fit assign the fit_ variables
self.fit_method = fit_method
self.distribution_name = distribution_name
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
def __run_mcmc(self, distribution_name, nsamples=1000, nwalkers=200, **kwargs):
"""
        Runs the emcee Ensemble Sampler to sample the posterior probability of fit parameters given observed data.
Returns sampler chain with <nsamples> for each parameter for each <nwalkers>.
See http://dfm.io/emcee/current/
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
kwargs
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
Returns
-------
Generates an np.ndarray in self.mcmc_chain
Ensemble Sampler chain with <nsamples> for each parameter for each <nwalkers>.
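        Examples
        --------
        Not called directly - invoked through self.fit(..., fit_method='MCMC').
        Illustrative sketch of passing a custom prior (the function body is a hypothetical example):
            def log_prior(theta):
                shape, loc, scale = theta
                return 0 if scale > 0 else -np.inf
            eva.fit('genextreme', fit_method='MCMC', log_prior=log_prior, nsamples=2000, nwalkers=100)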
"""
log_prior = kwargs.pop('log_prior', None)
log_likelihood = kwargs.pop('log_likelihood', None)
starting_bubble = kwargs.pop('starting_bubble', 1e-2)
starting_position = kwargs.pop('starting_position', None)
if distribution_name == 'genpareto':
self.fixed_parameters = kwargs.pop('fixed_parameters', [(1, 0)])
else:
self.fixed_parameters = kwargs.pop('fixed_parameters', None)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if self.fixed_parameters == [(1, 0)] and distribution_name == 'genpareto':
pass
else:
if self.fixed_parameters is not None:
if (log_prior is None) or (log_likelihood is None) or (starting_position is None):
raise ValueError(
'<fixed_parameter> only works with custom prior and likelihood functions.\n'
'Starting position should be provided for the fixed_parameters case'
)
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Define log_prior probability function (uniform by default)
if log_prior is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_prior(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return 0
else:
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-prior function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_prior=>.'
)
# Define log_likelihood function
if log_likelihood is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_likelihood(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return np.sum(distribution_object.logpdf(exceedances, shape, 0, scale))
else:
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-likelihood function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_likelihood=>.'
)
# Define log_posterior probability function (not exact - excludes marginal evidence probability)
def log_posterior(theta):
return log_likelihood(theta) + log_prior(theta)
# Set MCMC walkers' starting positions to 0
# (setting to MLE makes algorithm unstable due to being stuck in local maxima)
if starting_position is None:
if distribution_name == 'genpareto' and self.fixed_parameters == [(1, 0)]:
theta_0 = np.array([0, 0])
elif distribution_name in ['genextreme', 'genpareto']:
theta_0 = np.array([0, 0, 0])
else:
theta_0 = distribution_object.fit(exceedances)
starting_position = [[0] * len(theta_0) for _ in range(nwalkers)]
# Randomize starting positions to force walkers explore the parameter space
starting_position = [
np.array(sp) + starting_bubble * np.random.randn(len(starting_position[0]))
for sp in starting_position
]
if len(starting_position) != nwalkers:
raise ValueError(f'Number of starting positions {len(starting_position)} '
f'must be equal to number of walkers {nwalkers}')
ndim = len(starting_position[0])
# Setup the Ensemble Sampler and draw samples from posterior distribution for specified number of walkers
self.__sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)
self.__sampler.run_mcmc(starting_position, nsamples)
# Fill in fixed parameter values
        sampler_chain = self.__sampler.chain.copy()
if self.fixed_parameters is not None:
fp = np.transpose(self.fixed_parameters)
ndim = sampler_chain.shape[-1] + len(self.fixed_parameters)
mcmc_chain = np.array(
[
[
[np.nan] * ndim for _ in range(sampler_chain.shape[1])
] for _ in range(sampler_chain.shape[0])
]
)
for i in range(mcmc_chain.shape[0]):
for j in range(mcmc_chain.shape[1]):
counter = 0
for k in range(mcmc_chain.shape[2]):
if k in fp[0]:
mcmc_chain[i][j][k] = fp[1][fp[0] == k][0]
else:
mcmc_chain[i][j][k] = sampler_chain[i][j][counter]
counter += 1
sampler_chain = np.array(mcmc_chain)
return sampler_chain
def _kernel_fit_parameters(self, burn_in, kernel_steps=1000):
"""
Estimate mode of each parameter as peaks of gaussian kernel.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Returns
-------
np.ndarray
Modes of parameters.
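        Examples
        --------
        Illustrative sketch only; assumes an MCMC fit was performed (burn-in value is arbitrary):
            theta_modes = eva._kernel_fit_parameters(burn_in=200)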
"""
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method != 'MCMC':
raise ValueError('Fit method must be MCMC')
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
# Estimate mode of each parameter as peaks of gaussian kernel.
parameters = []
for i, p in enumerate(samples.T):
if self.fixed_parameters is None or (i not in np.transpose(self.fixed_parameters)[0]):
p_filtered = p[~np.isnan(p)]
kernel = scipy.stats.gaussian_kde(p_filtered)
support = np.linspace(
np.quantile(p_filtered, .1), np.quantile(p_filtered, .9),
kernel_steps
)
density = kernel.evaluate(support)
parameters.append(support[density.argmax()])
else:
parameters.append(p[0])
return np.array(parameters)
def plot_trace(self, burn_in, true_theta=None, labels=None):
"""
Plots traces for each parameter. Each trace plot shows all samples for each walker
after first <burn_in> samples are discarded. This method is used to verify fit stability
and to determine the optimal <burn_in> value.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
true_theta : array_like, optional
Array with true (known) values of parameters (default=None). If given, are shown on trace plots.
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
Returns
-------
tuple(fig, axes)
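        Examples
        --------
        Illustrative sketch only; assumes an MCMC fit of a 2-parameter GPD (labels are arbitrary):
            fig, axes = eva.plot_trace(burn_in=200, labels=['shape', 'scale'])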
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if labels is None:
labels = [f'Parameter {i+1}' for i in range(self.__sampler.chain.shape[-1])]
# Generate trace plot
ndim = self.__sampler.chain.shape[-1]
with plt.style.context('bmh'):
fig, axes = plt.subplots(ndim, 1, figsize=(12, 8), sharex='all')
if ndim == 1:
axes.set_title('MCMC Trace Plot')
axes.set_xlabel('Sample number')
else:
axes[0].set_title('MCMC Trace Plot')
axes[-1].set_xlabel('Sample number')
for i in range(ndim):
for swalker in self.__sampler.chain:
if ndim == 1:
axes.plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes.set_ylabel(labels[i])
else:
axes[i].plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes[i].set_ylabel(labels[i])
if true_theta is not None:
if ndim == 1:
axes.axhline(true_theta[i], color='orangered', lw=2, zorder=10)
else:
axes[i].axhline(true_theta[i], color='orangered', lw=2, zorder=10)
fig.tight_layout()
return fig, axes
def plot_corner(self, burn_in, bins=100, labels=None, figsize=(12, 12), **kwargs):
"""
Generate corner plot showing the projections of a data set in a multi-dimensional space.
See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
bins : int, optional
            See https://corner.readthedocs.io/en/latest/api.html#corner.corner (default=100).
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
figsize : tuple, optional
Figure size (default=(12, 12)).
kwargs
Corner plot keywords. See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Returns
-------
tuple(fig, ax)
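        Examples
        --------
        Illustrative sketch only; assumes an MCMC fit was performed (argument values are arbitrary):
            fig, ax = eva.plot_corner(burn_in=200, bins=50, labels=['shape', 'scale'])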
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('mcmc_chain attribute not found')
# Generate labels
ndim = self.__sampler.chain.shape[-1]
if labels is None:
labels = np.array([f'Parameter {i + 1}' for i in range(ndim)])
samples = self.__sampler.chain[:, burn_in:, :].reshape((-1, ndim)).copy()
# Generate corner plot
fig, ax = plt.subplots(ndim, ndim, figsize=figsize)
fig = corner.corner(samples, bins=bins, labels=labels, fig=fig, **kwargs)
return fig, ax
def plot_posterior(self, rp, burn_in, alpha=.95, plot=True, kernel_steps=1000, bins=100):
"""
Returns posterior distribution of return value for a specific return period.
Can be used to explore the posterior distribution p(rv|self.extremes).
Parameters
----------
rp : float
Return period (1/rp represents probability of exceedance over self.block_size).
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
alpha : float, optional
Shows confidence bounds for given interval alpha (default=.95). Doesn't show if None.
plot : bool, optional
If True, plots histogram of return value (default=True). If False, return data
kernel_steps : int, optional
Number of bins (kernel support points) used to plot kernel density (default=1000).
bins : int, optional
            Number of bins in the histogram (default=100). Only used when plot=True.
Returns
-------
Distribution of return value for a given return period
if plot = True : tuple(fig, ax)
            if plot = False : np.ndarray
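        Examples
        --------
        Illustrative sketch only; assumes an MCMC fit was performed (argument values are arbitrary):
            fig, ax = eva.plot_posterior(rp=100, burn_in=200, alpha=.95)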
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if not np.isscalar(rp):
raise ValueError('rp must be scalar')
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return value for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Set up gaussian kernel
support = np.linspace(return_values.min(), return_values.max(), kernel_steps)
kernel = scipy.stats.gaussian_kde(return_values)
density = kernel.evaluate(support)
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=.5, ls='--', zorder=10
)
ax.plot(
support, density,
color='k', lw=2, zorder=15
)
if alpha is not None:
ax.axvline(np.nanquantile(return_values, (1 - alpha) / 2), lw=1, color='k', ls='--')
ax.axvline(np.nanquantile(return_values, (1 + alpha) / 2), lw=1, color='k', ls='--')
if self.extremes_type == 'high':
ax.set_xlim(right=np.nanquantile(return_values, .999))
else:
ax.set_xlim(left=np.nanquantile(return_values, .001))
ax.set_title(f'{rp}-year Return Period Posterior Distribution')
ax.set_xlabel('Return value')
ax.set_ylabel('Probability density')
fig.tight_layout()
return fig, ax
else:
return return_values
def return_value(self, rp, **kwargs):
"""
Calculates return values for given return periods.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
float or array of floats
Return values for given return periods.
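        Examples
        --------
        Illustrative sketch only; assumes <eva> has been fitted (argument values are arbitrary):
            # MLE fit
            rv = eva.return_value(rp=[10, 50, 100])
            # MCMC fit - burn_in must be provided
            rv = eva.return_value(rp=[10, 50, 100], burn_in=200, estimate_method='parameter mode')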
"""
return self.isf(1 / rp / self.extremes_rate, **kwargs)
def confidence_interval(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals for given return periods.
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
                        Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
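        Examples
        --------
        Illustrative sketch only; assumes <eva> has been fitted (argument values are arbitrary):
            # MLE fit - Monte Carlo method
            ci_low, ci_top = eva.confidence_interval(rp=100, alpha=.95, method='Monte Carlo', k=1e4)
            # MCMC fit - burn_in must be provided
            ci_low, ci_top = eva.confidence_interval(rp=100, alpha=.95, burn_in=200)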
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
if self.fit_method == 'MLE':
method = kwargs.pop('method', 'Monte Carlo')
if method == 'Monte Carlo':
return self.__monte_carlo(rp=rp, alpha=alpha, **kwargs)
elif method == 'Delta':
return self.__delta(rp=rp, alpha=alpha, **kwargs)
elif method in ['Profile Likelihood']:
                # TODO - implement Profile Likelihood method
raise NotImplementedError(f'Method {method} not implemented')
else:
raise ValueError(f'Method {method} not recognized')
elif self.fit_method == 'MCMC':
            burn_in = kwargs.pop('burn_in')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return values for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Calculate quantiles for lower and upper confidence bounds for each return period
if np.isscalar(rp):
return (
np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
)
else:
return np.array(
[
[np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
[np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
def __monte_carlo(self, rp, alpha=.95, **kwargs):
"""
Runs the Monte Carlo confidence interval estimation method.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
k : int, optional
                Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
k = kwargs.pop('k', 1e4)
sampling_method = kwargs.pop('sampling_method', 'constant')
source = kwargs.pop('source', 'data')
assume_normality = kwargs.pop('assume_normality', False)
# TODO - implement a discard rule (discard bad samples)
# discard_rule = kwargs.pop('discard_rule', None)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
distribution_object = getattr(scipy.stats, self.distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
if self.extremes_type == 'low':
exceedances *= -1
# Sample from data case
if source == 'data':
if sampling_method == 'constant':
sample_size = len(self.extremes)
return_values = []
while len(return_values) < k:
sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
elif sampling_method == 'poisson':
return_values = []
while len(return_values) < k:
sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
sample_rate = sample_size / self.number_of_blocks
sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
elif sampling_method == 'jacknife':
sample_rate = (len(self.extremes) - 1) / self.number_of_blocks
return_values = []
for i in range(len(self.extremes)):
sample = np.delete(arr=exceedances, obj=i)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
raise ValueError(f'for <source=data> the sampling method must be <constant>, <poisson>, or <jacknife>,'
f' <{sampling_method}> was passed')
# Sample from distribution (parametric) case
elif source == 'parametric':
if sampling_method == 'constant':
sample_size = len(self.extremes)
return_values = []
while len(return_values) < k:
sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
elif sampling_method == 'poisson':
return_values = []
while len(return_values) < k:
sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
sample_rate = sample_size / self.number_of_blocks
sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
raise ValueError(f'for <source=parametric> the sampling method must be <constant> or <poisson>,'
f' <{sampling_method}> was passed')
else:
raise ValueError(f'source must be either <data> or <parametric>, <{source}> was passed')
# Estimate confidence bounds for sampled return values
return_values = np.array(return_values)
if np.isscalar(rp):
if assume_normality:
return scipy.stats.norm.interval(
alpha=alpha, loc=np.nanmean(return_values), scale=np.nanstd(return_values, ddof=1)
)
else:
return (
np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
)
else:
if assume_normality:
locations = np.array([np.nanmean(row) for row in return_values.T])
scales = np.array([np.nanstd(row, ddof=1) for row in return_values.T])
return np.transpose(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=scale)
for loc, scale in zip(locations, scales)
]
)
else:
return np.array(
[
[np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
[np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
]
)
def __delta(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals using the delta method. Assumes asymptotic normality.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10').
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
dx = kwargs.pop('dx', '1e-10')
precision = kwargs.pop('precision', 100)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
# Check if a custom distribution with mpmath backend is defined
if self.distribution_name in coastlib.stats.distributions.distributions:
distribution_object = getattr(coastlib.stats.distributions, self.distribution_name)
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
# Account for custom fit parameters (custom genextreme has negative shape in scipy)
if self.distribution_name == 'genextreme':
fit_parameters = self.fit_parameters * np.array([-1, 1, 1])
elif self.distribution_name in ['genpareto']:
fit_parameters = self.fit_parameters
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Generalized Pareto Distribution
if self.distribution_name == 'genpareto':
if self.scipy_fit_options != dict(floc=0):
raise ValueError(
f'Delta method for genpareto is implemented only for the case of '
f'fixed location parameter {dict(floc=0)}, '
                    f'{self.scipy_fit_options} does not satisfy this criterion'
)
with mpmath.workdps(precision):
                # Define modified log_likelihood function (only shape and scale, location is fixed)
def log_likelihood(*theta):
return mpmath.fsum(
[
mpmath.log(
coastlib.stats.distributions.genpareto.pdf(
x=x, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
) for x in exceedances
]
)
# Calculate covariance matrix of shape and scale
observed_information = -coastlib.math.derivatives.hessian(
func=log_likelihood, n=2, dx=dx, precision=precision,
coordinates=(fit_parameters[0], fit_parameters[2])
).astype(np.float64)
covariance = np.linalg.inv(observed_information)
# Modify covariance matrix to include uncertainty in threshold exceedance probability
modified_covariance = np.zeros((3, 3))
modified_covariance[1:, 1:] = covariance
# Probability of exceeding threshold for all observations
eta_0 = len(self.extremes) / len(self.dataframe)
# Number of observations per year
ny = len(self.dataframe) / self.number_of_blocks
modified_covariance[0][0] = eta_0 * (1 - eta_0) / len(self.dataframe)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(eta, *theta):
q = 1 / (rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2])
)
loc = np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
variance = np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(eta, *theta):
q = 1 / (_rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2]),
)
locs.append(
np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
)
variances.append(
np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
        # Generalized Extreme Value Distribution
elif self.distribution_name == 'genextreme':
if self.scipy_fit_options != {}:
raise ValueError(
f'Delta method for genextreme is implemented only for the case of '
                    f'unconstrained parameters (scipy_fit_options={dict()}), '
                    f'{self.scipy_fit_options} does not satisfy this criterion'
)
# Calculate observed information matrix (negative hessian of log_likelihood)
observed_information = distribution_object.observed_information(
exceedances, *fit_parameters, dx=dx, precision=precision
).astype(np.float64)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(*theta):
q = 1 / rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
loc = np.float64(scalar_function(*fit_parameters))
variance = np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(*theta):
q = 1 / _rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
locs.append(np.float64(scalar_function(*fit_parameters)))
variances.append(
np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
def generate_results(self, rp=None, alpha=.95, **kwargs):
"""
Generates a self.results dataframe with return values and, optionally, confidence intervals.
        Used to generate data for output and reporting purposes (e.g. run the self.results.to_excel() method) and to
produce a probability plot (summary).
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
By default is an array of return periods equally spaced on a log-scale from 0.001 to 1000.
alpha : float, optional
Confidence interval bounds (default=.95). Doesn't estimate confidence intervals if None.
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
ci_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
ci_kwargs
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
                            Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to
number of extracted extreme values
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
Creates a <self.results> dataframe with return values and, optionally, confidence intervals
for each given return period.
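        Examples
        --------
        Illustrative sketch only; the output path is hypothetical and argument values are arbitrary:
            # MLE fit
            eva.generate_results(rp=[1, 10, 100], alpha=.95, ci_kwargs=dict(method='Monte Carlo', k=1e4))
            # MCMC fit - rv_kwargs and ci_kwargs must be provided
            eva.generate_results(alpha=.95, rv_kwargs=dict(burn_in=200), ci_kwargs=dict(burn_in=200))
            eva.results.to_excel('eva_results.xlsx')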
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if rp is None:
rp = np.unique(
np.append(
np.logspace(-3, 3, 200),
[1/12, 7/365.2425, 1, 2, 5, 10, 25, 50, 100, 200, 250, 500, 1000]
)
)
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
if np.isscalar(rp):
rp = np.array([rp])
else:
rp = np.array(rp)
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
ci_kwargs = kwargs.pop('ci_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
ci_kwargs = kwargs.pop('ci_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return_values = self.return_value(rp, **rv_kwargs)
self.results = pd.DataFrame(
data=return_values, index=rp, columns=['Return Value']
)
self.results.index.name = 'Return Period'
if alpha is not None:
ci_lower, ci_upper = self.confidence_interval(rp=rp, alpha=alpha, **ci_kwargs)
if np.isscalar(ci_lower):
ci_lower, ci_upper = np.array([ci_lower]), np.array([ci_upper])
else:
ci_lower, ci_upper = np.array(ci_lower), np.array(ci_upper)
self.results[f'{alpha*100:.0f}% CI Lower'] = ci_lower
self.results[f'{alpha*100:.0f}% CI Upper'] = ci_upper
# Remove bad values from the results
if self.extremes_type == 'high':
mask = self.results['Return Value'].values >= self.extremes[self.column].values.min()
else:
mask = self.results['Return Value'].values <= self.extremes[self.column].values.max()
self.results = self.results[mask]
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=True
)
self.__update()
def pdf(self, x, **kwargs):
"""
Estimates probability density at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of probability densities at <x>.
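        Examples
        --------
        Illustrative sketch only; the same pattern applies to self.cdf, self.ppf, and self.isf
        (argument values are arbitrary):
            # MLE fit
            density = eva.pdf(x=5.0)
            # MCMC fit - burn_in must be provided
            density = eva.pdf(x=5.0, burn_in=200)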
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='pdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='pdf', **kwargs)
def cdf(self, x, **kwargs):
"""
Estimates cumulative probability at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the cumulative probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of cumulative probability at <x>.
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='cdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='cdf', **kwargs)
def ppf(self, q, **kwargs):
"""
        Estimates ppf (inverse cdf or quantile function) at quantile <q> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the ppf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
        Depending on q, either estimate or array of estimates of ppf at <q>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='ppf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='ppf', **kwargs)
def isf(self, q, **kwargs):
"""
        Estimates isf (inverse survival or upper quantile function) at quantile <q> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the isf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
        Depending on q, either estimate or array of estimates of isf at <q>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='isf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='isf', **kwargs)
def ___get_property(self, x, prop, **kwargs):
"""
Estimates property (pdf, cdf, ppf, etc.) at value <x> using the fitted distribution parameters.
Parameters
----------
x : float or iterable
Value at which the property is estimated.
prop : str
Scipy property to be estimated (pdf, ppf, isf, cdf, rvs, etc.).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of property at <x>
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
distribution_object = getattr(scipy.stats, self.distribution_name)
property_function = getattr(distribution_object, prop)
if not np.isscalar(x):
x = np.array(x)
if self.fit_method == 'MLE':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return property_function(x, *self.fit_parameters)
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
estimate_method = kwargs.pop('estimate_method', 'parameter mode')
if estimate_method not in ['parameter mode', 'value mode', 'value quantile']:
raise ValueError(f'Estimate method <{estimate_method}> not recognized')
if estimate_method in ['parameter mode', 'value mode']:
kernel_steps = kwargs.pop('kernel_steps', 1000)
else:
kernel_steps = None
if estimate_method == 'value quantile':
quantile = kwargs.pop('quantile', .5)
else:
quantile = None
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Estimate mode of each parameter as peaks of gaussian kernel.
# Use estimated parameters to calculate property function
if estimate_method == 'parameter mode':
parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
return property_function(x, *parameters)
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
property_samples = np.array([property_function(x, *_theta) for _theta in samples])
# Estimate property function as mode of distribution of property value
# for all samples in self.mcmc_chain as peaks of gaussian kernel.
if estimate_method == 'value mode':
if np.isscalar(x):
if np.all(np.isnan(property_samples)):
return np.nan
else:
ps_filtered = property_samples[~np.isnan(property_samples)]
if np.all(ps_filtered == ps_filtered[0]):
return np.nan
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
return support[density.argmax()]
else:
estimates = []
for ps in property_samples.T:
if np.all(np.isnan(ps)):
estimates.append(np.nan)
else:
ps_filtered = ps[~np.isnan(ps)]
if np.all(ps_filtered == ps_filtered[0]):
estimates.append(np.nan)
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
estimates.append(support[density.argmax()])
return np.array(estimates)
# Estimate property function as quantile of distribution of property value
# for all samples in self.mcmc_chain.
elif estimate_method == 'value quantile':
if np.isscalar(quantile):
if quantile <= 0 or quantile > 1:
raise ValueError(f'Quantile must be in range (0,1], quantile={quantile} was passed')
else:
raise ValueError(f'Quantile must be scalar, {type(quantile)} was passed')
if np.isscalar(x):
return np.nanquantile(a=property_samples, q=quantile)
else:
return np.array(
[
np.nanquantile(a=row, q=quantile) for row in property_samples.T
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
def plot_summary(self, support=None, bins=10, plotting_position='Weibull', **kwargs):
"""
Plots projected return values, pdf, and cdf values against observed.
Parameters
----------
support : array_like, optional
Values used to estimate pdf and cdf. By default, 100 linearly spaced values from the minimum to the maximum extreme value.
bins : int, optional
Number of bins used to plot cdf and pdf histograms (default=10).
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples generated before the chain converges should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
tuple(fig, ax1, ax2, ax3)
Figure, return value, pdf, cdf axes.
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['results']:
raise UnboundLocalError('No data found. Generate results by running the self.generate_results() method first')
if support is None:
support = np.linspace(
self.extremes[self.column].values.min(), self.extremes[self.column].values.max(), 100
)
if self.fit_method == 'MCMC':
rv_kwargs = kwargs.pop('rv_kwargs')
else:
rv_kwargs = {}
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return_period = self.__get_return_period(plotting_position=plotting_position)
with plt.style.context('bmh'):
# Setup canvas
fig = plt.figure(figsize=(12, 8))
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
ax2 = plt.subplot2grid((2, 2), (1, 0))
ax3 = plt.subplot2grid((2, 2), (1, 1))
# Plot return values
ax1.set_title('Return Value Plot')
ax1.set_ylabel(f'{self.column}')
ax1.set_xlabel(f'Return period')
ax1.plot(
self.results.index, self.results['Return Value'].values,
color='k', lw=2, zorder=15, label='Central estimate'
)
if len(self.results.columns) == 3:
ax1.plot(
self.results.index, self.results[self.results.columns[1]].values,
ls='--', color='k', lw=.5, zorder=10
)
ax1.plot(
self.results.index, self.results[self.results.columns[2]].values,
ls='--', color='k', lw=.5, zorder=10
)
ax1.fill_between(
self.results.index, self.results[self.results.columns[1]],
self.results[self.results.columns[2]],
alpha=.1, color='k',
label=f'{self.results.columns[1].split("%")[0]}% confidence interval', zorder=5
)
points = ax1.scatter(
return_period, self.extremes[self.column].values,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=15,
label=f'Observed extreme event\n{plotting_position} plotting position'
)
ax1.semilogx()
ax1.grid(b=True, which='minor', axis='x')
ax1.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%.0f'))
ax1.legend()
annot = ax1.annotate(
"", xy=(self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean()),
xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax1.scatter(
self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean(),
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_period[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax1:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
# Plot PDF
ax2.set_ylabel('Probability density')
ax2.set_xlabel(f'{self.column}')
ax2.hist(
self.extremes[self.column].values, bins=bins, density=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax2.hist(
self.extremes[self.column].values, bins=bins, density=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
ax2.plot(
support, self.pdf(support, **rv_kwargs),
color='k', lw=2, zorder=15
)
ax2.scatter(
self.extremes[self.column].values, [0] * len(self.extremes),
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
)
ax2.set_ylim(0)
# Plot CDF
ax3.set_ylabel('Cumulative probability')
ax3.set_xlabel(f'{self.column}')
if self.extremes_type == 'high':
ax3.hist(
self.extremes[self.column], bins=bins, density=True, cumulative=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax3.hist(
self.extremes[self.column], bins=bins, density=True, cumulative=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
else:
_, boundaries = np.histogram(self.extremes[self.column].values, bins)
centers = np.array([(boundaries[i] + boundaries[i - 1]) / 2 for i in range(1, len(boundaries))])
densities = []
for i, c in enumerate(centers):
mask = self.extremes[self.column].values >= boundaries[i]
densities.append(np.sum(mask) / len(self.extremes))
ax3.bar(
centers, densities, width=.9*(boundaries[1]-boundaries[0]),
color='k', alpha=0.2, zorder=5
)
ax3.bar(
centers, densities, width=.9*(boundaries[1]-boundaries[0]),
color='k', edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
ax3.plot(
support, self.cdf(support, **rv_kwargs),
color='k', lw=2, zorder=15
)
ax3.scatter(
self.extremes[self.column].values, [0] * len(self.extremes),
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
)
ax3.set_ylim(0)
fig.tight_layout()
return fig, ax1, ax2, ax3
def plot_qq(self, k, plot=True, plotting_position='Weibull', quantiles=True, **kwargs):
"""
Plots theoretical quantiles (probabilities) against observed quantiles (probabilities).
Parameters
----------
k : int
Number of estimated (non-fixed) parameters in the distribution.
plot : bool, optional
Generates plot if True, returns data if False (default=True).
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
quantiles : bool, optional
If True, produces a quantile plot (Q-Q, ppf) (default=True).
If False, produces a probability plot (P-P, cdf).
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples generated before the chain converges should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False :
tuple((theoretical, observed), (r, p))
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
ecdf = self.__get_return_period(plotting_position=plotting_position, return_cdf=True)
return_periods = self.__get_return_period(plotting_position=plotting_position)
# Estimate theoretical values based on returned quantiles
if quantiles:
theoretical = self.ppf(ecdf, **rv_kwargs)
else:
theoretical = self.cdf(self.extremes[self.column].values, **rv_kwargs)
theoretical[np.isinf(theoretical)] = np.nan
mask = ~np.isnan(theoretical)
if quantiles:
r, p = scipy.stats.pearsonr(self.extremes[self.column].values[mask], theoretical[mask])
else:
r, p = scipy.stats.pearsonr(ecdf, theoretical[mask])
r = np.sqrt(
1 - (1 - r ** 2) * (len(theoretical[mask]) - 1) / (len(theoretical[mask]) - (k + 1))
)
if plot:
with plt.style.context('bmh'):
# Quantile plot
if quantiles:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, self.extremes[self.column].values,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Quantile Plot')
plt.xlabel(r'Theoretical quantiles')
plt.ylabel(rf'Observed quantiles, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0]+self.threshold, self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
# Probability plot
else:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, ecdf,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Probability Plot')
plt.xlabel(r'Theoretical probabilities')
plt.ylabel(rf'Observed probabilities, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0], self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
if quantiles:
return (
(theoretical, self.extremes[self.column].values),
(r, p)
)
else:
return (
(theoretical, ecdf),
(r, p)
)
def goodness_of_fit(self, method, **kwargs):
"""
Calculates various goodness-of-fit statistics for the selected model.
Parameters
----------
method : str
Goodness of fit statistic method.
Supported methods:
'AIC' - Akaike information criterion
Lower value corresponds to a better fit.
see https://en.wikipedia.org/wiki/Akaike_information_criterion
'log-likelihood' - log-likelihood
Higher value corresponds to a better fit.
'KS' - Kolmogorov-Smirnov test
Null hypothesis - both samples come from the same distribution.
If p<0.05 => reject Null hypothesis with p-level of confidence.
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html
'chi-square' - Chi-Square test
Null hypothesis - both samples come from the same distribution.
Calculates observed counts for given quantile ranges and compares them to theoretical (expected) counts.
If p<0.05 => reject Null hypothesis with p-level of confidence.
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples generated before the chain converges should be discarded.
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
for AIC
order : int, optional
Order of AIC (1 for regular, 2 for small samples) (default=2).
k : int
Number of parameters estimated by the model (fixed parameters don't count)
for KS
mode : str, optional
See scipy docs (default='approx').
alternative : str, optional
See scipy docs (default='two-sided').
for chi-square
chi_quantiles : int, optional
Number of equal slices (quantiles) into which observed data is split
to calculate the statistic (default=4).
k : int
Number of parameters estimated by the model (fixed parameters don't count)
Returns
-------
if method = 'log-likelihood' : float, log-likelihood
if method = 'AIC' : float, AIC statistic
if method = 'KS' : tuple(statistic, p-value)
if method = 'chi-square' : tuple(statistic, p-value)
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method == 'MLE':
fit_parameters = self.fit_parameters
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
kernel_steps = kwargs.pop('kernel_steps', 1000)
fit_parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
else:
raise RuntimeError(f'Unexpected fit_method {self.fit_method}')
distribution_object = getattr(scipy.stats, self.distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
log_likelihood = np.sum(
distribution_object.logpdf(exceedances, *fit_parameters)
)
if method == 'log-likelihood':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return log_likelihood
elif method == 'AIC':
order = kwargs.pop('order', 2)
k = kwargs.pop('k')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
aic = 2 * k - 2 * log_likelihood
if order == 1:
return aic
elif order == 2:
return aic + (2 * k ** 2 + 2 * k) / (len(self.extremes) - k - 1)
else:
raise ValueError(f'order must be 1 or 2, {order} was passed')
elif method == 'KS':
mode = kwargs.pop('mode', 'approx')
alternative = kwargs.pop('alternative', 'two-sided')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
exceedances = self.extremes[self.column].values - self.threshold
if self.extremes_type == 'low':
exceedances *= -1
ks, p = scipy.stats.kstest(
rvs=exceedances, cdf=distribution_object.cdf, args=fit_parameters,
alternative=alternative, mode=mode
)
return ks, p
elif method == 'chi-square':
chi_quantiles = kwargs.pop('chi_quantiles', 4)
k = kwargs.pop('k')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
chi_quantile_ranges = [1 / chi_quantiles * (i + 1) for i in np.arange(-1, chi_quantiles)]
observed_counts, expected_counts = [], []
for i in range(chi_quantiles):
bot = np.nanquantile(
self.extremes[self.column].values,
chi_quantile_ranges[i]
)
top = np.nanquantile(
self.extremes[self.column].values,
chi_quantile_ranges[i + 1]
)
if i + 1 == chi_quantiles:
observed_counts.append(
len(
self.extremes[
(self.extremes[self.column] >= bot)
& (self.extremes[self.column] <= top)
]
)
)
else:
observed_counts.append(
len(
self.extremes[
(self.extremes[self.column] >= bot)
& (self.extremes[self.column] < top)
]
)
)
expected_counts.append(
len(self.extremes) * (self.cdf(top) - self.cdf(bot))
)
if min(observed_counts) <= 5 or min(expected_counts) <= 5:
raise ValueError(f'Too few observations in observed counts {min(observed_counts)} '
f'or expected counts {min(expected_counts):.0f}, reduce chi_quantiles')
cs, p = scipy.stats.chisquare(f_obs=observed_counts, f_exp=expected_counts, ddof=k)
return cs, p
else:
raise ValueError(f'Method {method} not recognized')
if __name__ == "__main__":
# Load data and initialize EVA
import os
df = pd.read_csv(
os.path.join(os.getcwd(), r'test data\Battery_residuals.csv'),
index_col=0, parse_dates=True
)
self = EVA(dataframe=df, column='Residuals (ft)', block_size=365.25, gap_length=24)
# Set up test parameters
etype = 'high'
extremes_method = 'POT'
_method = 'MCMC'
mle_ci = 'Delta'
if extremes_method == 'POT':
_distribution = 'genpareto'
elif extremes_method == 'BM':
_distribution = 'genextreme'
else:
raise RuntimeError
# Run a series of methods to assist in finding optimal threshold
if extremes_method == 'POT':
if etype == 'high':
self.plot_mean_residual_life(
thresholds=np.arange(2, 8, .01), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='high'
)
self.plot_parameter_stability(
thresholds=np.arange(3, 8, .05), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='high'
)
elif etype == 'low':
self.plot_mean_residual_life(
thresholds=np.arange(-8, -2, .01), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='low'
)
self.plot_parameter_stability(
thresholds=np.arange(-8, -2.5, .05), r=24*7, alpha=.95,
adjust_threshold=True, limit=20, extremes_type='low'
)
# Extract extreme values
if extremes_method == 'BM':
self.get_extremes(method='BM', plotting_position='Weibull', extremes_type=etype)
elif extremes_method == 'POT':
if etype == 'high':
self.get_extremes(method='POT', threshold=3, r=24*7, plotting_position='Weibull', extremes_type='high')
elif etype == 'low':
self.get_extremes(method='POT', threshold=-2.8, r=24*7, plotting_position='Weibull', extremes_type='low')
self.plot_extremes()
# Test independence of POT extremes
if extremes_method == 'POT':
self.test_extremes(method='autocorrelation')
self.test_extremes(method='lag plot', lag=1)
print(self.test_extremes(method='runs test', alpha=0.05))
# Fit distribution
if _method == 'MLE':
if _distribution == 'genpareto':
# Shape (f0) and location (floc) are both 0 => equivalent to exponential distribution (expon with floc=0)
self.fit(distribution_name=_distribution, fit_method='MLE', scipy_fit_options=dict(floc=0))
elif _distribution == 'genextreme':
self.fit(distribution_name=_distribution, fit_method='MLE')
elif _method == 'MCMC':
self.fit(
distribution_name=_distribution, fit_method='MCMC',
nsamples=1000, nwalkers=200, starting_bubble=.01
)
# Trace plot
if _distribution == 'genpareto':
fig_trace, axes_trace = self.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\sigma$'])
elif _distribution == 'genextreme':
fig_trace, axes_trace = self.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'])
if _distribution == 'genpareto':
fig_corner = self.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\sigma$'], smooth=1)
elif _distribution == 'genextreme':
fig_corner = self.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'], smooth=1)
# Test quality of fit
if _method == 'MLE':
print(self.goodness_of_fit(method='AIC', k=1))
self.plot_qq(k=2, plotting_position='Weibull', quantiles=True)
self.plot_qq(k=2, plotting_position='Weibull', quantiles=False)
else:
_burn_in = 200
print(self.goodness_of_fit(method='AIC', k=2, burn_in=_burn_in, kernel_steps=100))
self.plot_qq(
k=2, plotting_position='Weibull', quantiles=True,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
)
self.plot_qq(
k=2, plotting_position='Weibull', quantiles=False,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
)
# Generate results
if _method == 'MCMC':
_burn_in = 200
self.generate_results(
alpha=.95,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100),
ci_kwargs=dict(burn_in=_burn_in)
)
elif _method == 'MLE':
if mle_ci == 'Monte Carlo':
self.generate_results(
alpha=.95,
ci_kwargs=dict(
method='Monte Carlo', k=100, source='data', sampling_method='constant', assume_normality=False
)
)
elif mle_ci == 'Delta':
self.generate_results(alpha=.95, ci_kwargs=dict(method='Delta', dx='1e-10', precision=100))
# Plot extremes return plot
if _method == 'MCMC':
_burn_in = 200
self.plot_summary(
bins=10, plotting_position='Gringorten',
rv_kwargs=dict(burn_in=200, estimate_method='parameter mode', kernel_steps=100)
)
elif _method == 'MLE':
self.plot_summary(bins=10, plotting_position='Gringorten')
| gpl-3.0 | -9,124,196,757,702,102,000 | 45.572071 | 120 | 0.527463 | false | 4.534512 | false | false | false |
harikishen/addons-server | src/olympia/editors/tests/test_sql_model.py | 1 | 10512 | # -*- coding: utf-8 -*-
"""Tests for SQL Model.
Currently these tests are coupled tightly with MySQL
"""
from datetime import datetime
from django.db import connection, models
from django.db.models import Q
import pytest
from olympia.amo.tests import BaseTestCase
from olympia.editors.sql_model import RawSQLModel
def execute_all(statements):
with connection.cursor() as cursor:
for sql in statements:
if not sql.strip():
continue
cursor.execute(sql, [])
class Summary(RawSQLModel):
category = models.CharField(max_length=255)
total = models.IntegerField()
latest_product_date = models.DateTimeField()
def base_query(self):
return {
'select': {
'category': 'c.name',
'total': 'count(*)',
'latest_product_date': 'max(p.created)'
},
'from': [
'sql_model_test_product p',
'join sql_model_test_product_cat x on x.product_id=p.id',
'join sql_model_test_cat c on x.cat_id=c.id'],
'where': [],
'group_by': 'category'
}
class ProductDetail(RawSQLModel):
product = models.CharField(max_length=255)
category = models.CharField(max_length=255)
def base_query(self):
return {
'select': {
'product': 'p.name',
'category': 'c.name'
},
'from': [
'sql_model_test_product p',
'join sql_model_test_product_cat x on x.product_id=p.id',
'join sql_model_test_cat c on x.cat_id=c.id'],
'where': []
}
class TestSQLModel(BaseTestCase):
@pytest.fixture(autouse=True)
def setup(self, request):
sql = """
create table if not exists sql_model_test_product (
id int(11) not null auto_increment primary key,
name varchar(255) not null,
created datetime not null
);
create table if not exists sql_model_test_cat (
id int(11) not null auto_increment primary key,
name varchar(255) not null
);
create table if not exists sql_model_test_product_cat (
id int(11) not null auto_increment primary key,
cat_id int(11) not null references sql_model_test_cat (id),
product_id int(11) not null references sql_model_test_product (id)
);
insert into sql_model_test_product (id, name, created)
values (1, 'defilbrilator', UTC_TIMESTAMP());
insert into sql_model_test_cat (id, name)
values (1, 'safety');
insert into sql_model_test_product_cat (product_id, cat_id)
values (1, 1);
insert into sql_model_test_product (id, name, created)
values (2, 'life jacket', UTC_TIMESTAMP());
insert into sql_model_test_product_cat (product_id, cat_id)
values (2, 1);
insert into sql_model_test_product (id, name, created)
values (3, 'snake skin jacket',UTC_TIMESTAMP());
insert into sql_model_test_cat (id, name)
values (2, 'apparel');
insert into sql_model_test_product_cat (product_id, cat_id)
values (3, 2);
""".split(';')
def teardown():
try:
sql = """
drop table if exists sql_model_test_product_cat;
drop table if exists sql_model_test_cat;
drop table if exists sql_model_test_product;
""".split(';')
execute_all(sql)
except Exception:
pass # No failing here.
teardown()
execute_all(sql)
request.addfinalizer(teardown)
def test_all(self):
assert sorted([s.category for s in Summary.objects.all()]) == (
['apparel', 'safety'])
def test_count(self):
assert Summary.objects.all().count() == 2
def test_one(self):
c = Summary.objects.all().order_by('category')[0]
assert c.category == 'apparel'
def test_get_by_index(self):
qs = Summary.objects.all().order_by('category')
assert qs[0].category == 'apparel'
assert qs[1].category == 'safety'
def test_get(self):
c = Summary.objects.all().having('total =', 1).get()
assert c.category == 'apparel'
def test_get_no_object(self):
with self.assertRaises(Summary.DoesNotExist):
Summary.objects.all().having('total =', 999).get()
def test_get_many(self):
with self.assertRaises(Summary.MultipleObjectsReturned):
Summary.objects.all().get()
def test_slice1(self):
qs = Summary.objects.all()[0:1]
assert [c.category for c in qs] == ['apparel']
def test_slice2(self):
qs = Summary.objects.all()[1:2]
assert [c.category for c in qs] == ['safety']
def test_slice3(self):
qs = Summary.objects.all()[:2]
assert sorted([c.category for c in qs]) == ['apparel', 'safety']
def test_slice4(self):
qs = Summary.objects.all()[0:]
assert sorted([c.category for c in qs]) == ['apparel', 'safety']
def test_slice5(self):
assert ['defilbrilator'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[0:1]]
assert ['life jacket'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[1:2]]
assert ['snake skin jacket'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[2:3]]
def test_negative_slices_not_supported(self):
with self.assertRaises(IndexError):
Summary.objects.all()[:-1]
def test_order_by(self):
c = Summary.objects.all().order_by('category')[0]
assert c.category == 'apparel'
c = Summary.objects.all().order_by('-category')[0]
assert c.category == 'safety'
def test_order_by_alias(self):
c = ProductDetail.objects.all().order_by('product')[0]
assert c.product == 'defilbrilator'
c = ProductDetail.objects.all().order_by('-product')[0]
assert c.product == 'snake skin jacket'
def test_order_by_injection(self):
with self.assertRaises(ValueError):
Summary.objects.order_by('category; drop table foo;')[0]
def test_filter(self):
c = Summary.objects.all().filter(category='apparel')[0]
assert c.category == 'apparel'
def test_filter_raw_equals(self):
c = Summary.objects.all().filter_raw('category =', 'apparel')[0]
assert c.category == 'apparel'
def test_filter_raw_in(self):
qs = Summary.objects.all().filter_raw('category IN',
['apparel', 'safety'])
assert [c.category for c in qs] == ['apparel', 'safety']
def test_filter_raw_non_ascii(self):
uni = 'フォクすけといっしょ'.decode('utf8')
qs = (Summary.objects.all().filter_raw('category =', uni)
.filter_raw(Q('category =', uni) | Q('category !=', uni)))
assert [c.category for c in qs] == []
def test_combining_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter(Q(product='life jacket') | Q(product='defilbrilator')))
assert sorted([r.product for r in qs]) == [
'defilbrilator', 'life jacket']
def test_combining_raw_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('product =', 'life jacket') |
Q('product =', 'defilbrilator')))
assert sorted([r.product for r in qs]) == [
'defilbrilator', 'life jacket']
def test_nested_raw_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('category =', 'apparel',
'product =', 'defilbrilator') |
Q('product =', 'life jacket')))
assert sorted([r.product for r in qs]) == ['life jacket']
def test_crazy_nesting(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('category =', 'apparel',
'product =', 'defilbrilator',
Q('product =', 'life jacket') |
Q('product =', 'snake skin jacket'),
'category =', 'safety')))
# print qs.as_sql()
assert sorted([r.product for r in qs]) == ['life jacket']
def test_having_gte(self):
c = Summary.objects.all().having('total >=', 2)[0]
assert c.category == 'safety'
def test_invalid_raw_filter_spec(self):
with self.assertRaises(ValueError):
Summary.objects.all().filter_raw(
"""category = 'apparel'; drop table foo;
select * from foo where category = 'apparel'""",
'apparel')[0]
def test_filter_field_injection(self):
f = ("c.name = 'apparel'; drop table foo; "
"select * from sql_model_test_cat where c.name = 'apparel'")
with self.assertRaises(ValueError):
c = Summary.objects.all().filter(**{f: 'apparel'})[0]
assert c.category == 'apparel'
def test_filter_value_injection(self):
v = ("'apparel'; drop table foo; "
"select * from sql_model_test_cat where c.name")
query = Summary.objects.all().filter(**{'c.name': v})
try:
query[0]
except IndexError:
pass
# NOTE: this reaches into MySQLdb's cursor :(
executed = query._cursor.cursor._executed
assert "c.name = '\\'apparel\\'; drop table foo;" in executed, (
'Expected query to be escaped: %s' % executed)
def check_type(self, val, types):
assert isinstance(val, types), (
'Unexpected type: %s for %s' % (type(val), val))
def test_types(self):
row = Summary.objects.all().order_by('category')[0]
self.check_type(row.category, unicode)
self.check_type(row.total, (int, long))
self.check_type(row.latest_product_date, datetime)
def test_values(self):
row = Summary.objects.all().order_by('category')[0]
assert row.category == 'apparel'
assert row.total == 1
assert row.latest_product_date.timetuple()[0:3] == (
datetime.utcnow().timetuple()[0:3])
| bsd-3-clause | -8,566,747,079,214,111,000 | 35.430556 | 78 | 0.551754 | false | 3.801449 | true | false | false |
takmid/inasafe | safe_qgis/impact_functions_doc_base.py | 1 | 9760 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'impact_functions_doc_base.ui'
#
# Created: Fri Sep 14 14:43:14 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ImpactFunctionsDocBase(object):
def setupUi(self, ImpactFunctionsDocBase):
ImpactFunctionsDocBase.setObjectName(_fromUtf8("ImpactFunctionsDocBase"))
ImpactFunctionsDocBase.resize(821, 733)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/inasafe/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ImpactFunctionsDocBase.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(ImpactFunctionsDocBase)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.myButtonBox = QtGui.QDialogButtonBox(ImpactFunctionsDocBase)
self.myButtonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.myButtonBox.setAutoFillBackground(False)
self.myButtonBox.setOrientation(QtCore.Qt.Horizontal)
self.myButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Close|QtGui.QDialogButtonBox.Help|QtGui.QDialogButtonBox.Reset)
self.myButtonBox.setCenterButtons(False)
self.myButtonBox.setObjectName(_fromUtf8("myButtonBox"))
self.gridLayout.addWidget(self.myButtonBox, 1, 1, 1, 1)
self.gridLayoutMain = QtGui.QGridLayout()
self.gridLayoutMain.setHorizontalSpacing(0)
self.gridLayoutMain.setObjectName(_fromUtf8("gridLayoutMain"))
self.label_title = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_title.sizePolicy().hasHeightForWidth())
self.label_title.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_title.setFont(font)
self.label_title.setObjectName(_fromUtf8("label_title"))
self.gridLayoutMain.addWidget(self.label_title, 1, 0, 1, 1)
self.label_id = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_id.sizePolicy().hasHeightForWidth())
self.label_id.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_id.setFont(font)
self.label_id.setObjectName(_fromUtf8("label_id"))
self.gridLayoutMain.addWidget(self.label_id, 1, 1, 1, 1)
self.label_subcategory = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_subcategory.sizePolicy().hasHeightForWidth())
self.label_subcategory.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_subcategory.setFont(font)
self.label_subcategory.setObjectName(_fromUtf8("label_subcategory"))
self.gridLayoutMain.addWidget(self.label_subcategory, 1, 3, 1, 1)
self.label_category = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_category.sizePolicy().hasHeightForWidth())
self.label_category.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_category.setFont(font)
self.label_category.setObjectName(_fromUtf8("label_category"))
self.gridLayoutMain.addWidget(self.label_category, 1, 2, 1, 1)
self.label_layertype = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_layertype.sizePolicy().hasHeightForWidth())
self.label_layertype.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_layertype.setFont(font)
self.label_layertype.setObjectName(_fromUtf8("label_layertype"))
self.gridLayoutMain.addWidget(self.label_layertype, 1, 4, 1, 1)
self.comboBox_id = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_id.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.comboBox_id.setObjectName(_fromUtf8("comboBox_id"))
self.gridLayoutMain.addWidget(self.comboBox_id, 3, 1, 1, 1)
self.comboBox_title = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_title.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.comboBox_title.setMinimumContentsLength(0)
self.comboBox_title.setObjectName(_fromUtf8("comboBox_title"))
self.gridLayoutMain.addWidget(self.comboBox_title, 3, 0, 1, 1)
self.comboBox_category = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_category.setObjectName(_fromUtf8("comboBox_category"))
self.gridLayoutMain.addWidget(self.comboBox_category, 3, 2, 1, 1)
self.label_unit = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_unit.sizePolicy().hasHeightForWidth())
self.label_unit.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_unit.setFont(font)
self.label_unit.setObjectName(_fromUtf8("label_unit"))
self.gridLayoutMain.addWidget(self.label_unit, 1, 6, 1, 1)
self.label_datatype = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_datatype.sizePolicy().hasHeightForWidth())
self.label_datatype.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_datatype.setFont(font)
self.label_datatype.setObjectName(_fromUtf8("label_datatype"))
self.gridLayoutMain.addWidget(self.label_datatype, 1, 5, 1, 1)
self.comboBox_subcategory = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_subcategory.setObjectName(_fromUtf8("comboBox_subcategory"))
self.gridLayoutMain.addWidget(self.comboBox_subcategory, 3, 3, 1, 1)
self.comboBox_layertype = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_layertype.setObjectName(_fromUtf8("comboBox_layertype"))
self.gridLayoutMain.addWidget(self.comboBox_layertype, 3, 4, 1, 1)
self.comboBox_datatype = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_datatype.setObjectName(_fromUtf8("comboBox_datatype"))
self.gridLayoutMain.addWidget(self.comboBox_datatype, 3, 5, 1, 1)
self.comboBox_unit = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_unit.setObjectName(_fromUtf8("comboBox_unit"))
self.gridLayoutMain.addWidget(self.comboBox_unit, 3, 6, 1, 1)
self.webView = QtWebKit.QWebView(ImpactFunctionsDocBase)
self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
self.webView.setObjectName(_fromUtf8("webView"))
self.gridLayoutMain.addWidget(self.webView, 4, 0, 1, 7)
self.gridLayout.addLayout(self.gridLayoutMain, 0, 1, 1, 1)
self.retranslateUi(ImpactFunctionsDocBase)
QtCore.QObject.connect(self.myButtonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ImpactFunctionsDocBase.reject)
QtCore.QMetaObject.connectSlotsByName(ImpactFunctionsDocBase)
def retranslateUi(self, ImpactFunctionsDocBase):
ImpactFunctionsDocBase.setWindowTitle(QtGui.QApplication.translate("ImpactFunctionsDocBase", "InaSAFE Impact Functions", None, QtGui.QApplication.UnicodeUTF8))
self.label_title.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Title", None, QtGui.QApplication.UnicodeUTF8))
self.label_id.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "ID", None, QtGui.QApplication.UnicodeUTF8))
self.label_subcategory.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Subcategory", None, QtGui.QApplication.UnicodeUTF8))
self.label_category.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Category", None, QtGui.QApplication.UnicodeUTF8))
self.label_layertype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Layer Type", None, QtGui.QApplication.UnicodeUTF8))
self.label_unit.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Unit", None, QtGui.QApplication.UnicodeUTF8))
self.label_datatype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Data Type", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
import resources_rc
| gpl-3.0 | 9,172,954,171,871,963,000 | 57.795181 | 167 | 0.727254 | false | 3.837987 | false | false | false |
robertnishihara/ray | python/ray/tune/examples/ax_example.py | 1 | 2352 | """This test checks that AxSearch is functional.
It also checks that it is usable with a separate scheduler.
"""
import numpy as np
import time
import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.ax import AxSearch
def hartmann6(x):
alpha = np.array([1.0, 1.2, 3.0, 3.2])
A = np.array([
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
])
P = 10**(-4) * np.array([
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
])
y = 0.0
for j, alpha_j in enumerate(alpha):
t = 0
for k in range(6):
t += A[j, k] * ((x[k] - P[j, k])**2)
y -= alpha_j * np.exp(-t)
return y
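# Note (illustrative): the six-dimensional Hartmann function is a standard minimization
# benchmark; its global minimum is approximately -3.32237.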
def easy_objective(config):
for i in range(config["iterations"]):
x = np.array([config.get("x{}".format(i + 1)) for i in range(6)])
tune.report(
timesteps_total=i,
hartmann6=hartmann6(x),
l2norm=np.sqrt((x**2).sum()))
time.sleep(0.02)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
tune_kwargs = {
"num_samples": 10 if args.smoke_test else 50,
"config": {
"iterations": 100,
"x1": tune.uniform(0.0, 1.0),
"x2": tune.uniform(0.0, 1.0),
"x3": tune.uniform(0.0, 1.0),
"x4": tune.uniform(0.0, 1.0),
"x5": tune.uniform(0.0, 1.0),
"x6": tune.uniform(0.0, 1.0),
},
"stop": {
"timesteps_total": 100
}
}
algo = AxSearch(
max_concurrent=4,
metric="hartmann6",
mode="min",
parameter_constraints=["x1 + x2 <= 2.0"], # Optional.
outcome_constraints=["l2norm <= 1.25"], # Optional.
)
scheduler = AsyncHyperBandScheduler(metric="hartmann6", mode="min")
tune.run(
easy_objective,
name="ax",
search_alg=algo,
scheduler=scheduler,
**tune_kwargs)
| apache-2.0 | -7,999,556,598,531,771,000 | 26.670588 | 79 | 0.517857 | false | 2.980989 | false | false | false |
wesm/ibis | ibis/expr/datatypes.py | 1 | 19843 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple, OrderedDict
import six
import ibis.common as com
import ibis.util as util
class Schema(object):
"""
Holds table schema information
"""
def __init__(self, names, types):
if not isinstance(names, list):
names = list(names)
self.names = names
self.types = [validate_type(x) for x in types]
self._name_locs = dict((v, i) for i, v in enumerate(self.names))
if len(self._name_locs) < len(self.names):
raise com.IntegrityError('Duplicate column names')
def __repr__(self):
space = 2 + max(map(len, self.names))
return "ibis.Schema {{{0}\n}}".format(
util.indent(
''.join(
'\n{0}{1}'.format(name.ljust(space), str(tipo))
for name, tipo in zip(self.names, self.types)
),
2
)
)
def __len__(self):
return len(self.names)
def __iter__(self):
return iter(self.names)
def __contains__(self, name):
return name in self._name_locs
def __getitem__(self, name):
return self.types[self._name_locs[name]]
def delete(self, names_to_delete):
for name in names_to_delete:
if name not in self:
raise KeyError(name)
new_names, new_types = [], []
for name, type_ in zip(self.names, self.types):
if name in names_to_delete:
continue
new_names.append(name)
new_types.append(type_)
return Schema(new_names, new_types)
@classmethod
def from_tuples(cls, values):
if not isinstance(values, (list, tuple)):
values = list(values)
if len(values):
names, types = zip(*values)
else:
names, types = [], []
return Schema(names, types)
@classmethod
def from_dict(cls, values):
names = list(values.keys())
types = values.values()
return Schema(names, types)
def equals(self, other, cache=None):
return self.names == other.names and self.types == other.types
def __eq__(self, other):
return self.equals(other)
def get_type(self, name):
return self.types[self._name_locs[name]]
def append(self, schema):
names = self.names + schema.names
types = self.types + schema.types
return Schema(names, types)
def items(self):
return zip(self.names, self.types)
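# Minimal usage sketch (hypothetical values, not part of the original module):
# schema = Schema.from_tuples([('user_id', 'int64'), ('name', 'string')])
# schema.get_type('name')   # -> String instance
# 'user_id' in schema       # -> True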
class HasSchema(object):
"""
Base class representing a structured dataset with a well-defined
schema.
Base implementation is for tables that do not reference a particular
concrete dataset or database table.
"""
def __init__(self, schema, name=None):
assert isinstance(schema, Schema)
self._schema = schema
self._name = name
def __repr__(self):
return self._repr()
def _repr(self):
return "%s(%s)" % (type(self).__name__, repr(self.schema))
@property
def schema(self):
return self._schema
def get_schema(self):
return self._schema
def has_schema(self):
return True
@property
def name(self):
return self._name
def equals(self, other, cache=None):
if type(self) != type(other):
return False
return self.schema.equals(other.schema, cache=cache)
def root_tables(self):
return [self]
class DataType(object):
def __init__(self, nullable=True):
self.nullable = nullable
def __call__(self, nullable=True):
return self._factory(nullable=nullable)
def _factory(self, nullable=True):
return type(self)(nullable=nullable)
def __eq__(self, other):
return self.equals(other)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(type(self))
def __repr__(self):
name = self.name.lower()
if not self.nullable:
name = '{0}[non-nullable]'.format(name)
return name
@property
def name(self):
return type(self).__name__
def equals(self, other, cache=None):
if isinstance(other, six.string_types):
other = validate_type(other)
return (isinstance(other, type(self)) and
self.nullable == other.nullable)
def can_implicit_cast(self, other):
return self.equals(other)
def scalar_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Scalar'.format(type(self).__name__))
def array_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Column'.format(type(self).__name__))
class Any(DataType):
pass
class Primitive(DataType):
pass
class Null(DataType):
pass
class Variadic(DataType):
pass
class Boolean(Primitive):
pass
Bounds = namedtuple('Bounds', ('upper', 'lower'))
class Integer(Primitive):
@property
def bounds(self):
exp = self._nbytes * 8 - 1
lower = -1 << exp
return Bounds(lower=lower, upper=~lower)
def can_implicit_cast(self, other):
return (
isinstance(other, Integer) and
(type(self) is Integer or other._nbytes <= self._nbytes)
)
class String(Variadic):
pass
class Date(Primitive):
pass
class Timestamp(Primitive):
pass
class SignedInteger(Integer):
pass
class Floating(Primitive):
def can_implicit_cast(self, other):
if isinstance(other, Integer):
return True
elif isinstance(other, Floating):
# return other._nbytes <= self._nbytes
return True
else:
return False
class Int8(Integer):
_nbytes = 1
class Int16(Integer):
_nbytes = 2
class Int32(Integer):
_nbytes = 4
class Int64(Integer):
_nbytes = 8
class Float(Floating):
_nbytes = 4
class Double(Floating):
_nbytes = 8
def parametric(cls):
type_name = cls.__name__
array_type_name = '{0}Column'.format(type_name)
scalar_type_name = '{0}Scalar'.format(type_name)
def array_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, array_type_name)(op, self, name=name)
return constructor
def scalar_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, scalar_type_name)(op, self, name=name)
return constructor
cls.array_type = array_type
cls.scalar_type = scalar_type
return cls
@parametric
class Decimal(DataType):
# Decimal types are parametric, we store the parameters in this object
def __init__(self, precision, scale, nullable=True):
super(Decimal, self).__init__(nullable=nullable)
self.precision = precision
self.scale = scale
def __repr__(self):
return '{0}(precision={1:d}, scale={2:d})'.format(
self.name,
self.precision,
self.scale,
)
def __str__(self):
return '{0}({1:d}, {2:d})'.format(
self.name.lower(),
self.precision,
self.scale,
)
def __hash__(self):
return hash((self.precision, self.scale))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return (
isinstance(other, Decimal) and
self.precision == other.precision and
self.scale == other.scale
)
@classmethod
def can_implicit_cast(cls, other):
return isinstance(other, (Floating, Decimal))
@parametric
class Category(DataType):
def __init__(self, cardinality=None, nullable=True):
super(Category, self).__init__(nullable=nullable)
self.cardinality = cardinality
def __repr__(self):
if self.cardinality is not None:
cardinality = self.cardinality
else:
cardinality = 'unknown'
return 'category(K={0})'.format(cardinality)
def __hash__(self):
return hash(self.cardinality)
def __eq__(self, other):
if not isinstance(other, Category):
return False
return self.cardinality == other.cardinality
def to_integer_type(self):
cardinality = self.cardinality
if cardinality is None:
return int64
elif cardinality < int8.bounds.upper:
return int8
elif cardinality < int16.bounds.upper:
return int16
elif cardinality < int32.bounds.upper:
return int32
else:
return int64
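# For example (illustrative): Category(100).to_integer_type() returns int8,
# while Category() with unknown cardinality falls back to int64.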
@parametric
class Struct(DataType):
def __init__(self, names, types, nullable=True):
super(Struct, self).__init__(nullable=nullable)
self.names = names
self.types = types
def __repr__(self):
return '{0}({1})'.format(
self.name,
list(zip(self.names, self.types))
)
def __str__(self):
return '{0}<{1}>'.format(
self.name.lower(),
', '.join(
'{0}: {1}'.format(n, t) for n, t in zip(self.names, self.types)
)
)
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.names == other.names and
self.types == other.types)
@classmethod
def from_tuples(self, pairs):
return Struct(*map(list, zip(*pairs)))
@parametric
class Array(Variadic):
def __init__(self, value_type, nullable=True):
super(Array, self).__init__(nullable=nullable)
self.value_type = value_type
def __repr__(self):
return '{0}({1})'.format(self.name, repr(self.value_type))
def __str__(self):
return '{0}<{1}>'.format(self.name.lower(), self.value_type)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.value_type == other.value_type
)
@parametric
class Enum(DataType):
def __init__(self, rep_type, value_type, nullable=True):
super(Enum, self).__init__(nullable=nullable)
self.rep_type = rep_type
self.value_type = value_type
@parametric
class Map(DataType):
def __init__(self, key_type, value_type, nullable=True):
super(Map, self).__init__(nullable=nullable)
self.key_type = key_type
self.value_type = value_type
def __repr__(self):
return '{0}({1}, {2})'.format(
self.name,
repr(self.key_type),
repr(self.value_type),
)
def __str__(self):
return '{0}<{1}, {2}>'.format(
self.name.lower(),
self.key_type,
self.value_type,
)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.key_type == other.key_type and
self.value_type == other.value_type
)
# ---------------------------------------------------------------------
any = Any()
null = Null()
boolean = Boolean()
int_ = Integer()
int8 = Int8()
int16 = Int16()
int32 = Int32()
int64 = Int64()
float = Float()
double = Double()
string = String()
date = Date()
timestamp = Timestamp()
_primitive_types = {
'any': any,
'null': null,
'boolean': boolean,
'int8': int8,
'int16': int16,
'int32': int32,
'int64': int64,
'float': float,
'double': double,
'string': string,
'date': date,
'timestamp': timestamp
}
class Tokens(object):
"""Class to hold tokens for lexing
"""
__slots__ = ()
ANY = 0
NULL = 1
PRIMITIVE = 2
DECIMAL = 3
VARCHAR = 4
CHAR = 5
ARRAY = 6
MAP = 7
STRUCT = 8
INTEGER = 9
FIELD = 10
COMMA = 11
COLON = 12
LPAREN = 13
RPAREN = 14
LBRACKET = 15
RBRACKET = 16
@staticmethod
def name(value):
return _token_names[value]
_token_names = dict(
(getattr(Tokens, n), n)
for n in dir(Tokens) if n.isalpha() and n.isupper()
)
Token = namedtuple('Token', ('type', 'value'))
_TYPE_RULES = OrderedDict(
[
# any, null
('(?P<ANY>any)', lambda token: Token(Tokens.ANY, any)),
('(?P<NULL>null)', lambda token: Token(Tokens.NULL, null)),
] + [
# primitive types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, value=value: Token(Tokens.PRIMITIVE, value)
) for token, value in _primitive_types.items()
if token != 'any' and token != 'null'
] + [
# decimal + complex types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, toktype=toktype: Token(toktype, token)
) for token, toktype in zip(
('decimal', 'varchar', 'char', 'array', 'map', 'struct'),
(
Tokens.DECIMAL,
Tokens.VARCHAR,
Tokens.CHAR,
Tokens.ARRAY,
Tokens.MAP,
Tokens.STRUCT
),
)
] + [
# numbers, for decimal spec
(r'(?P<INTEGER>\d+)', lambda token: Token(Tokens.INTEGER, int(token))),
# struct fields
(
r'(?P<FIELD>[a-zA-Z_][a-zA-Z_0-9]*)',
lambda token: Token(Tokens.FIELD, token)
),
('(?P<COMMA>,)', lambda token: Token(Tokens.COMMA, token)),
('(?P<COLON>:)', lambda token: Token(Tokens.COLON, token)),
(r'(?P<LPAREN>\()', lambda token: Token(Tokens.LPAREN, token)),
(r'(?P<RPAREN>\))', lambda token: Token(Tokens.RPAREN, token)),
('(?P<LBRACKET><)', lambda token: Token(Tokens.LBRACKET, token)),
('(?P<RBRACKET>>)', lambda token: Token(Tokens.RBRACKET, token)),
(r'(?P<WHITESPACE>\s+)', None),
]
)
_TYPE_KEYS = tuple(_TYPE_RULES.keys())
_TYPE_PATTERN = re.compile('|'.join(_TYPE_KEYS), flags=re.IGNORECASE)
def _generate_tokens(pat, text):
"""Generate a sequence of tokens from `text` that match `pat`
Parameters
----------
pat : compiled regex
The pattern to use for tokenization
text : str
The text to tokenize
"""
rules = _TYPE_RULES
keys = _TYPE_KEYS
groupindex = pat.groupindex
for m in iter(pat.scanner(text).match, None):
func = rules[keys[groupindex[m.lastgroup] - 1]]
if func is not None:
assert callable(func), 'func must be callable'
yield func(m.group(m.lastgroup))
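# Illustrative example (not part of the original module):
# list(_generate_tokens(_TYPE_PATTERN, 'array<int8>')) yields tokens
# ARRAY, LBRACKET, PRIMITIVE(int8), RBRACKET.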
class TypeParser(object):
"""A type parser for complex types.
Parameters
----------
text : str
The text to parse
Notes
-----
Adapted from David Beazley's and Brian Jones's Python Cookbook
"""
def __init__(self, text):
self.text = text
self.tokens = _generate_tokens(_TYPE_PATTERN, text)
self.tok = None
self.nexttok = None
def _advance(self):
self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
def _accept(self, toktype):
if self.nexttok is not None and self.nexttok.type == toktype:
self._advance()
return True
return False
def _expect(self, toktype):
if not self._accept(toktype):
raise SyntaxError('Expected {0} after {1!r} in {2!r}'.format(
Tokens.name(toktype),
self.tok.value,
self.text,
))
def parse(self):
self._advance()
# any and null types cannot be nested
if self._accept(Tokens.ANY) or self._accept(Tokens.NULL):
return self.tok.value
t = self.type()
if self.nexttok is None:
return t
else:
# additional junk was passed at the end, throw an error
additional_tokens = []
while self.nexttok is not None:
additional_tokens.append(self.nexttok.value)
self._advance()
raise SyntaxError(
'Found additional tokens {0}'.format(additional_tokens)
)
def type(self):
"""
type : primitive
| decimal
| array
| map
| struct
primitive : "any"
| "null"
| "boolean"
| "int8"
| "int16"
| "int32"
| "int64"
| "float"
| "double"
| "string"
| "timestamp"
decimal : "decimal"
| "decimal" "(" integer "," integer ")"
integer : [0-9]+
array : "array" "<" type ">"
map : "map" "<" type "," type ">"
struct : "struct" "<" field ":" type ("," field ":" type)* ">"
field : [a-zA-Z_][a-zA-Z_0-9]*
"""
if self._accept(Tokens.PRIMITIVE):
return self.tok.value
elif self._accept(Tokens.DECIMAL):
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
precision = self.tok.value
self._expect(Tokens.COMMA)
self._expect(Tokens.INTEGER)
scale = self.tok.value
self._expect(Tokens.RPAREN)
else:
precision = 9
scale = 0
return Decimal(precision, scale)
elif self._accept(Tokens.VARCHAR) or self._accept(Tokens.CHAR):
# VARCHAR, VARCHAR(n), CHAR, and CHAR(n) all parse as STRING
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
self._expect(Tokens.RPAREN)
return string
return string
elif self._accept(Tokens.ARRAY):
self._expect(Tokens.LBRACKET)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Array(value_type)
elif self._accept(Tokens.MAP):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.PRIMITIVE)
key_type = self.tok.value
self._expect(Tokens.COMMA)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Map(key_type, value_type)
elif self._accept(Tokens.STRUCT):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.FIELD)
names = [self.tok.value]
self._expect(Tokens.COLON)
types = [self.type()]
while self._accept(Tokens.COMMA):
self._expect(Tokens.FIELD)
names.append(self.tok.value)
self._expect(Tokens.COLON)
types.append(self.type())
self._expect(Tokens.RBRACKET)
return Struct(names, types)
else:
raise SyntaxError('Type cannot be parsed: {0}'.format(self.text))
def validate_type(t):
if isinstance(t, DataType):
return t
return TypeParser(t).parse()
def array_type(t):
# compatibility
return validate_type(t).array_type()
def scalar_type(t):
# compatibility
return validate_type(t).scalar_type()
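# --- Illustrative usage (editor's sketch, not part of the original module) --
# Expected results are approximate; the exact reprs depend on the Decimal,
# Array, Map and Struct classes defined earlier in this module.
if __name__ == '__main__':
    print(validate_type('decimal(12, 2)'))                  # Decimal(12, 2)
    print(validate_type('array<string>'))                   # Array(string)
    print(validate_type('map<string, int64>'))              # Map(string, int64)
    print(validate_type('struct<a: int64, b: array<double>>'))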
| apache-2.0 | -6,465,158,278,819,873,000 | 23.407134 | 79 | 0.545835 | false | 3.969394 | false | false | false |
Mariaanisimova/pythonintask | IVTp/2014/Shcherbakov_R_A/task_09_22.py | 1 | 1636 | # Task 9. Variant 22.
# Create a game in which the computer picks a word and the player has to
# guess it. The computer tells the player how many letters the word has and
# gives five attempts to find out whether a particular letter occurs in the
# word; the program may answer only "Yes" and "No". After that the player
# must try to guess the whole word.
# Shcherbakov R.A.
# 22.05.2016
import random
words="Сессия","Питон","Автомат","РГСУ","Расписание"
rand=random.randint(0,4)
massiv=list(words[rand].lower())
print("Ты попал на поле чудес, только тут мы не говорим где находится буква которую \
угадаешь.\nТема: Учеба\nБукв: "+str(len(massiv)))
popitka=5
inp=""
text="Угадали"
while popitka!=0:
if input("У тебя "+str(popitka)+" попыток\nВведите букву: ") in massiv:
print("Да")
else:
print("Нет")
popitka-=1
while inp.lower()!=words[rand].lower():
inp=input("Введите слово: ")
if(inp.lower()=="я слабак"):
inp=words[rand]
text="Слабак"
elif(inp.lower()==words[rand].lower()):
text="Угадали"
else:
print("Попытайтесь еще раз\nНаберите 'Я слабак' для выхода")
input("\nВы "+text)
| apache-2.0 | 3,770,639,365,246,442,000 | 31.571429 | 85 | 0.67807 | false | 1.503958 | false | false | false |
dsonbill/DMPHive | xmlrsa.py | 1 | 3988 | import rsa
import base64
import math
import xml.etree.ElementTree as ET
# Utility functions
def bytes_to_int(byte_data):
return int.from_bytes(byte_data, 'big')
def bytes_from_int(integer):
byte_length = math.ceil(integer.bit_length() / 8)
return integer.to_bytes(byte_length, 'big')
class RSA():
    def __init__(self, key_size=None):
        # Always define the flag so sign()/verify() do not raise AttributeError
        # on an instance whose keys have not been generated or loaded yet.
        self.initialized = False
if key_size is not None:
self.public_key, self.private_key = rsa.newkeys(key_size)
self.public_key_xml, self.private_key_xml = self.get_keys_xml_string(self.private_key)
self.initialized = True
def sign(self, message, hash):
if self.initialized:
return rsa.sign(message, self.private_key, hash)
def verify(self, message, signature):
if self.initialized:
return rsa.verify(message, signature, self.public_key)
def load_keys_xml(self, filename_private_key):
# Build public and private key object
rsa_xml = ET.parse(filename_private_key).getroot()
modulus_xml = rsa_xml.find('Modulus')
exponent_xml = rsa_xml.find('Exponent')
d_xml = rsa_xml.find('D')
p_xml = rsa_xml.find('P')
q_xml = rsa_xml.find('Q')
modulus_int = bytes_to_int(base64.standard_b64decode(modulus_xml.text))
modulus_bytes = base64.standard_b64decode(modulus_xml.text)
modulus_bytes_tested = bytes_from_int(bytes_to_int(modulus_bytes))
if modulus_bytes != modulus_bytes_tested:
raise Exception('A modulus mismatch was encountered with xmlrsa. Please check your rsa key modulus!')
exponent_int = bytes_to_int(base64.standard_b64decode(exponent_xml.text))
d_int = bytes_to_int(base64.standard_b64decode(d_xml.text))
p_int = bytes_to_int(base64.standard_b64decode(p_xml.text))
q_int = bytes_to_int(base64.standard_b64decode(q_xml.text))
# Set key objects
self.public_key = rsa.PublicKey(modulus_int, exponent_int)
self.private_key = rsa.PrivateKey(modulus_int, exponent_int, d_int, p_int, q_int)
# Set key xml strings
self.public_key_xml, self.private_key_xml = self.get_keys_xml_string(self.private_key)
# Set initialized flag
self.initialized = True
def save_keys_xml(self, filename_private_key):
if self.initialized:
with open(filename_private_key, 'w') as file:
file.write(self.private_key_xml)
@staticmethod
def get_keys_xml_string(private_key):
rsa_key_value_xml = ET.Element('RSAKeyValue')
modulus_xml = ET.SubElement(rsa_key_value_xml, 'Modulus')
exponent_xml = ET.SubElement(rsa_key_value_xml, 'Exponent')
modulus_xml.text = base64.standard_b64encode(bytes_from_int(private_key.n)).decode('utf-8')
exponent_xml.text = base64.standard_b64encode(bytes_from_int(private_key.e)).decode('utf-8')
pubkey = ET.tostring(rsa_key_value_xml).decode('utf-8')
d_xml = ET.SubElement(rsa_key_value_xml, 'D')
p_xml = ET.SubElement(rsa_key_value_xml, 'P')
q_xml = ET.SubElement(rsa_key_value_xml, 'Q')
dp_xml = ET.SubElement(rsa_key_value_xml, 'DP')
dq_xml = ET.SubElement(rsa_key_value_xml, 'DQ')
inverseq_xml = ET.SubElement(rsa_key_value_xml, 'InverseQ')
d_xml.text = base64.standard_b64encode(bytes_from_int(private_key.d)).decode('utf-8')
p_xml.text = base64.standard_b64encode(bytes_from_int(private_key.p)).decode('utf-8')
q_xml.text = base64.standard_b64encode(bytes_from_int(private_key.q)).decode('utf-8')
dp_xml.text = base64.standard_b64encode(bytes_from_int(private_key.exp1)).decode('utf-8')
dq_xml.text = base64.standard_b64encode(bytes_from_int(private_key.exp2)).decode('utf-8')
inverseq_xml.text = base64.standard_b64encode(bytes_from_int(private_key.coef)).decode('utf-8')
privkey = ET.tostring(rsa_key_value_xml).decode('utf-8')
return pubkey, privkey
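# --- Illustrative usage (editor's sketch, not part of the original module) --
# Round-trips a freshly generated key pair through the .NET-style RSAKeyValue
# XML produced above; 'SHA-256' follows the python-rsa hash-name convention.
if __name__ == '__main__':
    keypair = RSA(key_size=1024)              # small key, demo only
    keypair.save_keys_xml('private.xml')      # writes the XML key to disk
    signature = keypair.sign(b'hello world', 'SHA-256')
    loaded = RSA()
    loaded.load_keys_xml('private.xml')
    assert loaded.verify(b'hello world', signature)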
| cc0-1.0 | -5,861,631,085,719,822,000 | 40.978947 | 113 | 0.650201 | false | 3.185304 | false | false | false |
mice-software/maus | bin/scifi/GenerateMomentumCorrections.py | 1 | 2173 | #!/usr/bin/env python
"""
Generate an MC data file and calculate the required Pattern Recognition
momentum corrections required for the track reconstruction.
This will simulate MICE spills through the entirety of MICE using Geant4, then
digitize and reconstruct tracker hits to space points. Finally a
reducer is used to analysis the MC truth and reconstructed tracks in order to
calculate the required corrections.
"""
import os
import MAUS # MAUS libraries
# pylint: disable = C0103
config_file = os.path.join(os.getenv("MAUS_ROOT_DIR"),
"bin/scifi/Conf_PR_Momentum_Corrections.py")
def run():
""" Run the macro
"""
# This input generates empty spills, to be filled by the beam maker later on
my_input = MAUS.InputPySpillGenerator()
# Create an empty array of mappers, then populate it
# with the functionality you want to use.
my_map = MAUS.MapPyGroup()
# GEANT4
my_map.append(MAUS.MapPyBeamMaker()) # beam construction
my_map.append(MAUS.MapCppSimulation()) # geant4 simulation
# Pre detector set up
# my_map.append(MAUS.MapPyMCReconSetup()) # geant4 simulation
my_map.append(MAUS.MapCppMCReconSetup()) # geant4 simulation
# SciFi
my_map.append(MAUS.MapCppTrackerMCDigitization()) # SciFi electronics model
my_map.append(MAUS.MapCppTrackerClusterRecon()) # SciFi channel clustering
my_map.append(MAUS.MapCppTrackerSpacePointRecon()) # SciFi spacepoint recon
my_map.append(MAUS.MapCppTrackerPatternRecognition()) # SciFi track finding
# my_map.append(MAUS.MapCppTrackerTrackFit()) # SciFi track fit
# Momentum Corrections Reducer
my_reduce = MAUS.ReduceCppSciFiMomentumCorrections()
# Then construct a MAUS output component - filename comes from datacards
my_output = MAUS.OutputCppRoot()
# can specify datacards here or by using appropriate command line calls
datacards = open(config_file, 'r')
# The Go() drives all the components you pass in, then check the file
# (default simulation.out) for output
MAUS.Go(my_input, my_map, my_reduce, my_output, datacards)
if __name__ == '__main__':
run()
| gpl-3.0 | 8,634,506,744,995,396,000 | 34.622951 | 80 | 0.716521 | false | 3.544861 | false | false | false |
Savvysherpa/provenance | provenance/migrations/env.py | 1 | 2032 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from provenance import models
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
connectable = config.attributes.get('connection', None)
if connectable is None:
# only create Engine if we don't have a Connection
# from the outside
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
# when connectable is already a Connection object, calling
# connect() gives us a *branched connection*.
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
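# --- Illustrative note (editor's sketch, not part of the original file) -----
# A caller can hand an already-open connection to this env.py; it is picked up
# above via config.attributes.get('connection'):
#
#   from alembic import command
#   from alembic.config import Config
#
#   cfg = Config('alembic.ini')
#   with engine.begin() as connection:      # `engine` is created by the caller
#       cfg.attributes['connection'] = connection
#       command.upgrade(cfg, 'head')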
| mit | -6,248,009,298,735,235,000 | 28.449275 | 69 | 0.692913 | false | 4.172485 | true | false | false |
novapost/python-pussycache | setup.py | 1 | 2924 | # -*- coding: utf-8 -*-
"""Python packaging."""
from os.path import abspath, dirname, join
from setuptools import setup
def read_relative_file(filename):
"""Returns contents of the given file, which path is supposed relative
to this module."""
with open(join(dirname(abspath(__file__)), filename)) as f:
return f.read()
def packages(project_name):
"""Return list of packages distributed by project based on its name.
>>> packages('foo')
['foo']
>>> packages('foo.bar')
['foo', 'foo.bar']
>>> packages('foo.bar.baz')
['foo', 'foo.bar', 'foo.bar.baz']
>>> packages('FooBar')
['foobar']
Implements "Use a single name" convention described in :pep:`423`.
"""
name = str(project_name).lower()
if '.' in name: # Using namespace packages.
parts = name.split('.')
return ['.'.join(parts[0:i]) for i in range(1, len(parts) + 1)]
else: # One root package or module.
return [name]
def namespace_packages(project_name):
"""Return list of namespace packages distributed in this project, based on
project name.
>>> namespace_packages('foo')
[]
>>> namespace_packages('foo.bar')
['foo']
>>> namespace_packages('foo.bar.baz')
['foo', 'foo.bar']
>>> namespace_packages('Foo.BaR.BAZ') == namespace_packages('foo.bar.baz')
True
Implements "Use a single name" convention described in :pep:`423`.
"""
package_list = packages(project_name)
package_list.pop() # Ignore last element.
# Remaining packages are supposed to be namespace packages.
return package_list
NAME = 'pussycache'
version = read_relative_file('VERSION').strip()
readme = read_relative_file('README.md')
requirements = []
dependency_links = []
entry_points = {
}
if __name__ == '__main__': # ``import setup`` doesn't trigger setup().
setup(name=NAME,
version=version,
description="""Cache Backend system for python objects""",
long_description=readme,
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
'Framework :: Django',
],
keywords='cache',
author='Novapost Team',
author_email='[email protected]',
url='https://github.com/novapost/%s' % NAME,
license='BSD',
packages=packages(NAME),
namespace_packages=namespace_packages(NAME),
include_package_data=True,
zip_safe=False,
install_requires=requirements,
dependency_links=dependency_links,
entry_points=entry_points,
test_suite='nose.collector',
setup_requires=['nose'],
tests_require=['redis', 'django'])
| mit | -3,581,475,447,714,496,500 | 29.778947 | 78 | 0.588577 | false | 4.106742 | false | false | false |
jokey2k/pyClanSphere | pyClanSphere/plugins/bulletin_board/database.py | 1 | 3321 | # -*- coding: utf-8 -*-
"""
pyClanSphere.plugins.bulletin_board.database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Our needed tables are declared here (now)
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from pyClanSphere.database import db, metadata
# Mapping these out from the db module to increase readability further down
for var in ['Table', 'Column', 'String', 'Integer', 'Boolean', 'DateTime', 'ForeignKey', 'Text']:
globals()[var] = getattr(db,var)
board_categories = Table('board_categories', metadata,
Column('category_id', Integer, primary_key=True),
Column('name', String(50)),
Column('ordering', Integer)
)
board_forums = Table('board_forums', metadata,
Column('forum_id', Integer, primary_key=True),
Column('category_id', ForeignKey('board_categories.category_id')),
Column('name', String(50)),
Column('description', String(255)),
Column('ordering', Integer),
Column('allow_anonymous', Boolean),
    Column('is_public', Boolean),
Column('topiccount', Integer),
Column('postcount', Integer),
Column('modification_date', DateTime),
Column('lasttopic_id', Integer, ForeignKey('board_topics.topic_id', name="forum_lasttopic", use_alter=True)),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="forum_lastpost", use_alter=True))
)
board_topics = Table('board_topics', metadata,
Column('topic_id', Integer, primary_key=True),
Column('forum_id', ForeignKey('board_forums.forum_id')),
Column('name', String(255)),
    Column('date', DateTime, default=datetime.utcnow),  # pass the callable so the default is evaluated per insert
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
Column('is_sticky', Boolean),
Column('is_locked', Boolean),
Column('is_global', Boolean),
Column('is_solved', Boolean),
Column('is_external', Boolean),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="topic_lastpost", use_alter=True)),
Column('postcount', Integer),
Column('modification_date', DateTime)
)
board_posts = Table('board_posts', metadata,
Column('post_id', Integer, primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id')),
Column('text', Text),
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
    Column('date', DateTime, default=datetime.utcnow),
Column('ip', String(40)),
)
board_global_lastread = Table('board_global_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
    Column('date', DateTime, default=datetime.utcnow)
)
board_local_lastread = Table('board_local_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id'), primary_key=True),
    Column('date', DateTime, default=datetime.utcnow)
)
def init_database(app):
""" This is for inserting our new table"""
engine = app.database_engine
metadata.create_all(engine)
__all__ = ['board_categories', 'board_forums', 'board_topics', 'board_posts',
'board_local_lastread', 'board_global_lastread']
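# --- Illustrative note (editor's sketch, not part of the original module) ---
# With a configured pyClanSphere app the tables above are created once via
#   init_database(app)
# after which plain SQLAlchemy Core constructs work against them, e.g.
#   board_categories.insert().values(name='General', ordering=1)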
| bsd-3-clause | -5,117,481,362,407,669,000 | 37.616279 | 113 | 0.662752 | false | 3.673673 | false | false | false |
franek/weboob | weboob/capabilities/gauge.py | 1 | 3325 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, StringField, FloatField, DateField, Field, UserError
__all__ = ['Gauge', 'GaugeSensor', 'GaugeMeasure', 'ICapGauge', 'SensorNotFound']
class SensorNotFound(UserError):
"""
    Raised when a sensor is not found.
"""
class Gauge(CapBaseObject):
"""
Gauge class.
"""
name = StringField('Name of gauge')
city = StringField('City of the gauge')
    object = StringField('What is evaluated')  # For example, the name of a river
sensors = Field('List of sensors on the gauge', list)
class GaugeMeasure(CapBaseObject):
"""
Measure of a gauge sensor.
"""
level = FloatField('Level of measure')
date = DateField('Date of measure')
alarm = StringField('Alarm level')
def __init__(self):
CapBaseObject.__init__(self, None)
def __repr__(self):
if self.level:
return "<GaugeMeasure level=%f alarm=%s>" % (self.level, self.alarm)
else:
return "<GaugeMeasure NotAvailable>"
class GaugeSensor(CapBaseObject):
"""
GaugeSensor class.
"""
name = StringField('Name of the sensor')
unit = StringField('Unit of values')
forecast = StringField('Forecast')
lastvalue = Field('Last value', GaugeMeasure)
history = Field('Value history', list) # lastvalue not included
gaugeid = StringField('Id of the gauge')
def __repr__(self):
return "<GaugeSensor id=%s name=%s>" % (self.id, self.name)
class ICapGauge(IBaseCap):
def iter_gauges(self, pattern=None):
"""
        Iterate over gauges.
:param pattern: if specified, used to search gauges.
:type pattern: str
:rtype: iter[:class:`Gauge`]
"""
raise NotImplementedError()
def iter_sensors(self, id, pattern=None):
"""
        Iterate over the sensors of a gauge.
        :param id: ID of the gauge
:param pattern: if specified, used to search sensors.
:type pattern: str
:rtype: iter[:class:`GaugeSensor`]
"""
raise NotImplementedError()
def iter_gauge_history(self, id):
"""
Get history of a gauge sensor.
:param id: ID of the gauge sensor
:type id: str
:rtype: iter[:class:`GaugeMeasure`]
"""
raise NotImplementedError()
def get_last_measure(self, id):
"""
        Get the last measure of a sensor.
        :param id: ID of the sensor.
:type id: str
:rtype: :class:`GaugeMeasure`
"""
raise NotImplementedError()
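# --- Illustrative sketch (editor's addition, not part of the original file) -
# A minimal in-memory backend showing how the interface above can be filled
# in; it assumes IBaseCap needs no constructor arguments, and it only covers
# the two iteration methods. Real weboob backends would scrape these values
# from a website instead.
class StaticGaugeBackend(ICapGauge):
    def __init__(self, gauges):
        # gauges: dict mapping gauge id -> (Gauge, [GaugeSensor])
        self._gauges = gauges
    def iter_gauges(self, pattern=None):
        for gauge, _ in self._gauges.values():
            if pattern is None or pattern.lower() in gauge.name.lower():
                yield gauge
    def iter_sensors(self, id, pattern=None):
        if id not in self._gauges:
            raise SensorNotFound()
        for sensor in self._gauges[id][1]:
            if pattern is None or pattern.lower() in sensor.name.lower():
                yield sensor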
| agpl-3.0 | 1,626,210,802,505,838,600 | 27.663793 | 95 | 0.623459 | false | 3.93026 | false | false | false |
Centurion89/ModLogin | modules/Square.py | 1 | 2724 | import requests
import json
from lxml import html
from BaseModule import BaseModule
class Square(BaseModule):
def login(self, username, password, useragent):
useragent = BaseModule().define_user_agent(useragent)
headers = {'user-agent': useragent}
session = requests.Session()
login_page = session.get(
'https://www.squareup.com/login',
headers=headers)
login_page_html = html.fromstring(login_page.content)
# Load up CSRF token from cookies
csrf_token = session.cookies["_js_csrf"]
# Set POST payload
payload = {'email': username, 'password': password}
headers = {
'User-Agent': useragent,
'Host': 'api.squareup.com',
'Content-Type': 'application/json',
'Origin': 'https://squareup.com',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8',
'Referer': 'https://squareup.com/login',
'X-Csrf-Token': csrf_token
}
login_attempt = session.post(
'https://api.squareup.com/mp/login',
data=json.dumps(payload),
headers=headers, cookies=session.cookies, allow_redirects=False
)
auth_results = login_attempt.json()
        # If the API returns 200 and the JSON contains a "trusted_device" key,
        # login was successful
if (login_attempt.status_code == 200 and
'trusted_device' in auth_results):
return {
'module': self.__class__.__name__,
'auth_result': 'SUCCESS',
'display_name': '',
'display_handle': ''
}
# If JSON value contains error message, login failed
elif login_attempt.status_code == 401 or 'error' in auth_results:
return {
'module': self.__class__.__name__,
'auth_result': 'FAILED',
'display_name': '',
'display_handle': ''
}
else:
# If none of the above occur, must be unknown issue
# Output a copy of the HTML that was returned for debugging
debug_filename = str(self.__class__.__name__) + \
"_" + username + "_debug.html"
with open("./debug/" + debug_filename, "a+") as f:
f.write(json.dumps(auth_results))
return {
'module': self.__class__.__name__,
'auth_result': 'ERROR',
'display_name': '',
'display_handle': ''
}
square = Square()
| mit | 6,636,884,052,703,252,000 | 35.810811 | 75 | 0.517254 | false | 4.32381 | false | false | false |
gjost/django-linkpile | runtests.py | 1 | 1091 | import sys
try:
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="linkpile.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"linkpile",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
from django_nose import NoseTestSuiteRunner
except ImportError:
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
if __name__ == '__main__':
run_tests(*sys.argv[1:]) | bsd-3-clause | 8,139,445,616,342,015,000 | 20 | 85 | 0.549954 | false | 3.981752 | true | false | false |
procool/mygw | web/apps/cabinet/views.py | 1 | 2966 | import logging
import datetime
from sqlalchemy import func, and_, or_, not_
from flask import url_for, session
from misc.mixins import myTemplateView, JSONView
from utils.arp_list import get_mac_by_ip
from models.all_models import InetEther, ARPCache
from models.session import session
from utils.server.http_client import HTTPClient
from libs.pfctl import PFCtl
class checkIPMixin(object):
def check_for_ip(self):
self.request.remote_ether = session.query(ARPCache).filter(ARPCache.ip==self.request.remote_addr).first()
if self.request.remote_ether is None:
logging.error('IP: %s not found in cached arp list!' % self.request.remote_addr)
self.request.remote_ether = get_mac_by_ip(self.request.remote_addr)
else:
self.request.remote_ether = self.request.remote_ether.mac
if self.request.remote_ether is None or self.request.remote_addr is None:
return None
addr = session.query(InetEther).filter(InetEther.mac==self.request.remote_ether).first()
if addr is None:
logging.error('XXX4.1')
addr = InetEther()
addr.mac = self.request.remote_ether
if addr.ip != self.request.remote_addr or not addr.is_active:
logging.error('XXX4.2')
addr.ip = self.request.remote_addr
addr.is_active = True
addr.lastupdate = func.now()
session.add(addr)
addrs = session.query(InetEther).filter(not_(InetEther.mac==self.request.remote_ether))
addrs = addrs.filter(InetEther.ip==self.request.remote_addr)
addrs.update({"is_active": False})
return addr
class cabinetView(checkIPMixin, myTemplateView):
template='cabinet/cabinet-ajax.tpl'
def get_context_data(self, **kwargs):
addr = self.check_for_ip()
context = super(cabinetView, self).get_context_data(**kwargs)
context['addr_obj'] = addr
if addr is None:
context['access_type'] = 'UNDEFINED'
elif addr.access_type == 'tor':
context['access_type'] = 'TOR'
else:
context['access_type'] = 'DIRECT'
return context
class setIPView(checkIPMixin, JSONView):
__ctlsrv = HTTPClient(port=6999)
def get_context_data(self, **kwargs):
context = super(setIPView, self).get_context_data(**kwargs)
addr = self.check_for_ip()
if addr is None:
return context
addr.access_type = self.__type
session.add(addr)
session.commit()
r = self.__ctlsrv.call_handler('ip/%s/access' % self.request.remote_addr)
context['result'] = r
return context
def dispatch(self, request, access_type, *args, **kwargs):
if access_type in PFCtl.ip_proxy_types:
self.__type = access_type
else:
self.__type = None
return super(setIPView, self).dispatch(self, request, *args, **kwargs)
| bsd-2-clause | -5,029,251,846,019,144,000 | 34.73494 | 113 | 0.633176 | false | 3.648216 | false | false | false |
rdegges/django-twilio | setup.py | 1 | 3277 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from os.path import abspath, dirname, join, normpath
from setuptools import find_packages, setup
import sys
INSTALL_PYTHON_REQUIRES = []
# We are intending to keep up to date with the supported Django versions.
# For the official support, please visit:
# https://docs.djangoproject.com/en/3.0/faq/install/#what-python-version-can-i-use-with-django and you may change the version in the URL to suit your needs, and we will try to update that here too as we upgrade with django.
if sys.version_info[1] == 5:
# py3.5 can run 1.11 < 2.2
django_python_version_install = 'Django>=2.2,<3.0',
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 6:
# py3.6 can run 1.11 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2,<3.2',
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 7:
# py3.7 is 1.11.17 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2,<3.2'
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 8:
# py3.8 is 2.2.8 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2.8,<3.2'
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
setup(
# Basic package information:
name='django-twilio',
version='0.13.1.post0',
packages=find_packages(),
# Packaging options:
zip_safe=False,
include_package_data=True,
# Package dependencies:
install_requires=[
'setuptools>=36.2',
'twilio>=6.3.0,<7',
'django-phonenumber-field>=0.6',
'phonenumbers>=8.10.22',
] + INSTALL_PYTHON_REQUIRES,
# Metadata for PyPI:
author='Randall Degges',
author_email='[email protected]',
maintainer="Jason Held",
maintainer_email="[email protected]",
license='UNLICENSE',
url='https://github.com/rdegges/django-twilio',
keywords='twilio telephony call phone voip sms django django-twilio',
description='Build Twilio functionality into your Django apps.',
long_description=open(
normpath(join(dirname(abspath(__file__)), 'README.rst'))
).read(),
project_urls={
"Documentation": "https://django-twilio.readthedocs.io/en/latest/",
"Code": "https://github.com/rdegges/django-twilio",
"Tracker": "https://github.com/rdegges/django-twilio/issues",
},
classifiers=[
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'License :: Public Domain',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
]
)
| unlicense | 6,593,338,256,977,965,000 | 37.552941 | 223 | 0.646628 | false | 3.43501 | false | false | false |
xjchensz/LSFS | LSFS/LSFS_TEST.py | 1 | 1803 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import scipy as sp
import os
import random
import time
import sys
def append_module_path():
import sys
paths = [ \
"../gen_data",
"../evaluate",
"../read_data"
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
append_module_path()
import gen_data
import evaluate
import read_data
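# NOTE (editor's assumption): compute_H, norm_2_1 and compute_Q exercised by
# the test helpers below are expected to be provided by the accompanying LSFS
# implementation module; they are neither defined nor imported in this file.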
def test_H():
"""
expected
array([[ 0.66666667, -0.33333333, -0.33333333],
[-0.33333333, 0.66666667, -0.33333333],
[-0.33333333, -0.33333333, 0.66666667]])
"""
return compute_H(3)
def test_norm_2_1():
"""
expected 4.2426406871192857
"""
W = np.array([[1,1],[2,2]])
return norm_2_1(W)
def test_Q():
"""
(np.sqrt(2) + np.sqrt(8)) / [np.sqrt(2), np.sqrt(8)]
expected [[ 3. , 0. ],
[ 0. , 1.5]]
"""
W = np.array([[1,1],[2,2]])
return compute_Q(W)
def print_W(W):
with open("W.txt", "a+") as f:
for w in W:
print(w, file=f)
print("\n========================\n", file=f)
def run_accuracy(fun, XL_train,YL_train,XU_train,YU_train, sel_num=5, output_file_name="feature_order"):
XL, YL, XU, YU = XL_train.copy(), YL_train.copy(), XU_train.copy(), YU_train.copy()
if fun.__name__.lower() == "lsfs":
YL = read_data.label_n1_to_nc(YL)
YU = read_data.label_n1_to_nc(YU)
feature_order, time_dual = fun(XL, YL, XU, output_file_name=output_file_name)
X,Y = evaluate.select_data(XL_train, YL_train, XU_train, YU_train,\
feature_order, sel_num=sel_num)
a = evaluate.run_acc(X,Y)
print("accuracy", ":", a)
return feature_order, time_dual, a | gpl-3.0 | -6,408,282,040,506,266,000 | 21 | 104 | 0.533555 | false | 2.871019 | false | false | false |
vvinuv/pymorph | pymorph/yetbackfunc.py | 1 | 3453 | import os
import sys
import numpy as np
import pymconvolve
import numpy.ma as ma
import fitsio
from mask_or_fit import GetSExObj
from runsexfunc import RunSex
def QuarterMask(z, zm, xcntr, ycntr, bbya, pa, quarter):
nxpts, nypts = z.shape
zmm = np.ones_like(z)
co = np.cos(pa * np.pi / 180.0)
si = np.sin(pa * np.pi / 180.0)
one_minus_eg_sq = (bbya)**2.0
x, y = np.meshgrid(np.arange(nxpts), np.arange(nypts))
xrot = (x - xcntr) * co + (y - ycntr) * si
xrot = xrot.T
xsq = xrot**2.0
yrot = (xcntr - x) * si + (y - ycntr) * co
yrot = yrot.T
ysq = yrot**2.0
r = np.sqrt(xsq + ysq / one_minus_eg_sq)
if quarter == 0:
condition = xrot > -1e5
if quarter == 1:
condition = (xrot - 0 >= 0) & (yrot - 0 >= 0)
if quarter == 2:
condition = (xrot - 0 < 0) & (yrot - 0 >= 0)
if quarter == 3:
condition = (xrot - 0 < 0) & (yrot - 0 < 0)
if quarter == 4:
condition = (xrot - 0 >= 0) & (yrot - 0 < 0)
zmm[condition] = 0
zmm = zm + zmm
zmm[np.where(zmm > 0)] = 1
return np.median(ma.masked_array(z, zmm).compressed())
def FindYetSky(fstring, sex_params, SEX_PATH, gimg, wimg, scat,
X0, Y0, check_fits, SEx_GAIN,
center_err=5., median_std=1.3, sconfig='seg', verbose=False):
#from astropy.io import fits
if verbose:
print(scat)
RunSex(sex_params, SEX_PATH, gimg, wimg, scat, SEx_GAIN,
check_fits=check_fits, sconfig='seg')
f = fitsio.FITS(gimg)
z = f[0].read()
f.close()
if verbose:
print(z.shape)
print(gimg)
fseg = fitsio.FITS(check_fits)
zm = fseg[0].read()
fseg.close()
#f = fits.open(gimg)
#z = f[0].data
#f.close()
#fseg = fits.open(check_fits)
#zm = fseg[0].data
#fseg.close()
if verbose:
print(zm.shape)
SexSky, SkyYet = 9999, 9999
SkyMed, SkyMin = 9999, 9999
SkyQua, SkySig = 9999, 9999
for l_s in open(scat):
v_s = [float(l) for l in l_s.split()]
obj = GetSExObj(NXPTS=None, NYPTS=None, values=v_s)
#sys.exit()
SexId = obj.sex_num
xcntr = obj.xcntr
ycntr = obj.ycntr
pa = obj.pos_ang
bbya = obj.bbya
a = obj.maj_axis
b = a * bbya
hr = obj.radius
sky = obj.sky
if np.abs(X0 - obj.xcntr) < center_err and np.abs(Y0 - obj.ycntr) < center_err:
boxcar = np.reshape(np.ones(3 * 3), (3, 3))
zm = pymconvolve.Convolve(zm, boxcar)
zm[np.where(zm > 0)] = 1
SkyQua = []
for ii in np.arange(1, 5):
SkyQua.append(QuarterMask(z, zm,
obj.xcntr - 1.0, obj.ycntr - 1.0,
bbya, pa, ii))
SkyQua = np.array(SkyQua)
SexSky = obj.sky
tmpstd = np.std(ma.masked_array(z, zm).compressed())
tmpmed = np.median(ma.masked_array(z, zm).compressed())
zm[np.where((z - tmpmed) > median_std * tmpstd)] = 1
SkyYet = np.median(ma.masked_array(z, zm).compressed())
SkyMed = np.median(SkyQua)
SkyMin = np.min(SkyQua)
SkySig = np.std(ma.masked_array(z, zm).compressed())
# os.system('rm -f SegCat.cat default_seg.sex seg.fits')
break
return SexSky, SkyYet, SkyMed, SkyMin, SkyQua, SkySig
| gpl-2.0 | 7,017,160,199,089,343,000 | 27.073171 | 87 | 0.523313 | false | 2.664352 | false | false | false |
DataDog/integrations-core | network/datadog_checks/network/network.py | 1 | 47373 | # (C) Datadog, Inc. 2010-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
Collects network metrics.
"""
import array
import distutils.spawn
import os
import re
import socket
import struct
from collections import defaultdict
import psutil
from six import PY3, iteritems, itervalues
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.common import pattern_filter
from datadog_checks.base.utils.platform import Platform
from datadog_checks.base.utils.subprocess_output import SubprocessOutputEmptyError, get_subprocess_output
try:
import datadog_agent
except ImportError:
from datadog_checks.base.stubs import datadog_agent
try:
import fcntl
except ImportError:
fcntl = None
if PY3:
long = int
BSD_TCP_METRICS = [
(re.compile(r"^\s*(\d+) data packets \(\d+ bytes\) retransmitted\s*$"), 'system.net.tcp.retrans_packs'),
(re.compile(r"^\s*(\d+) packets sent\s*$"), 'system.net.tcp.sent_packs'),
(re.compile(r"^\s*(\d+) packets received\s*$"), 'system.net.tcp.rcv_packs'),
]
SOLARIS_TCP_METRICS = [
(re.compile(r"\s*tcpRetransSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.retrans_segs'),
(re.compile(r"\s*tcpOutDataSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.in_segs'),
(re.compile(r"\s*tcpInSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.out_segs'),
]
# constants for extracting ethtool data via ioctl
SIOCETHTOOL = 0x8946
ETHTOOL_GSTRINGS = 0x0000001B
ETHTOOL_GSSET_INFO = 0x00000037
ETHTOOL_GSTATS = 0x0000001D
ETH_SS_STATS = 0x1
ETH_GSTRING_LEN = 32
# ENA metrics that we're collecting
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-network-performance.html
ENA_METRIC_PREFIX = "aws.ec2."
ENA_METRIC_NAMES = [
"bw_in_allowance_exceeded",
"bw_out_allowance_exceeded",
"conntrack_allowance_exceeded",
"linklocal_allowance_exceeded",
"pps_allowance_exceeded",
]
class Network(AgentCheck):
SOURCE_TYPE_NAME = 'system'
PSUTIL_TYPE_MAPPING = {socket.SOCK_STREAM: 'tcp', socket.SOCK_DGRAM: 'udp'}
PSUTIL_FAMILY_MAPPING = {socket.AF_INET: '4', socket.AF_INET6: '6'}
def check(self, instance):
if instance is None:
instance = {}
self._excluded_ifaces = instance.get('excluded_interfaces', [])
if not isinstance(self._excluded_ifaces, list):
raise ConfigurationError(
"Expected 'excluded_interfaces' to be a list, got '{}'".format(type(self._excluded_ifaces).__name__)
)
self._collect_cx_state = instance.get('collect_connection_state', False)
self._collect_cx_queues = instance.get('collect_connection_queues', False)
self._collect_rate_metrics = instance.get('collect_rate_metrics', True)
self._collect_count_metrics = instance.get('collect_count_metrics', False)
self._collect_ena_metrics = instance.get('collect_aws_ena_metrics', False)
if fcntl is None and self._collect_ena_metrics:
raise ConfigurationError("fcntl not importable, collect_aws_ena_metrics should be disabled")
# This decides whether we should split or combine connection states,
# along with a few other things
self._setup_metrics(instance)
self._exclude_iface_re = None
exclude_re = instance.get('excluded_interface_re', None)
if exclude_re:
self.log.debug("Excluding network devices matching: %s", exclude_re)
self._exclude_iface_re = re.compile(exclude_re)
if Platform.is_linux():
self._check_linux(instance)
elif Platform.is_bsd():
self._check_bsd(instance)
elif Platform.is_solaris():
self._check_solaris(instance)
elif Platform.is_windows():
self._check_psutil(instance)
def _setup_metrics(self, instance):
self._combine_connection_states = instance.get('combine_connection_states', True)
if self._combine_connection_states:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'established'): 'system.net.tcp4.established',
('tcp4', 'opening'): 'system.net.tcp4.opening',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listening'): 'system.net.tcp4.listening',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp6', 'established'): 'system.net.tcp6.established',
('tcp6', 'opening'): 'system.net.tcp6.opening',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listening'): 'system.net.tcp6.listening',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "established",
"SYN-SENT": "opening",
"SYN-RECV": "opening",
"FIN-WAIT-1": "closing",
"FIN-WAIT-2": "closing",
"TIME-WAIT": "time_wait",
"UNCONN": "closing",
"CLOSE-WAIT": "closing",
"LAST-ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "established",
"SYN_SENT": "opening",
"SYN_RECV": "opening",
"FIN_WAIT1": "closing",
"FIN_WAIT2": "closing",
"TIME_WAIT": "time_wait",
"CLOSE": "closing",
"CLOSE_WAIT": "closing",
"LAST_ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "established",
psutil.CONN_SYN_SENT: "opening",
psutil.CONN_SYN_RECV: "opening",
psutil.CONN_FIN_WAIT1: "closing",
psutil.CONN_FIN_WAIT2: "closing",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "closing",
psutil.CONN_CLOSE_WAIT: "closing",
psutil.CONN_LAST_ACK: "closing",
psutil.CONN_LISTEN: "listening",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
else:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'estab'): 'system.net.tcp4.estab',
('tcp4', 'syn_sent'): 'system.net.tcp4.syn_sent',
('tcp4', 'syn_recv'): 'system.net.tcp4.syn_recv',
('tcp4', 'fin_wait_1'): 'system.net.tcp4.fin_wait_1',
('tcp4', 'fin_wait_2'): 'system.net.tcp4.fin_wait_2',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp4', 'unconn'): 'system.net.tcp4.unconn',
('tcp4', 'close'): 'system.net.tcp4.close',
('tcp4', 'close_wait'): 'system.net.tcp4.close_wait',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listen'): 'system.net.tcp4.listen',
('tcp4', 'last_ack'): 'system.net.tcp4.time_wait',
('tcp6', 'estab'): 'system.net.tcp6.estab',
('tcp6', 'syn_sent'): 'system.net.tcp6.syn_sent',
('tcp6', 'syn_recv'): 'system.net.tcp6.syn_recv',
('tcp6', 'fin_wait_1'): 'system.net.tcp6.fin_wait_1',
('tcp6', 'fin_wait_2'): 'system.net.tcp6.fin_wait_2',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
('tcp6', 'unconn'): 'system.net.tcp6.unconn',
('tcp6', 'close'): 'system.net.tcp6.close',
('tcp6', 'close_wait'): 'system.net.tcp6.close_wait',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listen'): 'system.net.tcp6.listen',
('tcp6', 'last_ack'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "estab",
"SYN-SENT": "syn_sent",
"SYN-RECV": "syn_recv",
"FIN-WAIT-1": "fin_wait_1",
"FIN-WAIT-2": "fin_wait_2",
"TIME-WAIT": "time_wait",
"UNCONN": "unconn",
"CLOSE-WAIT": "close_wait",
"LAST-ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "estab",
"SYN_SENT": "syn_sent",
"SYN_RECV": "syn_recv",
"FIN_WAIT1": "fin_wait_1",
"FIN_WAIT2": "fin_wait_2",
"TIME_WAIT": "time_wait",
"CLOSE": "close",
"CLOSE_WAIT": "close_wait",
"LAST_ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "estab",
psutil.CONN_SYN_SENT: "syn_sent",
psutil.CONN_SYN_RECV: "syn_recv",
psutil.CONN_FIN_WAIT1: "fin_wait_1",
psutil.CONN_FIN_WAIT2: "fin_wait_2",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "close",
psutil.CONN_CLOSE_WAIT: "close_wait",
psutil.CONN_LAST_ACK: "last_ack",
psutil.CONN_LISTEN: "listen",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
def _submit_netmetric(self, metric, value, tags=None):
if self._collect_rate_metrics:
self.rate(metric, value, tags=tags)
if self._collect_count_metrics:
self.monotonic_count('{}.count'.format(metric), value, tags=tags)
def _submit_devicemetrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
# adding the device to the tags as device_name is deprecated
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
expected_metrics = [
'bytes_rcvd',
'bytes_sent',
'packets_in.count',
'packets_in.error',
'packets_out.count',
'packets_out.error',
]
for m in expected_metrics:
assert m in vals_by_metric
assert len(vals_by_metric) == len(expected_metrics)
count = 0
for metric, val in iteritems(vals_by_metric):
self.rate('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network metrics for interface %s", count, iface)
def _submit_ena_metrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
allowed = [ENA_METRIC_PREFIX + m for m in ENA_METRIC_NAMES]
for m in vals_by_metric:
assert m in allowed
count = 0
for metric, val in iteritems(vals_by_metric):
self.gauge('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network ena metrics for interface %s", count, iface)
def _parse_value(self, v):
try:
return long(v)
except ValueError:
return 0
def _submit_regexed_values(self, output, regex_list, tags):
lines = output.splitlines()
for line in lines:
for regex, metric in regex_list:
value = re.match(regex, line)
if value:
self._submit_netmetric(metric, self._parse_value(value.group(1)), tags=tags)
def _is_collect_cx_state_runnable(self, proc_location):
"""
Determine if collect_connection_state is set and can effectively run.
        If self._collect_cx_state is True and a custom proc_location is provided, connection state
        can only be collected through `ss` (netstat cannot honor a custom /proc path)
:param proc_location: str
:return: bool
"""
if self._collect_cx_state is False:
return False
if proc_location != "/proc":
# If we have `ss`, we're fine with a non-standard `/proc` location
if distutils.spawn.find_executable("ss") is None:
self.warning(
"Cannot collect connection state: `ss` cannot be found and "
"currently with a custom /proc path: %s",
proc_location,
)
return False
else:
return True
return True
def _check_linux(self, instance):
"""
        _check_linux can be run inside a container and still collect the network metrics from the host
For that procfs_path can be set to something like "/host/proc"
When a custom procfs_path is set, the collect_connection_state option is ignored
"""
proc_location = datadog_agent.get_config('procfs_path')
if not proc_location:
proc_location = '/proc'
proc_location = proc_location.rstrip('/')
custom_tags = instance.get('tags', [])
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `ss` to collect connection state")
# Try using `ss` for increased performance over `netstat`
ss_env = {"PROC_ROOT": net_proc_base_location}
                # By providing the environment variables in ss_env, the PATH will be overridden. In CentOS,
# datadog-agent PATH is "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", while sh PATH
# will be '/usr/local/bin:/usr/bin'. In CentOS, ss is located in /sbin and /usr/sbin, not
# in the sh PATH, which will result in network metric collection failure.
#
# The line below will set sh PATH explicitly as the datadog-agent PATH to fix that issue.
if "PATH" in os.environ:
ss_env["PATH"] = os.environ["PATH"]
metrics = self._get_metrics()
for ip_version in ['4', '6']:
# Call `ss` for each IP version because there's no built-in way of distinguishing
# between the IP versions in the output
# Also calls `ss` for each protocol, because on some systems (e.g. Ubuntu 14.04), there is a
# bug that print `tcp` even if it's `udp`
# The `-H` flag isn't available on old versions of `ss`.
cmd = "ss --numeric --tcp --all --ipv{} | cut -d ' ' -f 1 | sort | uniq -c".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
# 7624 CLOSE-WAIT
# 72 ESTAB
# 9 LISTEN
# 1 State
# 37 TIME-WAIT
lines = output.splitlines()
self._parse_short_state_lines(lines, metrics, self.tcp_states['ss'], ip_version=ip_version)
cmd = "ss --numeric --udp --all --ipv{} | wc -l".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
metric = self.cx_state_gauge[('udp{}'.format(ip_version), 'connections')]
metrics[metric] = int(output) - 1 # Remove header
if self._collect_cx_queues:
cmd = "ss --numeric --tcp --all --ipv{}".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
for (state, recvq, sendq) in self._parse_queues("ss", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except OSError as e:
self.log.info("`ss` invocation failed: %s. Using `netstat` as a fallback", str(e))
output, _, _ = get_subprocess_output(["netstat", "-n", "-u", "-t", "-a"], self.log)
lines = output.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
if self._collect_cx_queues:
for (state, recvq, sendq) in self._parse_queues("netstat", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
proc_dev_path = "{}/net/dev".format(net_proc_base_location)
try:
with open(proc_dev_path, 'r') as proc:
lines = proc.readlines()
except IOError:
# On Openshift, /proc/net/snmp is only readable by root
self.log.debug("Unable to read %s.", proc_dev_path)
lines = []
# Inter-| Receive | Transmit
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed # noqa: E501
# lo:45890956 112797 0 0 0 0 0 0 45890956 112797 0 0 0 0 0 0 # noqa: E501
# eth0:631947052 1042233 0 19 0 184 0 1206 1208625538 1320529 0 0 0 0 0 0 # noqa: E501
# eth1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 # noqa: E501
for line in lines[2:]:
cols = line.split(':', 1)
x = cols[1].split()
# Filter inactive interfaces
if self._parse_value(x[0]) or self._parse_value(x[8]):
iface = cols[0].strip()
metrics = {
'bytes_rcvd': self._parse_value(x[0]),
'bytes_sent': self._parse_value(x[8]),
'packets_in.count': self._parse_value(x[1]),
'packets_in.error': self._parse_value(x[2]) + self._parse_value(x[3]),
'packets_out.count': self._parse_value(x[9]),
'packets_out.error': self._parse_value(x[10]) + self._parse_value(x[11]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
# read ENA metrics, if configured and available
if self._collect_ena_metrics:
ena_metrics = self._collect_ena(iface)
if ena_metrics:
self._submit_ena_metrics(iface, ena_metrics, custom_tags)
netstat_data = {}
for f in ['netstat', 'snmp']:
proc_data_path = "{}/net/{}".format(net_proc_base_location, f)
try:
with open(proc_data_path, 'r') as netstat:
while True:
n_header = netstat.readline()
if not n_header:
break # No more? Abort!
n_data = netstat.readline()
h_parts = n_header.strip().split(' ')
h_values = n_data.strip().split(' ')
ns_category = h_parts[0][:-1]
netstat_data[ns_category] = {}
# Turn the data into a dictionary
for idx, hpart in enumerate(h_parts[1:]):
netstat_data[ns_category][hpart] = h_values[idx + 1]
except IOError:
# On Openshift, /proc/net/snmp is only readable by root
self.log.debug("Unable to read %s.", proc_data_path)
nstat_metrics_names = {
'Tcp': {
'RetransSegs': 'system.net.tcp.retrans_segs',
'InSegs': 'system.net.tcp.in_segs',
'OutSegs': 'system.net.tcp.out_segs',
},
'TcpExt': {
'ListenOverflows': 'system.net.tcp.listen_overflows',
'ListenDrops': 'system.net.tcp.listen_drops',
'TCPBacklogDrop': 'system.net.tcp.backlog_drops',
'TCPRetransFail': 'system.net.tcp.failed_retransmits',
},
'Udp': {
'InDatagrams': 'system.net.udp.in_datagrams',
'NoPorts': 'system.net.udp.no_ports',
'InErrors': 'system.net.udp.in_errors',
'OutDatagrams': 'system.net.udp.out_datagrams',
'RcvbufErrors': 'system.net.udp.rcv_buf_errors',
'SndbufErrors': 'system.net.udp.snd_buf_errors',
'InCsumErrors': 'system.net.udp.in_csum_errors',
},
}
        # Submit the netstat/snmp counters parsed above
for k in nstat_metrics_names:
for met in nstat_metrics_names[k]:
if met in netstat_data.get(k, {}):
self._submit_netmetric(
nstat_metrics_names[k][met], self._parse_value(netstat_data[k][met]), tags=custom_tags
)
# Get the conntrack -S information
conntrack_path = instance.get('conntrack_path')
use_sudo_conntrack = is_affirmative(instance.get('use_sudo_conntrack', True))
if conntrack_path is not None:
self._add_conntrack_stats_metrics(conntrack_path, use_sudo_conntrack, custom_tags)
# Get the rest of the metric by reading the files. Metrics available since kernel 3.6
conntrack_files_location = os.path.join(proc_location, 'sys', 'net', 'netfilter')
        # By default, only max and count are reported. However, if the blacklist is set,
        # the whitelist loses its default value
blacklisted_files = instance.get('blacklist_conntrack_metrics')
whitelisted_files = instance.get('whitelist_conntrack_metrics')
if blacklisted_files is None and whitelisted_files is None:
whitelisted_files = ['max', 'count']
available_files = []
# Get the metrics to read
try:
for metric_file in os.listdir(conntrack_files_location):
if (
os.path.isfile(os.path.join(conntrack_files_location, metric_file))
and 'nf_conntrack_' in metric_file
):
available_files.append(metric_file[len('nf_conntrack_') :])
except Exception as e:
self.log.debug("Unable to list the files in %s. %s", conntrack_files_location, e)
filtered_available_files = pattern_filter(
available_files, whitelist=whitelisted_files, blacklist=blacklisted_files
)
for metric_name in filtered_available_files:
metric_file_location = os.path.join(conntrack_files_location, 'nf_conntrack_{}'.format(metric_name))
try:
with open(metric_file_location, 'r') as conntrack_file:
# Checking it's an integer
try:
value = int(conntrack_file.read().rstrip())
self.gauge('system.net.conntrack.{}'.format(metric_name), value, tags=custom_tags)
except ValueError:
self.log.debug("%s is not an integer", metric_name)
except IOError as e:
self.log.debug("Unable to read %s, skipping %s.", metric_file_location, e)
@staticmethod
def _get_net_proc_base_location(proc_location):
if Platform.is_containerized() and proc_location != "/proc":
net_proc_base_location = "%s/1" % proc_location
else:
net_proc_base_location = proc_location
return net_proc_base_location
def _add_conntrack_stats_metrics(self, conntrack_path, use_sudo_conntrack, tags):
"""
Parse the output of conntrack -S
Add the parsed metrics
"""
try:
cmd = [conntrack_path, "-S"]
if use_sudo_conntrack:
cmd.insert(0, "sudo")
output, _, _ = get_subprocess_output(cmd, self.log)
# conntrack -S sample:
# cpu=0 found=27644 invalid=19060 ignore=485633411 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=39936711
# cpu=1 found=21960 invalid=17288 ignore=475938848 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=36983181
lines = output.splitlines()
for line in lines:
cols = line.split()
cpu_num = cols[0].split('=')[-1]
cpu_tag = ['cpu:{}'.format(cpu_num)]
cols = cols[1:]
for cell in cols:
metric, value = cell.split('=')
self.monotonic_count('system.net.conntrack.{}'.format(metric), int(value), tags=tags + cpu_tag)
except SubprocessOutputEmptyError:
self.log.debug("Couldn't use %s to get conntrack stats", conntrack_path)
def _get_metrics(self):
return {val: 0 for val in itervalues(self.cx_state_gauge)}
def _parse_short_state_lines(self, lines, metrics, tcp_states, ip_version):
for line in lines:
value, state = line.split()
proto = "tcp{0}".format(ip_version)
if state in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[state]]
metrics[metric] += int(value)
def _parse_linux_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
"""
Parse the output of the command that retrieves the connection state (either `ss` or `netstat`)
Returns a dict metric_name -> value
"""
metrics = self._get_metrics()
for l in lines:
cols = l.split()
if cols[0].startswith('tcp') or protocol == 'tcp':
proto = "tcp{0}".format(ip_version) if ip_version else ("tcp4", "tcp6")[cols[0] == "tcp6"]
if cols[state_col] in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[cols[state_col]]]
metrics[metric] += 1
elif cols[0].startswith('udp') or protocol == 'udp':
proto = "udp{0}".format(ip_version) if ip_version else ("udp4", "udp6")[cols[0] == "udp6"]
metric = self.cx_state_gauge[proto, 'connections']
metrics[metric] += 1
return metrics
def _check_bsd(self, instance):
netstat_flags = ['-i', '-b']
custom_tags = instance.get('tags', [])
# FreeBSD's netstat truncates device names unless you pass '-W'
if Platform.is_freebsd():
netstat_flags.append('-W')
try:
output, _, _ = get_subprocess_output(["netstat"] + netstat_flags, self.log)
lines = output.splitlines()
# Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll
# lo0 16384 <Link#1> 318258 0 428252203 318258 0 428252203 0
# lo0 16384 localhost fe80:1::1 318258 - 428252203 318258 - 428252203 -
# lo0 16384 127 localhost 318258 - 428252203 318258 - 428252203 -
# lo0 16384 localhost ::1 318258 - 428252203 318258 - 428252203 -
# gif0* 1280 <Link#2> 0 0 0 0 0 0 0
# stf0* 1280 <Link#3> 0 0 0 0 0 0 0
# en0 1500 <Link#4> 04:0c:ce:db:4e:fa 20801309 0 13835457425 15149389 0 11508790198 0
# en0 1500 seneca.loca fe80:4::60c:ceff: 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 192.168.1 192.168.1.63 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# p2p0 2304 <Link#5> 06:0c:ce:db:4e:fa 0 0 0 0 0 0 0
# ham0 1404 <Link#6> 7a:79:05:4d:bf:f5 30100 0 6815204 18742 0 8494811 0
# ham0 1404 5 5.77.191.245 30100 - 6815204 18742 - 8494811 -
# ham0 1404 seneca.loca fe80:6::7879:5ff: 30100 - 6815204 18742 - 8494811 -
# ham0 1404 2620:9b::54 2620:9b::54d:bff5 30100 - 6815204 18742 - 8494811 -
headers = lines[0].split()
# Given the irregular structure of the table above, better to parse from the end of each line
# Verify headers first
# -7 -6 -5 -4 -3 -2 -1
for h in ("Ipkts", "Ierrs", "Ibytes", "Opkts", "Oerrs", "Obytes", "Coll"):
if h not in headers:
self.log.error("%s not found in %s; cannot parse", h, headers)
return False
current = None
for l in lines[1:]:
# Another header row, abort now, this is IPv6 land
if "Name" in l:
break
x = l.split()
if len(x) == 0:
break
iface = x[0]
if iface.endswith("*"):
iface = iface[:-1]
if iface == current:
# skip multiple lines of same interface
continue
else:
current = iface
# Filter inactive interfaces
if self._parse_value(x[-5]) or self._parse_value(x[-2]):
iface = current
metrics = {
'bytes_rcvd': self._parse_value(x[-5]),
'bytes_sent': self._parse_value(x[-2]),
'packets_in.count': self._parse_value(x[-7]),
'packets_in.error': self._parse_value(x[-6]),
'packets_out.count': self._parse_value(x[-4]),
'packets_out.error': self._parse_value(x[-3]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection stats.")
try:
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-p" "tcp"], self.log)
# 3651535 packets sent
# 972097 data packets (615753248 bytes)
# 5009 data packets (2832232 bytes) retransmitted
# 0 resends initiated by MTU discovery
# 2086952 ack-only packets (471 delayed)
# 0 URG only packets
# 0 window probe packets
# 310851 window update packets
# 336829 control packets
# 0 data packets sent after flow control
# 3058232 checksummed in software
# 3058232 segments (571218834 bytes) over IPv4
# 0 segments (0 bytes) over IPv6
# 4807551 packets received
# 1143534 acks (for 616095538 bytes)
# 165400 duplicate acks
# ...
self._submit_regexed_values(netstat, BSD_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
proc_location = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `netstat` to collect connection state")
output_TCP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "tcp"], self.log)
output_UDP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "udp"], self.log)
lines = output_TCP.splitlines() + output_UDP.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
def _check_solaris(self, instance):
# Can't get bytes sent and received via netstat
# Default to kstat -p link:0:
custom_tags = instance.get('tags', [])
try:
netstat, _, _ = get_subprocess_output(["kstat", "-p", "link:0:"], self.log)
metrics_by_interface = self._parse_solaris_netstat(netstat)
for interface, metrics in iteritems(metrics_by_interface):
self._submit_devicemetrics(interface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting kstat stats.")
try:
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-P" "tcp"], self.log)
# TCP: tcpRtoAlgorithm= 4 tcpRtoMin = 200
# tcpRtoMax = 60000 tcpMaxConn = -1
# tcpActiveOpens = 57 tcpPassiveOpens = 50
# tcpAttemptFails = 1 tcpEstabResets = 0
# tcpCurrEstab = 0 tcpOutSegs = 254
# tcpOutDataSegs = 995 tcpOutDataBytes =1216733
# tcpRetransSegs = 0 tcpRetransBytes = 0
# tcpOutAck = 185 tcpOutAckDelayed = 4
# ...
self._submit_regexed_values(netstat, SOLARIS_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
def _parse_solaris_netstat(self, netstat_output):
"""
Return a mapping of network metrics by interface. For example:
{ interface:
{'bytes_sent': 0,
'bytes_rcvd': 0,
'bytes_rcvd': 0,
...
}
}
"""
# Here's an example of the netstat output:
#
# link:0:net0:brdcstrcv 527336
# link:0:net0:brdcstxmt 1595
# link:0:net0:class net
# link:0:net0:collisions 0
# link:0:net0:crtime 16359935.2637943
# link:0:net0:ierrors 0
# link:0:net0:ifspeed 10000000000
# link:0:net0:ipackets 682834
# link:0:net0:ipackets64 682834
# link:0:net0:link_duplex 0
# link:0:net0:link_state 1
# link:0:net0:multircv 0
# link:0:net0:multixmt 1595
# link:0:net0:norcvbuf 0
# link:0:net0:noxmtbuf 0
# link:0:net0:obytes 12820668
# link:0:net0:obytes64 12820668
# link:0:net0:oerrors 0
# link:0:net0:opackets 105445
# link:0:net0:opackets64 105445
# link:0:net0:rbytes 113983614
# link:0:net0:rbytes64 113983614
# link:0:net0:snaptime 16834735.1607669
# link:0:net0:unknowns 0
# link:0:net0:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# link:0:net1:brdcstrcv 4947620
# link:0:net1:brdcstxmt 1594
# link:0:net1:class net
# link:0:net1:collisions 0
# link:0:net1:crtime 16359935.2839167
# link:0:net1:ierrors 0
# link:0:net1:ifspeed 10000000000
# link:0:net1:ipackets 4947620
# link:0:net1:ipackets64 4947620
# link:0:net1:link_duplex 0
# link:0:net1:link_state 1
# link:0:net1:multircv 0
# link:0:net1:multixmt 1594
# link:0:net1:norcvbuf 0
# link:0:net1:noxmtbuf 0
# link:0:net1:obytes 73324
# link:0:net1:obytes64 73324
# link:0:net1:oerrors 0
# link:0:net1:opackets 1594
# link:0:net1:opackets64 1594
# link:0:net1:rbytes 304384894
# link:0:net1:rbytes64 304384894
# link:0:net1:snaptime 16834735.1613302
# link:0:net1:unknowns 0
# link:0:net1:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# A mapping of solaris names -> datadog names
metric_by_solaris_name = {
'rbytes64': 'bytes_rcvd',
'obytes64': 'bytes_sent',
'ipackets64': 'packets_in.count',
'ierrors': 'packets_in.error',
'opackets64': 'packets_out.count',
'oerrors': 'packets_out.error',
}
lines = [l for l in netstat_output.splitlines() if len(l) > 0]
metrics_by_interface = {}
for l in lines:
# Parse the metric & interface.
cols = l.split()
link, n, iface, name = cols[0].split(":")
assert link == "link"
# Get the datadog metric name.
ddname = metric_by_solaris_name.get(name, None)
if ddname is None:
continue
# Add it to this interface's list of metrics.
metrics = metrics_by_interface.get(iface, {})
metrics[ddname] = self._parse_value(cols[1])
metrics_by_interface[iface] = metrics
return metrics_by_interface
def _check_psutil(self, instance):
"""
Gather metrics about connections states and interfaces counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if self._collect_cx_state:
self._cx_state_psutil(tags=custom_tags)
self._cx_counters_psutil(tags=custom_tags)
def _cx_state_psutil(self, tags=None):
"""
Collect metrics about connections state using psutil
"""
metrics = defaultdict(int)
tags = [] if tags is None else tags
for conn in psutil.net_connections():
protocol = self._parse_protocol_psutil(conn)
status = self.tcp_states['psutil'].get(conn.status)
metric = self.cx_state_gauge.get((protocol, status))
if metric is None:
self.log.warning('Metric not found for: %s,%s', protocol, status)
else:
metrics[metric] += 1
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=tags)
def _cx_counters_psutil(self, tags=None):
"""
Collect metrics about interfaces counters using psutil
"""
tags = [] if tags is None else tags
for iface, counters in iteritems(psutil.net_io_counters(pernic=True)):
metrics = {
'bytes_rcvd': counters.bytes_recv,
'bytes_sent': counters.bytes_sent,
'packets_in.count': counters.packets_recv,
'packets_in.error': counters.errin,
'packets_out.count': counters.packets_sent,
'packets_out.error': counters.errout,
}
self._submit_devicemetrics(iface, metrics, tags)
def _parse_protocol_psutil(self, conn):
"""
Returns a string describing the protocol for the given connection
in the form `tcp4`, 'udp4` as in `self.cx_state_gauge`
"""
protocol = self.PSUTIL_TYPE_MAPPING.get(conn.type, '')
family = self.PSUTIL_FAMILY_MAPPING.get(conn.family, '')
return '{}{}'.format(protocol, family)
def _parse_queues(self, tool, ss_output):
"""
for each line of `ss_output`, returns a triplet with:
* a connection state (`established`, `listening`)
* the receive queue size
* the send queue size
"""
for line in ss_output.splitlines():
fields = line.split()
if len(fields) < (6 if tool == "netstat" else 3):
continue
state_column = 0 if tool == "ss" else 5
try:
state = self.tcp_states[tool][fields[state_column]]
except KeyError:
continue
yield (state, fields[1], fields[2])
def _collect_ena(self, iface):
"""
Collect ENA metrics for given interface.
ENA metrics are collected via the ioctl SIOCETHTOOL call. At the time of writing
this method, there are no maintained Python libraries that do this. The solution
is based on:
* https://github.com/safchain/ethtool
* https://gist.github.com/yunazuno/d7cd7e1e127a39192834c75d85d45df9
"""
ethtool_socket = None
try:
ethtool_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
return self._get_ena_metrics(iface, ethtool_socket)
except OSError as e:
# this will happen for interfaces that don't support SIOCETHTOOL - e.g. loopback or docker
self.log.debug('OSError while trying to collect ENA metrics for interface %s: %s', iface, str(e))
except Exception:
self.log.exception('Unable to collect ENA metrics for interface %s', iface)
finally:
if ethtool_socket is not None:
ethtool_socket.close()
return {}
def _send_ethtool_ioctl(self, iface, sckt, data):
"""
Send an ioctl SIOCETHTOOL call for given interface with given data.
"""
ifr = struct.pack('16sP', iface.encode('utf-8'), data.buffer_info()[0])
fcntl.ioctl(sckt.fileno(), SIOCETHTOOL, ifr)
def _get_ethtool_gstringset(self, iface, sckt):
"""
Retrieve names of all ethtool stats for given interface.
"""
sset_info = array.array('B', struct.pack('IIQI', ETHTOOL_GSSET_INFO, 0, 1 << ETH_SS_STATS, 0))
self._send_ethtool_ioctl(iface, sckt, sset_info)
sset_mask, sset_len = struct.unpack('8xQI', sset_info)
if sset_mask == 0:
sset_len = 0
strings = array.array('B', struct.pack('III', ETHTOOL_GSTRINGS, ETH_SS_STATS, sset_len))
strings.extend([0] * sset_len * ETH_GSTRING_LEN)
self._send_ethtool_ioctl(iface, sckt, strings)
all_names = []
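        # The reply keeps the 12-byte request header (the three 32-bit fields
        # packed above), so string i starts at offset 12 + i * ETH_GSTRING_LEN.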
for i in range(sset_len):
offset = 12 + ETH_GSTRING_LEN * i
s = strings[offset : offset + ETH_GSTRING_LEN]
s = s.tobytes() if PY3 else s.tostring()
s = s.partition(b'\x00')[0].decode('utf-8')
all_names.append(s)
return all_names
def _get_ena_metrics(self, iface, sckt):
"""
Get all ENA metrics specified in ENA_METRICS_NAMES list and their values from ethtool.
"""
stats_names = list(self._get_ethtool_gstringset(iface, sckt))
stats_count = len(stats_names)
stats = array.array('B', struct.pack('II', ETHTOOL_GSTATS, stats_count))
# we need `stats_count * (length of uint64)` for the result
stats.extend([0] * len(struct.pack('Q', 0)) * stats_count)
self._send_ethtool_ioctl(iface, sckt, stats)
metrics = {}
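        # Counter values follow the 8-byte reply header (the two 32-bit fields
        # packed above), hence the 8 + 8 * i offset for the i-th uint64.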
for i, stat_name in enumerate(stats_names):
if stat_name in ENA_METRIC_NAMES:
offset = 8 + 8 * i
value = struct.unpack('Q', stats[offset : offset + 8])[0]
metrics[ENA_METRIC_PREFIX + stat_name] = value
return metrics
| bsd-3-clause | 250,857,119,354,836,900 | 44.993204 | 149 | 0.522956 | false | 3.717862 | false | false | false |
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/dynamic/slalom_hyq_interpKino05.py | 1 | 4188 | #Importing helper class for RBPRM
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer
#calling script darpa_hyq_path to compute root path
import slalom_hyq_pathKino05 as tp
from os import environ
ins_dir = environ['DEVEL_DIR']
db_dir = ins_dir+"/install/share/hyq-rbprm/database/hyq_"
pathId = tp.ps.numberPaths()-1
packageName = "hyq_description"
meshPackageName = "hyq_description"
rootJointType = "freeflyer"
# Information to retrieve urdf and srdf files.
urdfName = "hyq"
urdfSuffix = ""
srdfSuffix = ""
# This time we load the full body model of HyQ
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.client.basic.robot.setDimensionExtraConfigSpace(tp.extraDof)
fullBody.setJointBounds ("base_joint_xyz", [-6,6, -2.5, 2.5, 0.0, 1.])
# Setting a number of sample configurations used
nbSamples = 20000
dynamic=True
ps = tp.ProblemSolver(fullBody)
ps.client.problem.setParameter("aMax",tp.aMax)
ps.client.problem.setParameter("vMax",tp.vMax)
r = tp.Viewer (ps,viewerClient=tp.r.client)
rootName = 'base_joint_xyz'
def addLimbDb(limbId, heuristicName, loadValues = True, disableEffectorCollision = False):
fullBody.addLimbDatabase(str(db_dir+limbId+'.db'), limbId, heuristicName,loadValues, disableEffectorCollision)
rLegId = 'rfleg'
lLegId = 'lhleg'
rarmId = 'rhleg'
larmId = 'lfleg'
addLimbDb(rLegId, "manipulability")
addLimbDb(lLegId, "manipulability")
addLimbDb(rarmId, "manipulability")
addLimbDb(larmId, "manipulability")
q_0 = fullBody.getCurrentConfig();
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.ps.configAtParam(0,0.01)[0:7] # use this to get the correct orientation
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[0:7]
dir_init = tp.ps.configAtParam(pathId,0.01)[7:10]
acc_init = tp.ps.configAtParam(pathId,0.01)[10:13]
dir_goal = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[7:10]
acc_goal = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[10:13]
configSize = fullBody.getConfigSize() -fullBody.client.basic.robot.getDimensionExtraConfigSpace()
# copy extraconfig for start and init configurations
q_init[configSize:configSize+3] = dir_init[::]
q_init[configSize+3:configSize+6] = acc_init[::]
q_goal[configSize:configSize+3] = dir_goal[::]
q_goal[configSize+3:configSize+6] = acc_goal[::]
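# The extra configuration DOFs filled above hold the root velocity direction
# (first three entries) and acceleration (last three) sampled from the
# kinodynamic guide path; the same values are passed to generateContacts() below.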
fullBody.setStaticStability(False)
# Randomly generating a contact configuration at q_init
fullBody.setCurrentConfig (q_init)
q_init = fullBody.generateContacts(q_init,dir_init,acc_init,2)
# Randomly generating a contact configuration at q_end
fullBody.setCurrentConfig (q_goal)
q_goal = fullBody.generateContacts(q_goal, dir_goal,acc_goal,2)
# specifying the full body configurations as start and goal state of the problem
fullBody.setStartState(q_init,[larmId,rLegId,rarmId,lLegId])
fullBody.setEndState(q_goal,[larmId,rLegId,rarmId,lLegId])
r(q_init)
# computing the contact sequence
configs = fullBody.interpolate(0.08,pathId=pathId,robustnessTreshold = 0, filterStates = True)
print "number of configs =", len(configs)
r(configs[-1])
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
import fullBodyPlayer
player = fullBodyPlayer.Player(fullBody,pp,tp,configs,draw=True,optim_effector=False,use_velocity=dynamic,pathId = pathId)
#player.displayContactPlan()
r(configs[5])
player.interpolate(5,99)
#player.play()
"""
camera = [0.5681925415992737,
-6.707448482513428,
2.5206544399261475,
0.8217507600784302,
0.5693002343177795,
0.020600343123078346,
0.01408931240439415]
r.client.gui.setCameraTransform(0,camera)
"""
"""
import hpp.corbaserver.rbprm.tools.cwc_trajectory
import hpp.corbaserver.rbprm.tools.path_to_trajectory
import hpp.corbaserver.rbprm.tools.cwc_trajectory_helper
reload(hpp.corbaserver.rbprm.tools.cwc_trajectory)
reload(hpp.corbaserver.rbprm.tools.path_to_trajectory)
reload(hpp.corbaserver.rbprm.tools.cwc_trajectory_helper)
reload(fullBodyPlayer)
"""
| lgpl-3.0 | -8,926,711,783,031,606,000 | 28.914286 | 126 | 0.776027 | false | 2.723017 | true | false | false |
endolith/scikit-image | skimage/restoration/_denoise.py | 1 | 10008 | # coding: utf-8
import numpy as np
from .. import img_as_float
from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
from .._shared.utils import _mode_deprecations
def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,
bins=10000, mode='constant', cval=0):
"""Denoise image using bilateral filter.
    This is an edge-preserving, noise-reducing denoising filter. It averages
    pixels based on their spatial closeness and radiometric similarity.
    Spatial closeness is measured by the Gaussian function of the Euclidean
    distance between two pixels and a certain standard deviation
    (`sigma_spatial`).
    Radiometric similarity is measured by the Gaussian function of the Euclidean
    distance between two color values and a certain standard deviation
    (`sigma_range`).
Parameters
----------
image : ndarray, shape (M, N[, 3])
Input image, 2D grayscale or RGB.
win_size : int
Window size for filtering.
sigma_range : float
Standard deviation for grayvalue/color distance (radiometric
similarity). A larger value results in averaging of pixels with larger
radiometric differences. Note, that the image will be converted using
the `img_as_float` function and thus the standard deviation is in
respect to the range ``[0, 1]``. If the value is ``None`` the standard
deviation of the ``image`` will be used.
sigma_spatial : float
Standard deviation for range distance. A larger value results in
averaging of pixels with larger spatial differences.
bins : int
Number of discrete values for gaussian weights of color filtering.
A larger value results in improved accuracy.
mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
How to handle values outside the image borders. See
`numpy.pad` for detail.
    cval : float
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
denoised : ndarray
Denoised image.
References
----------
.. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf
Example
-------
>>> from skimage import data, img_as_float
>>> astro = img_as_float(data.astronaut())
>>> astro = astro[220:300, 220:320]
>>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
>>> noisy = np.clip(noisy, 0, 1)
>>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15)
"""
mode = _mode_deprecations(mode)
return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,
bins, mode, cval)
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):
"""Perform total-variation denoising using split-Bregman optimization.
    Total-variation denoising (also known as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter.
Parameters
----------
image : ndarray
        Input data to be denoised (converted using `img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
def _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
im : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = im.ndim
p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype)
g = np.zeros_like(p)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
slices_d = [slice(None), ] * ndim
slices_p = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax+1] = slice(0, -1)
slices_p[0] = ax
d[slices_d] += p[slices_p]
slices_d[ax] = slice(None)
slices_p[ax+1] = slice(None)
out = im + d
else:
out = im
E = (d ** 2).sum()
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
slices_g = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_g[ax+1] = slice(0, -1)
slices_g[0] = ax
g[slices_g] = np.diff(out, axis=ax)
slices_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
E += weight * norm.sum()
tau = 1. / (2.*ndim)
norm *= tau / weight
norm += 1.
p -= tau * g
p /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200,
multichannel=False):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
im : ndarray of ints, uints or floats
Input data to be denoised. `im` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that
determines the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the channels dimension.
Returns
-------
out : ndarray
Denoised image.
Notes
-----
Make sure to set the multichannel parameter appropriately for color images.
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
The principle of total variation denoising is to minimize the
total variation of the image, which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce "cartoon-like" images, that is,
piecewise-constant images.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
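    In the discrete setting used here, the stopping criterion tracks the cost
    ``E(u) = sum((u - f)**2) + weight * sum(|grad u|)`` where ``f`` is the input
    image, which makes explicit how `weight` trades data fidelity for smoothness.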
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
--------
2D example on astronaut image:
>>> from skimage import color, data
>>> img = color.rgb2gray(data.astronaut())[:50, :50]
>>> img += 0.5 * img.std() * np.random.randn(*img.shape)
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
3D example on synthetic data:
>>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = denoise_tv_chambolle(mask, weight=100)
"""
im_type = im.dtype
if not im_type.kind == 'f':
im = img_as_float(im)
if multichannel:
out = np.zeros_like(im)
for c in range(im.shape[-1]):
out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps,
n_iter_max)
else:
out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max)
return out
| bsd-3-clause | -854,628,907,179,249,500 | 34.211268 | 92 | 0.6072 | false | 3.67242 | false | false | false |
JMMolenaar/cadnano2.5 | cadnano/part/removeallstrandscmd.py | 1 | 1893 | from cadnano.cnproxy import UndoCommand
class RemoveAllStrandsCommand(UndoCommand):
"""
1. Remove all strands. Emits strandRemovedSignal for each.
2. Remove all oligos.
"""
def __init__(self, part):
super(RemoveAllStrandsCommand, self).__init__("remove all strands")
self._part = part
self._vhs = vhs = part.getVirtualHelices()
self._strand_sets = []
for vh in self._vhs:
x = vh.getStrandSets()
self._strand_sets.append(x[0])
self._strand_sets.append(x[1])
self._strandSetListCopies = \
[[y for y in x._strand_list] for x in self._strand_sets]
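        # Snapshot each strand set's strand list so undo() can restore and
        # re-announce every strand that redo() removes.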
self._oligos = set(part.oligos())
# end def
def redo(self):
part = self._part
        # Remove all strands from every strand set
        for s_set in self._strand_sets:
s_list = s_set._strand_list
for strand in s_list:
s_set.removeStrand(strand)
# end for
s_set._strand_list = []
#end for
for vh in self._vhs:
# for updating the Slice View displayed helices
part.partStrandChangedSignal.emit(part, vh)
# end for
self._oligos.clear()
# end def
def undo(self):
part = self._part
        # Restore the strands that redo() removed
sListCopyIterator = iter(self._strandSetListCopies)
for s_set in self._strand_sets:
s_list = next(sListCopyIterator)
for strand in s_list:
s_set.strandsetStrandAddedSignal.emit(s_set, strand)
# end for
s_set._strand_list = s_list
#end for
for vh in self._vhs:
# for updating the Slice View displayed helices
part.partStrandChangedSignal.emit(part, vh)
# end for
for olg in self._oligos:
part.addOligo(olg)
# end def
# end class | mit | 4,979,934,590,713,284,000 | 32.22807 | 76 | 0.555203 | false | 3.65444 | false | false | false |
cthit/CodeIT | behaviours/Jump.py | 1 | 1201 | import random
import pygame
from behaviours.Behaviour import Behaviour
from behaviours.Collide import Collide
from src.GameMethods import GameMethods
class Jump(Behaviour):
def __init__(self, jump_velocity=10, jump_key=None):
self.jump_velocity = jump_velocity
self.can_jump = False
self.jump_key = jump_key
self._game_methods = None
def update(self, delta_time, keys, config, game_methods: GameMethods):
self._game_methods = game_methods
c = self.owner.get_behaviour(Collide)
self.can_jump = False
if len(c.check_bottom(0.05)) > 0:
if self.owner.velocity.y >= 0:
self.can_jump = True
if self.jump_key is not None and keys[self.jump_key]:
self.jump_if_possible()
def jump_if_possible(self):
if self.can_jump:
self._game_methods.play_sound(random.choice([
"jump-00.wav",
"jump-01.wav",
"jump-02.wav",
"jump-03.wav"]))
self.owner.velocity.y = -self.jump_velocity
self.can_jump = False
def bind_to_key(self, keyboard_key):
self.jump_key = keyboard_key
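# Illustrative wiring, assuming the owning entity exposes get_behaviour() as used
# above and that pygame key constants index the `keys` mapping:
#   player.get_behaviour(Jump).bind_to_key(pygame.K_SPACE)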
| mit | -5,081,456,756,045,194,000 | 29.794872 | 74 | 0.592007 | false | 3.574405 | false | false | false |
tyiannak/inf_teiste_info_theory_lab | compressText.py | 1 | 2472 | import ITlib, sys, bitarray, cPickle, os
if __name__ == '__main__':
mode, inputFilePath, outputFilePath = (sys.argv[1], sys.argv[2], sys.argv[3])
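    # Expected invocation (the 4th argument picks the coding method and is only
    # used when compressing), e.g.:
    #   python compressText.py compress plain.txt packed.bin H
    #   python compressText.py uncompress packed.bin restored.txt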
if mode == "compress":
method = sys.argv[4]
f = open(inputFilePath) # read the input file
text = f.read()
f.close()
charCounts = ITlib.getTextCountsUnique(text) # get character counts
if method == "H" or method == "Huffman":
code, length = ITlib.generateHuffmanCode(charCounts) # generate huffman code
elif method == "SF" or method == "Shannon-Fano":
code, length = ITlib.generateShannonFanoCode(charCounts) # generate shannon-fano code
else:
            raise ValueError('Method argument must be either Huffman (or H) '
                             'or Shannon-Fano (or SF)')
etext = ITlib.encode(code, text) # encode using huffman code
etext = "".join(etext) # convert list to string
etextBits = bitarray.bitarray(etext) # convert to bitarray type
with open(outputFilePath,"wb") as f: # write bits to binary file
etextBits.tofile(f)
cPickle.dump(code, open(outputFilePath+"_code", "wb" ) ) # write code to file
inFSize = os.stat(inputFilePath).st_size
outFSize = os.stat(outputFilePath).st_size
codeFSize = os.stat(outputFilePath+"_code").st_size
print "Original file size is %d bytes" % inFSize
print "Compressed file size is %d bytes \
(%d for encoded text and %d for the code itself)" % \
(outFSize + codeFSize, outFSize, codeFSize)
print "Compression ratio is %.3f" % \
(float(inFSize) / (outFSize + codeFSize))
elif mode == "uncompress":
etextBits = bitarray.bitarray()
with open(inputFilePath,"r") as f: # load bits from comrpessed file
etextBits.fromfile(f)
code = cPickle.load(open(inputFilePath+"_code", "r" ) ) # load code from file
text_n = ITlib.decode(code, etextBits.to01()) # decode the text
with open(outputFilePath, "w") as f: # write decoded text to file
f.write(text_n)
f.close() | apache-2.0 | -2,836,186,854,288,598,000 | 60.825 | 112 | 0.529531 | false | 3.974277 | false | false | false |
mbaldessari/pcp | src/python/setup.py | 1 | 2568 | """ Build script for the PCP python package """
#
# Copyright (C) 2012-2014 Red Hat.
# Copyright (C) 2009-2012 Michael T. Werner
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from distutils.core import setup, Extension
setup(name = 'pcp',
version = '1.0',
description = 'Python package for Performance Co-Pilot',
license = 'GPLv2+',
author = 'Performance Co-Pilot Development Team',
author_email = '[email protected]',
url = 'http://www.performancecopilot.org',
packages = ['pcp'],
ext_modules = [
Extension('cpmapi', ['pmapi.c'], libraries = ['pcp']),
Extension('cpmda', ['pmda.c'], libraries = ['pcp_pmda', 'pcp']),
Extension('cpmgui', ['pmgui.c'], libraries = ['pcp_gui']),
Extension('cpmi', ['pmi.c'], libraries = ['pcp_import']),
Extension('cmmv', ['mmv.c'], libraries = ['pcp_mmv']),
],
platforms = [ 'Windows', 'Linux', 'FreeBSD', 'Solaris', 'Mac OS X', 'AIX' ],
long_description =
'PCP provides services to support system-level performance monitoring',
classifiers = [
          'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: AIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: Software Development :: Libraries',
],
)
| lgpl-2.1 | 4,887,346,861,633,221,000 | 41.098361 | 80 | 0.630841 | false | 4 | false | false | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.exodus/resources/lib/sources/primewire_mv_tv.py | 1 | 9244 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.domains = ['primewire.ag']
self.base_link = 'http://www.primewire.ag'
self.key_link = 'http://www.primewire.ag/index.php?search'
self.moviesearch_link = 'http://www.primewire.ag/index.php?search_keywords=%s&key=%s&search_section=1'
self.tvsearch_link = 'http://www.primewire.ag/index.php?search_keywords=%s&key=%s&search_section=2'
def request(self, url, check):
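        # Fetch the page directly first, then retry through the proxy service
        # (twice) whenever the expected marker string is missing from the response.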
try:
result = client.source(url)
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.source(proxy.get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.source(proxy.get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
except:
return
def movie(self, imdb, title, year):
try:
result = self.request(self.key_link, 'searchform')
query = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
query = self.moviesearch_link % (urllib.quote_plus(re.sub('\'', '', title)), query)
result = self.request(query, 'index_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
title = 'watch' + cleantitle.get(title)
years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
result = [i for i in result if any(x in i[1] for x in years)]
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
except: pass
match = [i[0] for i in result if title == cleantitle.get(i[1])]
match2 = [i[0] for i in result]
match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
if match2 == []: return
for i in match2[:5]:
try:
if len(match) > 0: url = match[0] ; break
result = self.request(urlparse.urljoin(self.base_link, i), 'choose_tabs')
if imdb in str(result): url = i ; break
except:
pass
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
result = self.request(self.key_link, 'searchform')
query = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', tvshowtitle)), query)
result = self.request(query, 'index_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
result = [i for i in result if any(x in i[1] for x in years)]
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
except: pass
match = [i[0] for i in result if tvshowtitle == cleantitle.get(i[1])]
match2 = [i[0] for i in result]
match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
if match2 == []: return
for i in match2[:5]:
try:
if len(match) > 0: url = match[0] ; break
result = self.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
if imdb in str(result): url = i ; break
except:
pass
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
result = self.request(url, 'tv_episode_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
title = cleantitle.get(title)
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
if len(url) == 0: url = [i for i in result if premiered == i[2]]
if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
url = client.replaceHTMLCodes(url[0][0])
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
except: pass
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
except: pass
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = self.request(url, 'choose_tabs')
links = client.parseDOM(result, 'tbody')
for i in links:
try:
url = client.parseDOM(i, 'a', ret='href')[0]
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
except: pass
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
except: pass
url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
url = base64.b64decode(url)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
quality = client.parseDOM(i, 'span', ret='class')[0]
if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
elif quality == 'quality_dvd': quality = 'SD'
else: raise Exception()
sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url, 'direct': False, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | 5,713,432,665,606,945,000 | 40.828054 | 188 | 0.535482 | false | 3.66099 | false | false | false |
aerialhedgehog/VyPy | tests/data/ordered_bunch.py | 1 | 5662 |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from VyPy.data import OrderedBunch, Property
import pickle
from copy import deepcopy
from time import time, sleep
import numpy as np
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# --------------------------------------------------------
# Initialize
# --------------------------------------------------------
o = OrderedBunch()
# --------------------------------------------------------
# Load up data
# --------------------------------------------------------
o['x'] = 'hello' # dictionary style
o.y = 1 # attribute style
o['z'] = [3,4,5]
o.t = OrderedBunch() # sub-bunch
o.t['h'] = 20
o.t.i = (1,2,3)
# --------------------------------------------------------
# Attach a callable object
# --------------------------------------------------------
o.f = Callable(test_function,o)
# --------------------------------------------------------
# Printing
# --------------------------------------------------------
print '>>> print o.keys()'
print o.keys()
print ''
print '>>> print o'
print o
print '>>> print o.f()'
print o.f()
print ''
# --------------------------------------------------------
# Pickling test
# --------------------------------------------------------
print '>>> pickle.dumps()'
d = pickle.dumps(o)
print '>>> pickle.loads()'
p = pickle.loads(d)
print ''
print '>>> print p'
print p
#print "should be true:" , p.f.d is p
#assert p.f.d is p
# --------------------------------------------------------
# The update function
# --------------------------------------------------------
o.t['h'] = 'changed'
p.update(o)
print "should be 'changed':" , p.t.h
assert p.t.h == 'changed'
#assert p.f.d.t.h == 'changed'
print ''
# --------------------------------------------------------
# Array Manipulation
# --------------------------------------------------------
# an ordered bunch of floats
a = OrderedBunch()
a.f = 1
a.g = 2
a.b = OrderedBunch()
a.b.h = np.array([1,2,3])
a.n = 'strings ignored'
print '>>> print a'
print a
print ''
# dump the numerical data to an array
print '>>> a.pack_array()'
c = a.pack_array()
print c
print ''
# modify array
print '>>> modify c[2]'
c[2] = 25
print c
print ''
# repack dictionary
a.unpack_array(c)
print '>>> a.unpack_array(c)'
print a
print ''
# make a copy
b = deepcopy(a)
    # a method to apply recursively to both a and b
def method(self,other):
try: return self-other
except: return None # ignore strings or failed operations
d = a.do_recursive(method,b)
print ">>> recursive a-b"
print d
print ''
# --------------------------------------------------------
# Access Speed test
# --------------------------------------------------------
print 'Access speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e6)):
v = o.t.i
t1 = time()-t0
# accessing a simpler bunch
class SimpleBunch:
pass
z = SimpleBunch()
z.t = SimpleBunch
z.t.i = 0
t0 = time()
for i in range(int(1e6)):
v = z.t.i
t2 = time()-t0
# results
print 'OrderedBunch: %.6f s' % (t1)
print 'SimpleBunch: %.6f s' % (t2)
assert (t1-t2)/t2 < 0.5
print ''
# --------------------------------------------------------
# Assignment Speed test
# --------------------------------------------------------
print 'Assignment speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e6)):
o.t.i = 10
t1 = time()-t0
# accessing a simpler bunch
t0 = time()
for i in range(int(1e6)):
z.t.i = 10
t2 = time()-t0
# results
print 'OrderedBunch: %.6f s' % (t1)
print 'SimpleBunch: %.6f s' % (t2)
assert (t1-t2)/t2 < 5.0
print ''
# ----------------------------------------------------------------------
# Callable Object
# ----------------------------------------------------------------------
# has a hidden property
# works like a decorator
class Callable(OrderedBunch):
d = Property('d')
def __init__(self,f,d):
self.f = f
self.d = d
def __call__(self,*x):
return self.f(self.d,*x)
# ----------------------------------------------------------------------
# Test Function
# ----------------------------------------------------------------------
# to work in the callable object
def test_function(c):
return c.x
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main() | bsd-3-clause | -5,173,433,652,382,378,000 | 23.399103 | 80 | 0.310138 | false | 4.652424 | true | false | false |
aosprey/rose | lib/python/rose/suite_hook.py | 1 | 5734 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-8 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Hook functionalities for a suite."""
from email.mime.text import MIMEText
import os
import pwd
from rose.opt_parse import RoseOptionParser
from rose.popen import RosePopener
from rose.reporter import Reporter
from rose.resource import ResourceLocator
from rose.suite_engine_proc import SuiteEngineProcessor
from smtplib import SMTP, SMTPException
import socket
class RoseSuiteHook(object):
"""Hook functionalities for a suite."""
def __init__(self, event_handler=None, popen=None, suite_engine_proc=None):
self.event_handler = event_handler
if popen is None:
popen = RosePopener(event_handler)
self.popen = popen
if suite_engine_proc is None:
suite_engine_proc = SuiteEngineProcessor.get_processor(
event_handler=event_handler, popen=popen)
self.suite_engine_proc = suite_engine_proc
def handle_event(self, *args, **kwargs):
"""Call self.event_handler if it is callabale."""
if callable(self.event_handler):
return self.event_handler(*args, **kwargs)
def run(self, suite_name, task_id, hook_event, hook_message=None,
should_mail=False, mail_cc_list=None, should_shutdown=False,
should_retrieve_job_logs=False):
"""
Invoke the hook for a suite.
1. For a task hook, if the task runs remotely, retrieve its log from
the remote host.
2. If "should_mail", send an email notification to the current user,
and those in the "mail_cc_list".
3. If "should_shutdown", shut down the suite.
"""
# Retrieve log and populate job logs database
task_ids = []
if task_id and should_retrieve_job_logs:
task_ids = [task_id]
self.suite_engine_proc.job_logs_pull_remote(suite_name, task_ids)
# Send email notification if required
email_exc = None
if should_mail:
text = ""
if task_id:
text += "Task: %s\n" % task_id
if hook_message:
text += "Message: %s\n" % hook_message
url = self.suite_engine_proc.get_suite_log_url(None, suite_name)
text += "See: %s\n" % (url)
user = pwd.getpwuid(os.getuid()).pw_name
conf = ResourceLocator.default().get_conf()
host = conf.get_value(["rose-suite-hook", "email-host"],
default="localhost")
msg = MIMEText(text)
msg["From"] = user + "@" + host
msg["To"] = msg["From"]
if mail_cc_list:
mail_cc_addresses = []
for mail_cc_address in mail_cc_list:
if "@" not in mail_cc_address:
mail_cc_address += "@" + host
mail_cc_addresses.append(mail_cc_address)
msg["Cc"] = ", ".join(mail_cc_addresses)
mail_cc_list = mail_cc_addresses
else:
mail_cc_list = []
msg["Subject"] = "[%s] %s" % (hook_event, suite_name)
smtp_host = conf.get_value(["rose-suite-hook", "smtp-host"],
default="localhost")
try:
smtp = SMTP(smtp_host)
smtp.sendmail(
msg["From"], [msg["To"]] + mail_cc_list, msg.as_string())
smtp.quit()
except (socket.error, SMTPException) as email_exc:
pass
# Shut down if required
if should_shutdown:
self.suite_engine_proc.shutdown(suite_name, args=["--kill"])
if email_exc is not None:
raise
__call__ = run
def main():
"""Implement "rose suite-hook" command."""
opt_parser = RoseOptionParser()
opt_parser.add_my_options(
"mail_cc", "mail", "retrieve_job_logs", "shutdown")
opts, args = opt_parser.parse_args()
for key in ["mail_cc"]:
values = []
if getattr(opts, key):
for value in getattr(opts, key):
values.extend(value.split(","))
setattr(opts, key, values)
report = Reporter(opts.verbosity - opts.quietness - 1) # Reduced default
popen = RosePopener(event_handler=report)
suite_engine_proc = SuiteEngineProcessor.get_processor(
event_handler=report, popen=popen)
args = suite_engine_proc.process_suite_hook_args(*args, **vars(opts))
hook = RoseSuiteHook(event_handler=report,
popen=popen,
suite_engine_proc=suite_engine_proc)
hook(*args,
should_mail=opts.mail,
mail_cc_list=opts.mail_cc,
should_shutdown=opts.shutdown,
should_retrieve_job_logs=opts.retrieve_job_logs)
if __name__ == "__main__":
main()
| gpl-3.0 | -6,236,962,520,064,511,000 | 37.483221 | 79 | 0.571503 | false | 3.970914 | false | false | false |
mcdallas/nba_py | nba_py/constants.py | 1 | 17673 | CURRENT_SEASON = '2016-17'
TEAMS = {
'ATL': {
'abbr': 'ATL',
'city': 'Atlanta',
'code': 'hawks',
'conference': 'Eastern',
'displayAbbr': 'ATL',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612737',
'name': 'Hawks',
'color': 'E2373E',
'colors': ['E2373E', '002A5C', 'BAC4CA']
}, 'BOS': {
'abbr': 'BOS',
'city': 'Boston',
'code': 'celtics',
'conference': 'Eastern',
'displayAbbr': 'BOS',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612738',
'name': 'Celtics',
'color': '007239',
'colors': ['007239', 'AE8445', '982527', '000000']
}, 'BKN': {
'abbr': 'BKN',
'city': 'Brooklyn',
'code': 'nets',
'conference': 'Eastern',
'displayAbbr': 'BKN',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612751',
'name': 'Nets',
'color': '000000',
'colors': ['000000', 'FFFFFF']
}, 'CHA': {
'abbr': 'CHA',
'city': 'Charlotte',
'code': 'hornets',
'conference': 'Eastern',
'displayAbbr': 'CHA',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612766',
'name': 'Hornets',
'color': '00848E',
'colors': ['00848E', '260F54', 'CCCCCC']
}, 'CHI': {
'abbr': 'CHI',
'city': 'Chicago',
'code': 'bulls',
'conference': 'Eastern',
'displayAbbr': 'CHI',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612741',
'name': 'Bulls',
'color': 'C60033',
'colors': ['C60033', '000000']
}, 'CLE': {
'abbr': 'CLE',
'city': 'Cleveland',
'code': 'cavaliers',
'conference': 'Eastern',
'displayAbbr': 'CLE',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612739',
'name': 'Cavaliers',
'color': '860038',
'colors': ['860038', '002D62', 'FDBA31']
}, 'DAL': {
'abbr': 'DAL',
'city': 'Dallas',
'code': 'mavericks',
'conference': 'Western',
'displayAbbr': 'DAL',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612742',
'name': 'Mavericks',
'color': '0063AF',
'colors': ['0063AF', 'BAC4CA', '000000']
}, 'DEN': {
'abbr': 'DEN',
'city': 'Denver',
'code': 'nuggets',
'conference': 'Western',
'displayAbbr': 'DEN',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612743',
'name': 'Nuggets',
'color': '559FD6',
'colors': ['559FD6', '006BB7', 'FEA927']
}, 'DET': {
'abbr': 'DET',
'city': 'Detroit',
'code': 'pistons',
'conference': 'Eastern',
'displayAbbr': 'DET',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612765',
'name': 'Pistons',
'color': 'EC003D',
'colors': ['EC003D', '0058A6', '001D4A']
}, 'GSW': {
'abbr': 'GSW',
'city': 'Golden State',
'code': 'warriors',
'conference': 'Western',
'displayAbbr': 'GSW',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612744',
'name': 'Warriors',
'color': '0068B3',
'colors': ['0068B3', 'FFC423']
}, 'HOU': {
'abbr': 'HOU',
'city': 'Houston',
'code': 'rockets',
'conference': 'Western',
'displayAbbr': 'HOU',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612745',
'name': 'Rockets',
'color': 'C60033',
'colors': ['C60033', '000000']
}, 'IND': {
'abbr': 'IND',
'city': 'Indiana',
'code': 'pacers',
'conference': 'Eastern',
'displayAbbr': 'IND',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612754',
'name': 'Pacers',
'color': '001D4A',
'colors': ['001D4A', 'FEAC2D', 'B0B2B5']
}, 'LAC': {
'abbr': 'LAC',
'city': 'Los Angeles',
'code': 'clippers',
'conference': 'Western',
'displayAbbr': 'LAC',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612746',
'name': 'Clippers',
'color': '00559A',
'colors': ['00559A', 'EC003D']
}, 'LAL': {
'abbr': 'LAL',
'city': 'Los Angeles',
'code': 'lakers',
'conference': 'Western',
'displayAbbr': 'LAL',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612747',
'name': 'Lakers',
'color': 'FEA927',
'colors': ['FEA927', '42186E', '000000']
}, 'MEM': {
'abbr': 'MEM',
'city': 'Memphis',
'code': 'grizzlies',
'conference': 'Western',
'displayAbbr': 'MEM',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612763',
'name': 'Grizzlies',
'color': '182A48',
'colors': ['182A48', '4C78AD', 'FEA927', 'AAC8E5']
}, 'MIA': {
'abbr': 'MIA',
'city': 'Miami',
'code': 'heat',
'conference': 'Eastern',
'displayAbbr': 'MIA',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612748',
'name': 'Heat',
'color': '98002E',
'colors': ['98002E', 'F88D1D', '000000']
}, 'MIL': {
'abbr': 'MIL',
'city': 'Milwaukee',
'code': 'bucks',
'conference': 'Eastern',
'displayAbbr': 'MIL',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612749',
'name': 'Bucks',
'color': 'C41230',
'colors': ['C41230', '003815', 'BAC4CA']
}, 'MIN': {
'abbr': 'MIN',
'city': 'Minnesota',
'code': 'timberwolves',
'conference': 'Western',
'displayAbbr': 'MIN',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612750',
'name': 'Timberwolves',
        'color': '003F70',
'colors': ['003F70', '006F42', 'BAC4CA', 'FFE211', 'DE2032', '000000']
}, 'NOP': {
'abbr': 'NOP',
'city': 'New Orleans',
'code': 'pelicans',
'conference': 'Western',
'displayAbbr': 'NOP',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612740',
'name': 'Pelicans',
        'color': '002B5C',
'colors': ['002B5C', 'B4975A', 'E13A3E']
}, 'NYK': {
'abbr': 'NYK',
'city': 'New York',
'code': 'knicks',
'conference': 'Eastern',
'displayAbbr': 'NYK',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612752',
'name': 'Knicks',
'color': 'F3571F',
'colors': ['F3571F', '0067B2', 'BAC4CA']
}, 'OKC': {
'abbr': 'OKC',
'city': 'Oklahoma City',
'code': 'thunder',
'conference': 'Western',
'displayAbbr': 'OKC',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612760',
'name': 'Thunder',
'color': 'FDBB30',
'colors': ['FDBB30', 'F05133', '007DC3', '002D62']
}, 'ORL': {
'abbr': 'ORL',
'city': 'Orlando',
'code': 'magic',
'conference': 'Eastern',
'displayAbbr': 'ORL',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612753',
'name': 'Magic',
'color': '006BB7',
'colors': ['006BB7', 'BAC4CA', '000000']
}, 'PHI': {
'abbr': 'PHI',
'city': 'Philadelphia',
'code': 'sixers',
'conference': 'Eastern',
'displayAbbr': 'PHI',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612755',
'name': 'Sixers',
'color': 'EC003D',
'colors': ['EC003D', '00559A', 'BAC4CA']
}, 'PHX': {
'abbr': 'PHX',
'city': 'Phoenix',
'code': 'suns',
'conference': 'Western',
'displayAbbr': 'PHX',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612756',
'name': 'Suns',
'color': 'E45F1F',
'colors': ['E45F1F', 'F89F1B', 'BAC4CA', '000000']
}, 'POR': {
'abbr': 'POR',
'city': 'Portland',
'code': 'blazers',
'conference': 'Western',
'displayAbbr': 'POR',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612757',
'name': 'Trail Blazers',
'color': 'DE2032',
'colors': ['DE2032', 'BAC4CA', '000000']
}, 'SAC': {
'abbr': 'SAC',
'city': 'Sacramento',
'code': 'kings',
'conference': 'Western',
'displayAbbr': 'SAC',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612758',
'name': 'Kings',
'color': '542E91',
'colors': ['542E91', 'BAC4CA', '000000']
}, 'SAS': {
'abbr': 'SAS',
'city': 'San Antonio',
'code': 'spurs',
'conference': 'Western',
'displayAbbr': 'SAS',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612759',
'name': 'Spurs',
        'color': 'BA24CA',
'colors': ['BA24CA', '000000']
}, 'TOR': {
'abbr': 'TOR',
'city': 'Toronto',
'code': 'raptors',
'conference': 'Eastern',
'displayAbbr': 'TOR',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612761',
'name': 'Raptors',
'color': 'C60033',
'colors': ['C60033', 'BAC4CA']
}, 'UTA': {
'abbr': 'UTA',
'city': 'Utah',
'code': 'jazz',
'conference': 'Western',
'displayAbbr': 'UTA',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612762',
'name': 'Jazz',
        'color': '002A5C',
        'colors': ['002A5C', '004812', 'FCB034', 'BAC4CA']
}, 'WAS': {
'abbr': 'WAS',
'city': 'Washington',
'code': 'wizards',
'conference': 'Eastern',
'displayAbbr': 'WAS',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612764',
'name': 'Wizards',
'color': '002A5B',
'colors': ['002A5B', 'E21836', 'BAC4CA']
}
}
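# The classes below are lightweight namespaces for the query parameters accepted
# by the stats.nba.com endpoints; most expose a ``Default`` attribute that the
# endpoint wrappers fall back to. Illustrative use (exact endpoint signatures
# live in the other nba_py modules):
#   from nba_py.constants import TEAMS, SeasonType
#   team_id = TEAMS['ATL']['id']
#   # e.g. pass season_type=SeasonType.Playoffs to a game-log endpoint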
class _DefaultN:
Default = 'N'
class _DefaultBlank:
Default = ''
class _DefaultZero:
Default = '0'
class League:
NBA = '00'
Default = NBA
class PerMode:
Totals = 'Totals'
PerGame = 'PerGame'
MinutesPer = 'MinutesPer'
Per48 = 'Per48'
Per40 = 'Per40'
Per36 = 'Per36'
PerMinute = 'PerMinute'
PerPossession = 'PerPossession'
PerPlay = 'PerPlay'
Per100Possessions = 'Per100Possessions'
Per100Plays = 'Per100Plays'
Default = PerGame
class SeasonType:
Regular = 'Regular Season'
Playoffs = 'Playoffs'
Default = Regular
class MeasureType:
Base = 'Base'
Advanced = 'Advanced'
Misc = 'Misc'
FourFactors = 'Four Factors'
Scoring = 'Scoring'
Opponent = 'Opponent'
Usage = 'Usage'
Default = Base
class PtMeasureType:
SpeedDistance = 'SpeedDistance'
class GroupQuantity:
Default = 5
class Outcome(_DefaultBlank):
Win = 'W'
Loss = 'L'
class Location(_DefaultBlank):
Home = 'Home'
Away = 'Away'
class SeasonSegment(_DefaultBlank):
EntireSeason = ''
PreAllStar = 'Pre All-Star'
PostAllStar = 'Post All-Star'
class DateFrom(_DefaultBlank):
pass
class DateTo(_DefaultBlank):
pass
class VsConference(_DefaultBlank):
All = ''
East = 'East'
West = 'West'
class VsDivision(_DefaultBlank):
All = ''
Atlantic = 'Atlantic'
Central = 'Central'
Northwest = 'Northwest'
Pacific = 'Pacific'
Southeast = 'Southeast'
Southwest = 'Southwest'
class GameSegment(_DefaultBlank):
EntireGame = ''
FirstHalf = 'First Half'
SecondHalf = 'Second Half'
Overtime = 'Overtime'
class ClutchTime(_DefaultBlank):
Last5Min = 'Last 5 Minutes'
Last4Min = 'Last 4 Minutes'
Last3Min = 'Last 3 Minutes'
Last2Min = 'Last 2 Minutes'
Last1Min = 'Last 1 Minutes'
Last30Sec = 'Last 30 Seconds'
Last10Sec = 'Last 10 Seconds'
class ShotClockRange(_DefaultBlank):
AllRanges = ''
# I honestly don't know anytime the shot clock would be off
ShotClockOff = 'ShotClock Off'
def get(self, n):
if n > 24 or n < 0:
return ''
elif 22 <= n <= 24:
return '24-22'
elif 18 <= n < 22:
return '22-18 Very Early'
elif 15 <= n < 18:
return '18-15 Early'
elif 7 <= n < 15:
return '15-7 Average'
elif 4 <= n < 7:
return '7-4 Late'
elif 0 <= n < 4:
return '4-0 Very Late'
class AheadBehind(_DefaultBlank):
AheadOrBehind = 'Ahead or Behind'
AheadOrTied = 'Ahead or Tied'
BehindOrTied = 'Behind or Tied'
class PlusMinus(_DefaultN):
pass
class PaceAdjust(_DefaultN):
pass
class Rank(_DefaultN):
pass
class OpponentTeamID(_DefaultZero):
pass
class Period(_DefaultZero):
AllQuarters = '0'
FirstQuarter = '1'
SecondQuarter = '2'
ThirdQuarter = '3'
FourthQuarter = '4'
def Overtime(self, n):
return str(4 + n)
class LastNGames(_DefaultZero):
pass
class PlayoffRound(_DefaultZero):
All = '0'
QuarterFinals = '1'
SemiFinals = '2'
ConferenceFinals = '3'
Finals = '4'
class Month(_DefaultZero):
All = '0'
October = '1'
November = '2'
December = '3'
January = '4'
February = '5'
March = '6'
April = '7'
May = '8'
June = '9'
July = '10'
August = '11'
September = '12'
class RangeType(_DefaultZero):
pass
class StartRange(_DefaultZero):
pass
class EndRange(_DefaultZero):
pass
class StartPeriod(Period):
pass
class EndPeriod(Period):
pass
class StatCategory:
PTS = 'PTS'
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG%'
FG3M = '3PM'
FG3A = '3PA'
FG3_PCT = '3P%'
FTM = 'FTM'
FTA = 'FTA'
FT_PCT = 'FT%'
OREB = 'OREB'
DREB = 'DREB'
REB = 'REB'
AST = 'AST'
STL = 'STL'
BLK = 'BLK'
TOV = 'TOV'
EFF = 'EFF'
AST_TOV = 'AST/TO'
STL_TOV = 'STL/TOV'
PF = 'PF'
Default = PTS
class ContextMeasure:
# Not sure if this is mapped correctly. Source: https://github.com/bradleyfay/NBAStats
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG_PCT'
FG3M = 'FG3m'
FG3A = 'FG3A'
FG3_PCT = 'FG3_PCT'
PF = 'PF'
EFG_PCT = 'EFG_PCT'
TS_PCT = 'TS_PCT'
PTS_FB = 'PTS_FB'
PTS_OFF_TOV = 'PTS_OFF_TOV'
PTS_2ND_CHANCE = 'PTS_2ND_CHANCE'
Default = FGM
class Scope:
AllPlayers = 'S'
Rookies = 'Rookies'
Default = AllPlayers
class PlayerScope:
# ugh this is so similar to Scope, why does it have its own
AllPlayers = 'All Players'
Rookies = 'Rookie'
Default = AllPlayers
class PlayerOrTeam:
Player = 'Player'
Team = 'Team'
Default = Player
class GameScope:
Season = 'Season'
Last10 = 'Last 10'
Yesterday = 'Yesterday'
Finals = 'Finals'
Default = Season
class Game_Scope(_DefaultBlank):
Last10 = 'Last 10'
Yesterday = 'Yesterday'
class Player_or_Team:
Player = 'P'
Team = 'T'
Default = Player
class Conference(VsConference):
pass
class Division(VsDivision):
pass
class TeamID(_DefaultZero):
pass
class GameID(_DefaultBlank):
pass
class RookieYear(_DefaultBlank):
pass
class PlayerExperience(_DefaultBlank):
Rookie = 'Rookie'
Sophomore = 'Sophomore'
Veteran = 'Veteran'
class PlayerPosition(_DefaultBlank):
Forward = 'F'
Center = 'C'
Guard = 'G'
class StarterBench(_DefaultBlank):
Starters = 'Starters'
Bench = 'Bench'
class DraftYear(_DefaultBlank):
pass
class DraftPick(_DefaultBlank):
FirstRound = '1st+Round'
SecondRound = '2nd+Round'
FirstPick = '1st+Pick'
Lottery = 'Lottery+Pick'
Top5 = 'Top+5+Pick'
Top10 = 'Top+10+Pick'
Top15 = 'Top+15+Pick'
Top20 = 'Top+20+Pick'
Top25 = 'Top+25+Pick'
Picks11Thru20 = 'Picks+11+Thru+20'
Picks21Thru30 = 'Picks+21+Thru+30'
Undrafted = 'Undrafted'
class College(_DefaultBlank):
pass
class Country(_DefaultBlank):
pass
class Height(_DefaultBlank):
'''
Example:
for greater than 6ft8 api call should be GT+6-8
for lower than 7ft3 api call should be LT+7-3
'''
class Weight(_DefaultBlank):
'''
Example:
for greater than 225lbs api call should be GT+225lbs
'''
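# The docstrings of Height and Weight above describe the filter strings the
# stats endpoints expect.  The helpers below are an illustrative sketch only
# (they are not part of the original API and the names are made up), showing
# how such strings could be built:
def _example_height_filter(feet, inches, greater_than=True):
    """Return e.g. 'GT+6-8' for taller than 6ft8 or 'LT+7-3' for shorter than 7ft3."""
    prefix = 'GT' if greater_than else 'LT'
    return '{0}+{1}-{2}'.format(prefix, feet, inches)
def _example_weight_filter(pounds, greater_than=True):
    """Return e.g. 'GT+225lbs' for heavier than 225lbs."""
    return '{0}+{1}lbs'.format('GT' if greater_than else 'LT', pounds)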
class Counter:
Default = '1000'
class Sorter:
PTS = 'PTS'
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG_PCT'
FG3M = 'FG3M'
FG3A = 'FG3A'
FG3_PCT = 'FG3_PCT'
FTM = 'FTM'
FTA = 'FTA'
FT_PCT = 'FT_PCT'
OREB = 'OREB'
DREB = 'DREB'
AST = 'AST'
STL = 'STL'
BLK = 'BLK'
TOV = 'TOV'
REB = 'REB'
Default = PTS
class Direction:
DESC = 'DESC'
ASC = 'ASC'
Default = DESC
| bsd-3-clause | -3,240,081,273,265,094,000 | 21.951948 | 90 | 0.502009 | false | 2.995424 | false | false | false |
swiftcoder/ashima-iv | src/game.py | 1 | 2465 |
import pyglet
from pyglet.gl import *
import math
from app import AppState, enter_state
from outcome import OutcomeState
from window import Window
from entity import World
import factories
from euclid import Vector3
from resources import Resources
from camera import Camera
from controller import Controller
from tether import Tether
from graphics import Graphics
from teams import Teams
class GameState(AppState):
def start(self):
music = pyglet.resource.media('data/music/the_moonlight_strikers_act1.mp3')
self.player = music.play()
self.sunlight = Resources.load_shader('data/shaders/sunlight.shader')
ship = factories.create_hammerfall(Vector3(0, -250, 2400), 'red')
World.add(ship)
for i in range(4, 0, -1):
ship = factories.create_anaconda(Vector3(i*5, i*10, i*10 + 1000), 'red')
World.add(ship)
for i in range(2, 0, -1):
ship = factories.create_viper(Vector3(i*40, i*-10, i*10 + 25), 'blue', i != 1)
World.add(ship)
self.ship = ship
World.set_player(self.ship)
@ship.event
def on_remove(ship):
print 'defeat'
enter_state( OutcomeState(False) )
self.fps_display = pyglet.clock.ClockDisplay()
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
aspect = float(Window.width)/float(Window.height)
camera = Camera(math.pi/4, aspect, 0.1, 100000.0)
Graphics.camera = camera
cam = factories.create_camera(camera)
World.add(cam)
tether = Tether(cam, ship, Vector3(-5, 8, -16), Vector3(0, 0, 65))
aim = factories.aim_assist(cam)
crosshairs = factories.cross_hairs(ship)
factories.create_sky(cam)
def resume(self):
control = Controller(self.ship)
self.player.play()
def pause(self):
if self.player:
self.player.pause()
def update(self, dt):
World.perform_update(dt)
if Teams.in_team('red') == []:
print 'victory'
enter_state( OutcomeState(True) )
def draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor4f(1, 1, 1, 1)
self.sunlight.bind()
self.sunlight.uniform('sunDir', Vector3(-1, 1, 0).normalize())
self.sunlight.unbind()
World.perform_frame()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, Window.width, 0, Window.height, -100, 100)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.fps_display.draw()
| bsd-3-clause | 3,728,101,832,243,301,400 | 22.037383 | 81 | 0.691278 | false | 2.833333 | false | false | false |
Reilithion/xmms2-reilithion | wafadmin/Tools/gas.py | 1 | 1258 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008 (ita)
"as and gas"
import os, sys
import Task
from TaskGen import extension, taskgen, after, before
EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}'
Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o')
@extension(EXT_ASM)
def asm_hook(self, node):
# create the compilation task: cpp or cc
task = self.create_task('asm')
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
task.inputs = [node]
task.outputs = [node.change_ext(obj_ext)]
self.compiled_tasks.append(task)
self.meths.append('asm_incflags')
@taskgen
@after('apply_obj_vars_cc')
@after('apply_obj_vars_cxx')
@before('apply_link')
def asm_incflags(self):
if self.env['ASINCFLAGS']: self.env['_ASINCFLAGS'] = self.env['ASINCFLAGS']
if 'cxx' in self.features: self.env['_ASINCFLAGS'] = self.env['_CXXINCFLAGS']
else: self.env['_ASINCFLAGS'] = self.env['_CCINCFLAGS']
def detect(conf):
comp = os.environ.get('AS', '')
if not comp: comp = conf.find_program('as', var='AS')
if not comp: comp = conf.find_program('gas', var='AS')
if not comp: comp = conf.env['CC']
if not comp: return
v = conf.env
v['ASFLAGS'] = ''
| lgpl-2.1 | 4,753,860,226,152,624,000 | 26.347826 | 78 | 0.651828 | false | 2.615385 | false | false | false |
YannickB/odoo-hosting | clouder_template_gitlab/oneclick.py | 1 | 3584 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from odoo import models, api
except ImportError:
from openerp import models, api
class ClouderNode(models.Model):
"""
Add methods to manage the postgres specificities.
"""
_inherit = 'clouder.node'
@api.multi
def oneclick_deploy_exec(self):
super(ClouderNode, self).oneclick_deploy_exec()
self.oneclick_deploy_element('service', 'backup-bup')
bind = self.oneclick_deploy_element('service', 'bind', ports=[53])
if not self.domain_id.dns_id:
self.domain_id.write({'dns_id': bind.id})
self.deploy_dns_exec()
self.oneclick_deploy_element('service', 'postfix-all', ports=[25])
self.oneclick_deploy_element('service', 'proxy', ports=[80, 443])
# service = self.oneclick_deploy_element('service', 'shinken')
# self.oneclick_deploy_element('base', 'shinken', service=service)
#
# service = self.oneclick_deploy_element('service', 'registry')
# self.oneclick_deploy_element('base', 'registry', service=service)
#
# self.oneclick_deploy_element('service', 'gitlab-all')
# self.oneclick_deploy_element(
# 'base', 'gitlab', code_service='gitlab-all-gitlab')
#
# self.oneclick_deploy_element('service', 'gitlabci')
@api.multi
def oneclick_purge_exec(self):
service_obj = self.env['clouder.service']
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'gitlabci')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'gitlab-all')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'registry')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'shinken')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'proxy')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'bind')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'postfix-all')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'backup-bup')]).unlink()
super(ClouderNode, self).oneclick_purge_exec()
| agpl-3.0 | 4,088,218,775,987,584,500 | 37.12766 | 79 | 0.57394 | false | 4.133795 | false | false | false |
FairyDevicesRD/FairyMaCorpus | scripts/validate.py | 1 | 1535 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import codecs
import sys
def operation(inf, outf):
'''
Check
'''
err = False
annotations = set([])
for line in inf:
if line.startswith(";"):
continue
items = line[:-1].split("\t")
if len(items) == 0:
continue
        # Among ASCII characters, only | and ? are allowed.
annotation = items[0]
for char in annotation:
if ord(char) <= 128:
if char not in ['|', '?']:
outf.write("Illigal ASCII character: %s (%s)\n" % (char, ord(char)))
err = True
if annotation in annotations:
outf.write("Duplication: %s\n" % (annotation))
err = True
annotations.add(annotation)
return err
def main():
'''
Parse arguments
'''
oparser = argparse.ArgumentParser()
oparser.add_argument("-i", "--input", dest="input", default="-")
oparser.add_argument("-o", "--output", dest="output", default="-")
oparser.add_argument(
"--verbose", dest="verbose", action="store_true", default=False)
opts = oparser.parse_args()
if opts.input == "-":
inf = sys.stdin
else:
inf = codecs.open(opts.input, "r", "utf8")
if opts.output == "-":
outf = sys.stdout
else:
outf = codecs.open(opts.output, "w", "utf8")
err = operation(inf, outf)
if err:
sys.exit(-1)
if __name__ == '__main__':
main()
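# Example invocation (file names are hypothetical):
#   python validate.py -i corpus.tsv -o report.txt
# The first tab-separated field of every non-comment line is checked; the
# script reports duplicates and disallowed ASCII characters and exits with a
# non-zero status when any problem is found.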
| apache-2.0 | -762,907,266,110,665,200 | 23.365079 | 88 | 0.517915 | false | 3.8375 | false | false | false |
codebox/star-charts | diagram.py | 1 | 2486 |
from svg import Svg
import codecs
MARGIN_X=20
MARGIN_Y=60
MAGNIFICATION = 500
MIN_D = 1
MAX_D = 4
DIMMEST_MAG = 6
BRIGHTEST_MAG = -1.5
LABEL_OFFSET_X = 4
LABEL_OFFSET_Y = 3
FONT_SIZE=16
FONT_COLOUR='#167ac6'
TITLE_SIZE=16
TITLE_COLOUR='#000'
COORDS_SIZE=12
COORDS_COLOUR='#000'
STAR_COLOUR='#000'
CURVE_WIDTH = 0.1
CURVE_COLOUR = '#000'
class Diagram:
def __init__(self, title, area, star_data_list):
self.title = title
self.area = area
self.star_data_list = star_data_list
self.curves = []
self.border_min_x = self.border_min_y = self.border_max_x = self.border_max_y = None
def add_curve(self, curve_points):
self.curves.append(curve_points)
def _mag_to_d(self, m):
mag_range = DIMMEST_MAG - BRIGHTEST_MAG
m_score = (DIMMEST_MAG - m) / mag_range
r_range = MAX_D - MIN_D
return MIN_D + m_score * r_range
def _invert_and_offset(self, x, y):
return x + MARGIN_X, (self.star_data_list.max_y - y) + MARGIN_Y
def render_svg(self, outfile):
svg = Svg()
# add stars first
for star_data in self.star_data_list.data:
x, y = self._invert_and_offset(star_data.x, star_data.y)
svg.circle(x, y, self._mag_to_d(star_data.mag), STAR_COLOUR)
# next add labels
for star_data in self.star_data_list.data:
if star_data.label:
x, y = self._invert_and_offset(star_data.x, star_data.y)
d = self._mag_to_d(star_data.mag)
svg.text(x + LABEL_OFFSET_X + d/2, y + LABEL_OFFSET_Y, star_data.label, FONT_COLOUR, FONT_SIZE)
# next add curves
for curve_points in self.curves:
svg.curve([self._invert_and_offset(cp[0], cp[1]) for cp in curve_points], CURVE_WIDTH, CURVE_COLOUR)
# title
center_x = self.star_data_list.max_x/2 + MARGIN_X
svg.text(center_x, MARGIN_Y/2, self.title, TITLE_COLOUR, TITLE_SIZE, 'middle', 'underline')
# coords
chart_bottom_y = self.star_data_list.max_y + MARGIN_Y
svg.text(center_x, chart_bottom_y + MARGIN_Y/2, "Right Ascension: {}-{}".format(self.area.ra_min, self.area.ra_max), COORDS_COLOUR, COORDS_SIZE, 'middle')
svg.text(center_x, chart_bottom_y + MARGIN_Y/2 + COORDS_SIZE, "Declination: {}-{}".format(self.area.dec_min, self.area.dec_max), COORDS_COLOUR, COORDS_SIZE, 'middle')
codecs.open(outfile, 'w', 'utf-8').writelines(svg.to_list())
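if __name__ == '__main__':
    # Minimal usage sketch, not part of the original project: the real star and
    # area objects come from the project's data-loading code, so bare-bones
    # stand-ins (with made-up values) are used here just to exercise render_svg.
    class _Obj(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)
    stars = [_Obj(x=20, y=30, mag=1.5, label='alpha'),
             _Obj(x=80, y=120, mag=4.0, label=None)]
    star_list = _Obj(data=stars, max_x=100, max_y=150)
    area = _Obj(ra_min='6h', ra_max='8h', dec_min='-10', dec_max='+20')
    demo = Diagram('Demo chart', area, star_list)
    demo.add_curve([(20, 30), (80, 120)])
    demo.render_svg('demo_chart.svg')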
| mit | -7,223,114,317,514,982,000 | 30.871795 | 174 | 0.604988 | false | 2.690476 | false | false | false |
avinassh/prawoauth2 | prawoauth2/PrawOAuth2Server.py | 1 | 3785 | #!/usr/bin/env python
import webbrowser
import tornado.ioloop
import tornado.web
__all__ = ['PrawOAuth2Server']
application = None
REDIRECT_URL = 'http://127.0.0.1:65010/authorize_callback'
SCOPES = ['identity', 'read']
REFRESHABLE = True
CODE = None
class AuthorizationHandler(tornado.web.RequestHandler):
def get(self):
global CODE
CODE = self.get_argument('code')
self.write('successful (:')
tornado.ioloop.IOLoop.current().stop()
class PrawOAuth2Server:
"""Creates an instance of `PrawOAuth2Server` which is responsible for
getting `access_token` and `refresh_token` given valid `app_key` and
`app_secret`. This is meant to be run once only.
    :param reddit_client: An instance of praw
    :param app_key: App Key (also known as Client ID) of your
        app. Find them here: https://www.reddit.com/prefs/apps/
    :param app_secret: App Secret (also known as Client Secret) of your
        app. Find them here: https://www.reddit.com/prefs/apps/
:param state: Some unique string which represents your client. You
could use `user_agent` which you used when creating the praw
instance.
    :param scopes: List of scopes for OAuth. Default is `['identity', 'read']`.
https://praw.readthedocs.org/en/latest/pages/oauth.html#oauth-scopes
:param redirect_url: Redirect URL used in authorization process using
        `PrawOAuth2Server`. Default is `http://127.0.0.1:65010/authorize_callback`
        (it must match the redirect URI registered for your app).
:param refreshable: Boolean. Specifies whether you want `access_token`
to be refreshable or not. If it is set to `False` then you have to
use `PrawOAuth2Server` again to generate new `access_token`.
Default is `True`.
"""
def __init__(self, reddit_client, app_key, app_secret,
state, redirect_url=REDIRECT_URL, scopes=SCOPES,
refreshable=REFRESHABLE):
self.reddit_client = reddit_client
self.app_key = app_key
self.app_secret = app_secret
self.state = state
self.redirect_url = redirect_url
self.scopes = set(scopes)
self.refreshable = refreshable
self.code = None
self._set_app_info()
self._set_up_tornado()
def _set_app_info(self):
self.reddit_client.set_oauth_app_info(client_id=self.app_key,
client_secret=self.app_secret,
redirect_uri=self.redirect_url)
def _set_up_tornado(self):
global application
application = tornado.web.Application([
(r'/authorize_callback', AuthorizationHandler),
])
application.listen(65010)
def _get_auth_url(self):
return self.reddit_client.get_authorize_url(
state=self.state, scope=self.scopes,
refreshable=self.refreshable)
def start(self):
"""Starts the `PrawOAuth2Server` server. It will open the default
web browser and it will take you to Reddit's authorization page,
asking you to authorize your Reddit account(or account of the bot's)
with your app(or bot script). Once authorized successfully, it will
show `successful` message in web browser.
"""
global CODE
url = self._get_auth_url()
webbrowser.open(url)
tornado.ioloop.IOLoop.current().start()
self.code = CODE
def get_access_codes(self):
"""Returns the `access_token` and `refresh_token`. Obviously, this
method should be called after `start`.
:returns: A dictionary containing `access_token` and `refresh_token`.
"""
return self.reddit_client.get_access_information(code=self.code)
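if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module).  The user agent,
    # app key/secret and scopes below are placeholders to be replaced with the
    # values of your own Reddit app from https://www.reddit.com/prefs/apps/.
    import praw
    reddit_client = praw.Reddit(user_agent='prawoauth2-demo')
    oauth_server = PrawOAuth2Server(reddit_client,
                                    app_key='your-app-key',
                                    app_secret='your-app-secret',
                                    state='prawoauth2-demo',
                                    scopes=['identity'])
    oauth_server.start()  # opens the browser for Reddit's authorization page
    print(oauth_server.get_access_codes())  # access_token and refresh_token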
| mit | -6,661,762,104,879,166,000 | 35.747573 | 81 | 0.639102 | false | 3.94682 | false | false | false |
d1m0/bap_play | lib/z3_embed/embedder.py | 1 | 8204 | from bap import disasm
from bap.adt import Visitor, visit
from ..util import flatten
from z3 import If, eq, Const, And, BitVecRef, ArrayRef, BitVecNumRef, \
BitVecVal, BitVecSort, Context
from re import compile
def boolToBV(boolExp, ctx):
return If(boolExp, BitVecVal(1, 1, ctx=ctx), BitVecVal(0, 1, ctx=ctx),
ctx=ctx)
def bvToBool(bvExp, ctx):
assert eq(bvExp.sort(), BitVecSort(1, ctx=ctx))
return bvExp == BitVecVal(1, 1, ctx)
def bitsToBil(bits, target='x86-64'):
return flatten([x.bil for x in disasm(bits.toBinStr(), arch=target)])
class Stack(list):
def push(self, arg):
return self.append(arg)
def z3Ids(z3Term):
if len(z3Term.children()) == 0:
if (isinstance(z3Term, BitVecRef) or
isinstance(z3Term, ArrayRef)) and \
not isinstance(z3Term, BitVecNumRef):
return set([(z3Term.decl().name(), z3Term.sort())])
else:
return set()
else:
return reduce(lambda acc, el: acc.union(z3Ids(el)),
z3Term.children(),
set())
ssaRE = compile("(.*)\.([0-9]*)")
initialRE = compile("(.*)\.initial*")
unknownRE = compile("unknown_[0-9]*")
def unssa(name):
m = ssaRE.match(name)
assert m
return (m.groups()[0], int(m.groups()[1]))
def isInitial(name):
return initialRE.match(name) is not None
def isUnknown(name):
return unknownRE.match(name) is not None
class StmtNode:
sId = 0
def __init__(self, parents):
self.mDef = {}
self.mSort = {}
self.mPrefix = ""
self.mCond = []
# Assert simpler tree structures - only 2-way branch/join from ifs
assert (len(parents) <= 2)
self.mParents = parents
self.mSplitSrc = None
self.mId = StmtNode.sId
StmtNode.sId += 1
def lookupDef(self, name, cache=False):
if name in self.mDef:
return self
elif len(self.mParents) == 1:
return self.mParents[0].lookupDef(name)
elif len(self.mParents) > 1:
defs = set([x.lookupDef(name) for x in self.mParents])
if (len(defs) == 1):
# If all agree it hasn't been modified in some branch
return list(defs)[0]
else:
# name has been defined independently in different branches.
# Need a phi def here
# Make sure all definitions have the same sort
s = list(defs)[0].mSort[name]
for d in defs:
assert eq(s, d.mSort[name])
self.mDef[name] = defs
self.mSort[name] = s
return self
else:
return None
def cond(self, other):
if (self == other):
return []
elif (len(self.mParents) == 1):
c = self.mParents[0].cond(other)
elif (len(self.mParents) > 1):
c = self.mSplitSrc.cond(other)
else:
assert False, str(other) + " doesn't dominate " + str(self)
return c + self.mCond
def prefix(self):
if len(self.mParents) == 1:
return self.mParents[0].prefix() + self.mPrefix
elif len(self.mParents) > 1:
return self.mSplitSrc.prefix() + self.mPrefix
else:
return self.mPrefix
def ssa(self, name):
return name + self.prefix() + "." + str(self.mId)
class StmtDef(StmtNode):
def __init__(self, parents, **kwArgs):
StmtNode.__init__(self, parents)
self.mDef = kwArgs
self.mSort = {k: v.sort() for (k, v) in kwArgs.iteritems()}
class StmtBranch(StmtNode):
def __init__(self, parent, cond, prefix):
StmtNode.__init__(self, [parent])
self.mCond = [cond]
self.mPrefix = prefix
class StmtJoin(StmtNode):
def __init__(self, parents, splitSrc):
StmtNode.__init__(self, parents)
self.mSplitSrc = splitSrc
class Z3Embedder(Visitor):
""" Z3 BIL Visitor. Entry points correpsond to
the ADTs defined in the bap.bil module
"""
def __init__(self, ctx):
Visitor.__init__(self)
self.mStack = Stack()
self.mNodeMap = {}
self.mCtx = ctx
initialState = {name: Const(name + ".initial", sort)
for name, sort in self.arch_state()}
self.mRoot = StmtDef([], **initialState)
self.mScope = self.mRoot
self.mNodeMap = {self.mScope.mId: self.mScope}
self.mNumUnknowns = 0
def getFreshUnknown(self, typ):
newUnknown = "unknown_" + str(self.mNumUnknowns)
z3Unknown = Const(newUnknown, typ)
self.mScope.mDef[newUnknown] = z3Unknown
self.mScope.mSort[newUnknown] = typ
self.mNumUnknowns += 1
return z3Unknown
def pushScope(self, **kwArgs):
if (len(kwArgs) == 0):
raise TypeError("Can't push a scope unless we modify some vars")
self.mScope = StmtDef([self.mScope], **kwArgs)
self.mNodeMap[self.mScope.mId] = self.mScope
def pushBranchScope(self, prefix, cond, fromScope):
self.mScope = StmtBranch(fromScope, cond, prefix)
self.mNodeMap[self.mScope.mId] = self.mScope
def pushJoinScope(self, left, right, split):
self.mScope = StmtJoin([left, right], split)
self.mNodeMap[self.mScope.mId] = self.mScope
def popScope(self):
# Can only pop Def scopes (related to Let exprs)
assert len(self.mScope.mParents) == 1 and\
isinstance(self.mScope, StmtDef)
res = self.mScope
self.mScope = self.mScope.mParents[0]
return res
def lookupNode(self, id):
try:
return self.mNodeMap[id]
except KeyError, e:
print self.mNodeMap
raise e
def lookup(self, name):
defNode = self.mScope.lookupDef(name)
if (defNode):
return (defNode.ssa(name), defNode.mSort[name])
else:
return (name, None)
def scopeMarker(self):
return self.mScope
def extract_one(self, node, name, sort, emitted):
if (node, name) in emitted:
return []
ssaName = node.ssa(name)
defn = node.mDef[name]
ctx = self.mCtx
asserts = []
if (isinstance(defn, set)):
asserts.extend(reduce(
lambda acc, nd: acc + self.extract_one(nd, name, sort,
emitted),
defn, []))
baseDef = [x for x in defn if len(x.cond(self.mRoot)) == 0]
assert len(baseDef) == 1
baseDef = baseDef[0]
otherDefs = filter(lambda x: x != baseDef, defn)
z3Val = reduce(
lambda exp, d: If(And(*(d.cond(self.mRoot) + [ctx])),
Const(d.ssa(name), sort),
exp),
otherDefs,
Const(baseDef.ssa(name), sort))
else:
for (id, idSort) in z3Ids(defn):
if isInitial(id) or isUnknown(id):
# Initial values and unknowns are not defined in
# any scope
continue
unssaName, ssaId = unssa(id)
defnNode = self.lookupNode(ssaId)
asserts.extend(self.extract_one(defnNode,
unssaName, idSort, emitted))
z3Val = defn
asserts.append(Const(ssaName, sort) == z3Val)
emitted.add((node, name))
return asserts
def extract(self):
asserts = []
emitted = set()
for (name, sort) in self.arch_state():
asserts.extend(self.extract_one(self.mScope.lookupDef(name),
name, sort, emitted))
return asserts
def arch_state(self):
raise Exception("Abstract")
def embed(bil, visitor_class):
visitor = visitor_class(Context())
visit(visitor, bil)
assert len(visitor.mStack) == 0
return visitor.extract()
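# Sketch of how `embed` is meant to be driven (not from the original code base).
# A concrete embedder subclasses Z3Embedder and enumerates the machine state it
# models; the three 64-bit registers below are an illustrative assumption, not
# the full x86-64 state.
class ExampleX64Embedder(Z3Embedder):
    def arch_state(self):
        return [(name, BitVecSort(64, self.mCtx))
                for name in ('RAX', 'RBX', 'RCX')]
# Assertions for an instruction sequence would then be obtained roughly as
#     asserts = embed(bitsToBil(bits), ExampleX64Embedder)
# where `bits` is the project's byte-sequence wrapper providing toBinStr().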
| gpl-3.0 | 1,772,084,412,615,283,500 | 29.498141 | 76 | 0.53998 | false | 3.630088 | false | false | false |
eguil/ENSO_metrics | scripts/driverCCollection_testMC3.py | 1 | 27073 | from cdms2 import open as CDMS2open
from os.path import join as join_path
from os import environ
from sys import exit
#from EnsoMetrics.EnsoCollectionsLib import CmipVariables, defCollection, ReferenceObservations
#from EnsoMetrics.EnsoComputeMetricsLib import ComputeCollection
from EnsoCollectionsLib import CmipVariables, defCollection, ReferenceObservations
from EnsoComputeMetricsLib import ComputeCollection
xmldir = environ['XMLDIR']
def find_xml(name, frequency, variable, project='', experiment='', ensemble='', realm=''):
list_obs = ReferenceObservations().keys()
if name in list_obs:
file_name, file_area, file_land = find_xml_obs(name, frequency, variable)
else:
file_name, file_area, file_land = find_xml_cmip(name, project, experiment, ensemble, frequency, realm, variable)
return file_name, file_area, file_land
def find_xml_cmip(model, project, experiment, ensemble, frequency, realm, variable):
file_name = join_path(xmldir, str(model) + '_' + str(project) + '_' + str(experiment) + '_' + str(ensemble) +
'_glob_' + str(frequency) + '_' + str(realm) + '.xml')
xml = CDMS2open(file_name)
listvar1 = sorted(xml.listvariables())
if variable not in listvar1:
if realm == 'O':
new_realm = 'A'
elif realm == 'A':
new_realm = 'O'
# if var is not in realm 'O' (for ocean), look for it in realm 'A' (for atmosphere)
file_name = join_path(xmldir, str(model) + '_' + str(project) + '_' + str(experiment) + '_' + str(ensemble) +
'_glob_' + str(frequency) + '_' + str(new_realm) + '.xml')
xml = CDMS2open(file_name)
listvar2 = sorted(xml.listvariables())
if variable not in listvar2:
print '\033[95m' + str().ljust(5) + "CMIP var " + str(variable) + " cannot be found (realm A and O)"\
+ '\033[0m'
print '\033[95m' + str().ljust(10) + "file_name = " + str(file_name) + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar1) + '\033[0m'
print '\033[95m' + str().ljust(10) + "AND" + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar2) + '\033[0m'
exit(1)
file_area, file_land = find_xml_fx(model, project=project, experiment=experiment, realm=new_realm)
else:
file_area, file_land = find_xml_fx(model, project=project, experiment=experiment, realm=realm)
return file_name, file_area, file_land
def find_xml_fx(name, project='', experiment='', realm=''):
list_obs = ReferenceObservations().keys()
if name in list_obs:
file_area = join_path(xmldir, 'obs_' + str(name) + '_glob_fx_O_areacell.xml')
file_land = join_path(xmldir, 'obs_' + str(name) + '_glob_fx_O_landmask.xml')
else:
file_area = join_path(xmldir, str(name) + '_' + str(project) + '_' + str(experiment) + '_r0i0p0_glob_fx_'
+ str(realm) + '_areacell.xml')
file_land = join_path(xmldir, str(name) + '_' + str(project) + '_' + str(experiment) + '_r0i0p0_glob_fx_'
+ str(realm) + '_landmask.xml')
try: xml = CDMS2open(file_area)
except: file_area = None
try: xml = CDMS2open(file_land)
except: file_land = None
return file_area, file_land
def find_xml_obs(obs, frequency, variable):
file_name = join_path(xmldir, 'obs_' + str(obs) + '_glob_' + str(frequency) + '_O.xml')
xml = CDMS2open(file_name)
listvar1 = sorted(xml.listvariables())
if variable not in listvar1:
print '\033[95m' + str().ljust(5) + "obs var " + str(variable) + " cannot be found" + '\033[0m'
print '\033[95m' + str().ljust(10) + "file_name = " + str(file_name) + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar1) + '\033[0m'
exit(1)
file_area, file_land = find_xml_fx(obs)
return file_name, file_area, file_land
# metric collection
mc_name = 'ENSO_proc'#'EVAL_IPSL'#'ENSO_perf'#'ENSO_tel'#'MC1'#
dict_mc = defCollection(mc_name)
list_metric = sorted(dict_mc['metrics_list'].keys())
# parameters
project = 'CMIP5'
experiment = 'historical'
ensemble = 'r1i1p1'
frequency = 'mon'
realm = 'A'
# list of variables
list_variables = list()
for metric in list_metric:
listvar = dict_mc['metrics_list'][metric]['variables']
for var in listvar:
if var not in list_variables:
list_variables.append(var)
list_variables = sorted(list_variables)
print '\033[95m' + str(list_variables) + '\033[0m'
# list of observations
list_obs = list()
for metric in list_metric:
dict_var_obs = dict_mc['metrics_list'][metric]['obs_name']
for var in dict_var_obs.keys():
for obs in dict_var_obs[var]:
if obs not in list_obs:
list_obs.append(obs)
list_obs = sorted(list_obs)
if mc_name == 'MC1':
list_obs = ['Tropflux']
elif mc_name == 'ENSO_perf':
list_obs = ['ERA-Interim']#['Tropflux','GPCPv2.3']#['HadISST']#['HadISST','Tropflux','GPCPv2.3']#
elif mc_name == 'ENSO_tel':
list_obs = ['ERA-Interim']#['HadISST','GPCPv2.3']
elif mc_name == 'ENSO_proc':
list_obs = ['AVISO', 'ERA-Interim', 'Tropflux']#['Tropflux', 'ERA-Interim', 'SODA3.4.2']#['ERA-Interim', 'SODA3.4.2']#['HadISST','GPCPv2.3']
elif mc_name == 'EVAL_IPSL':
list_obs = ['ERA-Interim']#
print '\033[95m' + str(list_obs) + '\033[0m'
#
# finding file and variable name in file for each observations dataset
#
dict_obs = dict()
for obs in list_obs:
# @jiwoo: be sure to add your datasets to EnsoCollectionsLib.ReferenceObservations if needed
dict_var = ReferenceObservations(obs)['variable_name_in_file']
dict_obs[obs] = dict()
for var in list_variables:
#
# finding variable name in file
#
# @jiwoo: correct / adapt the 'varname' in
# EnsoCollectionsLib.ReferenceObservations(obs)['variable_name_in_file'][var] if it is not correct or if you
# changed a name in the xml
# I usually alias the variable names from observations and models in the xml in order to have the same name
# for sst (or any other variable) in every xml. This way I don not need to go through this function to know the
# variable name in file
try:
var_in_file = dict_var[var]['var_name']
except:
print '\033[95m' + str(var) + " is not available for " + str(obs) + " or unscripted" + '\033[0m'
else:
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
if isinstance(var_in_file, list):
list_areacell, list_files, list_landmask, list_name_area, list_name_land = \
list(), list(), list(), list(), list()
for var1 in var_in_file:
file_name, file_areacell, file_landmask = find_xml(obs, frequency, var1)
list_files.append(file_name)
list_areacell.append(file_areacell)
list_name_area.append(areacell_in_file)
list_landmask.append(file_landmask)
list_name_land.append(landmask_in_file)
else:
file_name, file_areacell, file_landmask = find_xml(obs, frequency, var_in_file)
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
dict_obs[obs][var] = {'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
# models
list_models = ['IPSL-CM5B-LR']#['CNRM-CM5']#['IPSL-CM5B-LR']#['CNRM-CM5','IPSL-CM5B-LR']#
ens = 'r1i1p1'
#
# finding file and variable name in file for each model
#
dict_metric, dict_dive = dict(), dict()
dict_var = CmipVariables()['variable_name_in_file']
for mod in list_models:
dict_mod = {mod: {}}
# ------------------------------------------------
# @jiwoo: between these dash the program is a bit ad hoc...
# it works well for me because I am looking for sst and taux on the ocean grid, and fluxes [lhf, lwr, swr, shf, thf]
# on the atmosphere grid
# if you want to use atmosphere only, do not use this or create your own way to find the equivalent between the
# variable name in the program and the variable name in the file
for var in list_variables:
#
# finding variable name in file
#
var_in_file = dict_var[var]['var_name']
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
if isinstance(var_in_file, list):
list_areacell, list_files, list_landmask, list_name_area, list_name_land = \
list(), list(), list(), list(), list()
for var1 in var_in_file:
file_name, file_areacell, file_landmask = \
find_xml(mod, frequency, var1, project=project, experiment=experiment, ensemble=ens,
realm=realm)
list_files.append(file_name)
list_areacell.append(file_areacell)
list_name_area.append(areacell_in_file)
list_landmask.append(file_landmask)
list_name_land.append(landmask_in_file)
else:
file_name, file_areacell, file_landmask = \
find_xml(mod, frequency, var_in_file, project=project, experiment=experiment, ensemble=ens,
realm=realm)
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
dict_mod[mod][var] = {'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
    # dictionary needed by EnsoMetrics.ComputeMetricsLib.ComputeCollection
    # @jiwoo the ComputeCollection function is still in development and it does not read the observations requirement
# defined in the metric collection, i.e., defCollection(mc_name)['metrics_list']['<metric name>']['obs_name']
# so the function does not take a specific obs to compute the metric so for every obs in 'dict_obs' we must include
# every variables needed by the metric collection [lhf, lwr, swr, shf, sst, taux, thf] even if its coming from
# another dataset
dictDatasets = {'model': dict_mod, 'observations': dict_obs}
# regridding dictionary (only if you want to specify the regridding)
dict_regrid = {}
# dict_regrid = {
# 'regridding': {
# 'model_orand_obs': 2, 'regridder': 'cdms', 'regridTool': 'esmf', 'regridMethod': 'linear',
# 'newgrid_name': 'generic 1x1deg'},
# }
# Computes the metric collection
#dict_metric[mod] = ComputeCollection(mc_name, dictDatasets, user_regridding=dict_regrid, debug=False)
#dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, user_regridding=dict_regrid,
# debug=False, dive_down=True)
netcdf_path = '/Users/yannplanton/Documents/Yann/Fac/2016_2018_postdoc_LOCEAN/data/Test'
netcdf_name = 'YANN_PLANTON_' + mc_name + '_' + mod
netcdf = join_path(netcdf_path, netcdf_name)
# dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, netcdf=True, netcdf_name=netcdf, debug=True, dive_down=True)
# dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, netcdf=True, netcdf_name=netcdf, debug=True)
dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, mod, netcdf=True, netcdf_name=netcdf, debug=True)
tmp = sorted(dict_metric[mod]['value'].keys(), key=lambda v: v.upper())
for kk in tmp:
print kk.ljust(13) + ': ' + str(dict_metric[mod]['value'][kk]['metric'])
    stop  # deliberate halt: 'stop' is undefined, so a NameError ends the script here
# Prints the metrics values
for ii in range(3): print ''
print '\033[95m' + str().ljust(5) + str(mod) + '\033[0m'
list_metric = dict_metric[mod]['value'].keys()
for metric in list_metric:
print '\033[95m' + str().ljust(10) + str(metric) + '\033[0m'
metric_dict = dict_metric[mod]['value'][metric]['metric']
for ref in metric_dict.keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' + str(metric_dict[ref]['value'])\
+ ', error = ' + str(metric_dict[ref]['value_error']) + '\033[0m'
if 'value2' in metric_dict[ref].keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' +\
str(metric_dict[ref]['value2']) + ', error = ' + str(metric_dict[ref]['value_error2']) + '\033[0m'
if 'value3' in metric_dict[ref].keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' + \
str(metric_dict[ref]['value3']) + ', error = ' + str(metric_dict[ref]['value_error3']) + '\033[0m'
# Plot
#stop
#if ' ':
for mod in list_models:
from numpy import arange as NUMPYarange
from cdms2 import createAxis as CDMS2createAxis
from MV2 import array as MV2array
from MV2 import masked_where as MV2masked_where
from MV2 import maximum as MV2maximum
from MV2 import minimum as MV2minimum
import plot_frame as PFRAME
import plot_functions as PF
path_plot = '/Users/yannplanton/Documents/Yann/Fac/2016_2018_postdoc_LOCEAN/data/Plots'
#if ' ':
if ' ':
for metric in list_metric:
print '\033[95m' + str().ljust(10) + str(metric) + '\033[0m'
metric_dict = dict_metric[mod]['value'][metric]['metric']
# metric
dict_m1, dict_m2, dict_m3 = dict(), dict(), dict()
for ref in metric_dict.keys():
dict_m1[ref] = metric_dict[ref]['value']
if 'value2' in metric_dict[ref].keys():
dict_m2[ref] = metric_dict[ref]['value2']
if 'value3' in metric_dict[ref].keys():
dict_m3[ref] = metric_dict[ref]['value3']
# dive down
dive_model = dict_dive[mod]['value'][metric]['model']
if metric in ['EnsoPrMap', 'EnsoSstMap']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp1 = dict_dive[mod]['metadata']['metrics'][metric][ref]['axisLat']
axis1 = CDMS2createAxis(MV2array(tmp1), id='latitude')
tmp2 = dict_dive[mod]['metadata']['metrics'][metric][ref]['axisLon']
axis2 = CDMS2createAxis(MV2array(tmp2), id='longitude')
tmp_axis['ref_' + ref] = [axis1, axis2]
del axis1, axis2, tmp1, tmp2
# plot
x_axis, inc = [0, 360], 60
x_dict = dict((ii, str(ii) + 'E') if ii <= 180 else (ii, str(abs(ii-360)) + 'W') for ii in
range(x_axis[0], x_axis[1] + inc, inc))
y_axis, inc = [-60, 60], 20
y_dict = dict((ii, str(abs(ii)) + 'S') if ii < 0 else ((ii, str(ii) + 'N') if ii>0 else (ii, 'Eq')) for
ii in range(y_axis[0], y_axis[1] + inc, inc))
dom = (y_axis[0], y_axis[1], x_axis[0], x_axis[1])
if metric in ['EnsoPrMap']:
label_col = MV2array(range(-3, 3 + 1, 1))
elif metric in ['EnsoSstMap']:
label_col = MV2array([round(ii, 1) for ii in NUMPYarange(-1.2, 1.2 + 0.4, 0.4)])
for ref in dict_m1.keys():
tab1 = MV2array(dive_model)
tab1.setAxisList(tmp_axis[ref])
m1 = 'Metric 1: ' + str("%.2f" % round(dict_m1[ref], 2))
m2 = 'Metric 2: ' + str("%.2f" % round(dict_m2[ref], 2))
m3 = 'Metric 3: ' + str("%.2f" % round(dict_m3[ref], 2))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList(tmp_axis[ref])
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
name = metric + ' in Historical (' + mod + ')'
name_png = path_plot + '/' + metric + '_' + mod
PFRAME.plot_my_map(tab1, label_col, dom, white_zero=0, x_dico=x_dict, y_dico=y_dict, name=name,
path_plus_name_png=name_png, bg=1)
name = metric + ' in Historical (' + ref + ')'
name_png = path_plot + '/' + metric + '_' + ref
PFRAME.plot_my_map(tab2, label_col, dom, white_zero=0, x_dico=x_dict, y_dico=y_dict, name=name,
path_plus_name_png=name_png, bg=1)
del m1, m2, m3, name, name_png, tab1, tab2
elif metric in ['EnsoPrJjaTel', 'EnsoPrNdjTel']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp_axis['ref_' + ref] = dict_dive[mod]['metadata']['metrics'][metric][ref]['axis']
# plot
y_axis, inc = [-2.0, 6.2], 0.2
y_dict = dict((round(elt, 1), "{0:.1f}".format(round(elt, 1)))
if (round(elt, 1) * 10) % round(5 * round(inc, 1) * 10, 1) == 0
else (round(elt, 1), '') for elt in NUMPYarange(y_axis[0], y_axis[1] + inc, inc))
for ref in dict_m1.keys():
axis = CDMS2createAxis(MV2array(range(len(tmp_axis[ref])), dtype='int32'), id='regions')
x_dict = dict((elt, tmp_axis[ref][elt]) for elt in range(len(tmp_axis[ref])))
x_axis = [-1.0, len(tmp_axis[ref])]
tab1 = MV2array(dive_model)
tab1.setAxisList([axis])
m1 = 'Metric 1: ' + str("%.2f" % round(dict_m1[ref], 2))
m2 = 'Metric 2: ' + str("%.1f" % round(dict_m2[ref]*100, 1))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList([axis])
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
list_curve = [tab1, tab2]
list_col = ['black', 'red']
# strings to write
l_w = [m1, m2]
l_w_xy = [[97, 100 - (ii + 1) * 6] for ii in range(len(l_w))]
l_w_si = [30 for ii in range(len(l_w))]
l_w_ha = ['right' for ii in range(len(l_w))]
# lines to plot
lines_y1y2 = [[round(ii, 1), round(ii, 1)] for ii in y_dict.keys() if y_dict[ii] != '' and
round(ii, 1) != 0 and round(ii, 1) not in y_axis]
lines_x1x2 = [x_axis for ii in range(len(lines_y1y2))]
lines_colo = ['grey' for ii in range(len(lines_y1y2))]
name = metric + ' metric in Historical (' + mod + ')'
yname = 'El Nino (PR) minus La Nina (PR)'
name_png = path_plot + '/' + metric + '_' + mod + '_ ' + ref
PFRAME.curves_plot(list_curve, list_col=list_col, x_axis=x_axis, x_dico=x_dict, y_axis=y_axis,
y_dico=y_dict, name_in_xlabel=True, name=name, xname='', yname=yname,
list_writings=l_w, list_writings_pos_xy=l_w_xy, list_writings_size=l_w_si,
list_writings_halign=l_w_ha, plot_lines=True, lines_x1x2=lines_x1x2,
lines_y1y2=lines_y1y2, lines_color=lines_colo, path_plus_name_png=name_png,
draw_white_background=True, save_ps=False, bg=1)
del l_w, l_w_ha, l_w_si, l_w_xy, lines_colo, lines_x1x2, lines_y1y2, list_curve, m1,\
m2, name, name_png, yname
elif metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'SeasonalSstLonRmse', 'NinaSstTsRmse',
'NinoSstTsRmse']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp_axis['ref_' + ref] = dict_dive[mod]['metadata']['metrics'][metric][ref]['axis']
# plot
for ref in dict_m1.keys():
axis = CDMS2createAxis(MV2array(tmp_axis[ref], dtype='float32'), id='axis')
tab1 = MV2array(dive_model)
tab1.setAxisList([axis])
tab1 = MV2masked_where(tab1>=1e20, tab1)
if metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'NinoSstLonRmse', 'SeasonalSstLonRmse']:
inc = 30
if min(axis[:])<0:
x_axis = [-250, -70]
tmp = [x_axis[0]+10, x_axis[1]-10]
x_dict = dict((ii, str(ii + 360) + 'E') if ii < -180 else (ii, str(abs(ii)) + 'W') for ii in
range(tmp[0], tmp[1] + inc, inc))
else:
x_axis = [110, 290]
tmp = [x_axis[0] + 10, x_axis[1] - 10]
x_dict = dict((ii, str(ii) + 'E') if ii < 180 else (ii, str(abs(ii - 360)) + 'W') for ii in
range(tmp[0], tmp[1] + inc, inc))
elif metric in ['NinoSstTsRmse']:
x_axis, inc = [-1, len(axis)], 1
tmp = ['M', 'J', 'S', 'D']
x_dict = dict((ii, tmp[(((ii + 1) / 3) % 4) - 1]) if (ii + 1) % 3 == 0 else (ii, '') for ii in
range(x_axis[0], x_axis[1] + inc, inc))
m1 = 'Metric: ' + str("%.2f" % round(dict_m1[ref], 2))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList([axis])
tab2 = MV2masked_where(tab2 >= 1e20, tab2)
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
y_axis, y_dict = PF.create_dico([min(MV2minimum(tab1),MV2minimum(tab2)),
max(MV2maximum(tab1),MV2maximum(tab2))])
list_curve = [tab1, tab2]
list_col = ['black', 'red']
# strings to write
l_w = [m1]
l_w_xy = [[97, 100 - (ii + 1) * 6] for ii in range(len(l_w))]
l_w_si = [30 for ii in range(len(l_w))]
l_w_ha = ['right' for ii in range(len(l_w))]
# lines to plot
lines_y1y2 = [[round(ii, 1), round(ii, 1)] for ii in y_dict.keys() if y_dict[ii] != '' and
round(ii, 1) != 0 and round(ii, 1) not in y_axis]
lines_x1x2 = [x_axis for ii in range(len(lines_y1y2))]
if metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'NinoSstLonRmse', 'SeasonalSstLonRmse']:
xname = 'longitude'
lines_x1x2 = lines_x1x2 + [[ii, ii] for ii in x_dict.keys() if x_dict[ii] != '' and ii != 0
and ii not in x_axis]
lines_y1y2 = lines_y1y2 + [y_axis for ii in x_dict.keys() if x_dict[ii] != '' and ii != 0
and ii not in x_axis]
elif metric in ['NinoSstTsRmse']:
xname = 'time'
lines_x1x2 = lines_x1x2 + [[ii, ii] for ii in x_dict.keys() if (ii + 1) % 12 == 0 and ii != 0
and ii not in x_axis]
lines_y1y2 = lines_y1y2 + [y_axis for ii in x_dict.keys() if (ii + 1) % 12 and ii != 0
and ii not in x_axis]
lines_colo = ['grey' for ii in range(len(lines_y1y2))]
name = metric + ' metric (' + mod + ')'
print metric, mod, ref
name_png = path_plot + '/' + metric + '_' + mod + '_ ' + ref
if metric in ['NinoSstLonRmse', 'NinoSstTsRmse', 'SeasonalSstLonRmse']:
yname = 'SSTA (degC)'
elif metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse']:
yname = 'SST (degC)'
PFRAME.curves_plot(list_curve, list_col=list_col, x_axis=x_axis, x_dico=x_dict, y_axis=y_axis,
y_dico=y_dict, name_in_xlabel=False, name=name, xname=xname, yname=yname,
list_writings=l_w, list_writings_pos_xy=l_w_xy, list_writings_size=l_w_si,
list_writings_halign=l_w_ha, plot_lines=True, lines_x1x2=lines_x1x2,
lines_y1y2=lines_y1y2, lines_color=lines_colo, path_plus_name_png=name_png,
draw_white_background=True, save_ps=False, bg=1)
del l_w, l_w_ha, l_w_si, l_w_xy, lines_colo, lines_x1x2, lines_y1y2, list_curve, m1,\
name, name_png, xname, yname
| bsd-3-clause | -6,984,128,249,758,549,000 | 57.221505 | 144 | 0.518635 | false | 3.342346 | false | false | false |
sglumac/pyislands | pyislands/evolution.py | 1 | 1937 | from itertools import islice
def evolution(island):
'''
Infinite generator for evolution of some population.
This generator yields population:
        population - a tuple whose elements are the individuals (themselves tuples)
population_0 = create()
population_1 = evolve(population_0, info_0)
.
.
population_k = evolve(population_k-1, info_k-1)
.
.
Since population is a tuple/an immutable type, a population cannot be
influenced by outside functions. Population can be used only to gather
    statistics.
    If no immigration or emigration is used, this island evolution
becomes a classical genetic algorithm.
'''
population = island.create_population()
while True:
for _ in range(island.migration_interval if island.migrate else 1):
yield population
# Immigration - Outside individuals are inhabiting an island
if island.assimilate:
population = island.assimilate(population)
# Evolution - Each island population is evolved into the next generation
population = island.evolve(population)
        # Emigration - Sends individuals (clones) from this population on a voyage
if island.migrate:
island.migrate(population)
def finite_evolution(num_iterations, island):
''' Same as evolution, except stopped after num_iterations '''
return islice(evolution(island), num_iterations)
def stagnation_evolution(max_stagnation, island):
''' Same as evolution, except stopped after max_stagnation '''
infinite_evolution = evolution(island)
population = next(infinite_evolution)
best = min(population)
stagnation = 0
while stagnation < max_stagnation:
stagnation += 1
yield population
population = next(infinite_evolution)
current_best = min(population)
if current_best < best:
stagnation = 0
best = current_best
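if __name__ == '__main__':
    # Self-contained sketch (not part of the original package) showing how
    # finite_evolution consumes an island-like object.  _ToyIsland is a made-up
    # stand-in; real islands are constructed elsewhere in pyislands.
    import random
    class _ToyIsland(object):
        migrate = None            # no emigration
        assimilate = None         # no immigration
        migration_interval = 1
        def create_population(self):
            return tuple((random.random(),) for _ in range(10))
        def evolve(self, population):
            # keep the better half, refill with random newcomers
            best = tuple(sorted(population)[:len(population) // 2])
            fresh = tuple((random.random(),)
                          for _ in range(len(population) - len(best)))
            return best + fresh
    for population in finite_evolution(5, _ToyIsland()):
        print(min(population))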
| mit | 3,788,803,665,080,810,000 | 27.910448 | 75 | 0.681466 | false | 4.018672 | false | false | false |
pekin0609/- | agent/cognitive/interpreter.py | 1 | 27170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interpreter.py
=====
This module contains the classes `NetworkBuilder` and `AgentBuilder`, which interpret
the contents of BriCA language files.
"""
# BriCA Language Interpreter for V1 (Interpreter version 1)
# Originally licenced for WBAI (wbai.jp) under the Apache License (?)
# Created: 2016-01-31
# TODO: import, subports
import json
import os
import sys
import brica1
import logging
from config.log import APP_KEY
app_logger = logging.getLogger(APP_KEY)
debug = False # True
class NetworkBuilder:
"""
The BriCA language interpreter.
- reads BriCA language files.
"""
unit_dic = {} # Map: BriCA unit name ⇒ unit object
super_modules = {} # Super modules
# base_name_space="" # Base Name Space
module_dictionary = {}
sub_modules = {}
__ports = {}
__connections = {}
__comments = {}
__network = {}
__super_sub_modules = {} # Super & Sub modules
__load_files = []
def __init__(self):
"""
        Create a new `NetworkBuilder` instance.
Args:
None.
Returns:
NetworkBuilder: a new `NetworkBuilder` instance.
"""
        # Bind fresh containers to this instance so state is not shared between builders.
        self.unit_dic = {}
        self.module_dictionary = {}
        self.super_modules = {}
        self.sub_modules = {}
        self.__ports = {}
        self.__connections = {}
        self.__comments = {}
        self.__load_files = []
def load_file(self, file_object):
"""
Load a BriCA language json file.
Args:
A file object
Returns:
success:True, failure:False
"""
self.__load_files.append(os.path.abspath(file_object.name))
dir_name = os.path.dirname(file_object.name)
try:
jsn = json.load(file_object)
except:
app_logger.error("File could not be read!")
return False
if "Header" not in jsn:
app_logger.error("Header must be specified!")
return False
header = jsn["Header"]
if "Import" in header:
import_files = header["Import"]
for import_file in import_files:
if "/" != import_file[0]: # not full path
import_file = dir_name + "/" + import_file
if not os.path.isfile(import_file):
app_logger.error("JSON file {} not found!".format(import_file))
return False
if os.path.abspath(import_file) in self.__load_files:
app_logger.error("Import file {} has been read!".format(import_file))
continue
f = open(import_file)
if self.load_file(f) is False:
return False
if "Name" not in header:
app_logger.error("Header name must be specified!")
return False
if "Base" not in header:
app_logger.error("Base name space must be specified!")
return False
self.base_name_space = header["Base"].strip()
if "Type" not in header:
app_logger.error("Type must be specified!")
return False
self.__type = header["Type"]
if "Comment" in header:
self.__comments["Header." + header["Name"]] = header["Comment"]
if self.__set_modules(jsn) is False:
return False
if self.__set_ports(jsn) is False:
return False
if self.__set_connections(jsn) is False:
return False
return True
def get_network(self):
"""
Args:
None
return:
the network created by load_file(self, file_object)
"""
return {"ModuleDictionary": self.module_dictionary, "SuperModules": self.super_modules,
"SubModules": self.sub_modules, "Ports": self.__ports, "Connections": self.__connections,
"Comments": self.__comments}
def check_consistency(self):
"""
Args:
None
return:
true iff no fatal inconsistency in the network
function:
see the consistency check section below.
"""
for module_name in self.module_dictionary:
if module_name not in self.unit_dic:
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating {}.".format(module_name))
self.unit_dic[module_name] = brica1.Module() # New Module instance
# SuperModules of consistency check
for module, superModule in self.super_modules.items():
if superModule not in self.module_dictionary:
app_logger.error("Super Module {} is not defined!".format(superModule))
return False
# Loop check
if self.__loop_check(superModule, module):
app_logger.error("Loop detected while trying to add {} as a subunit to {}!".format(module, superModule))
return False
# SubModules of consistency check
for superModule, subModules in self.sub_modules.items():
for subModule in subModules:
if subModule not in self.module_dictionary:
app_logger.error("Sub Module {} is not defined!".format(subModule))
return False
# Loop check
if self.__loop_check(superModule, subModule):
app_logger.error("Loop detected while trying to add {} as a subunit to {}!".format(
superModule, subModule))
return False
# Port of consistency check
for module_name in self.module_dictionary:
ports = self.module_dictionary[module_name]["Ports"]
if len(ports) == 0:
app_logger.error("The specified module {} does not have the port!".format(module_name))
return False
for port in ports:
if not module_name + "." + port in self.__ports:
app_logger.error("The specified module {} does not have the port!".format(module_name))
return False
for port_name, v in self.__ports.items():
# Fatal if the specified modules have not been defined.
if "Module" not in v:
app_logger.error("Module is not defined in the port {}!".format(port_name))
return False
module_name = v["Module"]
if module_name not in self.module_dictionary:
app_logger.error("Specified module {} is not defined in the port {}!".format(module_name, port_name))
return False
# Fatal if the shape has not been defined.
if "Shape" not in v:
app_logger.error("Shape is not defined in the port {}!".format(port_name))
return False
length = v["Shape"]
if length < 1:
app_logger.error("Incorrect length of Shape for the port {}!".format(port_name))
return False
# Fatal if the specified modules do not have the port, abort with a message.
module = self.module_dictionary[module_name]
pv = port_name.split(".")
last_port_name = pv[len(pv) - 1]
if last_port_name not in module["Ports"]:
app_logger.error("Port {} is not defined in the module {}!".format(last_port_name, module_name))
return False
module = self.unit_dic[module_name]
if v["IO"] == "Input":
module.make_in_port(last_port_name, length)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating an input port {} (length {}) to {}.".format(
last_port_name, length, module_name))
elif v["IO"] == "Output":
module.make_out_port(last_port_name, length)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating an output port {} (length {}) to {}.".format(
last_port_name, length, module_name))
# Connection of consistency check
for k, v in self.__connections.items():
# Fatal if the specified ports have not been defined.
if not v[0] in self.__ports:
app_logger.error("The specified port {} is not defined in connection {}.".format(v[0], k))
return False
if not v[1] in self.__ports:
app_logger.error("The specified port {} is not defined in connection {}.".format(v[1], k))
return False
tp = v[0].split(".")
to_port = tp[len(tp) - 1]
fp = v[1].split(".")
from_port = fp[len(fp) - 1]
to_unit = self.__ports[v[0]]["Module"]
from_unit = self.__ports[v[1]]["Module"]
# if from_unit & to_unit belong to the same level
if ((from_unit not in self.__super_sub_modules) and (to_unit not in self.__super_sub_modules)) or \
(from_unit in self.__super_sub_modules and to_unit in self.__super_sub_modules and (
self.__super_sub_modules[from_unit] == self.__super_sub_modules[to_unit])):
try:
fr_port_obj = self.unit_dic[from_unit].get_out_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_in_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection
brica1.connect((self.unit_dic[from_unit], from_port), (self.unit_dic[to_unit], to_port))
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection from {} of {} to {} of {}".format(
from_port, from_unit, to_port, to_unit))
except:
app_logger.error("adding a connection from {} to {} on the same level"
" but not from an output port to an input port!".format(from_unit, to_unit))
return False
# else if from_unit is the direct super module of the to_unit
elif to_unit in self.__super_sub_modules and self.__super_sub_modules[to_unit] == from_unit:
try:
fr_port_obj = self.unit_dic[from_unit].get_in_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_in_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection (alias)
self.unit_dic[to_unit].alias_in_port(self.unit_dic[from_unit], from_port, to_port)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection (alias) from {} of {} to {} of {}.".format(
from_port, from_unit, to_port, to_unit
))
except:
app_logger.error("Error adding a connection from the super module {} to {} "
"but not from an input port to an input port!".format(from_unit, to_unit))
return False
# else if to_unit is the direct super module of the from_unit
elif from_unit in self.__super_sub_modules and self.__super_sub_modules[from_unit] == to_unit:
try:
fr_port_obj = self.unit_dic[from_unit].get_out_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_out_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection (alias)
self.unit_dic[from_unit].alias_out_port(self.unit_dic[to_unit], to_port, from_port)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection (alias) from {} of {} to {} of {}.".format(
from_port, from_unit, to_port, to_unit
))
except:
app_logger.error("Error adding a connection from {} to its super module {} "
"but not from an output port to an output port!".format(from_unit, to_unit))
return False
# else connection level error!
else:
app_logger.error("Trying to add a connection between units {} and {} in a remote level!".format(
from_unit, to_unit
))
return False
return True
def check_grounding(self):
"""
Args:
None
return:
true iff the network is grounded, i.e., every module at the bottom of the hierarchy
has a component specification.
"""
for module_name, v in self.module_dictionary.items():
implclass = v["ImplClass"]
if implclass != "":
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Use the existing ImplClass {} for {}.".format(implclass, module_name))
try:
component_instance = eval(implclass + '()') # New ImplClass instance
except:
v = implclass.rsplit(".", 1)
mod_name = v[0]
class_name = v[1]
try:
mod = __import__(mod_name, globals(), locals(), [class_name], -1)
Klass = getattr(mod, class_name)
component_instance = Klass()
except:
app_logger.error("Module {} at the bottom not grounded as a Component!".format(module_name))
return False
try:
module = self.unit_dic[module_name]
module.add_component(module_name, component_instance)
for port in module.in_ports:
length = module.get_in_port(port).buffer.shape[0]
component_instance.make_in_port(port, length)
component_instance.alias_in_port(module, port, port)
for port in module.out_ports:
length = module.get_out_port(port).buffer.shape[0]
component_instance.make_out_port(port, length)
component_instance.alias_out_port(module, port, port)
except:
app_logger.error("Module {} at the bottom not grounded as a Component!".format(module_name))
return False
return True
def __set_modules(self, jsn):
""" Add modules from the JSON description
Args:
None
Returns:
None
"""
if "Modules" in jsn:
modules = jsn["Modules"]
for module in modules:
if self.__set_a_module(module) is False:
return False
else:
app_logger.warning("No Modules in the language file.")
return True
def __set_a_module(self, module):
if "Name" not in module:
app_logger.error("Module name must be specified!")
return False
module_name = module["Name"].strip()
if module_name == "":
app_logger.error("Module name must be specified!")
return False
module_name = self.__prefix_base_name_space(module_name) # Prefixing the base name space
defined_module = None
if module_name in self.module_dictionary:
defined_module = self.module_dictionary[module_name]
ports = []
if "Ports" in module:
ports = module["Ports"]
# Multiple registration
if defined_module:
for p in defined_module["Ports"]:
if p not in ports:
ports.append(p)
implclass = ""
if "ImplClass" in module:
# if an implementation class is specified
implclass = module["ImplClass"].strip()
elif self.__type == "C":
app_logger.error("ImplClass is necessary if the type C in the module {}!".format(module_name))
return False
# Multiple registration
if defined_module:
if implclass == "":
implclass = defined_module["ImplClass"]
else:
if defined_module["ImplClass"] != "":
app_logger.warning("ImplClass {} of {} is replaced with {}.".format(
defined_module["ImplClass"], module_name, implclass))
self.module_dictionary[module_name] = {"Ports": ports, "ImplClass": implclass}
supermodule = ""
if "SuperModule" in module:
supermodule = module["SuperModule"].strip()
supermodule = self.__prefix_base_name_space(supermodule)
if supermodule != "":
# Multiple registration
if module_name in self.super_modules:
app_logger.warning("Super module {} of {} is replaced with {}.".format(
self.super_modules[module_name], module_name, supermodule))
self.super_modules[module_name] = supermodule
self.__super_sub_modules[module_name] = supermodule
if "SubModules" in module:
for submodule in module["SubModules"]:
if submodule != "":
submodule = self.__prefix_base_name_space(submodule)
if module_name not in self.sub_modules:
self.sub_modules[module_name] = []
self.sub_modules[module_name].append(submodule)
self.__super_sub_modules[submodule] = module_name
if "Comment" in module:
self.__comments["Modules." + module_name] = module["Comment"]
return True
def __prefix_base_name_space(self, name):
if name.find(".") < 0:
return self.base_name_space + "." + name
else:
return name
def __loop_check(self, superunit, subunit):
if superunit == subunit:
return True
val = superunit
while val in self.__super_sub_modules:
val = self.__super_sub_modules[val]
if val == subunit:
return True
return False
def __set_ports(self, jsn):
""" Add ports from the JSON description
Args:
None
Returns:
None
"""
if "Ports" in jsn:
ports = jsn["Ports"]
for port in ports:
if self.__set_a_port(port) is False:
return False
else:
app_logger.warning("No Ports in the language file.")
return True
def __set_a_port(self, port):
if "Name" in port:
port_name = port["Name"].strip()
else:
app_logger.error("Name not specified while adding a port!")
return False
if "Module" in port:
port_module = port["Module"].strip()
port_module = self.__prefix_base_name_space(port_module)
else:
app_logger.error("Module not specified while adding a port!")
return False
port_name = port_module + "." + port_name
defined_port = None
if port_name in self.__ports:
defined_port = self.__ports[port_name]
# Multiple registration
if defined_port:
if port_module != defined_port["Module"]:
app_logger.error("Module {} defined in the port {} is already defined as a module {}.".format(
port_module, port_name, self.__ports[port_name]["Module"]))
return False
if "Type" in port:
port_type = port["Type"].strip()
if port_type != "Input" and port_type != "Output":
app_logger.error("Invalid port type {}!".format(port_type))
return False
elif defined_port and port_type != defined_port["IO"]:
app_logger.error("The port type of port {} differs from previously defined port type!".format(
port_name))
return False
else:
app_logger.error("Type not specified while adding a port!")
return False
if "Shape" in port:
shape = port["Shape"]
if len(shape) != 1:
app_logger.error("Shape supports only one-dimensional vector!")
return False
if not isinstance(shape[0], int):
app_logger.error("The value of the port is not a number!")
return False
if int(shape[0]) < 1:
app_logger.error("Port dimension < 1!")
return False
self.__ports[port_name] = {"IO": port_type, "Module": port_module, "Shape": shape[0]}
else:
self.__ports[port_name] = {"IO": port_type, "Module": port_module}
if "Comment" in port:
self.__comments["Ports." + port_name] = port["Comment"]
return True
def __set_connections(self, jsn):
""" Add connections from the JSON description
Args:
None
Returns:
None
"""
if "Connections" in jsn:
connections = jsn["Connections"]
for connection in connections:
if self.__set_a_connection(connection) is False:
return False
else:
if self.__type != "C":
app_logger.warning("No Connections in the language file.")
return True
def __set_a_connection(self, connection):
if "Name" in connection:
connection_name = connection["Name"]
else:
app_logger.error("Name not specified while adding a connection!")
return False
defined_connection = None
if connection_name in self.__connections:
defined_connection = self.__connections[connection_name]
if "FromModule" in connection:
from_unit = connection["FromModule"]
from_unit = self.__prefix_base_name_space(from_unit)
else:
app_logger.error("FromModule not specified while adding a connection!")
return False
if "FromPort" in connection:
from_port = connection["FromPort"]
else:
app_logger.error("FromPort not specified while adding a connection!")
return False
if "ToModule" in connection:
to_unit = connection["ToModule"]
to_unit = self.__prefix_base_name_space(to_unit)
else:
app_logger.error("ToModule not specified while adding a connection!")
return False
if "ToPort" in connection:
to_port = connection["ToPort"]
else:
app_logger.error("ToPort not specified while adding a connection!")
return False
# Multiple registration
if defined_connection and defined_connection[0] != to_unit + "." + to_port:
app_logger.error("Defined port {}.{} is different from the previous ones in connection {}!".format(
to_unit, to_port, connection_name))
return False
if defined_connection and defined_connection[1] != from_unit + "." + from_port:
app_logger.error("Defined port {}.{} is different from the previous ones in connection {}!".format(
from_unit, from_port, connection_name))
return False
if "Comment" in connection:
self.__comments["Connections." + connection_name] = connection["Comment"]
self.__connections[connection_name] = (to_unit + "." + to_port, from_unit + "." + from_port)
return True
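# --- Hedged sketch (editor's addition): shape of the language file parsed above ---
# Field names are taken from __set_a_module / __set_a_port / __set_a_connection;
# the module, port and connection names themselves are hypothetical.
#
#   {
#     "Modules":     [{"Name": "A", "ImplClass": "pkg.CompA"},
#                     {"Name": "B", "ImplClass": "pkg.CompB"}],
#     "Ports":       [{"Name": "out", "Module": "A", "Type": "Output", "Shape": [10]},
#                     {"Name": "in",  "Module": "B", "Type": "Input",  "Shape": [10]}],
#     "Connections": [{"Name": "c1", "FromModule": "A", "FromPort": "out",
#                      "ToModule": "B", "ToPort": "in"}]
#   }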
class AgentBuilder:
"""
The BriCA language interpreter.
- creates a BriCA agent based on the file contents.
"""
def __init__(self):
self.INCONSISTENT = 1
self.NOT_GROUNDED = 2
self.COMPONENT_NOT_FOUND = 3
self.unit_dic = None
'''
def create_agent(self, scheduler, network):
if network.check_consistency() == False:
return self.INCONSISTENT
if network.check_grounding() == False:
return self.NOT_GROUNDED
for module, super_module in network.super_modules.items():
if super_module in network.module_dictionary:
network.unit_dic[super_module].add_submodule(module, network.unit_dic[module])
if debug:
print "Adding a module " + module + " to " + super_module + "."
# Main logic
top_module = brica1.Module()
for unit_key in network.unit_dic.keys():
if not unit_key in network.super_modules:
if isinstance(network.unit_dic[unit_key], brica1.Module):
top_module.add_submodule(unit_key, network.unit_dic[unit_key])
if debug:
print "Adding a module " + unit_key + " to a BriCA agent."
agent = brica1.Agent(scheduler)
agent.add_submodule("__Runtime_Top_Module", top_module)
self.unit_dic = network.unit_dic
return agent
'''
def create_agent(self, network):
if network.check_consistency() is False:
return self.INCONSISTENT
if network.check_grounding() is False:
return self.NOT_GROUNDED
for module, super_module in network.super_modules.items():
if super_module in network.module_dictionary:
network.unit_dic[super_module].add_submodule(module, network.unit_dic[module])
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Adding a module {} to {}.".format(module, super_module))
# Main logic
top_module = brica1.Module()
for unit_key in network.unit_dic.keys():
if unit_key not in network.super_modules:
if isinstance(network.unit_dic[unit_key], brica1.Module):
top_module.add_submodule(unit_key, network.unit_dic[unit_key])
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Adding a module {} to a BriCA agent.".format(unit_key))
# agent = brica1.Agent(scheduler)
agent = brica1.Agent()
agent.add_submodule("__Runtime_Top_Module", top_module)
self.unit_dic = network.unit_dic
return agent
def get_modules(self):
return self.unit_dic
| apache-2.0 | 1,350,167,149,328,328,400 | 39.549254 | 120 | 0.536109 | false | 4.38618 | false | false | false |
rossgoodwin/bizarromoma | bot.py | 1 | 5642 | import os
import time
import json
import string
from collections import defaultdict, Counter
from random import random
import tweepy
class TwitterAPI:
"""
Class for accessing the Twitter API.
Requires API credentials to be available in environment
variables. These will be set appropriately if the bot was created
with init.sh included with the heroku-twitterbot-starter
"""
def __init__(self):
consumer_key = "ZyyYUZVcGfbMBa644Ey77Tu5b"
consumer_secret = "FgL9UAXDin6YQwR1ILqMdE8aCLG9wPkhKDm8wJibyNnWLem2kc"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
access_token = "3299819928-mYYqdXnQmZTURU9iXaalXDq7BGnCESNfe7MGUJE"
access_token_secret = "1pkxjxkpIPQCnAM0zEttaCHKezdlW5Co3x5B2KY1j40qI"
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
def tweet(self, message):
"""Send a tweet"""
self.api.update_status(status=message)
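# --- Hedged usage sketch (editor's addition) ---
# TwitterAPI is driven from the __main__ block at the bottom of this script; a minimal
# call sequence looks like the following (the status text is a hypothetical placeholder
# and posting only works with valid credentials above):
#
#   twitter = TwitterAPI()
#   twitter.tweet("example status under 140 characters")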
def train_char_lm(fname, order=4):
# data = file(fname).read()
data = fname
lm = defaultdict(Counter)
pad = "~" * order
data = pad + data
for i in xrange(len(data)-order):
history, char = data[i:i+order], data[i+order]
lm[history][char]+=1
def normalize(counter):
s = float(sum(counter.values()))
return [(c,cnt/s) for c,cnt in counter.iteritems()]
outlm = {hist:normalize(chars) for hist, chars in lm.iteritems()}
return outlm
def generate_letter(lm, history, order):
history = history[-order:]
dist = lm[history]
x = random()
for c,v in dist:
x = x - v
if x <= 0: return c
def generate_text(lm, order, nletters=5000):
history = "~" * order
out = []
for i in xrange(nletters):
c = generate_letter(lm, history, order)
history = history[-order:] + c
out.append(c)
return "".join(out)
# In[148]:
def fix_unmatched(l):
    """Strip unmatched brackets and unpaired quotes from a generated title string."""
    unmatched_locs = []
    unmatched_locs_rev = []
def error(c, column_number):
# print 'Error: unmatched', c, 'column', column_number
if c in [')', ']', '}']:
unmatched_locs.append(column_number)
else:
unmatched_locs_rev.append(column_number)
def check(stack, wanted, c, column_number):
if stack:
if stack[-1] != wanted:
error(c, column_number)
else:
stack.pop()
else:
error(c, column_number)
def check_parentheses(line):
stack = list()
column_number = 0
for c in line:
if c == '(' or c == '[' or c == '{':
stack.append(c)
elif c == ')':
check(stack, '(', ')', column_number)
elif c == ']':
check(stack, '[', ']', column_number)
elif c == '}':
check(stack, '{', '}', column_number)
column_number += 1
def check_parentheses_rev(line):
stack = list()
column_number = 0
for c in line:
column_number += 1
if c == ')' or c == ']' or c == '}':
stack.append(c)
elif c == '(':
check(stack, ')', '(', column_number)
elif c == '[':
check(stack, ']', '[', column_number)
elif c == '{':
check(stack, '}', '{', column_number)
check_parentheses(l)
lchars = list(l)
newTitle = ''.join([i for j, i in enumerate(lchars) if j not in unmatched_locs])
check_parentheses_rev(newTitle[::-1])
real_unmatched_rev = map(lambda i: len(newTitle)-i, unmatched_locs_rev)
titChars = list(newTitle)
newTitle = ''.join([i for j, i in enumerate(titChars) if j not in real_unmatched_rev])
numDoubleQuotes = newTitle.count('\"')
if numDoubleQuotes % 2:
newTitle = string.replace(newTitle, '\"', '', 1)
numSingleQuotes = newTitle.count("\'")
if numSingleQuotes % 2:
newTitle = string.replace(newTitle, "\'", "", 1)
return newTitle
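# --- Illustrative example (editor's addition) ---
# fix_unmatched() strips unmatched brackets and unpaired quotes from generated titles;
# the input below is hypothetical.
def _demo_fix_unmatched():
    return fix_unmatched('Study (1/2')  # expected: 'Study 1/2'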
def main():
generatedTexts = map(lambda lm: generate_text(lm, 7), lms)
entry_candidates = map(lambda x: x.split('\n'), generatedTexts)
def remove_plagiarized(i):
plagiarized = set(entry_candidates[i]) & set(data[i])
keepers = map(fix_unmatched, list(set(entry_candidates[i]) - plagiarized))
return keepers
entries = map(remove_plagiarized, range(len(data)))
invented_art = zip(*entries)
def unpack(tup):
t, a, m = tup
outstr = "%s\n%s\n%s" % (t, a, m)
return outstr
output = filter(lambda x: len(x) <= 140, map(unpack, invented_art))
return output
fileObj = open('artworks.json', 'r')
art = json.load(fileObj)[:75000]
fileObj.close()
print "Artwork list loaded..."
titles = map(lambda d: d['title'], art)
artists = map(lambda d: d['artist'], art)
media = map(lambda d: d['medium'], art)
print "Got titles, artists, media..."
# dimensions = map(lambda d: d['dimensions'], art)
data = [titles, artists, media]
lms = map(lambda l: train_char_lm('\n'.join(l), order=7), data)
print "Got language models..."
if __name__ == "__main__":
twitter = TwitterAPI()
while True:
toTweet = main()
print "Got toTweet list..."
while toTweet:
curTweet = toTweet.pop()
print "Posting tweet..."
twitter.tweet(curTweet)
print "...tweet posted!"
time.sleep(120)
| gpl-3.0 | 573,573,843,229,703,900 | 28.694737 | 90 | 0.55654 | false | 3.457108 | false | false | false |
eugeneks/zmeyka | zmeyka_auth/user_auth.py | 1 | 3511 |
import datetime
from datetime import datetime
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey, UniqueConstraint
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
try:
from db_conf import dbtype, user, password, host, port, db
except:
from zmeyka_auth.db_conf import dbtype, user, password, host, port, db
try:
from alexey_logging import write_to_log
except:
from zmeyka_auth.alexey_logging import write_to_log
url = '{}://{}:{}@{}:{}/{}'.format(dbtype, user, password, host, port, db)
engine = create_engine(url)
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
#write_to_log(2)
##################################################
class Zmeyka_User_DB(Base):
__tablename__ = 'zmeyka_users'
id = Column(String(300), primary_key=True)
password = Column(String(300), nullable=False)
info = Column(String(300))
created_time = Column(DateTime)
last_login_time = Column(DateTime)
def __init__(self, id, password, info = None):
self.id = id
self.password = password
self.info = info
self.created_time = datetime.now()
def user_ping(self):
self.last_login_time = datetime.now()
def __repr__(self):
        return '<Zmeyka_User_DB {} {} {} {} {}>'.format(self.id, '******', self.info, self.created_time, self.last_login_time)
#####################################################
class Zmeyka_User(object):
# proxy for a database of users
#user_database = {"admin": ("admin", "admin"),
# "Alexey": ("Alexey", "Alexey_password")}
def __init__(self, username, password=''):
self.id = username
self.password = password
def get(self):
try:
#if Zmeyka_User.user_database[self.id][1] == self.password:
if Zmeyka_User_DB.query.filter(Zmeyka_User_DB.id=='admin').first().password == self.password:
return self
else:
return None
except:
return None
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % (self.id)
##################### create_schema ##################
def create_schema():
try:
Base.metadata.create_all(bind=engine)
return 'OK'
except Exception as exception_desc:
db_session.rollback()
#print (exception_desc)
write_to_log(exception_desc)
return exception_desc
##################### drop_schema ##################
def drop_schema():
try:
Base.metadata.drop_all(bind=engine)
return 'OK'
except Exception as exception_desc:
db_session.rollback()
#print (exception_desc)
write_to_log(exception_desc)
return exception_desc
##################### clear_all ##################
def clear_all():
drop_result = drop_schema()
if drop_result == 'OK':
create_result = create_schema()
return create_result
else:
return drop_result
##################### MAIN ####################
if __name__ == "__main__":
pass
#Base.metadata.create_all(bind=engine)
#create_schema() | mit | -7,564,987,325,090,481,000 | 24.635036 | 124 | 0.569638 | false | 3.779333 | false | false | false |
steveb/heat | heat/common/exception.py | 1 | 15643 | #
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat exception subclasses"""
import sys
from oslo_log import log as logging
import six
from six import reraise as raise_
from heat.common.i18n import _
from heat.common.i18n import _LE
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
# TODO(kanagaraj-manickam): Expose this to user via REST API
ERROR_CODE_MAP = {
'99001': _("Service %(service_name)s is not available for resource "
"type %(resource_type)s, reason: %(reason)s")
}
@six.python_2_unicode_compatible
class HeatException(Exception):
"""Base Heat Exception.
To correctly use this class, inherit from it and define a 'msg_fmt'
property. That msg_fmt will get formatted with the keyword arguments
provided to the constructor.
"""
message = _("An unknown exception occurred.")
# error_code helps to provide an unique number for a given exception
# and is encoded in XXYYY format.
# Here, XX - For each of the entity type like stack, resource, etc
# an unique number will be provided. All exceptions for a entity will
# have same XX code.
# YYY - Specific error code for a given exception.
error_code = None
def __init__(self, **kwargs):
self.kwargs = kwargs
try:
if self.error_code in ERROR_CODE_MAP:
self.msg_fmt = ERROR_CODE_MAP[self.error_code]
self.message = self.msg_fmt % kwargs
if self.error_code:
self.message = 'HEAT-E%s %s' % (self.error_code, self.message)
except KeyError:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value}) # noqa
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise_(exc_info[0], exc_info[1], exc_info[2])
def __str__(self):
return self.message
def __deepcopy__(self, memo):
return self.__class__(**self.kwargs)
class MissingCredentialError(HeatException):
msg_fmt = _("Missing required credential: %(required)s")
class AuthorizationFailure(HeatException):
msg_fmt = _("Authorization failed.")
class NotAuthenticated(HeatException):
msg_fmt = _("You are not authenticated.")
class Forbidden(HeatException):
msg_fmt = _("You are not authorized to use %(action)s.")
def __init__(self, action='this action'):
super(Forbidden, self).__init__(action=action)
# NOTE(bcwaldon): here for backwards-compatibility, need to deprecate.
class NotAuthorized(Forbidden):
msg_fmt = _("You are not authorized to complete this action.")
class Invalid(HeatException):
msg_fmt = _("Data supplied was not valid: %(reason)s")
class UserParameterMissing(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not provided.")
class UnknownUserParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not defined in template.")
class InvalidTemplateVersion(HeatException):
msg_fmt = _("The template version is invalid: %(explanation)s")
class InvalidTemplateSection(HeatException):
msg_fmt = _("The template section is invalid: %(section)s")
class InvalidTemplateParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) has no attributes.")
class ImmutableParameterModified(HeatException):
msg_fmt = _("The following parameters are immutable and may not be "
"updated: %(keys)s")
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'keys': ", ".join(args)})
super(ImmutableParameterModified, self).__init__(**kwargs)
class InvalidTemplateAttribute(HeatException):
msg_fmt = _("The Referenced Attribute (%(resource)s %(key)s)"
" is incorrect.")
class InvalidTemplateReference(HeatException):
msg_fmt = _('The specified reference "%(resource)s" (in %(key)s)'
' is incorrect.')
class EntityNotFound(HeatException):
msg_fmt = _("The %(entity)s (%(name)s) could not be found.")
def __init__(self, entity=None, name=None, **kwargs):
self.entity = entity
self.name = name
super(EntityNotFound, self).__init__(entity=entity, name=name,
**kwargs)
class PhysicalResourceNameAmbiguity(HeatException):
msg_fmt = _(
"Multiple physical resources were found with name (%(name)s).")
class InvalidTenant(HeatException):
msg_fmt = _("Searching Tenant %(target)s "
"from Tenant %(actual)s forbidden.")
class StackExists(HeatException):
msg_fmt = _("The Stack (%(stack_name)s) already exists.")
class HeatExceptionWithPath(HeatException):
msg_fmt = _("%(error)s%(path)s%(message)s")
def __init__(self, error=None, path=None, message=None):
self.error = error or ''
self.path = []
if path is not None:
if isinstance(path, list):
self.path = path
elif isinstance(path, six.string_types):
self.path = [path]
result_path = ''
for path_item in self.path:
if isinstance(path_item, int) or path_item.isdigit():
result_path += '[%s]' % path_item
elif len(result_path) > 0:
result_path += '.%s' % path_item
else:
result_path = path_item
self.error_message = message or ''
super(HeatExceptionWithPath, self).__init__(
error=('%s: ' % self.error if self.error != '' else ''),
path=('%s: ' % result_path if len(result_path) > 0 else ''),
message=self.error_message
)
def error(self):
return self.error
def path(self):
return self.path
def error_message(self):
return self.error_message
class StackValidationFailed(HeatExceptionWithPath):
pass
class InvalidSchemaError(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotFound(EntityNotFound):
msg_fmt = _("The Resource (%(resource_name)s) could not be found "
"in Stack %(stack_name)s.")
class SnapshotNotFound(EntityNotFound):
msg_fmt = _("The Snapshot (%(snapshot)s) for Stack (%(stack)s) "
"could not be found.")
class InvalidGlobalResource(HeatException):
msg_fmt = _("There was an error loading the definition of the global "
"resource type %(type_name)s.")
class ResourceTypeUnavailable(HeatException):
error_code = '99001'
class InvalidBreakPointHook(HeatException):
msg_fmt = _("%(message)s")
class InvalidRestrictedAction(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotAvailable(HeatException):
msg_fmt = _("The Resource (%(resource_name)s) is not available.")
class ClientNotAvailable(HeatException):
msg_fmt = _("The client (%(client_name)s) is not available.")
class WatchRuleNotFound(EntityNotFound):
"""Keep this for AWS compatiblility."""
msg_fmt = _("The Watch Rule (%(watch_name)s) could not be found.")
class ResourceFailure(HeatExceptionWithPath):
def __init__(self, exception_or_error, resource, action=None):
self.resource = resource
self.action = action
if action is None and resource is not None:
self.action = resource.action
path = []
res_path = []
if resource is not None:
res_path = [resource.stack.t.get_section_name('resources'),
resource.name]
if isinstance(exception_or_error, Exception):
if isinstance(exception_or_error, ResourceFailure):
self.exc = exception_or_error.exc
error = exception_or_error.error
message = exception_or_error.error_message
path = exception_or_error.path
else:
self.exc = exception_or_error
error = six.text_type(type(self.exc).__name__)
message = six.text_type(self.exc)
path = res_path
else:
self.exc = None
res_failed = 'Resource %s failed: ' % action.upper()
if res_failed in exception_or_error:
(error, message, new_path) = self._from_status_reason(
exception_or_error)
path = res_path + new_path
else:
path = res_path
error = None
message = exception_or_error
super(ResourceFailure, self).__init__(error=error, path=path,
message=message)
def _from_status_reason(self, status_reason):
"""Split the status_reason up into parts.
Given the following status_reason:
"Resource DELETE failed: Exception : resources.AResource: foo"
        we are going to return (error, message, path):
        ("Exception", "foo", ["resources", "AResource"])
"""
parsed = [sp.strip() for sp in status_reason.split(':')]
if len(parsed) >= 4:
error = parsed[1]
message = ': '.join(parsed[3:])
path = parsed[2].split('.')
else:
error = ''
message = status_reason
path = []
return (error, message, path)
class NotSupported(HeatException):
msg_fmt = _("%(feature)s is not supported.")
class ResourceActionNotSupported(HeatException):
msg_fmt = _("%(action)s is not supported for resource.")
class ResourceActionRestricted(HeatException):
msg_fmt = _("%(action)s is restricted for resource.")
class ResourcePropertyConflict(HeatException):
msg_fmt = _('Cannot define the following properties '
'at the same time: %(props)s.')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(ResourcePropertyConflict, self).__init__(**kwargs)
class ResourcePropertyDependency(HeatException):
msg_fmt = _('%(prop1)s cannot be specified without %(prop2)s.')
class ResourcePropertyValueDependency(HeatException):
msg_fmt = _('%(prop1)s property should only be specified '
'for %(prop2)s with value %(value)s.')
class PropertyUnspecifiedError(HeatException):
msg_fmt = _('At least one of the following properties '
'must be specified: %(props)s.')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(PropertyUnspecifiedError, self).__init__(**kwargs)
class UpdateReplace(Exception):
"""Raised when resource update requires replacement."""
def __init__(self, resource_name='Unknown'):
msg = _("The Resource %s requires replacement.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class ResourceUnknownStatus(HeatException):
msg_fmt = _('%(result)s - Unknown status %(resource_status)s due to '
'"%(status_reason)s"')
def __init__(self, result=_('Resource failed'),
status_reason=_('Unknown'), **kwargs):
super(ResourceUnknownStatus, self).__init__(
result=result, status_reason=status_reason, **kwargs)
class ResourceInError(HeatException):
msg_fmt = _('Went to status %(resource_status)s '
'due to "%(status_reason)s"')
def __init__(self, status_reason=_('Unknown'), **kwargs):
super(ResourceInError, self).__init__(status_reason=status_reason,
**kwargs)
class UpdateInProgress(Exception):
def __init__(self, resource_name='Unknown'):
msg = _("The resource %s is already being updated.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class HTTPExceptionDisguise(Exception):
"""Disguises HTTP exceptions.
They can be handled by the webob fault application in the wsgi pipeline.
"""
def __init__(self, exception):
self.exc = exception
self.tb = sys.exc_info()[2]
class EgressRuleNotAllowed(HeatException):
msg_fmt = _("Egress rules are only allowed when "
"Neutron is used and the 'VpcId' property is set.")
class Error(HeatException):
msg_fmt = "%(message)s"
def __init__(self, msg):
super(Error, self).__init__(message=msg)
class NotFound(HeatException):
def __init__(self, msg_fmt=_('Not found')):
self.msg_fmt = msg_fmt
super(NotFound, self).__init__()
class InvalidContentType(HeatException):
msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(HeatException):
msg_fmt = _('Request limit exceeded: %(message)s')
class StackResourceLimitExceeded(HeatException):
msg_fmt = _('Maximum resources per stack exceeded.')
class ActionInProgress(HeatException):
msg_fmt = _("Stack %(stack_name)s already has an action (%(action)s) "
"in progress.")
class StopActionFailed(HeatException):
msg_fmt = _("Failed to stop stack (%(stack_name)s) on other engine "
"(%(engine_id)s)")
class EventSendFailed(HeatException):
msg_fmt = _("Failed to send message to stack (%(stack_name)s) "
"on other engine (%(engine_id)s)")
class InterfaceAttachFailed(HeatException):
msg_fmt = _("Failed to attach interface (%(port)s) "
"to server (%(server)s)")
class InterfaceDetachFailed(HeatException):
msg_fmt = _("Failed to detach interface (%(port)s) "
"from server (%(server)s)")
class UnsupportedObjectError(HeatException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(HeatException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(HeatException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(HeatException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ReadOnlyFieldError(HeatException):
msg_fmt = _('Cannot modify readonly field %(field)s')
class ConcurrentTransaction(HeatException):
msg_fmt = _('Concurrent transaction for %(action)s')
class ObjectFieldInvalid(HeatException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class KeystoneServiceNameConflict(HeatException):
msg_fmt = _("Keystone has more than one service with same name "
"%(service)s. Please use service id instead of name")
class SIGHUPInterrupt(HeatException):
msg_fmt = _("System SIGHUP signal received.")
class NoActionRequired(Exception):
pass
class InvalidServiceVersion(HeatException):
msg_fmt = _("Invalid service %(service)s version %(version)s")
| apache-2.0 | -5,168,321,806,644,968,000 | 29.91502 | 78 | 0.623729 | false | 4.086468 | false | false | false |
pmarcis/nlp-example | train-truecaser.py | 1 | 1640 | """
This script trains the TrueCase System
"""
import nltk
import os
import sys
import argparse
import cPickle
script_path=os.path.dirname(os.path.realpath(__file__))
truecaser_script_dir = os.path.join(script_path,"dependencies","truecaser")
sys.path.insert(1,truecaser_script_dir)
from TrainFunctions import *
def main(input_file, output_file):
uniDist = nltk.FreqDist()
backwardBiDist = nltk.FreqDist()
forwardBiDist = nltk.FreqDist()
trigramDist = nltk.FreqDist()
wordCasingLookup = {}
sentences = []
for line in input_file:
sentences.append(line.strip().decode('utf-8'))
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
updateDistributionsFromSentences(tokens, wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist)
cPickle.dump(uniDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(backwardBiDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(forwardBiDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(trigramDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(wordCasingLookup, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i',
type=argparse.FileType('r'),
default=sys.stdin, metavar='PATH',
help="Input file (default: standard input)")
parser.add_argument('--output', '-o', type=argparse.FileType('wb'), metavar='PATH',
help="Output file (binary)")
args = parser.parse_args()
main(args.input, args.output)
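# Example invocation (editor's addition; file names are hypothetical):
#   python train-truecaser.py -i tokenized_corpus.txt -o truecase_model.obj
# The pickled frequency distributions can later be read back with cPickle.load()
# in the same order they are dumped in main() above.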
| mit | -6,295,123,703,930,949,000 | 37.139535 | 115 | 0.710976 | false | 3.628319 | false | false | false |
jrydberg/edgy | src/edgy/xml/utils.py | 1 | 1436 | from edgy.xml.element import _namespace_map
def lookupPrefix(uri):
return _namespace_map.get(uri, None)
def findtext(n, qname, default=None):
for c in n.getchildren():
#print repr(c), qname
if c.tag == str(qname):
return c.text
return default
def find(n, qname, default=None):
for c in n.getchildren():
if c.tag == str(qname):
return c
return default
def findall(n, path):
"""Find all.
"""
new = n.getchildren()[:]
for comp in path:
n = [c for c in new if c.tag == comp]
#print repr(comp), repr(n)
if n:
new = []
for c in n:
new.extend(c.getchildren())
if not n:
break
return n
def findAndRemove(n, *path):
"""Find instance issued by path and remove it.
"""
for component in path:
if n is None:
break
parent, n = n, find(n, component)
if n is None:
raise Exception("Bad path")
parent.remove(n)
return n
def geturi(prefix, namespaces):
for p, uri in reversed(namespaces):
if p == prefix:
return uri
return None # not found
def splitTag(tag):
if tag[0] == '{':
return tag[1:].split('}', 1)
return None, tag
_split_tag = splitTag
def stripTag(tag):
tag = str(tag)
if tag[0] == '{':
return tag[1:].split('}', 1)[1]
return tag
| mit | -5,665,341,722,381,874,000 | 18.944444 | 50 | 0.533426 | false | 3.536946 | false | false | false |
EndPointCorp/lg_ros_nodes | lg_mirror/scripts/touch_router_node.py | 1 | 1598 | #!/usr/bin/env python3
from functools import partial
import rospy
import sys
from lg_mirror.touch_router import TouchRouter
from lg_common.helpers import on_new_scene, load_director_message
from lg_msg_defs.msg import StringArray
from lg_common.helpers import handle_initial_state
from lg_mirror.touch_router import SubscribeListener
from lg_msg_defs.srv import TouchRoutes
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'lg_mirror_router'
def main():
rospy.init_node(NODE_NAME)
default_viewport = rospy.get_param('~default_viewport', None)
device_id = rospy.get_param('~device_id', 'default')
router = TouchRouter(default_viewport)
route_topic = '/lg_mirror/{}/active_routes'.format(device_id)
def publish_active_routes(routes):
routes_pub.publish(StringArray(routes))
new_listener_cb = partial(router.handle_new_listener, publish_active_routes)
routes_pub = rospy.Publisher(
route_topic,
StringArray,
queue_size=10,
subscriber_listener=SubscribeListener(new_listener_cb)
)
# Hacky callback to parse the initial scene.
def handle_initial_scene_msg(msg):
d = load_director_message(msg)
router.handle_scene(publish_active_routes, d)
handle_initial_state(handle_initial_scene_msg)
rospy.Service(route_topic, TouchRoutes, router.handle_service_request)
scene_cb = partial(router.handle_scene, publish_active_routes)
on_new_scene(scene_cb)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| apache-2.0 | 7,490,353,579,126,744,000 | 27.535714 | 80 | 0.717772 | false | 3.451404 | false | false | false |
felipenaselva/repo.felipe | plugin.video.salts/scrapers/moviewatcher_scraper.py | 1 | 5058 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import urllib
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://moviewatcher.to'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'MovieWatcher'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, allow_redirect=False, cache_limit=0)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=1)
for item in dom_parser.parse_dom(html, 'a', {'class': 'full-torrent1'}):
stream_url = dom_parser.parse_dom(item, 'span', ret='onclick')
host = dom_parser.parse_dom(item, 'div', {'class': 'small_server'})
match = re.search('Views:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
views = match.group(1) if match else None
match = re.search('Size:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
size = int(match.group(1)) * 1024 * 1024 if match else None
if stream_url and host:
stream_url = stream_url[0]
host = host[0].lower()
host = host.replace('stream server: ', '')
match = re.search("'(/redirect/[^']+)", stream_url)
if match:
stream_url = match.group(1)
quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': views, 'rating': None, 'url': stream_url, 'direct': False}
if size is not None: hoster['size'] = scraper_utils.format_size(size, 'B')
hosters.append(hoster)
return hosters
def _get_episode_url(self, show_url, video):
episode_pattern = 'href="([^"]*/s0*%se0*%s(?!\d)[^"]*)' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
results = []
if video_type == VIDEO_TYPES.MOVIE:
vid_type = 'movies'
else:
vid_type = 'series'
search_url = urlparse.urljoin(self.base_url, '/search?query=%s&type=%s')
search_url = search_url % (urllib.quote_plus(title), vid_type)
html = self._http_get(search_url, allow_redirect=False, cache_limit=8)
if html.startswith('http'):
results = [{'url': scraper_utils.pathify_url(html), 'title': scraper_utils.cleanse_title(title), 'year': ''}]
else:
for item in dom_parser.parse_dom(html, 'div', {'class': 'one_movie-item'}):
match_url = dom_parser.parse_dom(item, 'a', ret='href')
match_title = dom_parser.parse_dom(item, 'img', ret='alt')
match_year = ''
if match_url and match_title:
match_url = match_url[0]
match_title = match_title[0]
if match_year:
match_year = match_year[0]
else:
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
| gpl-2.0 | 9,100,844,278,614,173,000 | 41.864407 | 167 | 0.566825 | false | 3.826021 | false | false | false |
xArm-Developer/xArm-Python-SDK | xarm/x3/utils.py | 1 | 4241 | # !/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
import time
import functools
from ..core.utils.log import logger
from .code import APIState
def check_modbus_baud(baud=2000000, _type='set', default=None):
def _check_modbus_baud(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
code = args[0].checkset_modbus_baud(baud)
if code != 0:
logger.error('check modbus baud is failed, code={}'.format(code))
return code if _type == 'set' else (code, default if default != -99 else [])
else:
return func(*args, **kwargs)
return decorator
return _check_modbus_baud
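# --- Hedged usage sketch (editor's addition) ---
# check_modbus_baud decorates methods whose owner (args[0]) exposes
# checkset_modbus_baud(); the fake class below is illustrative only, not part of the SDK.
def _demo_check_modbus_baud():
    class _FakeArm(object):
        def checkset_modbus_baud(self, baud):
            return 0  # pretend the baud rate is already set correctly

        @check_modbus_baud(baud=2000000, _type='get', default=None)
        def read_value(self):
            return 0, 42

    return _FakeArm().read_value()  # expected: (0, 42)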
def xarm_is_connected(_type='set'):
def _xarm_is_connected(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if args[0].connected:
return func(*args, **kwargs)
else:
logger.error('xArm is not connect')
return APIState.NOT_CONNECTED if _type == 'set' else (APIState.NOT_CONNECTED, 'xArm is not connect')
return decorator
return _xarm_is_connected
def xarm_is_ready(_type='set'):
def _xarm_is_ready(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if args[0].connected and kwargs.get('auto_enable', False):
if not args[0].ready:
args[0].motion_enable(enable=True)
args[0].set_mode(0)
args[0].set_state(0)
if args[0].connected:
if args[0].state_is_ready:
return func(*args, **kwargs)
else:
logger.error('xArm is not ready')
logger.info('Please check the arm for errors. If so, please clear the error first. '
'Then enable the motor, set the mode and set the state')
return APIState.NOT_READY if _type == 'set' else (APIState.NOT_READY, 'xArm is not ready')
else:
logger.error('xArm is not connect')
return APIState.NOT_CONNECTED if _type == 'set' else (APIState.NOT_CONNECTED, 'xArm is not connect')
return decorator
return _xarm_is_ready
def xarm_is_pause(_type='set'):
def _xarm_is_pause(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
args[0].check_is_pause()
return func(*args, **kwargs)
return decorator
return _xarm_is_pause
def xarm_wait_until_cmdnum_lt_max(only_wait=False):
def _xarm_wait_until_cmdnum_lt_max(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
ret = args[0].wait_until_cmdnum_lt_max()
if not only_wait and ret is not None:
args[0].log_api_info('API -> {} -> code={}'.format(func.__name__, ret), code=ret)
return ret
return func(*args, **kwargs)
return decorator
return _xarm_wait_until_cmdnum_lt_max
def xarm_is_not_simulation_mode(ret=0):
def _xarm_is_not_simulation_mode(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if not args[0].check_is_simulation_robot():
return func(*args, **kwargs)
else:
return ret
return decorator
return _xarm_is_not_simulation_mode
def api_log(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
ret = func(*args, **kwargs)
logger.info('{}, ret={}, args={}, kwargs={}'.format(func.__name__, ret, args[1:], kwargs))
return ret
return decorator
def compare_time(time1, time2):
try:
s_time = time.mktime(time.strptime(time1, '%Y-%m-%d'))
e_time = time.mktime(time.strptime(time2, '%Y-%m-%d'))
return int(s_time) - int(e_time) > 0
except:
return False
def compare_version(v1, v2):
for i in range(3):
if v1[i] > v2[i]:
return True
elif v1[i] < v2[i]:
return False
return False
| bsd-3-clause | -534,950,684,337,418,000 | 32.928 | 116 | 0.560481 | false | 3.615516 | false | false | false |
yassersouri/omgh | src/utils.py | 1 | 1183 | import numpy
import sklearn.metrics
import os
import cv2
import numpy as np
def mean_accuracy(groundtruth, predictions):
groundtruth_cm = sklearn.metrics.confusion_matrix(groundtruth, groundtruth).astype(numpy.float32)
predictions_cm = sklearn.metrics.confusion_matrix(predictions, groundtruth).astype(numpy.float32)
return numpy.mean(numpy.diag(predictions_cm) / numpy.diag(groundtruth_cm))
def ensure_dir(address):
if not os.path.exists(address):
os.makedirs(address)
def draw_bbox(img, bbox, color=100, width=2):
try:
bx, by, bw, bh = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
except:
bx, by, bw, bh = bbox
new_img = img.copy()
cv2.rectangle(new_img, (bx, by), (bx+bw, by+bh), color, width)
return new_img
def get_rect(img, rect_info):
xmin, xmax, ymin, ymax = rect_info
return img[xmin:xmax, ymin:ymax]
def get_rect_from_bbox(img, bbox):
by, bx, bw, bh = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
return img[bx:bx+bh, by:by+bw]
def l2_feat_norm(feat):
row_norms = np.linalg.norm(feat, axis=1)
new_feat = feat / row_norms[:, np.newaxis]
return new_feat
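# --- Illustrative example (editor's addition); values are hypothetical ---
#   l2_feat_norm(np.array([[3.0, 4.0], [0.0, 2.0]]))
#   -> array([[0.6, 0.8], [0.0, 1.0]])  # each row rescaled to unit L2 norm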
| mit | 7,947,427,460,157,325,000 | 26.511628 | 101 | 0.662722 | false | 2.935484 | false | false | false |
abinashk-inf/AstroBox | src/astroprint/api/cloud.py | 1 | 7527 | # coding=utf-8
__author__ = "AstroPrint Product Team <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import json
import uuid
from flask import request, jsonify, abort
from flask.ext.login import current_user
from requests import ConnectionError
from octoprint.server import restricted_access, SUCCESS
from octoprint.server.api import api
from octoprint.events import eventManager, Events
from astroprint.cloud import astroprintCloud, AstroPrintCloudNoConnectionException
from astroprint.printfiles import FileDestinations
from astroprint.printfiles.downloadmanager import downloadManager
from astroprint.printer.manager import printerManager
#~~ Cloud Slicer control
@api.route('/astroprint', methods=['DELETE'])
@restricted_access
def cloud_slicer_logout():
astroprintCloud().signout()
return jsonify(SUCCESS)
@api.route('/astroprint/private-key', methods=['POST'])
def set_private_key():
email = request.values.get('email')
password = request.values.get('password')
if email and password:
try:
if astroprintCloud().signin(email, password):
return jsonify(SUCCESS)
except (AstroPrintCloudNoConnectionException, ConnectionError):
abort(503, "AstroPrint.com can't be reached")
else:
abort(400)
abort(401)
@api.route('/astroprint/login-key', methods=['GET'])
@restricted_access
def get_login_key():
try:
key = astroprintCloud().get_login_key()
if key:
return jsonify(key)
except (AstroPrintCloudNoConnectionException, ConnectionError):
abort(503, "AstroPrint.com can't be reached")
abort(401)
@api.route('/astroprint/upload-data', methods=['GET'])
@restricted_access
def upload_data():
filePath = request.args.get('file')
if filePath:
uploadInfo = astroprintCloud().get_upload_info(filePath)
if uploadInfo:
if 'error' in uploadInfo:
if uploadInfo['error'] == 'no_user':
abort(401)
else:
abort(500)
else:
return json.dumps(uploadInfo)
else:
abort(500)
abort(400)
@api.route("/astroprint/print-files", methods=["GET"])
@restricted_access
def designs():
forceSyncCloud = request.args.get('forceSyncCloud')
cloud_files = json.loads(astroprintCloud().print_files(forceSyncCloud))
local_files = list(printerManager().fileManager.getAllFileData())
if cloud_files:
for p in cloud_files:
p['local_filename'] = None
p['last_print'] = None
p['uploaded_on'] = None
for i in range(len(local_files)):
if "cloud_id" in local_files[i] and p['id'] == local_files[i]['cloud_id']:
local_file = local_files[i]
p['local_filename'] = local_file['name']
p['local_only'] = False
p['uploaded_on'] = local_file['date']
if 'prints' in local_file \
and 'last' in local_file['prints'] \
and local_file['prints']['last'] \
and 'date' in local_file['prints']['last']:
p['last_print'] = local_file['prints']['last']['date']
del local_files[i]
break
cloud_files = sorted(cloud_files, key=lambda e: e['local_filename'] is None)
else:
cloud_files = []
if local_files:
for p in local_files:
p['id'] = uuid.uuid4().hex
p['local_filename'] = p['name']
p['local_only'] = True
p['last_print'] = None
p['uploaded_on'] = p['date']
if 'gcodeAnalysis' in p:
p['info'] = p['gcodeAnalysis']
del p['gcodeAnalysis']
else:
p['info'] = None
if 'prints' in p \
and 'last' in p['prints'] \
and p['prints']['last'] \
and 'date' in p['prints']['last']:
p['last_print'] = p['prints']['last']['date']
del p['prints']
else:
local_files = []
files = sorted(local_files + cloud_files, key=lambda e: e['last_print'], reverse=True)
return json.dumps(files)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["GET"])
@restricted_access
def design_download(print_file_id):
# ask chintan
# if request.headers.get("X-Api-Key") != settings().get(["api", "key"]):
if current_user is None or not current_user.is_authenticated or not current_user.publicKey:
abort(401)
em = eventManager()
def progressCb(progress):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "progress",
"id": print_file_id,
"progress": progress
}
)
def successCb(destFile, fileInfo):
if fileInfo is True:
#This means the files was already on the device
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id
}
)
else:
if printerManager().fileManager.saveCloudPrintFile(destFile, fileInfo, FileDestinations.LOCAL):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id,
"filename": printerManager().fileManager._getBasicFilename(destFile),
"info": fileInfo["info"]
}
)
else:
errorCb(destFile, "Couldn't save the file")
def errorCb(destFile, error):
if error == 'cancelled':
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "cancelled",
"id": print_file_id
}
)
else:
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "error",
"id": print_file_id,
"reason": error
}
)
if destFile and os.path.exists(destFile):
os.remove(destFile)
if astroprintCloud().download_print_file(print_file_id, progressCb, successCb, errorCb):
return jsonify(SUCCESS)
return abort(400)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["DELETE"])
@restricted_access
def cancel_design_download(print_file_id):
if downloadManager().cancelDownload(print_file_id):
return jsonify(SUCCESS)
else:
return abort(404)
@api.route("/astroprint/print-jobs/<string:print_job_id>/add-reason", methods=["PUT"])
@restricted_access
def update_cancel_reason(print_job_id):
if not "application/json" in request.headers["Content-Type"]:
return abort(400)
data = request.json
#get reason
reason = {}
if 'reason' in data:
reason['reason_id'] = data['reason']
if 'other_text' in data:
reason['other_text'] = data['other_text']
if reason:
if not astroprintCloud().updateCancelReason(print_job_id, reason):
return abort(500)
else:
return jsonify(SUCCESS)
else:
return abort(400)
| agpl-3.0 | 6,595,280,205,975,772,000 | 29.848361 | 107 | 0.554271 | false | 4.055496 | false | false | false |
bcgov/gwells | app/backend/aquifers/models/__init__.py | 1 | 31074 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import zipfile
import tempfile
import os
import copy
import reversion
from reversion.models import Version
from django.utils import timezone
from django.contrib.gis.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.contrib.contenttypes.fields import GenericRelation
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ValidationError
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.contrib.gis.geos.prototypes.io import wkt_w
from django.contrib.gis import geos
from gwells.models import AuditModel, CodeTableModel, BasicCodeTableModel
from gwells.db_comments.patch_fields import patch_fields
from .vertical_aquifer_extents import *
patch_fields()
class DynamicMaxValueValidator(MaxValueValidator):
"""
    MaxValueValidator cannot validate against a callable, so this subclass adds that ability.
    A model's validators can include a DynamicMaxValueValidator that accepts a callable
    (such as get_current_year), allowing validation against the current year at call time
    rather than against a constant value captured at import time.
"""
def __call__(self, value):
cleaned = self.clean(value)
limit_value = self.limit_value() if callable(self.limit_value) else self.limit_value
params = {'limit_value': limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def get_current_year() -> int:
return timezone.now().year
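# --- Hedged usage sketch (editor's addition, not part of the schema) ---
# A year-type column could cap its value at the current year by passing the callable
# rather than a constant (the field name below is hypothetical):
#
#   drilling_year = models.PositiveIntegerField(
#       validators=[MinValueValidator(1800), DynamicMaxValueValidator(get_current_year)])
#
# The limit is re-evaluated on every validation because __call__ above invokes
# limit_value() when it is callable.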
class WaterRightsPurpose(AuditModel):
"""
    Purpose codes for water rights licences
"""
code = models.CharField(primary_key=True, max_length=10,
db_column='water_rights_purpose_code')
description = models.CharField(max_length=100)
display_order = models.PositiveIntegerField(default=0)
effective_date = models.DateTimeField(default=timezone.now, null=False)
expiry_date = models.DateTimeField(default=timezone.make_aware(
timezone.datetime.max, timezone.get_default_timezone()), null=False)
class Meta:
db_table = 'water_rights_purpose_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Water Rights Purpose Codes'
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class WaterRightsLicence(AuditModel):
"""
Water rights licences for an aquifer
"""
# Unique in the water rights database we import from.
wrl_sysid = models.IntegerField(
primary_key=True,
verbose_name="Water Rights Licence System ID")
purpose = models.ForeignKey(
WaterRightsPurpose,
db_column='water_rights_purpose_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Water Rights Purpose Reference",
related_name='licences')
# A non-unique licence number, used to calculate allocations along with
# the quantity flag, below.
licence_number = models.BigIntegerField(db_index=True)
# QUANTITY FLAG is the code used to identify how the total quantity is assigned
# across multiple Points of Well Diversion (PWD) for a particular licence and purpose use,
# i.e., T, M, D, or P.
# Only in the case of 'M', the quantity is shared across wells in the licence.
quantity_flag = models.CharField(
max_length=1,
default='T',
choices=(('T', 'T'), ('M', 'M'), ('D', 'D'), ('P', 'P')))
quantity = models.DecimalField(
        max_digits=12, decimal_places=3, blank=True, null=True, verbose_name='Quantity')
effective_date = models.DateTimeField(default=timezone.now, null=False)
class Meta:
verbose_name_plural = 'Aquifer Licences'
def __str__(self):
return '{}'.format(self.licence_number)
class AquiferMaterial(CodeTableModel):
"""
Material choices for describing Aquifer Material
"""
code = models.CharField(
primary_key=True, max_length=10, db_column='aquifer_material_code',
db_comment=('Code for valid options for the broad grouping of geological material found in the'
' aquifer, i.e. SG, S, G, B'))
description = models.CharField(
max_length=100,
db_comment=('Describes the broad grouping of geological material found in the aquifer, i.e.,'
' Sand and Gravel, Sand, Gravel, Bedrock'))
class Meta:
db_table = 'aquifer_material_code'
ordering = ['code']
verbose_name_plural = 'Aquifer Material Codes'
db_table_comment = ('Describes the broad grouping of geological material found in the aquifer, i.e., '
'Sand and Gravel, Sand, Gravel, Bedrock')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferSubtype(CodeTableModel):
"""
Subtypes of Aquifer
"""
code = models.CharField(
primary_key=True, max_length=3, db_column='aquifer_subtype_code',
db_comment=("Categorizes an aquifer based on how it was formed geologically (depositional description). Understanding of how aquifers were formed governs important attributes such as their productivity, vulnerability to contamination as well as proximity and likelihood of hydraulic connection to streams. The code value is a combination of an aquifer type represented by a number and an optional letter representing a more specific aquifer sub-type. There are six major aquifer types, some with multiple subtypes. E.g. aquifer sub-type code 6b is comprised of the aquifer type number (6: Crystalline bedrock aquifers) and subtype letter (b) specifically described as: Fractured crystalline (igneous intrusive or metamorphic, meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from 1a to 6b."))
description = models.CharField(
max_length=100,
db_comment=('Descriptions that define how the aquifer was formed geologically'
' (depositional description). E.g. Unconfined sand and gravel - large river system,'
' Confined sand and gravel - glacial, Flat-lying to gently-dipping volcanic bedrock.'))
class Meta:
db_table = 'aquifer_subtype_code'
db_table_comment = ('Categorizes an aquifer based on how it was formed geologically (depositional'
' description). Understanding of how aquifers were formed governs important'
' attributes such as their productivity, vulnerability to contamination as well as'
' proximity and likelihood of hydraulic connection to streams. The code value is a'
' combination of an aquifer type represented by a number and an optional letter'
' representing a more specific aquifer sub-type. E.g. Crystalline bedrock aquifers)'
' and subtype letter, Fractured crystalline (igneous intrusive or metamorphic,'
' meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from'
' 1a to 6b.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferProductivity(CodeTableModel):
"""
Productivity choices for describing Aquifer
-------------------
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_productivity_code',
db_comment=('Valid code for the aquifer\'s productivity, which represent an aquifers ability to'
' transmit and yield groundwater; i.e., L, M, H'))
description = models.CharField(
max_length=100,
db_comment=('Describes the aquifer\'s productivity which represent an aquifers ability to'
' transmit and yield groundwater; i.e., Low, Moderate, High'))
class Meta:
db_table = 'aquifer_productivity_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Productivity Codes'
db_table_comment = ('Describes the aquifer\'s productivity which represent an aquifers ability to '
'transmit and yield groundwater; i.e., Low, Moderate, High')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferDemand(CodeTableModel):
"""
Demand choices for describing Aquifer
Note on db_comments: db_comment properties on model columns are
    overridden by the db_column_supplemental_comments provided below.
db_column_supplemental_comments provides an easier way for the DA to add/update
comments in bulk.
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_demand_code',
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
description = models.CharField(
max_length=100,
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
class Meta:
db_table = 'aquifer_demand_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Demand Codes'
db_table_comment = ('Describes the level of groundwater use at the time aquifer was mapped; i.e., High, '
'Moderate, Low.')
db_column_supplemental_comments = {
}
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class WaterUse(CodeTableModel):
"""
Type of Known Water Use choices for describing Aquifer
-------------------
"""
code = models.CharField(
primary_key=True, max_length=2, db_column='water_use_code',
db_comment=('Standard terms that define the type of known water use of an aquifer at the time of'
' mapping. It indicates the variability or diversity of uses of the aquifer water as'
' a supply source. I.e. Domestic, Multiple, Potential Domestic'))
description = models.CharField(
max_length=100,
db_comment=('Description of the standard terms that define the type of known water use of an'
' aquifer at the time of mapping. It indicates the variability or diversity of uses'
' of the aquifer water as a supply source. I.e. Domestic, Multiple, Potential'
' Domestic'))
class Meta:
db_table = 'water_use_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Water Use Codes'
db_table_comment = ('Describes the type of known water use of an aquifer at the time of mapping. It'
' indicates the variability or diversity of uses of the aquifer water as a supply'
' source. I.e. Domestic, Multiple, Potential Domestic')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class QualityConcern(CodeTableModel):
code = models.CharField(
primary_key=True, max_length=2, db_column='quality_concern_code',
db_comment=('Standard terms used to represent the extent of documented concerns of contaminants'
                    ' in the aquifer at the time of mapping. i.e. isolated, local, regional, none.'))
description = models.CharField(
max_length=100,
db_comment=('Description of the standard terms used to represent the extent of documented'
                    ' concerns of contaminants in the aquifer at the time of mapping. i.e. isolated,'
' local, regional, none.'))
class Meta:
db_table = 'quality_concern_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Quality Concern Codes'
db_table_comment = ('Extent of documented concerns of contaminants in the aquifer at the time of'
                            ' mapping. i.e. isolated, local, regional, none.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferVulnerabilityCode(CodeTableModel):
"""
    Vulnerability choices for describing Aquifer
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_vulnerability_code',
db_comment=('Code for the aquifer’s relative intrinsic vulnerability to impacts from human'
' activities on the land surface. Vulnerability is based on: the type, thickness,'
' and extent of geologic materials above the aquifer, depth to water table (or to'
' top of confined aquifer), and type of aquifer materials, i.e., L, M, H.'))
description = models.CharField(
max_length=100,
db_comment=('Describes an aquifer’s relative intrinsic vulnerability to impacts from human'
' activities on the land surface. Vulnerability is based on: the type, thickness,'
' and extent of geologic materials above the aquifer, depth to water table (or to'
' top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.'))
class Meta:
db_table = 'aquifer_vulnerability_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Vulnerability Codes'
db_table_comment = ('Describes an aquifer’s relative intrinsic vulnerability to impacts from human '
'activities on the land surface. Vulnerability is based on: the type, thickness, '
'and extent of geologic materials above the aquifer, depth to water table (or to '
'top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
@reversion.register()
class Aquifer(AuditModel):
"""
An underground layer of water-bearing permeable rock, rock fractures or unconsolidated materials
(gravel, sand, or silt), from which groundwater is extracted using a water well.
This table holds ONLY the aquifers to which we have associated one or more wells. It is not
the definitive source of all aquifers in the province.
Note on db_comments: db_comment properties on model columns are
    overridden by the db_column_supplemental_comments provided below.
db_column_supplemental_comments provides an easier way for the DA to add/update
comments in bulk.
"""
aquifer_id = models.AutoField(
primary_key=True, verbose_name="Aquifer ID Number",
db_comment=('System generated unique sequential number assigned to each mapped aquifer. The'
' aquifer_id identifies which aquifer a well is in. An aquifer can have multiple'
' wells, while a single well can only be in one aquifer.'))
aquifer_name = models.CharField(
max_length=100, blank=True, null=True,
db_comment=('Name assigned for a specific aquifer. Typically derived from geographic names or names '
'in common use, but may also be lithologic or litho-stratigraphic units, e.g. '
'Abbotsford-Sumas, McDougall Creek Deltaic.'))
location_description = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Description of Location',
db_comment=('Brief description of the geographic location of the aquifer. The description is usually '
'referenced to a nearby major natural geographic area or community, e.g., Grand Forks.'))
material = models.ForeignKey(
AquiferMaterial,
db_column='aquifer_material_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Material Reference",
related_name='aquifers',
db_comment=('Code for valid options for the broad grouping of geological material found in the'
' aquifer, i.e. SG, S, G, B'))
subtype = models.ForeignKey(
AquiferSubtype,
db_column='aquifer_subtype_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Subtype Reference",
related_name='aquifers',
db_comment=('Categorizes an aquifer based on how it was formed geologically (depositional'
' description). Understanding of how aquifers were formed governs important'
' attributes such as their productivity, vulnerability to contamination as well as'
' proximity and likelihood of hydraulic connection to streams. The code value is a'
' combination of an aquifer type represented by a number and an optional letter'
' representing a more specific aquifer sub-type. E.g. 1a, 2, 6a.'))
area = models.DecimalField(
max_digits=5, decimal_places=1, blank=True, null=True, verbose_name='Size (square km)',
db_comment='Approximate size of the aquifer in square kilometers.')
vulnerability = models.ForeignKey(
AquiferVulnerabilityCode,
# TODO: Spelling mistake below!
db_column='aquifer_vulnerablity_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Aquifer Vulnerabiliy",
db_comment=('Standard terms used to define an aquifer’s relative intrinsic vulnerability to'
' impacts from human activities on the land surface. Vulnerability is based on: the'
' type, thickness, and extent of geologic materials above the aquifer, depth to'
' water table (or to top of confined aquifer), and type of aquifer materials, i.e.,'
' Low, Moderate, High.'))
productivity = models.ForeignKey(
AquiferProductivity,
db_column='aquifer_productivity_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Productivity Reference",
related_name='aquifers',
db_comment=('Valid code for the aquifer\'s productivity, which represent an aquifers ability to'
' transmit and yield groundwater; i.e., L, M, H'))
demand = models.ForeignKey(
AquiferDemand,
db_column='aquifer_demand_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Demand Reference",
related_name='aquifers',
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
known_water_use = models.ForeignKey(
WaterUse,
db_column='water_use_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Known Water Use Reference",
related_name='aquifers',
db_comment=('Standard terms that define the type of known water use of an aquifer at the time of'
' mapping. It indicates the variability or diversity of uses of the aquifer water as'
' a supply source. I.e. Domestic, Multiple, Potential Domestic'))
quality_concern = models.ForeignKey(
QualityConcern,
db_column='quality_concern_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Quality Concern Reference",
related_name='aquifers',
db_comment=('Standard terms used to represent the extent of documented concerns of contaminants'
                    ' in the aquifer at the time of mapping. i.e. isolated, local, regional, none.'))
litho_stratographic_unit = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Lithographic Stratographic Unit',
db_comment=('Permeable geologic unit (where available) that comprises the aquifer. It is typically '
'either; the era of deposition, the name of a specific formation and/or the broad '
'material types, e.g., Paleozoic to Mesozoic Era, Cache Creek Complex, Intrusive Rock.'))
mapping_year = models.PositiveIntegerField(
validators=[
MinValueValidator(1990),
            # Pass the callable itself (not its result) so the limit is evaluated at validation time.
            DynamicMaxValueValidator(get_current_year)],
blank=True,
null=True,
verbose_name="Date of Mapping",
help_text="Use the following format: <YYYY>",
db_comment='The year the aquifer was initially mapped or last updated.')
notes = models.TextField(
max_length=2000,
blank=True,
null=True,
verbose_name='Notes on Aquifer, for internal use only.',
db_comment=('Details about the mapped aquifer that the province deems important to maintain such as'
' local knowledge about the aquifer or decisions for changes related to attributes of'
' the mapped aquifer.'))
effective_date = models.DateTimeField(
default=timezone.now, null=False,
db_comment='The date and time that the aquifer became published.')
expiry_date = models.DateTimeField(
default=timezone.make_aware(timezone.datetime.max, timezone.get_default_timezone()), null=False,
db_comment='The date and time after which the aquifer became unpublished.')
retire_date = models.DateTimeField(
default=timezone.make_aware(timezone.datetime.max, timezone.get_default_timezone()), null=False,
db_comment='The date and time after which the aquifer is considered to be retired')
geom = models.MultiPolygonField(srid=3005, null=True)
# This version is pre-rendered in WGS 84 for display on web-maps.
# Only used by the v1 API
geom_simplified = models.MultiPolygonField(srid=4326, null=True)
history = GenericRelation(Version)
@property
def status_retired(self):
return timezone.now() > self.retire_date
@property
def status_draft(self):
return timezone.now() < self.effective_date
@property
def status_published(self):
now = timezone.now()
return now >= self.effective_date and now < self.expiry_date
@property
def status_unpublished(self):
now = timezone.now()
return now >= self.expiry_date
def load_shapefile(self, f):
"""
Given a shapefile with a single feature, update spatial fields of the aquifer.
You must still call aquifer.save() afterwards.
"""
try:
zip_ref = zipfile.ZipFile(f)
except zipfile.BadZipFile as e:
raise Aquifer.BadShapefileException(str(e))
ret = zip_ref.testzip()
if ret is not None:
raise Aquifer.BadShapefileException("Bad zipfile, info: %s" % ret)
the_shapefile = None
output_dir = tempfile.mkdtemp()
for item in zip_ref.namelist():
# Check filename endswith shp
zip_ref.extract(item, output_dir)
if item.endswith('.shp'):
# Extract a single file from zip
the_shapefile = os.path.join(output_dir, item)
# break
zip_ref.close()
if the_shapefile is None:
raise Aquifer.BadShapefileException("Bad zipfile. No shapefile found.")
ds = DataSource(the_shapefile)
self.update_geom_from_feature(ds[0][0])
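    # Usage sketch (illustrative; the object lookup and file name are hypothetical):
    #
    #   aquifer = Aquifer.objects.get(aquifer_id=123)
    #   with open('aquifer_123_shapefile.zip', 'rb') as f:
    #       aquifer.load_shapefile(f)
    #   aquifer.save()  # save() must still be called, as noted in the docstring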
def update_geom_from_feature(self, feat):
"""
Given a spatial feature with Geometry, update spatial fields of the aquifer.
You must still call aquifer.save() afterwards.
"""
geom = feat.geom
if not geom.srid:
raise Aquifer.BadShapefileException("Shapefile contains no projection information")
# Make a GEOSGeometry object using the string representation.
# Eliminate any 3d geometry so it fits in PostGIS' 2d geometry schema.
wkt = wkt_w(dim=2).write(GEOSGeometry(geom.wkt, srid=geom.srid)).decode()
geos_geom = GEOSGeometry(wkt, srid=geom.srid)
geos_geom.transform(3005)
# Convert plain Polygons to MultiPolygons,
if isinstance(geos_geom, geos.MultiPolygon):
geos_geom_out = geos_geom
elif isinstance(geos_geom, geos.Polygon):
geos_geom_out = MultiPolygon(geos_geom)
else:
raise Aquifer.BadShapefileException("Bad geometry type: {}, skipping.".format(geos_geom.__class__))
self.geom = geos_geom_out
class Meta:
db_table = 'aquifer'
ordering = ['aquifer_id']
verbose_name_plural = 'Aquifers'
db_table_comment = ('A geological formation, a group of geological formations, or a part of one or more '
'geological formations that is groundwater bearing and capable of storing, '
'transmitting and yielding groundwater.')
class BadShapefileException(Exception):
pass
def __str__(self):
return '{} - {}'.format(self.aquifer_id, self.aquifer_name)
db_column_supplemental_comments = {
"aquifer_demand_code": "Describes the level of groundwater use at the time the aquifer was mapped; i.e., High, Moderate, Low.",
"aquifer_id": "System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.",
"aquifer_material_code": "Describes the broad grouping of geological material found in the aquifer, i.e., Sand and Gravel, Sand, Gravel, Bedrock",
"aquifer_productivity_code": "Describes the aquifer's productivity which represent an aquifers ability to transmit and yield groundwater; i.e., Low, Moderate, High",
"aquifer_subtype_code": "Categorizes an aquifer based on how it was formed geologically (depositional description). Understanding of how aquifers were formed governs important attributes such as their productivity, vulnerability to contamination as well as proximity and likelihood of hydraulic connection to streams. The code value is a combination of an aquifer type represented by a number and an optional letter representing a more specific aquifer sub-type. There are six major aquifer types, some with multiple subtypes. E.g. aquifer sub-type code 6b is comprised of the aquifer type number (6: Crystalline bedrock aquifers) and subtype letter (b) specifically described as: Fractured crystalline (igneous intrusive or metamorphic, meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from 1a to 6b.",
"aquifer_vulnerablity_code": "Describes an aquifer’s relative intrinsic vulnerability to impacts from human activities on the land surface. Vulnerability is based on: the type, thickness, and extent of geologic materials above the aquifer, depth to water table (or to top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.",
"quality_concern_code": "Extent of documented concerns of contaminants in the aquifer at the time of mapping. i.e. isloated, local, regional, none.",
"water_use_code": "Describes the type of known water use of an aquifer at the time of mapping. It indicates the variability or diversity of uses of the aquifer water as a supply source. I.e. Domestic, Multiple, Potential Domestic",
}
@receiver(pre_save, sender=Aquifer)
def update_geom_simplified(sender, instance, **kwargs):
geos_geom_simplified = None
if instance.geom:
simplified_polygons = []
for poly in instance.geom:
geos_geom_simplified = poly.simplify(40, preserve_topology=True)
geos_geom_simplified.transform(4326)
simplified_polygons.append(geos_geom_simplified)
geos_geom_simplified = MultiPolygon(simplified_polygons)
instance.geom_simplified = geos_geom_simplified
@receiver(pre_save, sender=Aquifer)
def update_area(sender, instance, **kwargs):
area = None
if instance.geom:
area = instance.geom.area / 1_000_000 # convert to km²
instance.area = area
class AquiferResourceSection(BasicCodeTableModel):
"""
Defines the available sections (categories) of aquifer resources.
"""
code = models.CharField(primary_key=True, max_length=1,
db_column='aquifer_resource_section_code')
name = models.CharField(max_length=100)
description = models.CharField(max_length=100, default="")
class Meta:
ordering = ['name']
verbose_name_plural = 'Aquifer Resource Sections'
db_table = 'aquifer_resource_section_code'
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferResource(AuditModel):
"""
A PDF document associated with a given aquifer.
"""
id = models.AutoField(
primary_key=True,
verbose_name="Aquifer Resource Identifier",
db_column='aquifer_resource_id')
aquifer = models.ForeignKey(
Aquifer,
related_name='resources',
on_delete=models.CASCADE,
db_comment=('System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.'))
section = models.ForeignKey(
AquiferResourceSection,
db_column='aquifer_resource_section_code',
verbose_name="Aquifer Resource Section",
on_delete=models.PROTECT,
help_text="The section (category) of this resource.")
name = models.CharField(
max_length=255,
verbose_name="Aquifer Resource Name",
help_text="",
db_comment=('Descriptive name of the document at the URL that contains the internal or external information about the aquifer.')
)
url = models.URLField(
verbose_name="PDF Document URL",
max_length=255,
help_text="A resolvable link to the PDF document associated with this aquifer resource.",
db_comment=('The web address where the internal or external information about the aquifer can be found.'))
class Meta:
ordering = ['name']
verbose_name_plural = 'Aquifer Resource'
def __str__(self):
return self.name
| apache-2.0 | 2,642,793,636,649,225,000 | 46.496942 | 841 | 0.661655 | false | 3.890656 | false | false | false |
wfxiang08/Nuitka | misc/make-pypi-upload.py | 1 | 2245 | #!/usr/bin/env python
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Make PyPI upload of Nuitka, and check success of it. """
from __future__ import print_function
import os
import subprocess
import time
import xmlrpclib
nuitka_version = subprocess.check_output(
"./bin/nuitka --version", shell = True
).strip()
branch_name = subprocess.check_output(
"git name-rev --name-only HEAD".split()
).strip()
assert branch_name == "master", branch_name
assert "pre" not in nuitka_version
# Need to remove the ".. contents::" directive from the ReST, or else PyPI
# will not render it. Stupid but true.
contents = open("README.rst", "rb").read()
contents = contents.replace(b".. contents::", b"")
open("README.rst", "wb").write(contents)
contents = open("README.rst", "rb").read()
assert b".. contents" not in contents
assert 0 == os.system("misc/make-doc.py")
assert 0 == os.system("python setup.py sdist upload")
# A delay might be necessary before making the check.
for i in range(60):
# Wait some time for PyPI to catch up with us. Without delay
# the old version will still appear. Since this is running
# in a Buildbot, we need not be optimal.
time.sleep(5*60)
pypi = xmlrpclib.ServerProxy("http://pypi.python.org/pypi")
pypi_versions = pypi.package_releases("Nuitka")
assert len(pypi_versions) == 1, pypi_versions
if nuitka_version == pypi_versions[0]:
break
print("Version check failed:", nuitka_version, pypi_versions)
print("Uploaded OK:", pypi_versions[0])
| apache-2.0 | -1,278,441,740,454,644,000 | 32.014706 | 78 | 0.700223 | false | 3.541009 | false | false | false |
Parallels/githooks | test.py | 1 | 19045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
'''
Unit tests for githooks
How it works:
* Create a workspace tmp/ in cwd, set up a dummy STASH_HOME,
a remote repo and a local repo there.
* Replace temp/remote_repo.git/hooks/update in the remote repo with
hook_fixture.py. The hook_fixture.py script doesn’t do anything
but dumps the arguments it is called with to a file (branch and
2 hashes, old and new).
* Each unit test in test.py modifies the local repo somehow, commits
the changes and then runs `git push` asynchronously. `git push`
invokes the update hook (hook_fixture.py) in tmp/remote_repo.git.
The hook script dumps its arguments to a file tmp/request.json.
* The unit test (test.py) waits until tmp/request.json is written,
reads it in and removes the file. Then, it instantiates the Hook
object from the hook module it tests, and performs various testing
using the data from tmp/request.json.
* When the testing is done, the unit test writes a response file
tmp/response.json for the update script (the update script waits
until it is able to read this file). The response file contains
the testing exit code. The update script reads in the file, removes
it and returns the exit code to git (asynchronously called from the
unit test in test.py).
'''
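# Illustrative sketch (not used by these tests) of the protocol the real
# hook_fixture.py follows, based on the description above: dump the update
# hook's arguments to tmp/request.json, then block until the unit test writes
# tmp/response.json and exit with the code found there. The file names and
# layout come from the docstring; everything else is an assumption.
def _hook_fixture_sketch(argv, base_dir='tmp'):
    import json
    import os
    import time
    request = os.path.join(base_dir, 'request.json')
    response = os.path.join(base_dir, 'response.json')
    # argv is [ref, old_sha1, new_sha1] as passed by git to the update hook.
    with open(request, 'w') as f:
        json.dump(argv, f)
    # Block until the unit test has written its verdict.
    while not os.path.exists(response):
        time.sleep(0.1)
    with open(response) as f:
        code, _message = json.load(f)
    os.remove(response)
    return code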
import unittest
import subprocess
import shutil
import os
import multiprocessing
import json
import sys
import logging
from time import sleep
import githooks
def git(cmd, repo=None):
if repo:
return subprocess.check_output(['git', '-C', repo] + cmd,
stderr=subprocess.STDOUT)
else:
return subprocess.check_output(['git'] + cmd,
stderr=subprocess.STDOUT)
def git_async(cmd, repo=None):
def call_git(cmd, repo=None, result=None):
try:
result.put([0, git(cmd, repo)])
except subprocess.CalledProcessError, e:
result.put([e.returncode, e.output])
result = multiprocessing.Queue()
proc = multiprocessing.Process(target=call_git, args=(cmd, repo, result))
proc.start()
return [proc, result]
def git_async_result(git_call):
git_call[0].join()
result = git_call[1].get()
if result[0] == 0:
return result[1]
else:
raise subprocess.CalledProcessError(result[0], 'git', result[1])
def write_string(filename, string):
with open(filename, 'w+') as f:
f.write(string)
class TestBase(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
self.base = os.path.join(self.cwd, 'tmp')
self.cleanUp()
os.mkdir(self.base)
self.remote_repo = os.path.join(self.base, 'remote_repo.git')
self.repo = os.path.join(self.base, 'repo')
# Create tmp/test.conf
with open(os.path.join(self.base, 'test.conf'), 'w') as f:
f.write(json.dumps({"line_endings":[],
"notify":[],
"email_mention":[]},
indent=4))
gh = githooks.Githooks(conf_file='test.conf', ini_file='testhooks.ini',
repo_dir=self.remote_repo)
self.hooks = dict(zip(gh.conf.keys(), gh.hooks))
# Set up repositories
self.__setup_remote_repo()
self.__setup_local_repo()
self.__add_remote_repo()
self.hook_request = os.path.join(self.base, 'request.json')
self.hook_response = os.path.join(self.base, 'response.json')
os.chdir(self.repo)
def cleanUp(self):
base = self.base
if os.path.isdir(base):
shutil.rmtree(base)
def __setup_remote_repo(self):
git(['init', '--bare', self.remote_repo])
shutil.copy(os.path.join(self.cwd, 'hook_fixture.py'),
os.path.join(self.remote_repo, 'hooks', 'update'))
def __setup_local_repo(self):
git(['init', self.repo])
git(['config', 'push.default', 'simple'], self.repo)
def __add_remote_repo(self):
git(['remote', 'add', 'origin', self.remote_repo], self.repo)
def get_request(self):
request = self.hook_request
attempts = 0
while 1:
if not os.path.exists(request):
attempts = attempts + 1
sleep(0.1)
else:
break
if attempts >= 200:
raise RuntimeError('Timeout exceeded')
with open(request) as f:
data = f.read()
os.remove(request)
return json.loads(data)
def write_response(self, code, data):
with open(self.hook_response, 'w+') as f:
f.write(json.dumps([code, data]))
def tearDown(self):
os.chdir(self.cwd)
#self.cleanUp()
class TestBasicHooks(TestBase):
def test_successful_hook(self):
write_string('foo.txt', 'data')
git(['add', 'foo.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
def test_failed_hook(self):
write_string('foo.txt', 'otherdata')
git(['add', 'foo.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(1, 'hook_failed')
with self.assertRaises(subprocess.CalledProcessError) as cm:
git_async_result(git_call)
self.assertRegexpMatches(cm.exception.output, ".*hook_failed.*")
class TestLineEndings(TestBase):
def test_get_attr(self):
write_string('a.txt', 'data')
write_string('b.txt', 'data')
write_string('c.txt', 'data')
write_string('.gitattributes', 'a.txt binary\nb.txt text')
git(['add', 'a.txt', 'b.txt', 'c.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
import hookutil
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'a.txt', 'binary'),
'set')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'a.txt', 'text'),
'unset')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'b.txt', 'binary'),
'unspecified')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'b.txt', 'text'),
'set')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'c.txt', 'binary'),
'unspecified')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'c.txt', 'text'),
'unspecified')
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook(self):
write_string('a.txt', 'data\n')
write_string('.gitattributes', 'a.txt text')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["line_endings"]
self.assertTrue(hook.check(request[0], request[1], request[2])[0])
self.write_response(0, 'success')
git_async_result(git_call)
def test_failed_hook(self):
git(['config', 'core.autocrlf', 'false'])
write_string('a.txt', 'data\r\n\n')
write_string('b.txt', 'data\r\n\n')
write_string('c.txt', 'data\r\n\n')
# git will normalize eols when attr 'text' is set
write_string('.gitattributes', 'a.txt text')
git(['add', 'a.txt', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git(['add', 'c.txt'])
git(['commit', '-m', 'second commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["line_endings"]
permit, messages = hook.check(request[0], request[1], request[2])
self.assertFalse(permit)
self.assertTrue(len(messages) == 2)
self.assertTrue([message['text'] for message in messages] == [
"Error: file 'c.txt' has mixed line endings (CRLF/LF)",
"Error: file 'b.txt' has mixed line endings (CRLF/LF)"
])
self.write_response(0, 'success')
git_async_result(git_call)
class TestNotify(TestBase):
def test_compose_mail(self):
write_string('a.txt', 'data')
write_string('b.txt', 'data')
write_string('.gitattributes', 'a.txt [email protected],[email protected]\nb.txt [email protected]')
git(['add', 'a.txt', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue('<b>Branch:</b> master' in text)
self.assertTrue('Commit: %s' % request[2] in text)
self.assertTrue('A a.txt' in text)
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue('<b>Branch:</b> master' in text)
self.assertTrue('Commit: %s' % request[2] in text)
self.assertTrue('A a.txt' in text)
self.assertTrue('A b.txt' in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_merge_commit(self):
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', '-b', 'hotfix'])
write_string('a.txt', 'newdata')
write_string('.gitattributes', 'b.txt [email protected]')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'hotfix'])
git(['checkout', 'master'])
git(['checkout', '-b', 'feature'])
write_string('b.txt', 'reallynewdata')
git(['add', 'b.txt'])
git(['commit', '-m', 'feature'])
git(['checkout', 'master'])
git(['merge', 'hotfix'])
git(['merge', 'feature'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue("Merge branch 'feature'\n\n\tA b.txt" in text)
self.assertTrue("feature\n\n\tA b.txt" in text)
self.assertFalse("hotfix\n\n\tM a.txt" in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook(self):
write_string('a.txt', 'data')
write_string('.gitattributes', '*.txt owners=somebody,andmore')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', '-b', 'feature/test'])
write_string('a.txt', 'newdata')
git(['add', 'a.txt'])
git(['commit', '-m', 'update a.txt'])
write_string('c.txt', 'data')
write_string('a.txt', 'againnewdata')
git(['add', 'c.txt', 'a.txt'])
git(['commit', '-m', 'create c.txt, update a.txt'])
git_call = git_async(['push', '-u', 'origin', 'feature/test'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', 'master'])
write_string('b.txt', 'data')
git(['add', 'b.txt'])
git(['commit', '-m', 'create b.txt'])
git(['merge', 'feature/test'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
hook.settings = [
"refs/heads/master"
]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('somebody' in owners)
text = owners['somebody']
self.assertTrue('andmore' in owners)
self.assertTrue(text == owners['andmore'])
self.assertTrue("Merge branch 'feature/test'\n\n\tM a.txt\n\tA c.txt" in text)
self.assertTrue("create b.txt\n\n\tA b.txt" in text)
self.assertTrue("create c.txt, update a.txt\n\n\tM a.txt\n\tA c.txt" in text)
self.assertTrue("update a.txt\n\n\tM a.txt" in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook_send(self):
hook = self.hooks["notify"]
assert hook.params['smtp_from'], 'please configure smtp_from to run this test'
write_string('a.txt', 'data')
write_string('b.txt', 'data')
git(['add', 'a.txt', 'b.txt'])
git(['commit', '-m', 'initial commit'])
sleep(1)
git_call = git_async(['push', '-u', 'origin', 'master:another'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
write_string('b.txt', 'dat')
write_string('.gitattributes', '*.txt owners=%s' % hook.params['smtp_from'])
git(['add', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'second commit'])
sleep(1)
write_string('a.txt', 'dat')
git(['add', 'a.txt'])
# Test long commit message trimming
mes = ' length over one hundred symbols'
git(['commit', '-m', 'third commit' + mes + mes + mes])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook.settings = [
"refs/heads/master"
]
hook.check(request[0], request[1], request[2])
self.write_response(0, 'success')
git_async_result(git_call)
class TestEmailMention(TestBase):
'''
Test email_mention hook.
'''
def test_compose_mail_simple(self):
'''
Test simpliest commit message:
Some feature.
@somebody
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature.\n@somebody'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_dot_end(self):
'''
Test dot in the end of username:
Some feature.
CC @somebody.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature.\nCC @somebody.'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_mention_at_begin(self):
'''
Test in the beginning of commit message:
@somebody check it
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', '@somebody check it'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_many_mentions(self):
'''
Test a list of usernames:
Some feature @somebody,@andmore
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody,@andmore'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 2)
self.assertTrue('[email protected]' in mails)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_many_mentions_and_commits(self):
'''
Test composing mails across several commits.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody'])
write_string('a.txt', 'newdata')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody @andmore.'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 2)
self.assertTrue('[email protected]' in mails)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_mention_email(self):
'''
Test do not parse email addresses.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature [email protected]'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 0)
self.write_response(0, 'success')
git_async_result(git_call)
if __name__ == '__main__':
unittest.main()
| mit | -1,477,752,506,485,911,800 | 32.350263 | 123 | 0.564722 | false | 3.531059 | true | false | false |
jackyyf/paste.py | src/paste.py | 1 | 2800 | _version = '0.0.1'
import sys
import os
import argparse
from lib import logger, config, uri
from lib.provider import ProviderBase, getProvider
# Change default encoding to UTF-8
reload(sys)
sys.setdefaultencoding('UTF-8')
del sys.setdefaultencoding
sys.path = [os.path.abspath('.')] + sys.path
class _Smart_formatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
# this is the RawTextHelpFormatter._split_lines
if '\n' in text:
return text.splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def run():
parser = argparse.ArgumentParser(prog='paste.py', description='Push to or pull from paste pads!',
conflict_handler='resolve', add_help=False,
formatter_class=_Smart_formatter)
opt_common = parser.add_argument_group('Common Options')
opt_common.add_argument('-h', '--help', action='help',
help='Print this help message and exit.\n'
'Use `paste.py provider -h` for specific information.')
opt_common.add_argument('-V', '--version', action='version', version='%(prog)s ' + _version)
opt_log = parser.add_argument_group('Logging Options')
opt_log.add_argument('--verbose', '-v', action='store_const', dest='log.level', const=logger.Level.INFO,
default=logger.Level.WARN, help='Enable verbose output.')
opt_log.add_argument('--debug', '-g', action='store_const', dest='log.level', const=logger.Level.DEBUG,
help='Enable debug output. (VERY VERBOSE!)')
opt_log.add_argument('--quiet', '-q', action='store_const', dest='log.level', const=logger.Level.ERROR,
help='Just be quiet, output only error message.')
opt_log.add_argument('--simple-log', action='store_const', dest='log.format', const='{message}',
default=None, help='Output just simple message without timestamp, log level etc.')
opt_log.add_argument('--no-color', action='store_const', dest='log.colorize', const=False,
default=True, help='Disable colorful output. Note: colorful is always false if output file is not a terminal.')
opt_action = parser.add_subparsers(title='Paste pads', help='introduction', metavar='provider', dest='provider')
__import__('providers', globals(), locals())
for provider in ProviderBase.__subclasses__():
ins = provider()
opt_ins = opt_action.add_parser(ins._name, help=ins._info, conflict_handler='resolve')
ins.add_args(opt_ins)
args = parser.parse_args()
conf = config.getConfig()
for arg in args._get_kwargs():
conf.set(arg[0], arg[1])
logger.init(colorize=conf.getboolean('log.colorize'), level=conf.getint('log.level'), log_format=conf.get('log.format'))
getProvider(conf.get('provider')).run()
if __name__ == '__main__':
run()
| mit | 9,107,660,406,084,468,000 | 46.457627 | 121 | 0.666786 | false | 3.553299 | false | false | false |
AnhellO/DAS_Sistemas | Ene-Jun-2021/guerrero-lopez-cristian-edgardo/PARCIAL 1 PRACTICA 1/Practica 1.py | 1 | 1520 | class Automovil:
def __init__(self, Vel, Kilometraje_Marcado, Cupo_Max):
self.Vel = Vel
self.Kilometraje_Marcado = Kilometraje_Marcado
self.Cupo_Max = Cupo_Max
def Costo(self, Cupo_Max):
Costo = self.Cupo_Max*100
return Costo
def Precio(self, Cupo_Max):
Precio = self.Cupo_Max*100
Precio1 = (Precio*.1) + Precio
return Precio1
random=(50)
def __str__(self):
return f"AUTOMOVIL CON VELOCIDAD MAXIMA DE: {self.Vel}\n Kilometraje_Marcado MARCADO: {self.Kilometraje_Marcado}\n Cupo_Max TOTAL: {self.Cupo_Max}\n "
class Camion(Automovil):
def __init__(self, Vel, Kilometraje_Marcado, Cupo_Max):
Automovil.__init__(self, Vel, Kilometraje_Marcado, Cupo_Max)
def __str__(self):
return f"CAMION CON VELOCIDAD MAXIMA DE: {self.Vel}\n Kilometraje_Marcado MARCADO: {self.Kilometraje_Marcado}\n Cupo_Max TOTAL: {self.Cupo_Max}\n"
if __name__ == "__main__":
Camion1=Camion(300,100000,45)
Auto1=Automovil(150,3251212,4)
Camion2=Camion(400,60000,50)
Auto2=Automovil(100,5160,8)
Lista_Chida = [Camion1,Auto1,Camion2,Auto2]
for z in Lista_Chida:
if isinstance(z, Camion):
m = z.Precio(z.Cupo_Max)
print(f"{z} EL TOTAL EN ESTA OCACION ES DE: {m} ")
elif isinstance(z, Automovil):
n = z.Costo(z.Cupo_Max)
print(f"{z} EL TOTAL EN ESTA OCACION ES DE: {n} ") | mit | -3,127,128,528,588,065,300 | 34.372093 | 157 | 0.589474 | false | 2.593857 | false | false | false |
Nablaquabla/sns-analysis | run-am-analysis-v4.py | 1 | 3313 | import os
import time as tm
import sys
# Handles the creation of condor files for a given set of directories
# -----------------------------------------------------------------------------
def createCondorFile(dataDir,outDir,run,day,times):
# Condor submission file name convention: run-day-time.condor
with open('/home/bjs66/CondorFiles/%s-%s.condor'%(run,day),'w') as f:
# Fixed program location'
f.write('Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4\n')
# Arguments passed to the exe:
# Set main run directory, e.g. Run-15-10-02-27-32-23/151002
# Set current time to be analzyed (w/o .zip extension!), e.g. 184502
# Set output directory, eg Output/ Run-15-10-02-27-32-23/151002
f.write('Arguments = \"2 %s $(Process) %s 0\"\n'%(dataDir,outDir))
# Standard cluster universe
f.write('universe = vanilla\n')
f.write('getenv = true\n')
# Program needs at least 300 MB of free memory to hold unzipped data
f.write('request_memory = 300\n')
# Output, error and log name convention: run-day-time.log/out/err
f.write('log = ../../Logs/%s-%s-$(Process).log\n'%(run,day))
f.write('Output = ../../Outs/%s-%s-$(Process).out\n'%(run,day))
f.write('Error = ../../Errs/%s-%s-$(Process).err\n'%(run,day))
# Do not write any emails
f.write('notification = never\n')
f.write('+Department = Physics\n')
f.write('should_transfer_files = NO\n')
# Add single job to queue
f.write('Queue %i'%times)
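# For illustration, the submit file generated above for run='Position-1',
# day='150617' and times=3 would contain (<dataDir>/<outDir> stand in for the
# actual paths passed in):
#
#   Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4
#   Arguments = "2 <dataDir> $(Process) <outDir> 0"
#   universe = vanilla
#   getenv = true
#   request_memory = 300
#   log = ../../Logs/Position-1-150617-$(Process).log
#   Output = ../../Outs/Position-1-150617-$(Process).out
#   Error = ../../Errs/Position-1-150617-$(Process).err
#   notification = never
#   +Department = Physics
#   should_transfer_files = NO
#   Queue 3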
# Main function handling all internals
# -----------------------------------------------------------------------------
def main(r):
# Choose main directory, i.e. ~/csi/beam_on_data/Run-15-06-25-xyz/
mainRunDir = '/var/phy/project/phil/grayson/COHERENT/CsI/'
# Choose output directory, i.e. ~/output/Run-15-06-25-xyz/
mainOutDir = '/var/phy/project/phil/grayson/COHERENT/CsI/bjs-analysis/'
# Choose run to analyze
run = 'Position-%s'%r
subdirs = {}
subdirs[run] = 'am_calibration_1350v'
days_in = {}
days_in[run] = ['150617']
# Iterate through all days in a given run folder, create a condor file and run it.
for day in days_in[run]:
# Prepare paths for further processing
dataRunDir = mainRunDir + '%s/%s/%s'%(subdirs[run],run,day)
outDir = mainOutDir + '%s/%s'%(run,day)
# Create output directory if it does not exist
if not os.path.exists(outDir):
os.makedirs(outDir)
# Get all times within the day folder chosen and prepare condor submit files
tList = [x.split('.')[0] for x in os.listdir(dataRunDir)]
createCondorFile(dataRunDir,outDir,run,day,len(tList))
# createCondorFile(dataRunDir,outDir,run,day,2)
cmd = 'condor_submit /home/bjs66/CondorFiles/%s-%s.condor'%(run,day)
os.system(cmd)
tm.sleep(1)
if __name__ == '__main__':
main(sys.argv[1])
| gpl-3.0 | -7,294,928,957,756,611,000 | 31.165049 | 102 | 0.532146 | false | 3.535752 | false | false | false |
PeytonXu/learn-python | cases/henghan_oa_checkin_checkout/test_fun.py | 1 | 1511 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging.handlers
import configparser
import re
import time
handler = logging.handlers.TimedRotatingFileHandler(filename="test", when='s', interval=2, backupCount=5,
encoding='UTF-8')
handler.suffix = '%Y-%m-%d-%H-%M-%S.log'
handler.extMatch = re.compile(r'^\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}.log$')
formatter = logging.Formatter("%(asctime)s %(message)s")
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
handler2 = logging.handlers.RotatingFileHandler(filename='test.log', maxBytes=1024, backupCount= 3)
handler2.setFormatter(formatter)
# root_logger.removeHandler(handler)
root_logger.addHandler(handler2)
def test():
for i in range(100):
root_logger.info("test" + str(i))
# time.sleep(1)
def test_config():
conf = configparser.ConfigParser()
conf.read('config.ini', encoding='utf-8')
name = conf.get('login', 'name')
passwd = conf.get('login', 'password')
if name == 'name' and passwd == 'password':
name = input("Please input your login name: ")
passwd = input("Please input your login password: ")
conf.set('login', 'name', name)
conf.set('login', 'password', passwd)
with open('config.ini', 'w', encoding='utf-8') as f:
conf.write(f)
print(name)
print(passwd)
if __name__ == '__main__':
test_config()
| mit | 6,081,286,916,630,466,000 | 30.479167 | 105 | 0.632694 | false | 3.410835 | true | false | false |
smlbiobot/SML-Cogs | royalerant/royalerant.py | 1 | 6221 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import re
from collections import defaultdict
import aiohttp
import discord
import peony
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from peony.exceptions import PeonyException
PATH = os.path.join("data", "royalerant")
JSON = os.path.join(PATH, "settings.json")
ROLES = ['Member', 'Guest', 'SUPERMOD', 'MOD', 'Patron', 'Wrapper', 'Showcase', 'Collaborator']
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class RoyaleRant:
"""RoyaleRant Twitter client.
    Users type "!royalerant <message>", which gets broadcast to @RoyaleRant.
"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
if self.settings.get("twitter_api") is None:
self.settings["twitter_api"] = {
"consumer_key": '12345',
"consumer_secret": '12345',
"access_token": '12345',
"access_token_secret": '12345'
}
dataIO.save_json(JSON, self.settings)
def peony_client(self, **kwargs):
"""Return Twitter API instance."""
return peony.PeonyClient(**self.settings['twitter_api'], **kwargs)
@commands.group(pass_context=True)
async def royalerantset(self, ctx):
"""Settings."""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@checks.is_owner()
@royalerantset.command(name="twitterapi", pass_context=True)
async def royalerantset_twitterapi(self,
ctx, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""Twitter API settings"""
if not any([consumer_key, consumer_secret, access_token, access_token_secret]):
await self.bot.send_cmd_help(ctx)
em = discord.Embed(title="RoyaleRant Settings")
for k, v in self.settings['twitter_api'].items():
em.add_field(name=k, value=v)
await self.bot.send_message(ctx.message.author, embed=em)
return
self.settings.update({
"twitter_api": {
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
"access_token": access_token,
"access_token_secret": access_token_secret
}
})
dataIO.save_json(JSON, self.settings)
await self.bot.say("Settings updated")
await self.bot.delete_message(ctx.message)
@commands.has_any_role(*ROLES)
@commands.command(aliases=['rrant'], pass_context=True, no_pm=True)
async def royalerant(self, ctx, *, msg):
"""Post a Tweet from @RoyaleRant."""
clean_content = ctx.message.clean_content
msg = clean_content[clean_content.index(' '):]
async with aiohttp.ClientSession() as session:
client = self.peony_client(session=session)
author = ctx.message.author
author_initials = "".join(re.findall("[a-zA-Z0-9]+", author.display_name))[:2]
attachment_urls = [attachment['url'] for attachment in ctx.message.attachments]
try:
media_ids = []
if len(attachment_urls):
for url in attachment_urls:
media = await client.upload_media(url, chunk_size=2 ** 18, chunked=True)
media_ids.append(media.media_id)
tweet = "[{}] {}".format(author_initials, msg)
resp = await client.api.statuses.update.post(status=tweet, media_ids=media_ids)
except peony.exceptions.PeonyException as e:
await self.bot.say("Error tweeting: {}".format(e.response))
return
url = "https://twitter.com/{0[user][screen_name]}/status/{0[id_str]}".format(resp)
await self.bot.say("Tweeted: <{}>".format(url))
@commands.has_any_role(*ROLES)
@commands.command(aliases=['rrantrt'], pass_context=True, no_pm=True)
async def royalerant_retweet(self, ctx, arg):
"""Retweet by original tweet URL or status ID."""
client = self.peony_client()
status_id = arg
if arg.startswith('http'):
status_id = re.findall("[0-9]+$", arg)[0]
try:
resp = await client.api.statuses.retweet.post(id=status_id)
except PeonyException as e:
await self.bot.say("Error tweeting: {}".format(e.response))
return
url = "https://twitter.com/{0[user][screen_name]}/status/{0[id_str]}".format(resp)
await self.bot.say("Tweeted: <{}>".format(url))
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = RoyaleRant(bot)
bot.add_cog(n)
| mit | -1,134,916,283,987,317,100 | 35.810651 | 96 | 0.620961 | false | 3.825953 | false | false | false |
rupak0577/ginga | ginga/misc/plugins/SaveImage.py | 1 | 17618 | """Save output images local plugin for Ginga."""
from __future__ import absolute_import, division, print_function
from ginga.util.six import itervalues
from ginga.util.six.moves import map
# STDLIB
import os
import shutil
# THIRD-PARTY
from astropy.io import fits
# GINGA
from ginga.GingaPlugin import GlobalPlugin
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga.util.iohelper import shorten_name
try:
from ginga.gw.GwHelp import DirectorySelection
except ImportError: # This is needed for RTD to build
pass
__all__ = []
class SaveImage(GlobalPlugin):
"""Save images to output files.
"""
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(SaveImage, self).__init__(fv)
# Image listing
self.columns = [('Image', 'IMAGE'), ('Mod. Ext.', 'MODEXT')]
# User preferences. Some are just default values and can also be
# changed by GUI.
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_SaveImage')
self.settings.addDefaults(output_directory = '.',
output_suffix = 'ginga',
include_chname = True,
clobber = False,
modified_only = True,
max_mosaic_size = 1e8,
max_rows_for_col_resize = 5000)
self.settings.load(onError='silent')
self.outdir = os.path.abspath(
self.settings.get('output_directory', '.'))
self.suffix = self.settings.get('output_suffix', 'ginga')
self.fv.add_callback('add-image', lambda *args: self.redo())
self.fv.add_callback('remove-image', lambda *args: self.redo())
self.fv.add_callback('add-channel',
lambda *args: self.update_channels())
self.fv.add_callback('delete-channel',
lambda *args: self.update_channels())
self.chnames = []
self.chname = None
self.gui_up = False
def build_gui(self, container):
"""Build GUI such that image list area is maximized."""
vbox, sw, orientation = Widgets.get_oriented_box(container)
msgFont = self.fv.getFont('sansFont', 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(msgFont)
self.tw = tw
fr = Widgets.Expander('Instructions')
fr.set_widget(tw)
container.add_widget(fr, stretch=0)
captions = (('Channel:', 'label', 'Channel Name', 'combobox',
'Modified only', 'checkbutton'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.channel_name.set_tooltip('Channel for locating images to save')
b.channel_name.add_callback('activated', self.select_channel_cb)
mod_only = self.settings.get('modified_only', True)
b.modified_only.set_state(mod_only)
b.modified_only.add_callback('activated', lambda *args: self.redo())
b.modified_only.set_tooltip("Show only locally modified images")
container.add_widget(w, stretch=0)
captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
('Suffix:', 'llabel', 'Suffix', 'entry'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.outdir.set_text(self.outdir)
b.outdir.set_tooltip('Output directory')
b.outdir.add_callback('activated', lambda w: self.set_outdir())
b.browse.set_tooltip('Browse for output directory')
b.browse.add_callback('activated', lambda w: self.browse_outdir())
b.suffix.set_text(self.suffix)
b.suffix.set_tooltip('Suffix to append to filename')
b.suffix.add_callback('activated', lambda w: self.set_suffix())
container.add_widget(w, stretch=0)
self.treeview = Widgets.TreeView(auto_expand=True,
sortable=True,
selection='multiple',
use_alt_row_color=True)
self.treeview.setup_table(self.columns, 1, 'IMAGE')
self.treeview.add_callback('selected', self.toggle_save_cb)
container.add_widget(self.treeview, stretch=1)
captions = (('Status', 'llabel'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.status.set_text('')
b.status.set_tooltip('Status message')
container.add_widget(w, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button('Save')
btn.set_tooltip('Save selected image(s)')
btn.add_callback('activated', lambda w: self.save_images())
btn.set_enabled(False)
btns.add_widget(btn, stretch=0)
self.w.save = btn
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
container.add_widget(btns, stretch=0)
self.gui_up = True
# Initialize directory selection dialog
self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
# Generate initial listing
self.update_channels()
def instructions(self):
self.tw.set_text("""Enter output directory and suffix, if different than default. Left click to select image name to save. Multiple images can be selected using click with Shift or CTRL key. Click Save to save the selected image(s).
Output image will have the filename of <inputname>_<suffix>.fits.""")
def redo(self, *args):
"""Generate listing of images that user can save."""
if not self.gui_up:
return
mod_only = self.w.modified_only.get_state()
treedict = Bunch.caselessDict()
self.treeview.clear()
self.w.status.set_text('')
channel = self.fv.get_channelInfo(self.chname)
if channel is None:
return
# Only list modified images for saving. Scanning Datasrc is enough.
if mod_only:
all_keys = channel.datasrc.keys(sort='alpha')
# List all images in the channel.
else:
all_keys = channel.get_image_names()
# Extract info for listing and saving
for key in all_keys:
iminfo = channel.get_image_info(key)
path = iminfo.get('path')
idx = iminfo.get('idx')
t = iminfo.get('time_modified')
if path is None: # Special handling for generated buffer, eg mosaic
infile = key
is_fits = True
else:
infile = os.path.basename(path)
infile_ext = os.path.splitext(path)[1]
infile_ext = infile_ext.lower()
is_fits = False
if 'fit' in infile_ext:
is_fits = True
# Only list FITS files unless it is Ginga generated buffer
if not is_fits:
continue
# Only list modified buffers
if mod_only and t is None:
continue
# More than one ext modified, append to existing entry
if infile in treedict:
if t is not None:
treedict[infile].extlist.add(idx)
elist = sorted(treedict[infile].extlist)
treedict[infile].MODEXT = ';'.join(
map(self._format_extname, elist))
# Add new entry
else:
if t is None:
s = ''
extlist = set()
else:
s = self._format_extname(idx)
extlist = set([idx])
treedict[infile] = Bunch.Bunch(
IMAGE=infile, MODEXT=s, extlist=extlist, path=path)
self.treeview.set_tree(treedict)
# Resize column widths
n_rows = len(treedict)
if n_rows == 0:
self.w.status.set_text('Nothing available for saving')
elif n_rows < self.settings.get('max_rows_for_col_resize', 5000):
self.treeview.set_optimal_column_widths()
self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
def update_channels(self):
"""Update the GUI to reflect channels and image listing.
"""
if not self.gui_up:
return
self.logger.debug("channel configuration has changed--updating gui")
try:
channel = self.fv.get_channelInfo(self.chname)
except KeyError:
channel = self.fv.get_channelInfo()
if channel is None:
raise ValueError('No channel available')
self.chname = channel.name
w = self.w.channel_name
w.clear()
self.chnames = list(self.fv.get_channelNames())
#self.chnames.sort()
for chname in self.chnames:
w.append_text(chname)
# select the channel that is the current one
try:
i = self.chnames.index(channel.name)
        except ValueError:
i = 0
self.w.channel_name.set_index(i)
# update the image listing
self.redo()
def select_channel_cb(self, w, idx):
self.chname = self.chnames[idx]
self.logger.debug("channel name changed to '%s'" % (self.chname))
self.redo()
def _format_extname(self, ext):
"""Pretty print given extension name and number tuple."""
if ext is None:
outs = ext
else:
outs = '{0},{1}'.format(ext[0], ext[1])
return outs
def browse_outdir(self):
"""Browse for output directory."""
self.dirsel.popup(
'Select directory', self.w.outdir.set_text, initialdir=self.outdir)
self.set_outdir()
def set_outdir(self):
"""Set output directory."""
dirname = self.w.outdir.get_text()
if os.path.isdir(dirname):
self.outdir = dirname
self.logger.debug('Output directory set to {0}'.format(self.outdir))
else:
self.w.outdir.set_text(self.outdir)
self.logger.error('{0} is not a directory'.format(dirname))
def set_suffix(self):
"""Set output suffix."""
self.suffix = self.w.suffix.get_text()
self.logger.debug('Output suffix set to {0}'.format(self.suffix))
def _write_history(self, pfx, hdu, linechar=60, indentchar=2):
"""Write change history to given HDU header.
Limit each HISTORY line to given number of characters.
Subsequent lines of the same history will be indented.
"""
channel = self.fv.get_channelInfo(self.chname)
if channel is None:
return
history_plgname = 'ChangeHistory'
try:
history_obj = self.fv.gpmon.getPlugin(history_plgname)
except:
self.logger.error(
'{0} plugin is not loaded. No HISTORY will be written to '
'{1}.'.format(history_plgname, pfx))
return
if channel.name not in history_obj.name_dict:
self.logger.error(
'{0} channel not found in {1}. No HISTORY will be written to '
'{2}.'.format(channel.name, history_plgname, pfx))
return
file_dict = history_obj.name_dict[channel.name]
chistory = []
ind = ' ' * indentchar
# NOTE: List comprehension too slow!
for key in file_dict:
if not key.startswith(pfx):
continue
for bnch in itervalues(file_dict[key]):
chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP))
# Add each HISTORY prettily into header, sorted by timestamp
for s in sorted(chistory):
for i in range(0, len(s), linechar):
subs = s[i:i+linechar]
if i > 0:
subs = ind + subs.lstrip()
hdu.header.add_history(subs)
def _write_header(self, image, hdu):
"""Write header from image object to given HDU."""
hduhdr = hdu.header
# Ginga image header object for the given extension only.
# Cannot use get_header() because that might also return PRI hdr.
ghdr = image.metadata['header']
for key in ghdr:
# Need this to avoid duplication because COMMENT is a weird field
if key.upper() == 'COMMENT':
continue
bnch = ghdr.get_card(key)
# Insert new keyword
if key not in hduhdr:
hduhdr[key] = (bnch.value, bnch.comment)
# Update existing keyword
elif hduhdr[key] != bnch.value:
hduhdr[key] = bnch.value
def _write_mosaic(self, key, outfile):
"""Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
"""
maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k
channel = self.fv.get_channelInfo(self.chname)
image = channel.datasrc[key]
# Prevent writing very large mosaic
if (image.width * image.height) > maxsize:
s = 'Mosaic too large to be written {0}'.format(image.shape)
self.w.status.set_text(s)
self.logger.error(s)
return
# Insert mosaic data and header into output HDU
hdu = fits.PrimaryHDU(image.get_data())
self._write_header(image, hdu)
# Write history to PRIMARY
self._write_history(key, hdu)
# Write to file
hdu.writeto(outfile, clobber=True)
def _write_mef(self, key, extlist, outfile):
"""Write out regular multi-extension FITS data."""
channel = self.fv.get_channelInfo(self.chname)
with fits.open(outfile, mode='update') as pf:
# Process each modified data extension
for idx in extlist:
k = '{0}[{1}]'.format(key, self._format_extname(idx))
image = channel.datasrc[k]
# Insert data and header into output HDU
pf[idx].data = image.get_data()
self._write_header(image, pf[idx])
# Write history to PRIMARY
self._write_history(key, pf['PRIMARY'])
def toggle_save_cb(self, w, res_dict):
"""Only enable saving if something is selected."""
if len(res_dict) > 0:
self.w.save.set_enabled(True)
else:
self.w.save.set_enabled(False)
def save_images(self):
"""Save selected images.
This uses Astropy FITS package to save the outputs no matter
what user chose to load the images.
"""
res_dict = self.treeview.get_selected()
clobber = self.settings.get('clobber', False)
self.treeview.clear_selection() # Automatically disables Save button
# If user gives empty string, no suffix.
if self.suffix:
sfx = '_' + self.suffix
else:
sfx = ''
# Also include channel name in suffix. This is useful if user likes to
# open the same image in multiple channels.
if self.settings.get('include_chname', True):
sfx += '_' + self.chname
# Process each selected file. Each can have multiple edited extensions.
for infile in res_dict:
f_pfx = os.path.splitext(infile)[0] # prefix
f_ext = '.fits' # Only FITS supported
oname = f_pfx + sfx + f_ext
outfile = os.path.join(self.outdir, oname)
self.w.status.set_text(
'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
shorten_name(oname, 10)))
self.logger.debug(
'Writing out {0} to {1} ...'.format(infile, oname))
if os.path.exists(outfile) and not clobber:
self.logger.error('{0} already exists'.format(outfile))
continue
bnch = res_dict[infile]
if bnch.path is None or not os.path.isfile(bnch.path):
self._write_mosaic(f_pfx, outfile)
else:
shutil.copyfile(bnch.path, outfile)
self._write_mef(f_pfx, bnch.extlist, outfile)
self.logger.info('{0} written'.format(outfile))
self.w.status.set_text('Saving done, see log')
def close(self):
self.fv.stop_global_plugin(str(self))
def start(self):
self.instructions()
self.resume()
def resume(self):
# turn off any mode user may be in
try:
self.modes_off()
except AttributeError:
pass
self.fv.showStatus('See instructions')
def stop(self):
self.gui_up = False
self.fv.showStatus('')
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'saveimage'
# Replace module docstring with config doc for auto insert by Sphinx.
# In the future, if we need the real docstring, we can append instead of
# overwrite.
from ginga.util.toolbox import generate_cfg_example
__doc__ = generate_cfg_example('plugin_SaveImage', package='ginga')
| bsd-3-clause | -5,133,613,292,894,115,000 | 33.887129 | 240 | 0.566353 | false | 3.972492 | false | false | false |
MatthieuDartiailh/eapii | eapii/core/iprops/proxies.py | 1 | 4998 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright 2014 by Eapii Authors, see AUTHORS for more details.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENCE, distributed with this software.
#------------------------------------------------------------------------------
"""Proxies used to provide per instance variation of the IProperty behaviour.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from types import MethodType, FunctionType
from weakref import WeakKeyDictionary
from .i_property import get_chain, set_chain
class _ProxyManager(object):
"""Manager caching the custom class used for proxying the different types
of IProperty.
This class is not meant to be instantiated by user code.
"""
def __init__(self):
super(_ProxyManager, self).__init__()
self._proxy_cache = {}
def make_proxy(self, iprop, instance, kwargs):
"""Build a proxy for the given iprop.
For each type of IProperty a new mixin Proxy type is created by mixing
        the IPropertyProxy class and the iprop class. This class is then cached
        and used to create the proxy instance.
Parameters
----------
iprop : IProperty
Instance whose behaviour should be altered by the use of a proxy.
instance : HasIProps
Object for which the IProperty should have a peculiar behaviour.
attrs : dict
            Dict containing the attributes whose values should be overridden.
"""
iprop_class = type(iprop)
if iprop_class not in self._proxy_cache:
# Python 2 compatibility cast
proxy = type(str(iprop_class.__name__+'Proxy'),
(IPropertyProxy, iprop_class), {})
self._proxy_cache[iprop_class] = proxy
return self._proxy_cache[iprop_class](iprop, instance, kwargs)
make_proxy = _ProxyManager().make_proxy
"""Build a proxy for the given iprop.
This uses the singleton _ProxyManager instance to handle the caching of the
proxy classes.
"""
class IPropertyProxy(object):
"""Generic proxy for IProperty, used to get per HasIProps instance
behaviour.
Parameters
----------
iprop : IProperty
Instance whose behaviour should be altered by the use of a proxy.
instance : HasIProps
Object for which the IProperty should have a peculiar behaviour.
attrs : dict
        Dict containing the attributes whose values should be overridden.
"""
def __init__(self, iprop, instance, attrs):
self._iprop = iprop
# This is created now to avoid creating lots of those for nothing.
if not iprop._proxies:
iprop._proxies = WeakKeyDictionary()
# First get all the instance attr of the IProperty to preserve the
# special behaviours imparted by the HasIProps object.
aux = iprop.__dict__.copy()
aux.update(attrs)
self.patch(aux)
iprop._proxies[instance] = self
def patch(self, attrs):
"""Update the proxy with new values.
Parameters
----------
attrs : dict
New values to give to the proxy attributes.
"""
for k, v in attrs.items():
# Make sure the instance method are correctly redirected to the
# proxy and the functions are bound to the proxy.
if isinstance(v, MethodType):
v = MethodType(v.__func__, self)
elif isinstance(v, FunctionType):
v = MethodType(v, self)
setattr(self, k, v)
def unpatch(self, attrs):
"""Reverse the proxy behaviour to the original IProperty behaviour.
Parameters
----------
attrs : iterable
            Names of the attrs whose values should once again match those of
            the IProperty.
"""
i_dir = self._iprop.__dict__
for attr in attrs:
if attr in i_dir:
v = i_dir[attr]
if isinstance(v, MethodType):
v = MethodType(v.__func__, self)
setattr(self, attr, getattr(self._iprop, attr))
else:
delattr(self, attr)
@property
def obsolete(self):
"""Boolean indicating whether the proxy differ from the original.
"""
ip_dict = self._iprop.__dict__
test_meth = MethodType(lambda: None, object())
for k, v in self.__dict__.items():
if isinstance(v, MethodType):
if v.__func__ != ip_dict.get(k, test_meth).__func__:
return False
elif k not in ('_iprop', 'instance'):
if k not in ip_dict or v != ip_dict[k]:
return False
return True
proxy_get = get_chain
proxy_set = set_chain
| bsd-3-clause | -3,187,524,225,825,217,500 | 31.666667 | 79 | 0.576631 | false | 4.606452 | false | false | false |
victronenergy/dbus-fronius | test/src/fronius_sim/app.py | 1 | 5868 | #!/usr/bin/python -u
import datetime
import modbus_tcp_sim
import os
import sys
from twisted.internet import reactor
from fronius_sim import FroniusSim
app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
bottle_dir = os.path.normpath(os.path.join(app_dir, '..', '..', 'software', 'ext', 'bottle'))
sys.path.extend([bottle_dir, app_dir])
import bottle
application = bottle.default_app()
inverters = [
FroniusSim(id='1', device_type=232, unique_id='1234', custom_name='SouthWest', has_3phases=True, modbus_enabled=False),
FroniusSim(id='2', device_type=224, unique_id='4321', custom_name='', has_3phases=False, modbus_enabled=False),
FroniusSim(id='3', device_type=208, unique_id='1111', custom_name='Tmp', has_3phases=False, modbus_enabled=True)
]
sma_inverter = FroniusSim(id='126', device_type=None, unique_id='10988912', custom_name='SMA', has_3phases=False, modbus_enabled=True)
@bottle.route('/solar_api/GetAPIVersion.cgi')
def get_api_version():
return dict(APIVersion=1, BaseUrl='solar_api/v1/')
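# Example request against the simulator (assuming it is running locally on the
# port configured at the bottom of this file, 8080; the host name is
# illustrative only):
#   curl 'http://localhost:8080/solar_api/GetAPIVersion.cgi'
#   -> {"APIVersion": 1, "BaseUrl": "solar_api/v1/"}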
@bottle.route('/solar_api/v1/GetInverterInfo.cgi')
def get_inverter_info():
return {
'Head': create_head({}),
'Body': {
'Data': dict((x.id, {
'DT': x.device_type,
'PVPower': 5000,
'Show': 1,
'UniqueID': x.unique_id,
'ErrorCode': 0,
'StatusCode': 7,
'CustomName': x.custom_name })
for x in inverters)}}
@bottle.route('/solar_api/v1/GetInverterRealtimeData.cgi')
def get_inverter_realtime_data():
scope = bottle.request.query.Scope
device_id = bottle.request.query.DeviceId
data_collection = bottle.request.query.DataCollection
if scope == 'Device':
try:
inverter = next((i for i in inverters if i.id == device_id))
except StopIteration:
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection},
error_code=1,
error_message='device not found')}
if data_collection == 'CumulationInverterData':
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'PAC': {'Value': 3373, 'Unit': 'W'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': 45000, 'Unit': 'Wh'},
'DeviceStatus': {
'StatusCode': 7,
'MgmtTimerRemainingTime': -1,
'ErrorCode': 0,
'LEDCode': 0,
'LEDColor': 2,
'LEDState': 0,
'StateToReset': False}}}}
if data_collection == 'CommonInverterData':
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'PAC': {'Value': inverter.main.power, 'Unit': 'W'},
'SAC': {'Value': 3413, 'Unit': 'VA'},
'IAC': {'Value': inverter.main.current, 'Unit': 'Hz'},
'UAC': {'Value': inverter.main.voltage, 'Unit': 'V'},
'FAC': {'Value': 50, 'Unit': 'Hz'},
'IDC': {'Value': 8.2, 'Unit': 'A'},
'UDC': {'Value': 426, 'Unit': 'V'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': inverter.main.energy, 'Unit': 'Wh'},
'DeviceStatus': {
'StatusCode': 7,
'MgmtTimerRemainingTime': -1,
'ErrorCode': 0,
'LEDCode': 0,
'LEDColor': 2,
'LEDState': 0,
'StateToReset': False}}}}
if data_collection == '3PInverterData':
if not inverter.has_3phases:
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection},
error_code=2,
error_message='not supported')}
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'IAC_L1': {'Value': inverter.l1.current, 'Unit': 'A'},
'IAC_L2': {'Value': inverter.l2.current, 'Unit': 'A'},
'IAC_L3': {'Value': inverter.l3.current, 'Unit': 'A'},
'UAC_L1': {'Value': inverter.l1.voltage, 'Unit': 'V'},
'UAC_L2': {'Value': inverter.l2.voltage, 'Unit': 'V'},
'UAC_L3': {'Value': inverter.l3.voltage, 'Unit': 'V'},
'T_AMBIENT': {'Value': 27, 'Unit': 'V'},
'ROTATION_SPEED_FAN_FL': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_FR': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_BL': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_BR': {'Value': 83, 'Unit': 'RPM'}}}}
elif scope == 'System':
return {
'Head': create_head({'Scope': scope}),
'Body': {
'Data': {
'PAC': {'Value': 3373, 'Unit': 'W'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': 45000, 'Unit': 'Wh'}}}}
else:
raise Exception('Unknown scope')
def create_head(args, error_code=0, error_message=''):
return {
'RequestArguments': args,
'Status': {
"Code": error_code,
"Reason": error_message,
"UserMessage": ""},
'Timestamp': datetime.datetime.now().isoformat()}
class TwistedServer(bottle.ServerAdapter):
def start(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool(minthreads=0, maxthreads=1)
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
# reactor.run()
if __name__ == '__main__':
# host='0.0.0.0': accept connections from all sources
server = TwistedServer(host='0.0.0.0', port=8080, debug=True)
server.start(application)
modbus_tcp_sim.start_server(inverters + [sma_inverter])
reactor.run()
| mit | -4,941,800,377,040,512,000 | 31.966292 | 134 | 0.605658 | false | 2.794286 | false | false | false |
cloud9ers/j25framework | j25/loaders/ControllerLoader.py | 1 | 2345 | from j25.web import Controller
import inspect
import logging
import pkgutil
import traceback
logger = logging.getLogger("ControllerLoader")
class AutoControllerLoader(object):
@classmethod
def load(cls, app_name, router, dispatcher, package_or_packages):
if not isinstance(package_or_packages, list):
package_or_packages = [package_or_packages]
total = 0
logger.debug("Scanning package(s) %s for controllers.", str(package_or_packages))
controllers = {}
for base_package in package_or_packages:
for _, modname, ispkg in pkgutil.iter_modules(base_package.__path__):
if ispkg == False:
module = __import__(base_package.__name__ + "." + modname, fromlist="t")
for class_name in dir(module):
klass = getattr(module, class_name)
if inspect.isclass(klass):
if klass is Controller:
continue
if not issubclass(klass, Controller):
logger.debug("Class %s was found in '%s' package but is not a subclass of j25.web.Controller -- ignoring...", klass.__name__, base_package.__path__)
continue
# load it
try:
# dispatcher.registerServiceFactory(klass.PATH, klass.BASE_SERVICE.createFactory(klass.NAME, config, klass))
controllers[klass.__name__] = klass
logger.debug("Controller %s is loaded.", klass.__name__)
total += 1
except:
logger.error("Failed to load controller %s:%s", klass.__name__, traceback.format_exc())
if controllers:
# app_package = importlib.import_module(app_name)
app_package = __import__(app_name, fromlist="t")
if not dispatcher.register_app(app_package, controllers, router):
logger.error("Couldn't register application %s", app_name)
return 0
if total > 0:
logger.info("%s controller(s) are/is loaded successfully from app (%s)", total, app_name)
return total | lgpl-3.0 | -2,055,934,310,574,117,400 | 51.133333 | 180 | 0.53049 | false | 5 | false | false | false |
nejstastnejsistene/gardendb | gardendb/postgres.py | 1 | 5218 | import binascii
import threading
import psycopg2
from psycopg2.extensions import new_type, register_type
try:
    import cPickle as pickle
except ImportError:
import pickle
from . import BaseGarden
def adapt_bytea(obj):
'''Adapt an object to a bytea by pickling.'''
if isinstance(obj, str):
# Convert strings to unicodes when possible
try:
obj = unicode(obj)
except UnicodeDecodeError:
pass
p = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
return psycopg2.Binary(p)
def cast_bytea(value, cur):
'''Convert a bytea to a python value by unpickling.'''
# Decode the bytea using the original typecast object.
value = psycopg2.BINARY(value, cur)
try:
return pickle.loads(value)
except pickle.UnpicklingError:
mesg = 'unable to unpickle buffer: {!r}'.format(value)
raise psycopg2.InterfaceError(mesg)
# Register cast_bytea with psycopg2.
PICKLE = new_type(psycopg2.BINARY.values, 'PICKLE', cast_bytea)
register_type(PICKLE)
def dummy_pool(conn):
class DummyPool(object):
def getconn(self):
return conn
def putconn(self, conn):
pass
return DummyPool()
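# Minimal usage sketch (assumes a reachable PostgreSQL database; the DSN, table
# name and stored value are illustrative only):
#   import psycopg2
#   conn = psycopg2.connect('dbname=gardendb_test')
#   garden = PgGarden('example_garden', dummy_pool(conn))
#   garden['answer'] = {'value': 42}   # pickled into the bytea `value` column
#   garden['answer']                   # -> {'value': 42}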
class PgGarden(BaseGarden):
table_def_fmt = '''
CREATE TABLE {name}
( key bytea NOT NULL UNIQUE
, value bytea NOT NULL
, mtime timestamp NOT NULL DEFAULT localtimestamp
)
'''
replace_def_fmt = '''
CREATE RULE "replace_{name}" AS
ON INSERT TO "{name}"
WHERE
EXISTS(SELECT 1 FROM {name} WHERE key=NEW.key)
DO INSTEAD
UPDATE {name}
SET value = NEW.value, mtime = localtimestamp
WHERE key = NEW.key
'''
select_all_cmd_fmt = 'SELECT key, value FROM {name}'
select_cmd_fmt = 'SELECT value FROM {name} WHERE key = %s'
insert_cmd_fmt = 'INSERT INTO {name} (key, value) VALUES '
delete_cmd_fmt = 'DELETE FROM {name} WHERE key = %s'
def __init__(self, name, pool, cls=None):
BaseGarden.__init__(self, cls)
self.name = name
self.pool = pool
# Format the various sql commands that we use.
for name, value in PgGarden.__dict__.items():
if name.endswith('_fmt'):
setattr(self, name[:-4], value.format(name=self.name))
conn = self.pool.getconn()
# Create the table and replacement rule if not already defined.
with conn.cursor() as cur:
cur.execute('''
SELECT 1 FROM pg_tables WHERE tablename = '{name}'
'''.format(name=self.name))
if not cur.fetchone():
cur.execute(self.table_def)
cur.execute('''
SELECT 1 FROM pg_rules WHERE rulename = 'replace_{name}'
'''.format(name=self.name))
if not cur.fetchone():
cur.execute(self.replace_def)
conn.commit()
self.pool.putconn(conn)
def lock(garden, key, default=None):
lock = threading.Lock()
class Ctx(object):
def __enter__(self):
lock.acquire()
self.value = garden.get(key, default)
return self
def __exit__(self, *args):
garden[key] = self.value
lock.release()
return Ctx()
def getall(self):
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.select_all_cmd)
pairs = cur.fetchall()
self.pool.putconn(conn)
return {k: self.unpack_state(v) for k, v in pairs}
def putmany(self, dct):
'''Place/replace many cucumbers into the Garden.'''
if not dct:
# Silently ignore requests to put nothing.
return
# Pack values.
dct = {k: self.pack_state(v) for k, v in dct.items()}
# Calculate the SQL command format.
cmd = self.insert_cmd + ', '.join(['(%s, %s)'] * len(dct))
# Generate the SQL parameters.
args = []
for pair in dct.items():
args += map(adapt_bytea, pair)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(cmd, args)
conn.commit()
self.pool.putconn(conn)
def __getitem__(self, key):
'''Retrieve a cucumber from the Garden.'''
_key = adapt_bytea(key)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.select_cmd, (_key,))
value = cur.fetchone()
self.pool.putconn(conn)
if value is None:
raise KeyError, key
return self.unpack_state(value[0])
def __setitem__(self, key, value):
'''Place/replace a cucumber into the Garden.'''
self.putmany({key: value})
def __delitem__(self, key):
'''Delete a cucumber from the Garden.
If the key does not exist, no exception is raised.'
'''
key = adapt_bytea(key)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.delete_cmd, (key,))
conn.commit()
self.pool.putconn(conn)
| bsd-3-clause | 2,349,762,603,324,349,000 | 28.150838 | 72 | 0.561518 | false | 3.873794 | false | false | false |
zhaipro/AlphaLineupPuzzle | AlphaLineupPuzzle/models/__init__.py | 1 | 1118 | # coding: utf-8
import numpy as np
import chainer.links as L
import chainer.functions as F
from chainer import serializers, Variable
import policy
from AlphaLineupPuzzle.preprocessing import preprocessing
def load_policy_network(name):
in_dim = preprocessing.state_to_tensor.features
out_dim = preprocessing.action_to_tensor.features
model = L.Classifier(policy.Policy(in_dim, out_dim))
serializers.load_npz('%s.model.npz' % name, model)
def policy_network(gs):
state = preprocessing.state_to_tensor(gs)
Y = model.predictor([state]).data[0]
actions = []
for idx, pos in gs.legal_moves():
action = preprocessing.action_to_tensor(gs, idx, pos, gs.size)
actions.append(action)
        # Ensure an empty int-typed numpy array is built even when actions is an empty list
actions = np.array(actions, dtype=np.int32)
Y = Y[actions]
Y = Y.reshape((1, Y.size))
Y = Variable(Y)
P = F.softmax(Y).data[0]
for idx, pos in enumerate(gs.legal_moves()):
yield pos, P[idx]
return policy_network
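# Usage sketch (assumes a trained 'policy.model.npz' snapshot on disk and a
# game state `gs` from the game engine; the names are illustrative):
#   policy = load_policy_network('policy')
#   for move, prob in policy(gs):
#       print(move, prob)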
| mit | 446,345,322,601,292,350 | 28.135135 | 74 | 0.648423 | false | 3.358255 | false | false | false |
estaban/pyload | module/plugins/hoster/YoupornCom.py | 1 | 1531 | # -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
class YoupornCom(Hoster):
__name__ = "YoupornCom"
__type__ = "hoster"
__pattern__ = r'http://(?:www\.)?youporn\.com/watch/.+'
__version__ = "0.2"
__description__ = """Youporn.com hoster plugin"""
__author_name__ = "willnix"
__author_mail__ = "[email protected]"
def process(self, pyfile):
self.pyfile = pyfile
if not self.file_exists():
self.offline()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
url = self.pyfile.url
self.html = self.load(url, post={"user_choice": "Enter"}, cookies=False)
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if not self.html:
self.download_html()
return re.search(r'(http://download\.youporn\.com/download/\d+\?save=1)">', self.html).group(1)
def get_file_name(self):
if not self.html:
self.download_html()
file_name_pattern = r"<title>(.*) - Free Porn Videos - YouPorn</title>"
return re.search(file_name_pattern, self.html).group(1).replace("&", "&").replace("/", "") + '.flv'
def file_exists(self):
""" returns True or False
"""
if not self.html:
self.download_html()
if re.search(r"(.*invalid video_id.*)", self.html) is not None:
return False
else:
return True
| gpl-3.0 | 1,178,821,542,456,942,600 | 28.442308 | 111 | 0.556499 | false | 3.448198 | false | false | false |
ExCiteS/geokey-airquality | geokey_airquality/serializers.py | 1 | 10302 | """All serializers for the extension."""
import json
from django.core.exceptions import ValidationError
from django.contrib.gis.geos import Point
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from rest_framework.serializers import BaseSerializer
from geokey_airquality.models import AirQualityLocation, AirQualityMeasurement
class LocationSerializer(BaseSerializer):
"""
Serialiser for geokey_airquality.models.AirQualityLocation.
"""
def is_valid(self, raise_exception=False):
"""
Checks if location is valid.
Parameter
---------
raise_exception : Boolean
            Indicates if an exception should be raised if the data is invalid.
If set to false, the method will return False rather than raising
an exception.
Returns
-------
Boolean
Indicating if data is valid.
Raises
------
ValidationError
If data is invalid. Exception is raised only when raise_exception
is set to True.
"""
self._errors = {}
self._validated_data = {}
# Validate name
name = self.initial_data.get('name')
try:
if name is not None:
self._validated_data['name'] = name
else:
raise ValidationError('Name must be specified.')
except ValidationError, error:
self._errors['name'] = error
# Validate geometry
geometry = self.initial_data.get('geometry')
try:
if geometry.get('type') == 'Point':
coordinates = geometry.get('coordinates')
if coordinates is not None:
x = coordinates[0]
y = coordinates[1]
if x is not None and y is not None:
self._validated_data['geometry'] = Point(x, y)
else:
raise ValidationError('Coordinates are incorrect.')
else:
raise ValidationError('Coordinates are not set.')
else:
raise ValidationError('Only points can be used.')
except ValidationError, error:
self._errors['geometry'] = error
# Validate properties
properties = self.initial_data.get('properties') or {}
self._validated_data['properties'] = {}
if properties is not None:
for key, value in properties.iteritems():
if key in ['height', 'distance', 'characteristics']:
self._validated_data['properties'][key] = value
# Raise the exception
if self._errors and raise_exception:
raise ValidationError(self._errors)
return not bool(self._errors)
def create(self, validated_data):
"""
Creates a new location and returns the instance.
Parameter
---------
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityLocation
The instance created.
"""
data = self.context.get('data')
created = data.get('created')
called = data.get('called')
now = timezone.now()
if created is None or called is None:
created = now
else:
timedelta = parse_datetime(called) - parse_datetime(created)
created = now - timedelta
self.instance = AirQualityLocation.objects.create(
name=validated_data.get('name'),
geometry=validated_data.get('geometry'),
creator=self.context.get('user'),
created=created,
properties=validated_data.get('properties')
)
return self.instance
def update(self, instance, validated_data):
"""
Updates an existing location and returns the instance.
Parameter
---------
instance : geokey_airquality.models.AirQualityLocation
The instance to be updated.
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityLocation
The instance updated.
"""
instance.name = validated_data.get('name')
instance.geometry = validated_data.get('geometry')
instance.properties = validated_data.get('properties')
instance.save()
return instance
def to_representation(self, object):
"""
Returns the native representation of a location.
Parameter
---------
object : geokey_airquality.models.AirQualityLocation
The instance that is serialised.
Returns
-------
dict
            Native representation of the location.
"""
measurement_serializer = MeasurementSerializer(
object.measurements.all(),
many=True,
context=self.context
)
return {
'type': 'Feature',
'geometry': json.loads(object.geometry.geojson),
'id': object.id,
'name': object.name,
'created': str(object.created),
'properties': object.properties,
'measurements': measurement_serializer.data
}
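# Usage sketch for LocationSerializer (assumes a GeoKey user and request data
# carrying the 'created'/'called' timestamps; all values are illustrative):
#   serializer = LocationSerializer(
#       data={'name': 'Kiosk 1',
#             'geometry': {'type': 'Point', 'coordinates': [-0.134, 51.524]},
#             'properties': {'height': 1.5}},
#       context={'user': request.user, 'data': request.POST})
#   if serializer.is_valid():
#       location = serializer.save()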
class MeasurementSerializer(BaseSerializer):
"""
Serialiser for geokey_airquality.models.AirQualityMeasurement.
"""
def is_valid(self, raise_exception=False):
"""
Checks if measurement is valid.
Parameter
---------
raise_exception : Boolean
            Indicates if an exception should be raised if the data is invalid.
If set to false, the method will return False rather than raising
an exception.
Returns
-------
Boolean
Indicating if data is valid.
Raises
------
ValidationError
If data is invalid. Exception is raised only when raise_exception
is set to True.
"""
self._errors = {}
self._validated_data = {}
# Validate barcode
barcode = self.initial_data.get('barcode')
try:
if barcode is not None:
self._validated_data['barcode'] = barcode
else:
raise ValidationError('Barcode must be specified.')
except ValidationError, error:
self._errors['barcode'] = error
# Validate properties
properties = self.initial_data.get('properties') or {}
self._validated_data['properties'] = {}
if properties is not None:
for key, value in properties.iteritems():
if key in [
'results',
'additional_details',
'made_by_students'
]:
self._validated_data['properties'][key] = value
# Raise the exception
if self._errors and raise_exception:
raise ValidationError(self._errors)
return not bool(self._errors)
def create(self, validated_data):
"""
Creates a new measurement and returns the instance.
Parameter
---------
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityMeasurement
The instance created.
"""
data = self.context.get('data')
started = data.get('started', None)
finished = data.get('finished', None)
called = data.get('called', None)
now = timezone.now()
if started is None or called is None:
started = now
else:
timedelta = parse_datetime(called) - parse_datetime(started)
started = now - timedelta
if finished is not None:
if called is None:
finished = now
else:
timedelta = parse_datetime(called) - parse_datetime(finished)
finished = now - timedelta
self.instance = AirQualityMeasurement.objects.create(
location=self.context.get('location'),
barcode=validated_data.get('barcode'),
creator=self.context.get('user'),
started=started,
finished=finished,
properties=validated_data.get('properties')
)
return self.instance
def update(self, instance, validated_data):
"""
Updates an existing measurement and returns the instance.
Parameter
---------
instance : geokey_airquality.models.AirQualityMeasurement
The instance to be updated.
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityMeasurement
The instance updated.
"""
data = self.context.get('data')
finished = data.get('finished', None)
called = data.get('called', None)
now = timezone.now()
if finished is not None:
if called is None:
finished = now
else:
timedelta = parse_datetime(called) - parse_datetime(finished)
finished = now - timedelta
instance.finished = finished
instance.barcode = validated_data.get('barcode')
instance.properties = validated_data.get('properties')
instance.save()
return instance
def to_representation(self, object):
"""
Returns the native representation of a measurement.
Parameter
---------
object : geokey_airquality.models.AirQualityMeasurement
The instance that is serialised.
Returns
-------
dict
            Native representation of the measurement.
"""
finished = object.finished or None
if finished is not None:
finished = str(finished)
return {
'id': object.id,
'barcode': object.barcode,
'started': str(object.started),
'finished': finished,
'properties': object.properties
}
| mit | 3,727,138,254,284,817,400 | 28.019718 | 78 | 0.553582 | false | 5.158738 | false | false | false |
arnau126/django-mysql | src/django_mysql/forms.py | 1 | 9665 | import json
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.text import format_lazy
from django.utils.translation import ugettext_lazy as _
from django_mysql.validators import (
ListMaxLengthValidator,
ListMinLengthValidator,
SetMaxLengthValidator,
SetMinLengthValidator,
)
class SimpleListField(forms.CharField):
default_error_messages = {
"item_n_invalid": _("Item %(nth)s in the list did not validate: "),
"no_double_commas": _("No leading, trailing, or double commas."),
}
def __init__(self, base_field, max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
super().__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(ListMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(ListMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, list):
return ",".join(str(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = []
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(
ValidationError(
self.error_messages["no_double_commas"], code="no_double_commas"
)
)
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
error.message,
),
code="item_n_invalid",
params={"nth": i},
)
)
values.append(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
message,
),
code="item_invalid",
params={"nth": i},
)
)
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
message,
),
code="item_n_invalid",
params={"nth": i},
)
)
if errors:
raise ValidationError(errors)
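# Usage sketch for SimpleListField in a plain Django form (the field names are
# illustrative):
#   class ShoppingListForm(forms.Form):
#       items = SimpleListField(forms.CharField(max_length=32), max_length=10)
#   Submitting 'milk,eggs,bread' for `items` cleans to ['milk', 'eggs', 'bread'].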
class SimpleSetField(forms.CharField):
empty_values = list(validators.EMPTY_VALUES) + [set()]
default_error_messages = {
"item_invalid": _('Item "%(item)s" in the set did not validate: '),
"item_n_invalid": _("Item %(nth)s in the set did not validate: "),
"no_double_commas": _("No leading, trailing, or double commas."),
"no_duplicates": _(
"Duplicates are not supported. " "'%(item)s' appears twice or more."
),
}
def __init__(self, base_field, max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
super().__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(SetMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(SetMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, set):
return ",".join(str(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = set()
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(
ValidationError(
self.error_messages["no_double_commas"], code="no_double_commas"
)
)
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
error.message,
),
code="item_n_invalid",
params={"nth": i},
)
)
if value in values:
errors.append(
ValidationError(
self.error_messages["no_duplicates"],
code="no_duplicates",
params={"item": item},
)
)
else:
values.add(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for item in value:
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}", self.error_messages["item_invalid"], message
),
code="item_invalid",
params={"item": item},
)
)
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for item in value:
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}", self.error_messages["item_invalid"], message
),
code="item_invalid",
params={"item": item},
)
)
if errors:
raise ValidationError(errors)
class InvalidJSONInput(str):
pass
class JSONString(str):
pass
class JSONField(forms.CharField):
default_error_messages = {"invalid": _("'%(value)s' value must be valid JSON.")}
widget = forms.Textarea
def to_python(self, value):
if self.disabled:
return value
if value in self.empty_values:
return None
elif isinstance(value, (list, dict, int, float, JSONString)):
return value
try:
converted = json.loads(value)
except ValueError:
raise forms.ValidationError(
self.error_messages["invalid"], code="invalid", params={"value": value}
)
if isinstance(converted, str):
return JSONString(converted)
else:
return converted
def bound_data(self, data, initial):
if self.disabled:
return initial
try:
return json.loads(data)
except ValueError:
return InvalidJSONInput(data)
def prepare_value(self, value):
if isinstance(value, InvalidJSONInput):
return value
return json.dumps(value)
| bsd-3-clause | -4,095,754,749,629,425,700 | 32.793706 | 88 | 0.455354 | false | 5.213053 | false | false | false |
takeflight/wagtailannotatedimage | wagtailannotatedimage/edit_handlers.py | 1 | 2726 | import json
from django import forms
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from wagtail.wagtailadmin.edit_handlers import (BaseCompositeEditHandler,
FieldPanel, widget_with_script)
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailimages.widgets import AdminImageChooser
from .forms import BaseAnnotationForm
class HiddenJsonInput(forms.HiddenInput):
def render(self, name, value, attrs=None):
if value is None or value == '{}':
value = '{}'
elif isinstance(value, dict):
value = json.dumps(value)
return super(HiddenJsonInput, self).render(name, value, attrs)
class BaseAnnotatedImagePanel(BaseCompositeEditHandler):
template = 'annotated_image.html'
js_template = 'annotated_image.js'
@classmethod
def widget_overrides(cls):
return {
cls.children[0].field_name: AdminImageChooser,
cls.children[1].field_name: HiddenJsonInput}
def __init__(self, instance=None, form=None):
super(BaseAnnotatedImagePanel, self).__init__(instance=instance,
form=form)
self.image_field = self.children[0]
self.image_field_id = self.image_field.bound_field.auto_id
self.annotations_field = self.children[1]
def render(self):
html = mark_safe(render_to_string(self.template, {
'panel': self,
'image_field_id': self.image_field_id, # Used as js container id
'image_field': self.image_field,
'annotations_field': self.annotations_field,
'annotation_form': self.annotation_form.as_p(),
'heading': self.heading,
}))
js = self.render_js_init()
return widget_with_script(html, js)
def render_js_init(self):
return mark_safe(render_to_string(self.js_template, {
'image_field_id': self.image_field_id,
}))
class AnnotatedImagePanel(object):
def __init__(self, image_field, annotations_field,
annotation_form=BaseAnnotationForm(), heading=''):
self.children = [
ImageChooserPanel(image_field), FieldPanel(annotations_field)]
self.heading = heading
self.annotation_form = annotation_form
def bind_to_model(self, model):
return type(str('_AnnotatedImagePanel'), (BaseAnnotatedImagePanel,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'heading': self.heading,
'annotation_form': self.annotation_form
})
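# Usage sketch (inside a Wagtail Page model that defines `image` and
# `annotations` fields; names are illustrative):
#   content_panels = Page.content_panels + [
#       AnnotatedImagePanel('image', 'annotations', heading='Annotated image'),
#   ]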
| bsd-3-clause | -207,208,430,859,362,340 | 36.861111 | 80 | 0.628393 | false | 4.074738 | false | false | false |
ultrabug/py3status | py3status/modules/wanda_the_fish.py | 1 | 5557 | """
Display a fortune-telling, swimming fish.
Wanda has no use what-so-ever. It only takes up disk space and compilation time,
and if loaded, it also takes up precious bar space, memory, and cpu cycles.
Anybody found using it should be promptly sent for a psychiatric evaluation.
Configuration parameters:
cache_timeout: refresh interval for this module (default 0)
format: display format for this module
(default '{nomotion}[{fortune} ]{wanda}{motion}')
fortune_timeout: refresh interval for fortune (default 60)
Format placeholders:
{fortune} one of many aphorisms or vague prophecies
{wanda} name of one of the most commonly kept freshwater aquarium fish
{motion} biologically propelled motion through a liquid medium
{nomotion} opposite behavior of motion to prevent modules from shifting
Optional:
fortune-mod: the fortune cookie program from bsd games
Examples:
```
# disable motions when not in use
wanda_the_fish {
format = '[\\?if=fortune {nomotion}][{fortune} ]'
format += '{wanda}[\\?if=fortune {motion}]'
}
# no updates, no motions, yes fortunes, you click
wanda_the_fish {
format = '[{fortune} ]{wanda}'
cache_timeout = -1
}
# wanda moves, fortunes stays
wanda_the_fish {
format = '[{fortune} ]{nomotion}{wanda}{motion}'
}
# wanda is swimming too fast, slow down wanda
wanda_the_fish {
cache_timeout = 2
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'innovate, v.: To annoy people.'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
idle
[
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>3', 'color': '#ff8c00'},
]
py3status
[
{'full_text': 'py3status is so cool!'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
"""
from time import time
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 0
format = "{nomotion}[{fortune} ]{wanda}{motion}"
fortune_timeout = 60
def post_config_hook(self):
body = (
r"[\?color=orange&show <"
r"[\?color=lightblue&show º]"
r"[\?color=darkorange&show ,]))"
r"[\?color=darkorange&show ))>{}]]"
)
wanda = [body.format(fin) for fin in ("<", ">", "<", "3")]
self.wanda = [self.py3.safe_format(x) for x in wanda]
self.wanda_length = len(self.wanda)
self.index = 0
self.fortune_command = ["fortune", "-as"]
self.fortune = self.py3.storage_get("fortune") or None
self.toggled = self.py3.storage_get("toggled") or False
self.motions = {"motion": " ", "nomotion": ""}
# deal with {new,old} timeout between storage
fortune_timeout = self.py3.storage_get("fortune_timeout")
timeout = None
if self.fortune_timeout != fortune_timeout:
timeout = time() + self.fortune_timeout
self.time = (
timeout or self.py3.storage_get("time") or (time() + self.fortune_timeout)
)
def _set_fortune(self, state=None, new=False):
if not self.fortune_command:
return
if new:
try:
fortune_data = self.py3.command_output(self.fortune_command)
except self.py3.CommandError:
self.fortune = ""
self.fortune_command = None
else:
self.fortune = " ".join(fortune_data.split())
self.time = time() + self.fortune_timeout
elif state is None:
if self.toggled and time() >= self.time:
self._set_fortune(new=True)
else:
self.toggled = state
if state:
self._set_fortune(new=True)
else:
self.fortune = None
def _set_motion(self):
for k in self.motions:
self.motions[k] = "" if self.motions[k] else " "
def _set_wanda(self):
self.index += 1
if self.index >= self.wanda_length:
self.index = 0
def wanda_the_fish(self):
self._set_fortune()
self._set_motion()
self._set_wanda()
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format,
{
"fortune": self.fortune,
"motion": self.motions["motion"],
"nomotion": self.motions["nomotion"],
"wanda": self.wanda[self.index],
},
),
}
def kill(self):
self.py3.storage_set("toggled", self.toggled)
self.py3.storage_set("fortune", self.fortune)
self.py3.storage_set("fortune_timeout", self.fortune_timeout)
self.py3.storage_set("time", self.time)
def on_click(self, event):
if not self.fortune_command:
return
self._set_fortune(not self.toggled)
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | -7,311,590,463,705,585,000 | 29.032432 | 86 | 0.559935 | false | 3.353048 | false | false | false |
bmccary/csvu | csvu/tail.py | 1 | 1800 |
import traceback
from csvu import (
reader_make,
writer_make,
)
from csvu.cli import (
default_arg_parser,
positive_int,
)
def cli_arg_parser():
description = 'CSVU Tail is like GNU Tail, but for CSV files.'
parser = default_arg_parser(
description=description,
file0='input',
file1='output',
dialect0='input',
dialect1='output',
headless=True,
)
parser.add_argument(
'count',
type=positive_int,
help='''Return the last :count: rows.'''
)
return parser
def filter_g(row_g, count, debug=False):
L = list(row_g)
for row in L[-count:]:
yield row
def cli():
parser = cli_arg_parser()
args = parser.parse_args()
try:
reader_d = reader_make(
file_or_path=args.file0,
dialect=args.dialect0,
headless=args.headless,
)
dialect0 = reader_d['dialect']
fieldnames = reader_d['fieldnames']
reader_g = reader_d['reader']
g = filter_g(
row_g=reader_g,
count=args.count,
)
dialect1 = args.dialect1
if dialect1 == 'dialect0':
dialect1 = dialect0
writer_f = writer_make(
file_or_path=args.file1,
dialect=dialect1,
headless=args.headless,
fieldnames=fieldnames,
)
writer_f(g)
except Exception as exc:
m = traceback.format_exc()
parser.error(m)
| mit | -4,675,949,467,743,082,000 | 20.95122 | 66 | 0.455556 | false | 4.368932 | false | false | false |
chkothe/pylsl | pylsl/pylsl.py | 1 | 51549 | """Python API for the lab streaming layer.
The lab streaming layer provides a set of functions to make instrument data
accessible in real time within a lab network. From there, streams can be
picked up by recording programs, viewing programs or custom experiment
applications that access data streams in real time.
The API covers two areas:
- The "push API" allows to create stream outlets and to push data (regular
or irregular measurement time series, event data, coded audio/video frames,
etc.) into them.
- The "pull API" allows to create stream inlets and read time-synched
experiment data from them (for recording, viewing or experiment control).
pylsl has been tested with Python 2.7 and 3.4.
"""
import os
import platform
import struct
from ctypes import CDLL, util, byref, c_char_p, c_void_p, c_double, c_int, \
c_long, c_float, c_short, c_byte, c_longlong
__all__ = ['IRREGULAR_RATE', 'DEDUCED_TIMESTAMP', 'FOREVER', 'cf_float32',
'cf_double64', 'cf_string', 'cf_int32', 'cf_int16', 'cf_int8',
'cf_int64', 'cf_undefined', 'protocol_version', 'library_version',
'local_clock', 'StreamInfo', 'StreamOutlet', 'resolve_streams',
'resolve_byprop', 'resolve_bypred', 'StreamInlet', 'XMLElement',
'ContinuousResolver', 'TimeoutError', 'LostError',
'InvalidArgumentError', 'InternalError', 'stream_info',
'stream_outlet', 'stream_inlet', 'xml_element', 'timeout_error',
'lost_error', 'vectorf', 'vectord', 'vectorl', 'vectori',
'vectors', 'vectorc', 'vectorstr', 'resolve_stream']
# =================
# === Constants ===
# =================
# Constant to indicate that a stream has variable sampling rate.
IRREGULAR_RATE = 0.0
# Constant to indicate that a sample has the next successive time stamp
# according to the stream's defined sampling rate. Optional optimization to
# transmit less data per sample.
DEDUCED_TIMESTAMP = -1.0
# A very large time value (ca. 1 year); can be used in timeouts.
FOREVER = 32000000.0
# Value formats supported by LSL. LSL data streams are sequences of samples,
# each of which is a same-size vector of values with one of the below types.
# For up to 24-bit precision measurements in the appropriate physical unit (
# e.g., microvolts). Integers from -16777216 to 16777216 are represented
# accurately.
cf_float32 = 1
# For universal numeric data as long as permitted by network and disk budget.
# The largest representable integer is 53-bit.
cf_double64 = 2
# For variable-length ASCII strings or data blobs, such as video frames,
# complex event descriptions, etc.
cf_string = 3
# For high-rate digitized formats that require 32-bit precision. Depends
# critically on meta-data to represent meaningful units. Useful for
# application event codes or other coded data.
cf_int32 = 4
# For very high bandwidth signals or CD quality audio (for professional audio
# float is recommended).
cf_int16 = 5
# For binary signals or other coded data.
cf_int8 = 6
# For now only for future compatibility. Support for this type is not
# available on all languages and platforms.
cf_int64 = 7
# Can not be transmitted.
cf_undefined = 0
# ==========================================================
# === Free Functions provided by the lab streaming layer ===
# ==========================================================
def protocol_version():
"""Protocol version.
The major version is protocol_version() / 100;
The minor version is protocol_version() % 100;
Clients with different minor versions are protocol-compatible with each
other while clients with different major versions will refuse to work
together.
"""
return lib.lsl_protocol_version()
def library_version():
"""Version of the underlying liblsl library.
The major version is library_version() / 100;
The minor version is library_version() % 100;
"""
return lib.lsl_library_version()
def local_clock():
"""Obtain a local system time stamp in seconds.
    The resolution is better than a millisecond. This reading can be used to
assign time stamps to samples as they are being acquired.
If the "age" of a sample is known at a particular time (e.g., from USB
transmission delays), it can be used as an offset to lsl_local_clock() to
obtain a better estimate of when a sample was actually captured. See
StreamOutlet.push_sample() for a use case.
"""
return lib.lsl_local_clock()
# ==========================
# === Stream Declaration ===
# ==========================
class StreamInfo:
"""The StreamInfo object stores the declaration of a data stream.
Represents the following information:
a) stream data format (#channels, channel format)
b) core information (stream name, content type, sampling rate)
c) optional meta-data about the stream content (channel labels,
measurement units, etc.)
Whenever a program wants to provide a new stream on the lab network it will
typically first create a StreamInfo to describe its properties and then
construct a StreamOutlet with it to create the stream on the network.
Recipients who discover the outlet can query the StreamInfo; it is also
written to disk when recording the stream (playing a similar role as a file
header).
"""
def __init__(self, name='untitled', type='', channel_count=1,
nominal_srate=IRREGULAR_RATE, channel_format=cf_float32,
source_id='', handle=None):
"""Construct a new StreamInfo object.
Core stream information is specified here. Any remaining meta-data can
be added later.
Keyword arguments:
name -- Name of the stream. Describes the device (or product series)
that this stream makes available (for use by programs,
experimenters or data analysts). Cannot be empty.
type -- Content type of the stream. By convention LSL uses the content
types defined in the XDF file format specification where
applicable (code.google.com/p/xdf). The content type is the
preferred way to find streams (as opposed to searching by name).
channel_count -- Number of channels per sample. This stays constant for
the lifetime of the stream. (default 1)
nominal_srate -- The sampling rate (in Hz) as advertised by the data
source, regular (otherwise set to IRREGULAR_RATE).
(default IRREGULAR_RATE)
channel_format -- Format/type of each channel. If your channels have
different formats, consider supplying multiple
streams or use the largest type that can hold
them all (such as cf_double64). It is also allowed
to pass this as a string, without the cf_ prefix,
e.g., 'float32' (default cf_float32)
source_id -- Unique identifier of the device or source of the data, if
available (such as the serial number). This is critical
for system robustness since it allows recipients to
recover from failure even after the serving app, device or
computer crashes (just by finding a stream with the same
source id on the network again). Therefore, it is highly
recommended to always try to provide whatever information
can uniquely identify the data source itself.
(default '')
"""
if handle is not None:
self.obj = c_void_p(handle)
else:
if isinstance(channel_format, str):
channel_format = string2fmt[channel_format]
self.obj = lib.lsl_create_streaminfo(c_char_p(str.encode(name)),
c_char_p(str.encode(type)),
channel_count,
c_double(nominal_srate),
channel_format,
c_char_p(str.encode(source_id)))
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream description "
"object.")
def __del__(self):
""" Destroy a previously created StreamInfo object. """
# noinspection PyBroadException
try:
lib.lsl_destroy_streaminfo(self.obj)
except:
pass
# === Core Information (assigned at construction) ===
def name(self):
"""Name of the stream.
This is a human-readable name. For streams offered by device modules,
it refers to the type of device or product series that is generating
the data of the stream. If the source is an application, the name may
be a more generic or specific identifier. Multiple streams with the
same name can coexist, though potentially at the cost of ambiguity (for
the recording app or experimenter).
"""
return lib.lsl_get_name(self.obj).decode('utf-8')
def type(self):
"""Content type of the stream.
The content type is a short string such as "EEG", "Gaze" which
describes the content carried by the channel (if known). If a stream
        contains mixed content, this value need not be assigned but may instead
        be stored in the description of the channel types. To be useful to
        applications and automated processing systems, using the recommended
        content types is preferred.
"""
return lib.lsl_get_type(self.obj).decode('utf-8')
def channel_count(self):
"""Number of channels of the stream.
A stream has at least one channel; the channel count stays constant for
all samples.
"""
return lib.lsl_get_channel_count(self.obj)
def nominal_srate(self):
"""Sampling rate of the stream, according to the source (in Hz).
If a stream is irregularly sampled, this should be set to
IRREGULAR_RATE.
Note that no data will be lost even if this sampling rate is incorrect
or if a device has temporary hiccups, since all samples will be
transmitted anyway (except for those dropped by the device itself).
However, when the recording is imported into an application, a good
data importer may correct such errors more accurately if the advertised
sampling rate was close to the specs of the device.
"""
return lib.lsl_get_nominal_srate(self.obj)
def channel_format(self):
"""Channel format of the stream.
        All channels in a stream have the same format. However, a device might
        offer multiple time-synchronized streams, each with its own format.
"""
return lib.lsl_get_channel_format(self.obj)
def source_id(self):
"""Unique identifier of the stream's source, if available.
The unique source (or device) identifier is an optional piece of
        information that, if available, allows endpoints (such as the
        recording program) to re-acquire a stream automatically once it is
        back online.
"""
return lib.lsl_get_source_id(self.obj).decode('utf-8')
# === Hosting Information (assigned when bound to an outlet/inlet) ===
def version(self):
"""Protocol version used to deliver the stream."""
return lib.lsl_get_version(self.obj)
def created_at(self):
"""Creation time stamp of the stream.
This is the time stamp when the stream was first created
(as determined via local_clock() on the providing machine).
"""
return lib.lsl_get_created_at(self.obj)
def uid(self):
"""Unique ID of the stream outlet instance (once assigned).
This is a unique identifier of the stream outlet, and is guaranteed to
be different across multiple instantiations of the same outlet (e.g.,
after a re-start).
"""
return lib.lsl_get_uid(self.obj).decode('utf-8')
def session_id(self):
"""Session ID for the given stream.
The session id is an optional human-assigned identifier of the
recording session. While it is rarely used, it can be used to prevent
        concurrent recording activities on the same sub-network (e.g., in
multiple experiment areas) from seeing each other's streams
(can be assigned in a configuration file read by liblsl, see also
documentation on configuration files).
"""
return lib.lsl_get_session_id(self.obj).decode('utf-8')
def hostname(self):
"""Hostname of the providing machine."""
return lib.lsl_get_hostname(self.obj).decode('utf-8')
# === Data Description (can be modified) ===
def desc(self):
"""Extended description of the stream.
It is highly recommended that at least the channel labels are described
here. See code examples in the documentation. Other information, such
as amplifier settings, measurement units if deviating from defaults,
setup information, subject information, etc., can be specified here, as
well. See Meta-Data Recommendations in the docs.
Important: if you use a stream content type for which meta-data
recommendations exist, please try to lay out your meta-data in
agreement with these recommendations for compatibility with other
applications.
"""
return XMLElement(lib.lsl_get_desc(self.obj))
def as_xml(self):
"""Retrieve the entire stream_info in XML format.
This yields an XML document (in string form) whose top-level element is
<description>. The description element contains one element for each
field of the stream_info class, including:
a) the core elements <name>, <type>, <channel_count>, <nominal_srate>,
<channel_format>, <source_id>
b) the misc elements <version>, <created_at>, <uid>, <session_id>,
<v4address>, <v4data_port>, <v4service_port>, <v6address>,
<v6data_port>, <v6service_port>
c) the extended description element <desc> with user-defined
sub-elements.
"""
return lib.lsl_get_xml(self.obj).decode('utf-8')
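# A minimal usage sketch: declaring a stream and attaching channel labels via
# desc(). The stream name, channel labels, rate and source_id are arbitrary
# placeholders for a hypothetical 8-channel EEG device.
def _example_declare_stream():
    """Create a StreamInfo for a hypothetical 8-channel EEG device."""
    info = StreamInfo(name='ExampleEEG', type='EEG', channel_count=8,
                      nominal_srate=128, channel_format=cf_float32,
                      source_id='example-device-0001')
    # attach per-channel meta-data under <desc><channels>
    channels = info.desc().append_child("channels")
    for label in ["C3", "C4", "Cz", "FPz", "POz", "CPz", "O1", "O2"]:
        channels.append_child("channel").append_child_value("label", label)
    return info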
# =====================
# === Stream Outlet ===
# =====================
class StreamOutlet:
"""A stream outlet.
Outlets are used to make streaming data (and the meta-data) available on
the lab network.
"""
def __init__(self, info, chunk_size=0, max_buffered=360):
"""Establish a new stream outlet. This makes the stream discoverable.
Keyword arguments:
        info -- The StreamInfo object to describe this stream. Stays
                constant over the lifetime of the outlet.
        chunk_size -- Optionally the desired chunk granularity (in samples)
                      for transmission. If unspecified, each push operation
                      yields one chunk. Inlets can override this setting.
                      (default 0)
max_buffered -- Optionally the maximum amount of data to buffer (in
seconds if there is a nominal sampling rate, otherwise
x100 in samples). The default is 6 minutes of data.
Note that, for high-bandwidth data, you will want to
use a lower value here to avoid running out of RAM.
(default 360)
"""
self.obj = lib.lsl_create_outlet(info.obj, chunk_size, max_buffered)
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream outlet.")
self.channel_format = info.channel_format()
self.channel_count = info.channel_count()
self.do_push_sample = fmt2push_sample[self.channel_format]
self.do_push_chunk = fmt2push_chunk[self.channel_format]
self.value_type = fmt2type[self.channel_format]
self.sample_type = self.value_type*self.channel_count
def __del__(self):
"""Destroy an outlet.
The outlet will no longer be discoverable after destruction and all
connected inlets will stop delivering data.
"""
# noinspection PyBroadException
try:
lib.lsl_destroy_outlet(self.obj)
except:
pass
def push_sample(self, x, timestamp=0.0, pushthrough=True):
"""Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
"""
if len(x) == self.channel_count:
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("length of the data must correspond to the "
"stream's channel count.")
def push_chunk(self, x, timestamp=0.0, pushthrough=True):
"""Push a list of samples into the outlet.
        x -- A list of samples, either as a list of lists or a list of
             multiplexed values.
timestamp -- Optionally the capture time of the most recent sample, in
agreement with local_clock(); if omitted, the current
time is used. The time stamps of other samples are
automatically derived according to the sampling rate of
the stream. (default 0.0)
        pushthrough -- Whether to push the chunk through to the receivers instead
of buffering it with subsequent samples. Note that the
chunk_size, if specified at outlet construction, takes
precedence over the pushthrough flag. (default True)
"""
if len(x):
if type(x[0]) is list:
x = [v for sample in x for v in sample]
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
if len(x) % self.channel_count == 0:
constructor = self.value_type*len(x)
# noinspection PyCallingNonCallable
handle_error(self.do_push_chunk(self.obj, constructor(*x),
c_long(len(x)),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("each sample must have the same number of "
"channels.")
def have_consumers(self):
"""Check whether consumers are currently registered.
While it does not hurt, there is technically no reason to push samples
if there is no consumer.
"""
return bool(lib.lsl_have_consumers(self.obj))
def wait_for_consumers(self, timeout):
"""Wait until some consumer shows up (without wasting resources).
Returns True if the wait was successful, False if the timeout expired.
"""
return bool(lib.lsl_wait_for_consumers(self.obj, c_double(timeout)))
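# A minimal usage sketch: creating an outlet from a StreamInfo and pushing a
# few samples of dummy data. In a real application the loop would be driven
# by an acquisition device; all parameter values below are placeholders.
def _example_stream_outlet(num_samples=100):
    """Push random dummy samples into a newly created outlet."""
    import random
    info = StreamInfo('ExampleEEG', 'EEG', 8, 128, cf_float32,
                      'example-device-0001')
    outlet = StreamOutlet(info, chunk_size=0, max_buffered=360)
    for _ in range(num_samples):
        sample = [random.random() for _ in range(8)]
        outlet.push_sample(sample, local_clock())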
# =========================
# === Resolve Functions ===
# =========================
def resolve_streams(wait_time=1.0):
"""Resolve all streams on the network.
This function returns all currently available streams from any outlet on
the network. The network is usually the subnet specified at the local
router, but may also include a group of machines visible to each other via
multicast packets (given that the network supports it), or list of
hostnames. These details may optionally be customized by the experimenter
in a configuration file (see configuration file in the documentation).
Keyword arguments:
wait_time -- The waiting time for the operation, in seconds, to search for
streams. Warning: If this is too short (<0.5s) only a subset
(or none) of the outlets that are present on the network may
be returned. (default 1.0)
Returns a list of StreamInfo objects (with empty desc field), any of which
can subsequently be used to open an inlet. The full description can be
retrieved from the inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_all(byref(buffer), 1024, c_double(wait_time))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
"""Resolve all streams with a specific value for a given property.
If the goal is to resolve a specific stream, this method is preferred over
resolving all streams and then selecting the desired one.
Keyword arguments:
prop -- The StreamInfo property that should have a specific value (e.g.,
"name", "type", "source_id", or "desc/manufaturer").
value -- The string value that the property should have (e.g., "EEG" as
the type property).
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) will be returned. (default FOREVER)
Returns a list of matching StreamInfo objects (with empty desc field), any
of which can subsequently be used to open an inlet.
    Example: results = resolve_byprop("type", "EEG")
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_byprop(byref(buffer), 1024,
c_char_p(str.encode(prop)),
c_char_p(str.encode(value)),
minimum,
c_double(timeout))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
"""Resolve all streams that match a given predicate.
    Advanced query that allows imposing more conditions on the retrieved
streams; the given string is an XPath 1.0 predicate for the <description>
node (omitting the surrounding []'s), see also
http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.
Keyword arguments:
predicate -- The predicate string, e.g. "name='BioSemi'" or
"type='EEG' and starts-with(name,'BioSemi') and
count(description/desc/channels/channel)=32"
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) will be returned. (default FOREVER)
Returns a list of matching StreamInfo objects (with empty desc field), any
of which can subsequently be used to open an inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_bypred(byref(buffer), 1024,
c_char_p(str.encode(predicate)),
minimum,
c_double(timeout))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
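# A minimal usage sketch of the three resolver flavours above; the property
# value "EEG", the BioSemi predicate and the timeouts are placeholders.
def _example_resolve():
    """Resolve streams in three different ways and return the results."""
    everything = resolve_streams(wait_time=1.0)
    eeg_streams = resolve_byprop("type", "EEG", minimum=1, timeout=5.0)
    biosemi = resolve_bypred("starts-with(name,'BioSemi')", timeout=5.0)
    return everything, eeg_streams, biosemi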
# ====================
# === Stream Inlet ===
# ====================
class StreamInlet:
"""A stream inlet.
Inlets are used to receive streaming data (and meta-data) from the lab
network.
"""
def __init__(self, info, max_buflen=360, max_chunklen=0, recover=True):
"""Construct a new stream inlet from a resolved stream description.
Keyword arguments:
        info -- A resolved stream description object (as coming from one
            of the resolver functions). Note: the stream_inlet may also be
            constructed with a fully-specified stream_info, if the desired
            channel format and count is already known up-front, but this is
            strongly discouraged and should only ever be done if there is
            no time to resolve the stream up-front (e.g., due to
            limitations in the client program).
max_buflen -- Optionally the maximum amount of data to buffer (in
seconds if there is a nominal sampling rate, otherwise
x100 in samples). Recording applications want to use a
fairly large buffer size here, while real-time
applications would only buffer as much as they need to
perform their next calculation. (default 360)
max_chunklen -- Optionally the maximum size, in samples, at which
chunks are transmitted (the default corresponds to the
chunk sizes used by the sender). Recording programs
can use a generous size here (leaving it to the network
how to pack things), while real-time applications may
want a finer (perhaps 1-sample) granularity. If left
unspecified (=0), the sender determines the chunk
granularity. (default 0)
recover -- Try to silently recover lost streams that are recoverable
                   (=those that have a source_id set). In all other cases
(recover is False or the stream is not recoverable)
functions may throw a lost_error if the stream's source is
lost (e.g., due to an app or computer crash). (default True)
"""
if type(info) is list:
raise TypeError("description needs to be of type StreamInfo, "
"got a list.")
self.obj = lib.lsl_create_inlet(info.obj, max_buflen, max_chunklen,
recover)
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream inlet.")
self.channel_format = info.channel_format()
self.channel_count = info.channel_count()
self.do_pull_sample = fmt2pull_sample[self.channel_format]
self.do_pull_chunk = fmt2pull_chunk[self.channel_format]
self.value_type = fmt2type[self.channel_format]
self.sample_type = self.value_type*self.channel_count
self.sample = self.sample_type()
self.buffers = {}
def __del__(self):
"""Destructor. The inlet will automatically disconnect if destroyed."""
# noinspection PyBroadException
try:
lib.lsl_destroy_inlet(self.obj)
except:
pass
def info(self, timeout=FOREVER):
"""Retrieve the complete information of the given stream.
This includes the extended description. Can be invoked at any time of
the stream's lifetime.
Keyword arguments:
timeout -- Timeout of the operation. (default FOREVER)
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
result = lib.lsl_get_fullinfo(self.obj, c_double(timeout),
byref(errcode))
handle_error(errcode)
return StreamInfo(handle=result)
def open_stream(self, timeout=FOREVER):
"""Subscribe to the data stream.
All samples pushed in at the other end from this moment onwards will be
queued and eventually be delivered in response to pull_sample() or
pull_chunk() calls. Pulling a sample without some preceding open_stream
is permitted (the stream will then be opened implicitly).
Keyword arguments:
timeout -- Optional timeout of the operation (default FOREVER).
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
lib.lsl_open_stream(self.obj, c_double(timeout), byref(errcode))
handle_error(errcode)
def close_stream(self):
"""Drop the current data stream.
All samples that are still buffered or in flight will be dropped and
transmission and buffering of data for this inlet will be stopped. If
an application stops being interested in data from a source
        (temporarily or not) but keeps the inlet alive, it should call
lsl_close_stream() to not waste unnecessary system and network
resources.
"""
lib.lsl_close_stream(self.obj)
def time_correction(self, timeout=FOREVER):
"""Retrieve an estimated time correction offset for the given stream.
        The first call to this function takes several milliseconds until a
reliable first estimate is obtained. Subsequent calls are instantaneous
(and rely on periodic background updates). The precision of these
estimates should be below 1 ms (empirically within +/-0.2 ms).
Keyword arguments:
timeout -- Timeout to acquire the first time-correction estimate
(default FOREVER).
Returns the current time correction estimate. This is the number that
needs to be added to a time stamp that was remotely generated via
local_clock() to map it into the local clock domain of this
machine.
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
result = lib.lsl_time_correction(self.obj, c_double(timeout),
byref(errcode))
handle_error(errcode)
return result
def pull_sample(self, timeout=FOREVER, sample=None):
"""Pull a sample from the inlet and return it.
Keyword arguments:
timeout -- The timeout for this operation, if any. (default FOREVER)
If this is passed as 0.0, then the function returns only a
sample if one is buffered for immediate pickup.
Returns a tuple (sample,timestamp) where sample is a list of channel
values and timestamp is the capture time of the sample on the remote
machine, or (None,None) if no new sample was available. To remap this
time stamp to the local clock, add the value returned by
.time_correction() to it.
Throws a LostError if the stream source has been lost. Note that, if
the timeout expires, no TimeoutError is thrown (because this case is
not considered an error).
"""
# support for the legacy API
if type(timeout) is list:
assign_to = timeout
timeout = sample if type(sample) is float else 0.0
else:
assign_to = None
errcode = c_int()
timestamp = self.do_pull_sample(self.obj, byref(self.sample),
self.channel_count, c_double(timeout),
byref(errcode))
handle_error(errcode)
if timestamp:
sample = [v for v in self.sample]
if self.channel_format == cf_string:
sample = [v.decode('utf-8') for v in sample]
if assign_to is not None:
assign_to[:] = sample
return sample, timestamp
else:
return None, None
def pull_chunk(self, timeout=0.0, max_samples=1024):
"""Pull a chunk of samples from the inlet.
Keyword arguments:
timeout -- The timeout of the operation; if passed as 0.0, then only
samples available for immediate pickup will be returned.
(default 0.0)
max_samples -- Maximum number of samples to return. (default
1024)
Returns a tuple (samples,timestamps) where samples is a list of samples
(each itself a list of values), and timestamps is a list of time-stamps.
Throws a LostError if the stream source has been lost.
"""
# look up a pre-allocated buffer of appropriate length
num_channels = self.channel_count
max_values = max_samples*num_channels
if max_samples not in self.buffers:
# noinspection PyCallingNonCallable
self.buffers[max_samples] = ((self.value_type*max_values)(),
(c_double*max_samples)())
buffer = self.buffers[max_samples]
# read data into it
errcode = c_int()
# noinspection PyCallingNonCallable
num_elements = self.do_pull_chunk(self.obj, byref(buffer[0]),
byref(buffer[1]), max_values,
max_samples, c_double(timeout),
byref(errcode))
handle_error(errcode)
# return results (note: could offer a more efficient format in the
# future, e.g., a numpy array)
num_samples = num_elements/num_channels
samples = [[buffer[0][s*num_channels+c] for c in range(num_channels)]
for s in range(int(num_samples))]
if self.channel_format == cf_string:
samples = [[v.decode('utf-8') for v in s] for s in samples]
timestamps = [buffer[1][s] for s in range(int(num_samples))]
return samples, timestamps
def samples_available(self):
"""Query whether samples are currently available for immediate pickup.
Note that it is not a good idea to use samples_available() to determine
whether a pull_*() call would block: to be sure, set the pull timeout
to 0.0 or an acceptably low value. If the underlying implementation
supports it, the value will be the number of samples available
(otherwise it will be 1 or 0).
"""
return lib.lsl_samples_available(self.obj)
def was_clock_reset(self):
"""Query whether the clock was potentially reset since the last call.
        This rarely-used function is only needed for applications that
        combine multiple time_correction values to estimate precise clock
        drift and that need to tolerate cases where the source machine was
hot-swapped or restarted.
"""
return bool(lib.lsl_was_clock_reset(self.obj))
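# A minimal usage sketch: resolving an EEG stream, opening an inlet on it and
# pulling a few samples mapped into the local clock domain. It assumes that
# at least one stream of type "EEG" is currently visible on the network.
def _example_stream_inlet(num_samples=10):
    """Pull a few samples from the first resolvable EEG stream."""
    results = resolve_byprop("type", "EEG", timeout=5.0)
    inlet = StreamInlet(results[0], max_buflen=360, max_chunklen=0)
    offset = inlet.time_correction()  # maps remote stamps to the local clock
    collected = []
    for _ in range(num_samples):
        sample, timestamp = inlet.pull_sample(timeout=1.0)
        if sample is not None:
            collected.append((timestamp + offset, sample))
    return collected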
# ===================
# === XML Element ===
# ===================
class XMLElement:
"""A lightweight XML element tree modeling the .desc() field of StreamInfo.
Has a name and can have multiple named children or have text content as
value; attributes are omitted. Insider note: The interface is modeled after
a subset of pugixml's node type and is compatible with it. See also
http://pugixml.googlecode.com/svn/tags/latest/docs/manual/access.html for
additional documentation.
"""
def __init__(self, handle):
"""Construct new XML element from existing handle."""
self.e = c_void_p(handle)
# === Tree Navigation ===
def first_child(self):
"""Get the first child of the element."""
return XMLElement(lib.lsl_first_child(self.e))
def last_child(self):
"""Get the last child of the element."""
return XMLElement(lib.lsl_last_child(self.e))
def child(self, name):
"""Get a child with a specified name."""
return XMLElement(lib.lsl_child(self.e, str.encode(name)))
def next_sibling(self, name=None):
"""Get the next sibling in the children list of the parent node.
If a name is provided, the next sibling with the given name is returned.
"""
if name is None:
return XMLElement(lib.lsl_next_sibling(self.e))
else:
return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))
def previous_sibling(self, name=None):
"""Get the previous sibling in the children list of the parent node.
If a name is provided, the previous sibling with the given name is
returned.
"""
if name is None:
return XMLElement(lib.lsl_previous_sibling(self.e))
else:
return XMLElement(lib.lsl_previous_sibling_n(self.e,
str.encode(name)))
def parent(self):
"""Get the parent node."""
return XMLElement(lib.lsl_parent(self.e))
# === Content Queries ===
def empty(self):
"""Whether this node is empty."""
return bool(lib.lsl_empty(self.e))
def is_text(self):
"""Whether this is a text body (instead of an XML element).
True both for plain char data and CData.
"""
return bool(lib.lsl_is_text(self.e))
def name(self):
"""Name of the element."""
return lib.lsl_name(self.e).decode('utf-8')
def value(self):
"""Value of the element."""
return lib.lsl_value(self.e).decode('utf-8')
def child_value(self, name=None):
"""Get child value (value of the first child that is text).
If a name is provided, then the value of the first child with the
given name is returned.
"""
if name is None:
res = lib.lsl_child_value(self.e)
else:
res = lib.lsl_child_value_n(self.e, str.encode(name))
return res.decode('utf-8')
# === Modification ===
def append_child_value(self, name, value):
"""Append a child node with a given name, which has a (nameless)
plain-text child with the given text value."""
return XMLElement(lib.lsl_append_child_value(self.e,
str.encode(name),
str.encode(value)))
def prepend_child_value(self, name, value):
"""Prepend a child node with a given name, which has a (nameless)
plain-text child with the given text value."""
return XMLElement(lib.lsl_prepend_child_value(self.e,
str.encode(name),
str.encode(value)))
def set_child_value(self, name, value):
"""Set the text value of the (nameless) plain-text child of a named
child node."""
return XMLElement(lib.lsl_set_child_value(self.e,
str.encode(name),
str.encode(value)))
def set_name(self, name):
"""Set the element's name. Returns False if the node is empty."""
return bool(lib.lsl_set_name(self.e, str.encode(name)))
def set_value(self, value):
"""Set the element's value. Returns False if the node is empty."""
return bool(lib.lsl_set_value(self.e, str.encode(value)))
def append_child(self, name):
"""Append a child element with the specified name."""
return XMLElement(lib.lsl_append_child(self.e, str.encode(name)))
def prepend_child(self, name):
"""Prepend a child element with the specified name."""
return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name)))
def append_copy(self, elem):
"""Append a copy of the specified element as a child."""
return XMLElement(lib.lsl_append_copy(self.e, elem.e))
def prepend_copy(self, elem):
"""Prepend a copy of the specified element as a child."""
return XMLElement(lib.lsl_prepend_copy(self.e, elem.e))
def remove_child(self, rhs):
"""Remove a given child element, specified by name or as element."""
if type(rhs) is XMLElement:
lib.lsl_remove_child(self.e, rhs.e)
else:
lib.lsl_remove_child_n(self.e, rhs)
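# A minimal usage sketch: walking the XMLElement tree to read channel labels
# back out of a stream description. The "channels"/"channel"/"label" layout
# mirrors the meta-data written in _example_declare_stream() above.
def _example_read_channel_labels(info):
    """Return the channel labels stored under <desc><channels>, if any."""
    labels = []
    channel = info.desc().child("channels").child("channel")
    while not channel.empty():
        labels.append(channel.child_value("label"))
        channel = channel.next_sibling("channel")
    return labels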
# ==========================
# === ContinuousResolver ===
# ==========================
class ContinuousResolver:
"""A convenience class resolving streams continuously in the background.
This object can be queried at any time for the set of streams that are
currently visible on the network.
"""
def __init__(self, prop=None, value=None, pred=None, forget_after=5.0):
"""Construct a new continuous_resolver.
Keyword arguments:
forget_after -- When a stream is no longer visible on the network
(e.g., because it was shut down), this is the time in
seconds after which it is no longer reported by the
resolver.
"""
if pred is not None:
if prop is not None or value is not None:
raise ValueError("you can only either pass the prop/value "
"argument or the pred argument, but not "
"both.")
self.obj = lib.lsl_create_continuous_resolver_bypred(str.encode(pred),
c_double(forget_after))
elif prop is not None and value is not None:
self.obj = lib.lsl_create_continuous_resolver_byprop(str.encode(prop),
str.encode(value),
c_double(forget_after))
elif prop is not None or value is not None:
raise ValueError("if prop is specified, then value must be "
"specified, too, and vice versa.")
else:
self.obj = lib.lsl_create_continuous_resolver(c_double(forget_after))
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create continuous resolver.")
def __del__(self):
"""Destructor for the continuous resolver."""
# noinspection PyBroadException
try:
lib.lsl_destroy_continuous_resolver(self.obj)
except:
pass
def results(self):
"""Obtain the set of currently present streams on the network.
Returns a list of matching StreamInfo objects (with empty desc
field), any of which can subsequently be used to open an inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolver_results(self.obj, byref(buffer), 1024)
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
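# A minimal usage sketch: polling a ContinuousResolver once per second and
# printing the names of the streams it currently knows about. The property
# value "EEG" and the timings are placeholders.
def _example_continuous_resolver(iterations=10):
    """Print the names of all visible EEG streams, once per second."""
    import time
    resolver = ContinuousResolver(prop="type", value="EEG", forget_after=5.0)
    for _ in range(iterations):
        print([info.name() for info in resolver.results()])
        time.sleep(1.0)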
# =========================
# === Error Definitions ===
# =========================
# noinspection PyShadowingBuiltins
class TimeoutError(RuntimeError):
# note: although this overrides the name of a built-in exception,
# this API is retained here for compatiblity with the Python 2.x
# version of pylsl
pass
class LostError(RuntimeError):
pass
class InvalidArgumentError(RuntimeError):
pass
class InternalError(RuntimeError):
pass
def handle_error(errcode):
"""Error handler function. Translates an error code into an exception."""
if type(errcode) is c_int:
errcode = errcode.value
if errcode == 0:
pass # no error
elif errcode == -1:
raise TimeoutError("the operation failed due to a timeout.")
elif errcode == -2:
raise LostError("the stream has been lost.")
elif errcode == -3:
raise InvalidArgumentError("an argument was incorrectly specified.")
elif errcode == -4:
raise InternalError("an internal error has occurred.")
elif errcode < 0:
raise RuntimeError("an unknown error has occurred.")
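# A minimal usage sketch: pulling from an inlet while treating a lost stream
# source as a recoverable condition rather than a fatal error. The inlet is
# assumed to come from one of the examples above.
def _example_robust_pull(inlet):
    """Pull one sample, returning (None, None) if the stream was lost."""
    try:
        return inlet.pull_sample(timeout=2.0)
    except LostError:
        # The source is gone and not recoverable; give up on this inlet.
        return None, None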
# =================================================
# === Compatibility Interface for old pylsl API ===
# =================================================
# set class aliases
stream_info = StreamInfo
stream_outlet = StreamOutlet
stream_inlet = StreamInlet
xml_element = XMLElement
timeout_error = TimeoutError
lost_error = LostError
vectorf = vectord = vectorl = vectori = vectors = vectorc = vectorstr = list
def resolve_stream(*args):
if len(args) == 0:
return resolve_streams()
elif type(args[0]) in [int, float]:
return resolve_streams(args[0])
elif type(args[0]) is str:
if len(args) == 1:
return resolve_bypred(args[0])
elif type(args[1]) in [int, float]:
return resolve_bypred(args[0], args[1])
else:
if len(args) == 2:
return resolve_byprop(args[0], args[1])
else:
return resolve_byprop(args[0], args[1], args[2])
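# A minimal sketch of how the legacy resolve_stream() calls above map onto
# the newer resolver functions; all argument values are placeholders.
def _example_legacy_resolve_calls():
    """Equivalent old-style resolver invocations and their modern forms."""
    streams_a = resolve_stream('type', 'EEG')      # resolve_byprop('type', 'EEG')
    streams_b = resolve_stream("name='BioSemi'")   # resolve_bypred("name='BioSemi'")
    streams_c = resolve_stream(2.0)                # resolve_streams(2.0)
    return streams_a, streams_b, streams_c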
# ==================================
# === Module Initialization Code ===
# ==================================
# find and load library
os_name = platform.system()
bitness = 8 * struct.calcsize("P")
if os_name in ['Windows', 'Microsoft']:
libname = 'liblsl32.dll' if bitness == 32 else 'liblsl64.dll'
elif os_name == 'Darwin':
libname = 'liblsl32.dylib' if bitness == 32 else 'liblsl64.dylib'
elif os_name == 'Linux':
libname = 'liblsl32.so' if bitness == 32 else 'liblsl64.so'
else:
raise RuntimeError("unrecognized operating system:", os_name)
libpath = os.path.join(os.path.dirname(__file__), libname)
if not os.path.isfile(libpath):
libpath = util.find_library(libname)
if not libpath:
raise RuntimeError("library " + libname + " was not found - make sure "
"that it is on the search path (e.g., in the same "
"folder as pylsl.py).")
lib = CDLL(libpath)
# set function return types where necessary
lib.lsl_local_clock.restype = c_double
lib.lsl_create_streaminfo.restype = c_void_p
lib.lsl_get_name.restype = c_char_p
lib.lsl_get_type.restype = c_char_p
lib.lsl_get_nominal_srate.restype = c_double
lib.lsl_get_source_id.restype = c_char_p
lib.lsl_get_created_at.restype = c_double
lib.lsl_get_uid.restype = c_char_p
lib.lsl_get_session_id.restype = c_char_p
lib.lsl_get_hostname.restype = c_char_p
lib.lsl_get_desc.restype = c_void_p
lib.lsl_get_xml.restype = c_char_p
lib.lsl_create_outlet.restype = c_void_p
lib.lsl_create_inlet.restype = c_void_p
lib.lsl_get_fullinfo.restype = c_void_p
lib.lsl_open_stream.restype = c_void_p
lib.lsl_time_correction.restype = c_double
lib.lsl_pull_sample_f.restype = c_double
lib.lsl_pull_sample_d.restype = c_double
lib.lsl_pull_sample_l.restype = c_double
lib.lsl_pull_sample_i.restype = c_double
lib.lsl_pull_sample_s.restype = c_double
lib.lsl_pull_sample_c.restype = c_double
lib.lsl_pull_sample_str.restype = c_double
lib.lsl_pull_sample_buf.restype = c_double
lib.lsl_first_child.restype = c_void_p
lib.lsl_last_child.restype = c_void_p
lib.lsl_next_sibling.restype = c_void_p
lib.lsl_previous_sibling.restype = c_void_p
lib.lsl_parent.restype = c_void_p
lib.lsl_child.restype = c_void_p
lib.lsl_next_sibling_n.restype = c_void_p
lib.lsl_previous_sibling_n.restype = c_void_p
lib.lsl_name.restype = c_char_p
lib.lsl_value.restype = c_char_p
lib.lsl_child_value.restype = c_char_p
lib.lsl_child_value_n.restype = c_char_p
lib.lsl_append_child_value.restype = c_void_p
lib.lsl_prepend_child_value.restype = c_void_p
lib.lsl_append_child.restype = c_void_p
lib.lsl_prepend_child.restype = c_void_p
lib.lsl_append_copy.restype = c_void_p
lib.lsl_prepend_copy.restype = c_void_p
# noinspection PyBroadException
try:
lib.lsl_pull_chunk_f.restype = c_long
lib.lsl_pull_chunk_d.restype = c_long
lib.lsl_pull_chunk_l.restype = c_long
lib.lsl_pull_chunk_i.restype = c_long
lib.lsl_pull_chunk_s.restype = c_long
lib.lsl_pull_chunk_c.restype = c_long
lib.lsl_pull_chunk_str.restype = c_long
lib.lsl_pull_chunk_buf.restype = c_long
except:
print("pylsl: chunk transfer functions not available in your liblsl "
"version.")
# noinspection PyBroadException
try:
lib.lsl_create_continuous_resolver.restype = c_void_p
lib.lsl_create_continuous_resolver_bypred.restype = c_void_p
lib.lsl_create_continuous_resolver_byprop.restype = c_void_p
except:
print("pylsl: ContinuousResolver not (fully) available in your liblsl "
"version.")
# set up some type maps
string2fmt = {'float32': cf_float32, 'double64': cf_double64,
'string': cf_string, 'int32': cf_int32, 'int16': cf_int16,
'int8': cf_int8, 'int64': cf_int64}
fmt2string = ['undefined', 'float32', 'double64', 'string', 'int32', 'int16',
'int8', 'int64']
fmt2type = [[], c_float, c_double, c_char_p, c_int, c_short, c_byte, c_longlong]
fmt2push_sample = [[], lib.lsl_push_sample_ftp, lib.lsl_push_sample_dtp,
lib.lsl_push_sample_strtp, lib.lsl_push_sample_itp,
lib.lsl_push_sample_stp, lib.lsl_push_sample_ctp, []]
fmt2pull_sample = [[], lib.lsl_pull_sample_f, lib.lsl_pull_sample_d,
lib.lsl_pull_sample_str, lib.lsl_pull_sample_i,
lib.lsl_pull_sample_s, lib.lsl_pull_sample_c, []]
# noinspection PyBroadException
try:
fmt2push_chunk = [[], lib.lsl_push_chunk_ftp, lib.lsl_push_chunk_dtp,
lib.lsl_push_chunk_strtp, lib.lsl_push_chunk_itp,
lib.lsl_push_chunk_stp, lib.lsl_push_chunk_ctp, []]
fmt2pull_chunk = [[], lib.lsl_pull_chunk_f, lib.lsl_pull_chunk_d,
lib.lsl_pull_chunk_str, lib.lsl_pull_chunk_i,
lib.lsl_pull_chunk_s, lib.lsl_pull_chunk_c, []]
except:
# if not available
fmt2push_chunk = [None, None, None, None, None, None, None, None]
fmt2pull_chunk = [None, None, None, None, None, None, None, None]
| mit | 5,422,760,942,595,016,000 | 40.571774 | 88 | 0.599721 | false | 4.23853 | false | false | false |
ArcAwe/coseq | Software/RaspberryPi/main/xbox_read.py | 1 | 1990 |
# from https://github.com/zephod/lego-pi/blob/master/lib/xbox_read.py
from os import popen
from sys import stdin
import re
import time
s = re.compile('[ :]')
class Event:
def __init__(self,key,value,old_value):
self.key = key
self.value = value
self.old_value = old_value
def is_press(self):
return self.value==1 and self.old_value==0
def __str__(self):
return 'Event(%s,%d,%d)' % (self.key,self.value,self.old_value)
def apply_deadzone(x, deadzone, scale):
if x < 0:
return (scale * min(0,x+deadzone)) / (32768-deadzone)
return (scale * max(0,x-deadzone)) / (32768-deadzone)
def event_stream(deadzone=0,scale=32768):
_data = None
pid = 65536
subprocess = popen('nohup xboxdrv','r',pid)
tryRmmod = False
print "Starting..."
while (True):
line = subprocess.readline()
if 'Error' in line:
if(not tryRmmod):
pid+=3
subprocess = popen('nohup rmmod xpad','r',pid)
time.sleep(1)
pid+=3
subprocess = popen('nohup xboxdrv','r',pid)
tryRmmod = True
continue
raise ValueError(line)
data = filter(bool,s.split(line[:-1]))
if len(data)==42:
# Break input string into a data dict
data = { data[x]:int(data[x+1]) for x in range(0,len(data),2) }
if not _data:
_data = data
continue
for key in data:
if key=='X1' or key=='X2' or key=='Y1' or key=='Y2':
data[key] = apply_deadzone(data[key],deadzone,scale)
if data[key]==_data[key]: continue
event = Event(key,data[key],_data[key])
yield event
_data = data
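# A minimal usage sketch of the generator above: reacting to 'A' button
# presses and left-stick movement. The deadzone value is an arbitrary
# placeholder; the key names come from the appendix below.
def _example_print_button_presses():
    """Print every 'A' press and the current left-stick X position."""
    for event in event_stream(deadzone=4000):
        if event.key == 'A' and event.is_press():
            print('A pressed')
        elif event.key == 'X1':
            print('left stick X = %s' % event.value)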
# Appendix: Keys
# --------------
# X1
# Y1
# X2
# Y2
# du
# dd
# dl
# dr
# back
# guide
# start
# TL
# TR
# A
# B
# X
# Y
# LB
# RB
# LT
# RT | mit | 3,269,753,871,665,201,700 | 22.702381 | 75 | 0.513568 | false | 3.251634 | false | false | false |
igorvc/iso8583py | examples/echoClient.py | 1 | 2943 | """
(C) Copyright 2009 Igor V. Custodio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from ISO8583.ISO8583 import ISO8583
from ISO8583.ISOErrors import *
import socket
import sys
import time
# Configure the client
serverIP = "127.0.0.1"
serverPort = 8583
numberEcho = 5
timeBetweenEcho = 5 # in seconds
bigEndian = True
#bigEndian = False
s = None
for res in socket.getaddrinfo(serverIP, serverPort, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
s = None
continue
try:
s.connect(sa)
except socket.error, msg:
s.close()
s = None
continue
break
if s is None:
print ('Could not connect :(')
sys.exit(1)
for req in range(0,numberEcho):
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(3,'300000')
iso.setBit(24,'045')
iso.setBit(41,'11111111')
iso.setBit(42,'222222222222222')
iso.setBit(63,'This is a Test Message')
if bigEndian:
try:
message = iso.getNetworkISO()
s.send(message)
print ('Sending ... %s' % message)
ans = s.recv(2048)
print ("\nInput ASCII |%s|" % ans)
isoAns = ISO8583()
isoAns.setNetworkISO(ans)
v1 = isoAns.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
            if isoAns.getMTI() == '0810':
                print ("\tThat's great !!! The server understands my message !!!")
            else:
                print ("The server doesn't understand my message!")
except InvalidIso8583, ii:
print ii
break
time.sleep(timeBetweenEcho)
else:
try:
message = iso.getNetworkISO(False)
s.send(message)
print ('Sending ... %s' % message)
ans = s.recv(2048)
print ("\nInput ASCII |%s|" % ans)
isoAns = ISO8583()
isoAns.setNetworkISO(ans,False)
v1 = isoAns.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
            if isoAns.getMTI() == '0810':
                print ("\tThat's great !!! The server understands my message !!!")
            else:
                print ("The server doesn't understand my message!")
except InvalidIso8583, ii:
print ii
break
time.sleep(timeBetweenEcho)
print ('Closing...')
s.close()
| gpl-3.0 | 1,896,742,353,546,762,200 | 22.731092 | 90 | 0.638464 | false | 3.049741 | false | false | false |
w1z2g3/crossbar | crossbar/adapter/rest/test/test_signature.py | 1 | 11871 | #####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import json
from twisted.internet.defer import inlineCallbacks
from crossbar.test import TestCase
from crossbar._compat import native_string
from crossbar._logging import LogCapturer
from crossbar.adapter.rest import PublisherResource
from crossbar.adapter.rest.test import MockPublisherSession, renderResource, makeSignedArguments
resourceOptions = {
"secret": "foobar",
"key": "bazapp"
}
publishBody = b'{"topic": "com.test.messages", "args": [1]}'
class SignatureTestCase(TestCase):
"""
Unit tests for the signature authentication part of L{_CommonResource}.
"""
@inlineCallbacks
def test_good_signature(self):
"""
A valid, correct signature will mean the request is processed.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="bazapp", signSecret="foobar")
self.assertEqual(request.code, 202)
self.assertEqual(json.loads(native_string(request.get_written_data())),
{"id": session._published_messages[0]["id"]})
logs = l.get_category("AR203")
self.assertEqual(len(logs), 1)
@inlineCallbacks
def test_incorrect_secret(self):
"""
An incorrect secret (but an otherwise well-formed signature) will mean
the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/",
method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="bazapp", signSecret="foobar2")
self.assertEqual(request.code, 401)
errors = l.get_category("AR459")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 401)
@inlineCallbacks
def test_unknown_key(self):
"""
An unknown key in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="spamapp", signSecret="foobar")
self.assertEqual(request.code, 401)
errors = l.get_category("AR460")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 401)
@inlineCallbacks
def test_no_timestamp(self):
"""
No timestamp in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'timestamp']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_wrong_timestamp(self):
"""
An invalid timestamp in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'timestamp'] = [b"notatimestamp"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_outdated_delta(self):
"""
If the delta between now and the timestamp in the request is larger than
C{timestamp_delta_limit}, the request is rejected.
"""
custOpts = {"timestamp_delta_limit": 1}
custOpts.update(resourceOptions)
session = MockPublisherSession(self)
resource = PublisherResource(custOpts, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'timestamp'] = [b"2011-10-14T16:59:51.123Z"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR464")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_invalid_nonce(self):
"""
An invalid nonce in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'nonce'] = [b"notanonce"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_nonce(self):
"""
A missing nonce in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'nonce']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_signature(self):
"""
A missing signature in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'signature']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_key(self):
"""
A missing key in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'key']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_seq(self):
"""
A missing sequence in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'seq']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_wrong_seq(self):
"""
        An invalid sequence in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'seq'] = [b"notaseq"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
| agpl-3.0 | 3,433,306,572,119,676,000 | 35.082067 | 96 | 0.61486 | false | 4.364338 | true | false | false |
Southpaw-TACTIC/TACTIC | src/tactic/startup/first_run_init.py | 1 | 4352 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['FirstRunInit']
from pyasm.common import Common, Config, Environment, Common, TacticException, Container
import os, shutil
import sys
class FirstRunInit(object):
def execute(self):
self.copy_start()
# check to see if there is a server code
server_code = Config.get_value("install", "server")
#if not server_code:
# # generate one
# server_code = Common.generate_alphanum_key(3)
# Config.set_value("install", "server", server_code)
# Config.save_config()
def copy_start(self):
data_dir = Environment.get_data_dir(manual=True)
# check to see if the data folder already exists
print("\n")
print("Data Directory [%s]" % data_dir)
install_dir = Environment.get_install_dir()
# find criteria for initializing
initialize = False
if data_dir and not os.path.exists(data_dir):
initialize = True
if data_dir and not os.path.exists("%s/config" % data_dir):
initialize = True
if initialize:
# copy the template over. This should exist even if it is not used
print("... not found: initializing\n")
install_data_path = "%s/src/install/start" % (install_dir)
if os.path.exists(install_data_path):
dirnames = os.listdir(install_data_path)
for dirname in dirnames:
to_dir = "%s/%s" % (data_dir, dirname)
if os.path.exists(to_dir):
print("WARNING: path [%s] exists ... skipping copying" % to_dir)
continue
print("Copying to [%s]" % to_dir)
from_dir = "%s/%s" % (install_data_path, dirname)
shutil.copytree(from_dir, to_dir)
else:
shutil.copytree(install_data_path, data_dir)
# create the dist folder
to_dir = "%s/dist" % (data_dir)
if not os.path.exists(to_dir):
os.makedirs(to_dir)
# copy the appropriate config file
if os.name == 'nt':
filename = 'standalone_win32-conf.xml'
else:
filename = 'standalone_linux-conf.xml'
install_config_path = "%s/src/install/config/%s" % (install_dir,filename)
to_config_path = "%s/config/tactic-conf.xml" % data_dir
if not os.path.exists(to_config_path):
dirname = os.path.dirname(to_config_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
shutil.copy(install_config_path, to_config_path)
# some backwards compatibility issues
old_config_path = "%s/config/tactic_linux-conf.xml" % data_dir
if os.path.exists(old_config_path):
new_config_path = "%s/config/tactic-conf.xml" % data_dir
shutil.move(old_config_path, new_config_path)
config_path = Config.get_config_path()
config_exists = False
if os.path.exists(config_path):
config_exists = True
# insert the plugin path to run get_asset_dir()
plugin_dir = Environment.get_plugin_dir()
sys.path.insert(0, plugin_dir)
asset_dir = Environment.get_asset_dir()
print("Asset Directory [%s]" % asset_dir)
tmp_dir = Environment.get_tmp_dir()
print("Temp Directory [%s]" % tmp_dir)
        # check whether a config file already exists. If it does,
# then don't do anything further. This is likely a previous
# installation
if config_exists:
print("Config path [%s]" % config_path)
return
else:
# if there is no config, retrieve data_dir in non-manual mode
data_dir = Environment.get_data_dir()
f = open("%s/first_run" % data_dir, 'w')
f.write("")
f.close()
return
| epl-1.0 | 8,725,499,631,576,695,000 | 31.969697 | 88 | 0.55239 | false | 3.981702 | true | false | false |
kkamkou/gitmostwanted.com | tests/unit/lib/test_bigquery.py | 1 | 2352 | from unittest import TestCase
from gitmostwanted.lib.bigquery.result import ResultJob
class LibBigQueryResultTestCase(TestCase):
def setUp(self):
pass
def test_convert_incoming_obj(self):
result = ResultJob(self.response_example())
self.assertEqual(len(result), 2)
self.assertEqual(next(result), ['29028775', 'facebook/react-native', '225'])
self.assertEqual(next(result), ['29028776', 'facebook/react-native2', '226'])
self.assertRaises(StopIteration, next, result)
def test_convert_incoming_empty_obj(self):
result = ResultJob(self.response_example_empty())
self.assertEqual(len(result), 0)
self.assertRaises(StopIteration, next, result)
def response_example_empty(self):
data = self.response_example()
data['rows'] = []
data['totalRows'] = 0
return data
def response_example(self):
return {
'cacheHit': False,
'jobComplete': True,
'jobReference': {
'jobId': 'job_123-4567',
'projectId': 'my-project-1234567890'
},
'kind': 'bigquery#queryResponse',
'rows': [
{
'f': [
{'v': '29028775'},
{'v': 'facebook/react-native'},
{'v': '225'}
]
},
{
'f': [
{'v': '29028776'},
{'v': 'facebook/react-native2'},
{'v': '226'}
]
}
],
'schema': {
'fields': [
{
'mode': 'NULLABLE',
'name': 'repo_id',
'type': 'INTEGER'
},
{
'mode': 'NULLABLE',
'name': 'repo_name',
'type': 'STRING'
},
{
'mode': 'NULLABLE',
'name': 'cnt',
'type': 'INTEGER'
}
]
},
'totalBytesProcessed': '5568164',
'totalRows': '2'
}
| mit | -4,338,712,916,941,077,500 | 30.36 | 85 | 0.39966 | false | 4.761134 | true | false | false |
jakobzhao/qq-xingyun | qqcrawler/report.py | 1 | 2335 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: [email protected]
# @website: http://yenching.org
# @organization: Harvard Kennedy School
# libraries
import socket
import smtplib
from pymongo import MongoClient
from qqcrawler.settings import EMAIL_PASSWORD
from log import *
# receiver string
# example: brief_report(settings), where settings provides 'pis', 'project', 'address' and 'port'
# funcs
def brief_report(settings):
pis = settings['pis']
project = settings['project']
address = settings['address']
port = settings['port']
sender = '[email protected]'
username = '[email protected]'
t = datetime.datetime.now().strftime('%Y-%m-%d')
pi_str = ''
for pi in pis:
pi_str += (pi + ';')
now = datetime.datetime.now()
utc_now_1 = now - datetime.timedelta(days=1)
utc_now_2 = now - datetime.timedelta(days=2)
utc_now_5 = now - datetime.timedelta(days=5)
# For post information
client = MongoClient(address, port)
db = client[project]
total_posts = db.pages.find().count()
count_1 = db.pages.find({"timestamp": {"$gt": utc_now_1}}).count()
count_2 = db.pages.find({"timestamp": {"$gt": utc_now_2}}).count()
count_5 = db.pages.find({"timestamp": {"$gt": utc_now_5}}).count()
line_2 = "Total posts: %d" % total_posts
line_3 = "Within the past 24 hours: %d collected" % count_1
line_4 = "Within the past 2 days: %d collected" % count_2
line_5 = "Within the past 5 days: %d collected" % count_5
msg = '''From: Weibo Crawler Server <[email protected]>
To: ''' + pi_str[:-1] + '''
Subject: [''' + t + '''] Daily Briefing for ''' + project.capitalize() + ''' Project
MIME-Version: 1.0
Dear PI(s),
Here is a briefing about the progress of Weibo data harvest:
''' + line_2 + '''
''' + line_3 + '''
''' + line_4 + '''
''' + line_5 + '''
--
Sent from the Weibo Cralwer Server.'''
# The actual mail send
try:
server = smtplib.SMTP()
server.connect('smtp.gmail.com', '587')
server.ehlo()
server.starttls()
server.login(username, EMAIL_PASSWORD)
server.sendmail(sender, pis, msg)
server.quit()
except socket.gaierror, e:
print str(e) + "\n error raised when sending E-mails."
| lgpl-3.0 | -3,129,869,230,584,553,500 | 27.47561 | 84 | 0.604711 | false | 3.080475 | false | false | false |
eustislab/horton | doc/conf.py | 1 | 9564 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#
# HORTON documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 11:14:50 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'breathe']
mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HORTON'
copyright = u'2011-2015, The HORTON Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def get_version_release():
# get a version string
import re, subprocess
try:
# The full version number, including alpha/beta/rc tags.
release = subprocess.check_output(['git', 'describe']).strip()
except (subprocess.CalledProcessError, OSError) as e:
# fall back to the default release
release = '2.0.0-nogit'
# Chop off at the first dash, if any, i.e. X.Y.Z
if '-' in release:
version = release.split('-')[0]
else:
version = release
return version, release
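# For example, a `git describe` output of '2.0.0-123-gabcdef' yields
# version '2.0.0' and release '2.0.0-123-gabcdef'.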
# version = short X.Y.Z, release = full thing
version, release = get_version_release()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo_only': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'horton.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HORTONdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'horton.tex', u'HORTON Documentation',
u'The HORTON Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'horton', u'HORTON Documentation',
[u'The HORTON Development Team'], 1)
]
# -- Custom HORTON-specific settings -------------------------------------------
breathe_projects = { "horton": "doxyxml" }
breathe_default_project = "horton"
pngmath_latex_preamble = r"\usepackage{color,amsmath}"
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma 1.6', '-D 120']
sys.path.append('../')
os.environ['HORTONDATA'] = '../data'
autoclass_content = "class"
autodoc_member_order = "groupwise"
autodoc_default_flags = ['members', 'undoc-members', 'inherited-members', 'show-inheritance']
def autodoc_skip_member(app, what, name, obj, skip, options):
if what=="class" and name=="__init__":
return False
if what=="class" and name=="__call__":
return False
if what=="class" and name=="__getitem__":
return False
if name.startswith("_"):
return True
return False
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect("autodoc-skip-member", autodoc_skip_member)
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.add_stylesheet("custom.css")
| gpl-3.0 | -3,263,852,963,252,831,000 | 32.795053 | 95 | 0.700544 | false | 3.699807 | false | false | false |
chipaca/snapcraft | snapcraft/internal/project_loader/_extensions/kde_neon.py | 1 | 4701 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import types and tell flake8 to ignore the "unused" List.
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
from ._extension import Extension
_ExtensionInfo = namedtuple("ExtensionInfo", "cmake_args content provider build_snaps")
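# Per-base runtime details: the content snap exposed to the app, its default
# provider snap, and the SDK snap(s) required at build time.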
_Info = dict(
core18=_ExtensionInfo(
cmake_args=None,
content="kde-frameworks-5-core18-all",
provider="kde-frameworks-5-core18",
build_snaps=["kde-frameworks-5-core18-sdk/latest/stable"],
),
core20=_ExtensionInfo(
cmake_args="-DCMAKE_FIND_ROOT_PATH=/snap/kde-frameworks-5-qt-5-15-core20-sdk/current",
content="kde-frameworks-5-qt-5-15-core20-all",
provider="kde-frameworks-5-qt-5-15-core20",
build_snaps=["kde-frameworks-5-qt-5-15-core20-sdk/latest/candidate"],
),
)
class ExtensionImpl(Extension):
"""The KDE Neon extension.
This extension makes it easy to assemble KDE based applications
using the Neon stack.
It configures each application with the following plugs:
\b
- Common Icon Themes.
- Common Sound Themes.
- The Qt5 and KDE Frameworks runtime libraries and utilities.
For easier desktop integration, it also configures each application
entry with these additional plugs:
\b
- desktop (https://snapcraft.io/docs/desktop-interface)
- desktop-legacy (https://snapcraft.io/docs/desktop-legacy-interface)
- opengl (https://snapcraft.io/docs/opengl-interface)
- wayland (https://snapcraft.io/docs/wayland-interface)
- x11 (https://snapcraft.io/docs/x11-interface)
"""
@staticmethod
def is_experimental(base: Optional[str]) -> bool:
# TODO: remove experimental once sdk is on stable
return base == "core20"
@staticmethod
def get_supported_bases() -> Tuple[str, ...]:
return ("core18", "core20")
@staticmethod
def get_supported_confinement() -> Tuple[str, ...]:
return ("strict", "devmode")
def __init__(self, *, extension_name: str, yaml_data: Dict[str, Any]) -> None:
super().__init__(extension_name=extension_name, yaml_data=yaml_data)
info = _Info[yaml_data["base"]]
self.root_snippet = {
"assumes": ["snapd2.43"], # for 'snapctl is-connected'
"plugs": {
"icon-themes": {
"interface": "content",
"target": "$SNAP/data-dir/icons",
"default-provider": "gtk-common-themes",
},
"sound-themes": {
"interface": "content",
"target": "$SNAP/data-dir/sounds",
"default-provider": "gtk-common-themes",
},
"kde-frameworks-5-plug": {
"content": info.content,
"interface": "content",
"default-provider": info.provider,
"target": "$SNAP/kf5",
},
},
"environment": {"SNAP_DESKTOP_RUNTIME": "$SNAP/kf5"},
"hooks": {
"configure": {
"plugs": ["desktop"],
"command-chain": ["snap/command-chain/hooks-configure-desktop"],
}
},
}
if info.cmake_args is not None:
self.part_snippet = {
"build-environment": [{"SNAPCRAFT_CMAKE_ARGS": info.cmake_args}]
}
self.app_snippet = {
"command-chain": ["snap/command-chain/desktop-launch"],
"plugs": ["desktop", "desktop-legacy", "opengl", "wayland", "x11"],
}
self.parts = {
"kde-neon-extension": {
"source": "$SNAPCRAFT_EXTENSIONS_DIR/desktop",
"source-subdir": "kde-neon",
"plugin": "make",
"make-parameters": ["PLATFORM_PLUG=kde-frameworks-5-plug"],
"build-packages": ["g++"],
"build-snaps": info.build_snaps,
}
}
| gpl-3.0 | -2,491,254,435,194,227,700 | 34.613636 | 94 | 0.581578 | false | 3.927318 | false | false | false |
mitodl/open-discussions | notifications/api.py | 1 | 9247 | """Notifications API"""
import logging
from django.conf import settings
from django.db.models import Q
from django.contrib.auth.models import User
from channels.models import Subscription, ChannelGroupRole, Channel
from channels.api import get_admin_api
from channels.constants import ROLE_MODERATORS
from notifications.notifiers.exceptions import (
UnsupportedNotificationTypeError,
CancelNotificationError,
)
from notifications.models import (
EmailNotification,
PostEvent,
NotificationSettings,
NOTIFICATION_TYPE_FRONTPAGE,
NOTIFICATION_TYPE_COMMENTS,
NOTIFICATION_TYPE_MODERATOR,
FREQUENCY_IMMEDIATE,
FREQUENCY_DAILY,
FREQUENCY_WEEKLY,
FREQUENCY_NEVER,
)
from notifications.notifiers import comments, frontpage, moderator_posts
from notifications import tasks
from open_discussions.utils import chunks
log = logging.getLogger()
def ensure_notification_settings(user, skip_moderator_setting=False):
"""
Populates user with notification settings
Args:
user (User): user to create settings for
skip_moderator_setting (boolean): Skip moderator notification creation
"""
existing_notification_types = NotificationSettings.objects.filter(
user=user
).values_list("notification_type", flat=True)
if NOTIFICATION_TYPE_FRONTPAGE not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
defaults={"trigger_frequency": FREQUENCY_DAILY},
)
if NOTIFICATION_TYPE_COMMENTS not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_COMMENTS,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
if not skip_moderator_setting:
for channel_group_role in ChannelGroupRole.objects.filter(
group__user=user, role=ROLE_MODERATORS
):
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel_group_role.channel,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
def attempt_send_notification_batch(notification_settings_ids):
"""
Attempts to send notification for the given batch of ids
Args:
notification_settings_ids (list of int): list of NotificationSettings.ids
"""
notification_settings = NotificationSettings.objects.filter(
id__in=notification_settings_ids
)
for notification_setting in notification_settings:
try:
notifier = frontpage.FrontpageDigestNotifier(notification_setting)
notifier.attempt_notify()
except: # pylint: disable=bare-except
log.exception(
"Error attempting notification for user %s", notification_setting.user
)
def get_daily_frontpage_settings_ids():
"""Returns daily frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_DAILY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def get_weekly_frontpage_settings_ids():
"""Returns weekly frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_WEEKLY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def _get_notifier_for_notification(notification):
"""
Get the notifier for the notification's type
Args:
notification (NotificationBase): the notification to get a notifier for
Returns:
Notifier: instance of the notifier to use
"""
if notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
channel_api = get_admin_api()
event = PostEvent.objects.get(email_notification=notification)
channel_name = channel_api.get_post(event.post_id).subreddit.display_name
notification_settings = NotificationSettings.objects.get(
user=notification.user,
notification_type=notification.notification_type,
channel__name=channel_name,
)
else:
notification_settings = NotificationSettings.objects.get(
user=notification.user, notification_type=notification.notification_type
)
if notification.notification_type == NOTIFICATION_TYPE_FRONTPAGE:
return frontpage.FrontpageDigestNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_COMMENTS:
return comments.CommentNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
return moderator_posts.ModeratorPostsNotifier(notification_settings)
else:
raise UnsupportedNotificationTypeError(
"Notification type '{}' is unsupported".format(
notification.notification_type
)
)
def send_unsent_email_notifications():
"""
Send all notifications that haven't been sent yet
"""
for notification_ids in chunks(
EmailNotification.objects.filter(state=EmailNotification.STATE_PENDING)
.exclude(notification_type=NOTIFICATION_TYPE_FRONTPAGE)
.values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_email_notification_batch.delay(notification_ids)
for notification_ids in chunks(
EmailNotification.objects.filter(
state=EmailNotification.STATE_PENDING,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
).values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_frontpage_email_notification_batch.delay(notification_ids)
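# Pending notifications are flipped to STATE_SENDING before the async batch task
# is queued, so a concurrent run of this function will not pick them up again.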
def send_email_notification_batch(notification_ids):
"""
Sends a batch of notifications
Args:
notification_ids (list of int): notification ids to send
"""
for notification in EmailNotification.objects.filter(id__in=notification_ids):
try:
notifier = _get_notifier_for_notification(notification)
notifier.send_notification(notification)
except CancelNotificationError:
log.debug("EmailNotification canceled: %s", notification.id)
notification.state = EmailNotification.STATE_CANCELED
notification.save()
except: # pylint: disable=bare-except
log.exception("Error sending notification %s", notification)
def send_comment_notifications(post_id, comment_id, new_comment_id):
"""
Sends notifications for a reply to a given post notification
Args:
post_id (str): base36 post id
comment_id (str): base36 comment id
new_comment_id (str): base36 comment id of the new comment
"""
for subscription in (
Subscription.objects.filter(post_id=post_id)
.filter(Q(comment_id=comment_id) | Q(comment_id=None))
.distinct("user")
.iterator()
):
try:
notification_settings = NotificationSettings.objects.get(
user_id=subscription.user_id,
notification_type=NOTIFICATION_TYPE_COMMENTS,
)
except NotificationSettings.DoesNotExist:
log.exception(
"NotificationSettings didn't exist for subscription %s", subscription.id
)
continue
notifier = comments.CommentNotifier(notification_settings)
notifier.create_comment_event(subscription, new_comment_id)
def send_moderator_notifications(post_id, channel_name):
"""
Sends post notifications to channel moderators
Args:
post_id (str): base36 post id
channel_name (str): channel_name
"""
channel_api = get_admin_api()
for moderator in channel_api.list_moderators(channel_name):
self_user = User.objects.get(username=moderator.name)
try:
notification_setting = NotificationSettings.objects.get(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel__name=channel_name,
)
except NotificationSettings.DoesNotExist:
channel = Channel.objects.get(name=channel_name)
notification_setting = NotificationSettings.objects.create(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel,
trigger_frequency=FREQUENCY_NEVER,
)
notifier = moderator_posts.ModeratorPostsNotifier(notification_setting)
notifier.create_moderator_post_event(self_user, post_id)
| bsd-3-clause | 3,742,379,848,601,232,400 | 34.293893 | 88 | 0.670704 | false | 4.380388 | false | false | false |
jaredscarr/django-imager | imagersite/imagersite/settings.py | 1 | 4051 | """
Django settings for imagersite project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# from configurations import Configuration
# class Dev(Configuration):
# DEBUG = True
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'not the secret')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG')
THUMBNAIL_DEBUG = os.environ.get('DJANGO_DEBUG')
ALLOWED_HOSTS = [
'.us-west-2.compute.amazonaws.com',
'localhost',
]
# Application definition
INSTALLED_APPS = [
'sorl.thumbnail',
'imager_images',
'imager_profile',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'imagersite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'imagersite', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'imagersite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default=os.environ.get('DATABASE_URL')
)
}
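# dj_database_url expands the DATABASE_URL environment variable
# (e.g. postgres://user:password@host:5432/dbname) into Django's DATABASES setting.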
# caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'thumbnails',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# security
# CSRF_COOKIE_SECURE = True
# SESSION_COOKIE_SECURE = True
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'imagersite', 'static'), ]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# activation
ACCOUNT_ACTIVATION_DAYS = 7
# console backend
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| mit | 2,117,890,535,857,599,700 | 24.639241 | 91 | 0.691681 | false | 3.489233 | false | false | false |
xswxm/nrf24-injection | Experiments/Scripts/ping.py | 1 | 1628 | #!/usr/bin/env python2
'''
Author: xswxm
Blog: xswxm.com
This script measures the number of successful pings per second.
e.g.: sudo python ping.py -l -a 61:8E:9C:CD:03 -f 74 -t 0 -r 0
'''
import sys, time, threading
from lib import common
common.init_args('./ping.py')
common.parser.add_argument('-a', '--address', type=str, help='Address to sniff, following as it changes channels', required=True)
common.parser.add_argument('-f', '--channel', type=int, help='RF channel', default=0)
common.parse_and_init()
channel = common.args.channel
# Total number of payloads sent
count = 0
# Parse the prefix address
address = common.args.address.replace(':', '').decode('hex')[::-1][:5]
# Put the radio in sniffer mode (ESB w/o auto ACKs)
common.radio.enter_sniffer_mode(address)
# Set channel
common.radio.set_channel(channel)
stop_event = threading.Event()
stop_event.set()
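# The display thread below runs while this event is set; it is cleared on
# KeyboardInterrupt to stop the thread.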
# Update the display every 100 milliseconds
def display():
global count, stop_event
# To record the number of payloads sent for every 100 milliseconds
pings = [0]*10
# Highest rate
max_rate = 0
while stop_event.isSet():
pings = pings[1:] + [count]
rate = pings[-1] - pings[0]
if max_rate < rate: max_rate = rate
msg = 'Maximum Rate: {0:>4}pks/s Current Rate: {1:>4}pks/s'.format(max_rate, rate)
sys.stdout.write('\r'+msg)
sys.stdout.flush()
time.sleep(0.1)
if __name__ == "__main__":
t = threading.Thread(target=display,args=())
t.start()
try:
while True:
if common.radio.transmit_payload(common.ping_payload, common.ack_timeout, common.retries):
count += 1
except KeyboardInterrupt:
stop_event.clear() | gpl-3.0 | 6,024,926,676,848,217,000 | 28.089286 | 129 | 0.677518 | false | 3.100952 | false | false | false |
REGOVAR/Regovar | regovar/core/managers/pipeline_manager.py | 1 | 12599 | #!env/python3
# coding: utf-8
try:
import ipdb
except ImportError:
pass
import os
import shutil
import json
import zipfile
import datetime
import time
import uuid
import subprocess
import requests
from config import *
from core.framework.common import *
from core.framework.postgresql import execute
from core.model import *
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PIPELINE MANAGER
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class PipelineManager:
def __init__(self):
pass
def list(self):
"""
List all pipelines with minimum of data
"""
sql = "SELECT id, name, type, status, description, version, image_file_id, starred, installation_date, manifest, documents FROM pipeline ORDER BY id"
result = []
for res in execute(sql):
result.append({
"id": res.id,
"name": res.name,
"description": res.description,
"type": res.type,
"status": res.status,
"version": res.version,
"image_file_id": res.image_file_id,
"starred": res.starred,
"installation_date": res.installation_date.isoformat(),
"manifest": res.manifest,
"documents": res.documents
})
return result
def get(self, fields=None, query=None, order=None, offset=None, limit=None, depth=0):
"""
Generic method to get pipelines according provided filtering options
"""
if not isinstance(fields, dict):
fields = None
if query is None:
query = {}
if order is None:
order = "name, installation_date desc"
if offset is None:
offset = 0
if limit is None:
limit = RANGE_MAX
pipes = Session().query(Pipeline).filter_by(**query).order_by(order).limit(limit).offset(offset).all()
for p in pipes: p.init(depth)
return pipes
def install_init (self, name, metadata={}):
pipe = Pipeline.new()
pipe.name = name
pipe.status = "initializing"
pipe.save()
if metadata and len(metadata) > 0:
pipe.load(metadata)
log('core.PipeManager.register : New pipe registered with the id {}'.format(pipe.id))
return pipe
def install_init_image_upload(self, filepath, file_size, pipe_metadata={}):
"""
Initialise a pipeline installation.
To use if the image has to be uploaded to the server.
Create an entry for the pipeline and the file (image that will be uploaded) in the database.
Return the Pipeline and the File objects created
This method shall be used to init a resumable upload of a pipeline
(the pipeline/image are not yet installed and available, but we need to manipulate them)
"""
from core.core import core
pfile = core.files.upload_init(filepath, file_size)
pipe = self.install_init(filepath, pipe_metadata)
pipe.image_file_id = pfile.id
pipe.save()
return pipe, pfile
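# Typical flow (sketch): call install_init_image_upload() to create the Pipeline
# and File records, upload the image data against the returned File, then call
# install(pipe.id) once the file status is "uploaded" or "checked".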
async def install_init_image_url(self, url, pipe_metadata={}):
"""
Initialise a pipeline installation.
To use if the image has to be retrieved via a url.
Create an entry for the pipeline and the file (image) in the database.
Async method as the download starts immediately, followed by the installation when it's done
Return the Pipeline object ready to be used
"""
raise NotImplementedError("TODO")
def install_init_image_local(self, filepath, move=False, pipe_metadata={}):
"""
Initialise a pipeline installation.
To use if the image has to be retrieved from the local server.
Create an entry for the pipeline and the file (image) in the database.
Copy the local file into dedicated directory and start the installation of the Pipeline
Return the Pipeline object ready to be used
"""
from core.core import core
pfile = core.files.from_local(filepath, move)
pipe = self.install_init(os.path.basename(filepath), pipe_metadata)
# FIXME: sometimes getting a sqlalchemy "is not bound to a Session" error here;
# it is unclear why it occurs, and only intermittently
check_session(pfile)
check_session(pipe)
pipe.image_file_id = pfile.id
pipe.save()
return pipe
def install_init_image(self, file_id, pipe_metadata={}):
"""
Initialise a pipeline installation.
To use if the image has already been uploaded to the local server via the regovar file api.
Create an entry for the pipeline in the database.
Return the Pipeline object ready to be used
"""
from core.core import core
pfile = File.from_id(file_id)
if pfile:
pipe = self.install_init(os.path.basename(pfile.path), pipe_metadata)
pipe.image_file_id = file_id
pipe.save()
return pipe
return None
def check_manifest(self, manifest):
"""
Check that manifest (json) is valid and return the full version completed
with default values if needed
"""
missing = ""
for k in ["name", "version"]:
if k not in manifest.keys():
missing += k + ", "
if missing != "":
missing = missing[:-2]
raise RegovarException("FAILLED Checking validity of manifest (missing : {})".format(missing))
# 2- Default values for optional fields in the manifest
default = {
"description": "",
"type": "job",
"contacts": [],
"regovar_db_access": False,
"inputs": "/pipeline/inputs",
"outputs": "/pipeline/outputs",
"databases": "/pipeline/databases",
"logs": "/pipeline/logs"
}
for k in default.keys():
if k not in manifest.keys():
manifest[k] = default[k]
# 3- check type
if manifest["type"] not in ["job", "importer", "exporter", "reporter"]:
raise RegovarException("FAILLED Checking validity of manifest (type '{}' not supported)".format(manifest["type"]))
log('Validity of manifest checked')
return manifest
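# Illustrative example (field values are assumptions, not from a real package):
# a manifest of {"name": "my-pipe", "version": "1.0.0"} passes the check above and
# is returned with type="job" plus default description, contacts and mount points.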
def install(self, pipeline_id, asynch=True):
"""
Start the installation of the pipeline. (done in another thread)
The initialization shall be done (image ready to be used)
"""
from core.core import core
pipeline = Pipeline.from_id(pipeline_id, 1)
if not pipeline :
raise RegovarException("Pipeline not found (id={}).".format(pipeline_id))
if pipeline.status != "initializing":
raise RegovarException("Pipeline status ({}) is not \"initializing\". Cannot perform another installation.".format(pipeline.status))
if pipeline.image_file and pipeline.image_file.status not in ["uploaded", "checked"]:
raise RegovarException("Wrong pipeline image (status={}).".format(pipeline.image_file.status))
if not pipeline.image_file or pipeline.image_file.status in ["uploaded", "checked"]:
if asynch:
run_async(self.__install, pipeline)
else:
pipeline = self.__install(pipeline)
return pipeline
def __install(self, pipeline):
from core.core import core
# Dezip pirus package in the pirus pipeline directory
root_path = os.path.join(PIPELINES_DIR, str(pipeline.id))
log('Installation of the pipeline package : ' + root_path)
os.makedirs(root_path)
os.chmod(pipeline.image_file.path, 0o777)
# TODO: Check zip integrity and security before extracting it
# see python zipfile official doc
with zipfile.ZipFile(pipeline.image_file.path,"r") as zip_ref:
zip_ref.extractall(root_path)
# check package tree
# find root folder
files = [i.filename for i in zip_ref.infolist()]
for f in files:
if f.endswith("manifest.json"): break
zip_root = os.path.dirname(f)
# remove intermediate folder
if zip_root != "":
zip_root = os.path.join(root_path, zip_root)
for filename in os.listdir(zip_root):
shutil.move(os.path.join(zip_root, filename), os.path.join(root_path, filename))
os.rmdir(zip_root)
# Load manifest
try:
log(os.path.join(root_path, "manifest.json"))
with open(os.path.join(root_path, "manifest.json"), "r") as f:
data = f.read()
log(data)
# Fix common parsing problem regarding json syntaxe
data = data.replace("False", "false")
data = data.replace("True", "true")
manifest = json.loads(data)
manifest = self.check_manifest(manifest)
pipeline.developpers = manifest.pop("contacts")
pipeline.manifest = manifest
# list documents available
pipeline.documents = {
"about": os.path.join(root_path, "doc/about.html"),
"help": os.path.join(root_path, "doc/help.html"),
"icon": os.path.join(root_path, "doc/icon.png"),
"icon2": os.path.join(root_path, "doc/icon.jpg"),
"form": os.path.join(root_path, "form.json"),
"license":os.path.join(root_path, "LICENSE"),
"readme": os.path.join(root_path, "README")
}
for k in pipeline.documents.keys():
if not os.path.exists(pipeline.documents[k]):
pipeline.documents[k] = None
p = pipeline.documents.pop("icon2")
if not pipeline.documents["icon"]:
pipeline.documents["icon"] = p
pipeline.load(manifest)
pipeline.save()
except Exception as ex:
pipeline.status = "error"
pipeline.save()
raise RegovarException("Unable to open and read manifest.json. The pipeline package is wrong or corrupt.", exception=ex)
# Update and save pipeline status
pipeline.type = manifest["type"]
pipeline.installation_date = datetime.datetime.now()
pipeline.status = "installing"
pipeline.save()
# Install pipeline
result = core.container_manager.install_pipeline(pipeline)
return result
def delete(self, pipeline_id, asynch=True):
"""
Start the uninstallation of the pipeline. (done in another thread)
Remove the image file if it exists.
"""
from core.core import core
result = None
pipeline = Pipeline.from_id(pipeline_id, 1)
if pipeline:
result = pipeline.to_json()
# Clean container
try:
if asynch:
run_async(self.__delete, pipeline)
else:
self.__delete(pipeline)
except Exception as ex:
war("core.PipelineManager.delete : Container manager failed to delete the container with id {}.".format(pipeline.id))
try:
# Clean filesystem
shutil.rmtree(pipeline.path, True)
# Clean DB
core.files.delete(pipeline.image_file_id)
Pipeline.delete(pipeline.id)
except Exception as ex:
raise RegovarException("core.PipelineManager.delete : Unable to delete the pipeline's pirus data for the pipeline {}.".format(pipeline.id), ex)
return result
def __delete(self, pipeline):
from core.core import core
try:
core.container_manager.uninstall_pipeline(pipeline)
except Exception as ex:
raise RegovarException("Error occured during uninstallation of the pipeline. Uninstallation aborded.", ex)
| agpl-3.0 | -2,622,789,942,192,497,000 | 35.625 | 159 | 0.560124 | false | 4.442525 | false | false | false |
asanfilippo7/osf.io | website/addons/wiki/model.py | 1 | 8106 | # -*- coding: utf-8 -*-
import datetime
import functools
import logging
from bleach import linkify
from bleach.callbacks import nofollow
from website.models import NodeLog
import markdown
from markdown.extensions import codehilite, fenced_code, wikilinks
from modularodm import fields
from framework.forms.utils import sanitize
from framework.guid.model import GuidStoredObject
from framework.mongo import utils as mongo_utils
from website import settings
from website.addons.base import AddonNodeSettingsBase
from website.addons.wiki import utils as wiki_utils
from website.addons.wiki.settings import WIKI_CHANGE_DATE
from website.project.commentable import Commentable
from website.project.signals import write_permissions_revoked
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
)
logger = logging.getLogger(__name__)
class AddonWikiNodeSettings(AddonNodeSettingsBase):
complete = True
has_auth = True
is_publicly_editable = fields.BooleanField(default=False, index=True)
def set_editing(self, permissions, auth=None, log=False):
"""Set the editing permissions for this node.
:param auth: All the auth information including user, API key
:param bool permissions: True = publicly editable
:param bool log: Whether to add a NodeLog for the privacy change;
if true the node object is also saved
"""
node = self.owner
if permissions and not self.is_publicly_editable:
if node.is_public:
self.is_publicly_editable = True
else:
raise NodeStateError('Private components cannot be made publicly editable.')
elif not permissions and self.is_publicly_editable:
self.is_publicly_editable = False
else:
raise NodeStateError('Desired permission change is the same as current setting.')
if log:
node.add_log(
action=(NodeLog.MADE_WIKI_PUBLIC
if self.is_publicly_editable
else NodeLog.MADE_WIKI_PRIVATE),
params={
'project': node.parent_id,
'node': node._primary_key,
},
auth=auth,
save=False,
)
node.save()
self.save()
def after_register(self, node, registration, user, save=True):
"""Copy wiki settings to registrations."""
clone = self.clone()
clone.owner = registration
if save:
clone.save()
return clone, None
def after_set_privacy(self, node, permissions):
"""
:param Node node:
:param str permissions:
:return str: Alert message
"""
if permissions == 'private':
if self.is_publicly_editable:
self.set_editing(permissions=False, log=False)
return (
'The wiki of {name} is now only editable by write contributors.'.format(
name=node.title,
)
)
def to_json(self, user):
return {}
@write_permissions_revoked.connect
def subscribe_on_write_permissions_revoked(node):
# Migrate every page on the node
for wiki_name in node.wiki_private_uuids:
wiki_utils.migrate_uuid(node, wiki_name)
def build_wiki_url(node, label, base, end):
return '/{pid}/wiki/{wname}/'.format(pid=node._id, wname=label)
def validate_page_name(value):
value = (value or '').strip()
if not value:
raise NameEmptyError('Page name cannot be blank.')
if value.find('/') != -1:
raise NameInvalidError('Page name cannot contain forward slashes.')
if len(value) > 100:
raise NameMaximumLengthError('Page name cannot be greater than 100 characters.')
return True
def render_content(content, node):
html_output = markdown.markdown(
content,
extensions=[
wikilinks.WikiLinkExtension(
configs=[
('base_url', ''),
('end_url', ''),
('build_url', functools.partial(build_wiki_url, node))
]
),
fenced_code.FencedCodeExtension(),
codehilite.CodeHiliteExtension(
[('css_class', 'highlight')]
)
]
)
# linkify gets called after santize, because we're adding rel="nofollow"
# to <a> elements - but don't want to allow them for other elements.
sanitized_content = sanitize(html_output, **settings.WIKI_WHITELIST)
return sanitized_content
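# Example (illustrative): render_content('[[Home]]', node) turns the wikilink into
# an <a> element whose href comes from build_wiki_url, i.e. '/<node._id>/wiki/Home/'.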
class NodeWikiPage(GuidStoredObject, Commentable):
_id = fields.StringField(primary=True)
page_name = fields.StringField(validate=validate_page_name)
version = fields.IntegerField()
date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
is_current = fields.BooleanField()
content = fields.StringField(default='')
user = fields.ForeignField('user')
node = fields.ForeignField('node')
@property
def deep_url(self):
return '{}wiki/{}/'.format(self.node.deep_url, self.page_name)
@property
def url(self):
return '{}wiki/{}/'.format(self.node.url, self.page_name)
@property
def rendered_before_update(self):
return self.date < WIKI_CHANGE_DATE
# For Comment API compatibility
@property
def target_type(self):
"""The object "type" used in the OSF v2 API."""
return 'wiki'
@property
def root_target_page(self):
"""The comment page type associated with NodeWikiPages."""
return 'wiki'
@property
def is_deleted(self):
key = mongo_utils.to_mongo_key(self.page_name)
return key not in self.node.wiki_pages_current
def belongs_to_node(self, node_id):
"""Check whether the wiki is attached to the specified node."""
return self.node._id == node_id
def get_extra_log_params(self, comment):
return {'wiki': {'name': self.page_name, 'url': comment.get_comment_page_url()}}
# used by django and DRF - use v1 url since there are no v2 wiki routes
def get_absolute_url(self):
return '{}wiki/{}/'.format(self.node.absolute_url, self.page_name)
def html(self, node):
"""The cleaned HTML of the page"""
sanitized_content = render_content(self.content, node=node)
try:
return linkify(
sanitized_content,
[nofollow, ],
)
except TypeError:
logger.warning('Returning unlinkified content.')
return sanitized_content
def raw_text(self, node):
""" The raw text of the page, suitable for using in a test search"""
return sanitize(self.html(node), tags=[], strip=True)
def get_draft(self, node):
"""
Return most recently edited version of wiki, whether that is the
last saved version or the most recent sharejs draft.
"""
db = wiki_utils.share_db()
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, self.page_name)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
if doc_item:
sharejs_version = doc_item['_v']
sharejs_timestamp = doc_item['_m']['mtime']
sharejs_timestamp /= 1000 # Convert to appropriate units
sharejs_date = datetime.datetime.utcfromtimestamp(sharejs_timestamp)
if sharejs_version > 1 and sharejs_date > self.date:
return doc_item['_data']
return self.content
def save(self, *args, **kwargs):
rv = super(NodeWikiPage, self).save(*args, **kwargs)
if self.node:
self.node.update_search()
return rv
def rename(self, new_name, save=True):
self.page_name = new_name
if save:
self.save()
def to_json(self):
return {}
| apache-2.0 | 2,979,544,810,616,633,300 | 30.297297 | 93 | 0.613003 | false | 4.127291 | false | false | false |