code (stringlengths, 2 to 1.05M) | repo_name (stringlengths, 5 to 104) | path (stringlengths, 4 to 251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2 to 1.05M) |
---|---|---|---|---|---|
import pyganim
import pygame
import os
from entity import Entity
from vector import Vector
from statemachine import StateMachine, State
from random import randint
from constants import SCREEN_SIZE, END_LEVEL_EVENT
class Tank(Entity):
SCORE = 1000
SPEED = 300
TANK_IMAGE = os.path.join('assets', 'images', 'tank.png')
def __init__(self,*args, **kwargs):
images = pyganim.getImagesFromSpriteSheet(self.TANK_IMAGE, rows=1, cols=4, rects=[])
frames = list(zip(images[:3], [200, 200, 200]))
self.animObj = pyganim.PygAnimation(frames)
self.animObj.play()
kwargs['image'] = frames[0][0]
kwargs['life'] = 20
kwargs['speed'] = Tank.SPEED
super(Tank, self).__init__(*args, **kwargs)
def build_brain(self):
brain = StateMachine()
shooting_state = TankStateShooting(self)
waiting_state = TankStateWaiting(self)
dodging_state = TankStateDodging(self)
brain.add_state(shooting_state)
brain.add_state(waiting_state)
brain.add_state(dodging_state)
brain.set_state('waiting')
self._brain = brain
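# Editor's note (added; not in the original file): taken together with the State
# subclasses below, the brain cycles waiting -> dodging -> shooting -> waiting:
# TankStateWaiting switches to 'dodging' after WAIT seconds, TankStateDodging
# switches to 'shooting' once the tank reaches its destination, and
# TankStateShooting switches back to 'waiting' when its spark has been fired.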
def render(self, surface):
location = self.get_location()
self.animObj.blit(surface, (location.x, location.y))
def flip(self):
if not self._is_flip:
self._is_flip = True
self.animObj.flip(True, False)
def reverse_flip(self):
if self._is_flip:
self._is_flip = False
self.animObj.flip(True, False)
def shoot_spark(self):
self.spark = Spark(self.world)
location = self.get_location()
self.spark.set_location(location + Vector(63, 106))
self.world.add_entity(self.spark, ('enemy_shots',))
def kill(self):
e = pygame.event.Event(END_LEVEL_EVENT)
pygame.event.post(e)
super(Tank, self).kill()
class TankStateDodging(State):
def __init__(self, tank):
super(TankStateDodging, self).__init__('dodging')
self.tank = tank
self._last_time_collided = None
def set_destination(self):
sara_rect = self.tank.world.get_player().get_rect()
tank_rect = self.tank.get_rect()
tank_rect.centery = sara_rect.centery
self.tank.set_destination(Vector(*tank_rect.topleft))
def do_actions(self):
# just move once and then do nothing
pass
def check_conditions(self, time_passed):
if self.tank.get_location() == self.tank.get_destination():
return 'shooting'
return None
def entry_actions(self):
self.set_destination()
class TankStateShooting(State):
def __init__(self, tank):
super(TankStateShooting, self).__init__('shooting')
self.tank = tank
self.has_shot = False
def entry_actions(self):
self.tank.shoot_spark()
def check_conditions(self, time_passed):
if self.tank.spark.shoot:
return 'waiting'
return None
class TankStateWaiting(State):
WAIT = 1 # second
def __init__(self, tank):
super(TankStateWaiting, self).__init__('waiting')
self.tank = tank
self.time_passed = 0
def check_conditions(self, time_passed):
self.time_passed += time_passed
if self.time_passed > self.WAIT:
self.time_passed = 0
return 'dodging'
SPARK_IMAGE_FILENAME = os.path.join('assets', 'images', 'redspark.png')
class Spark(Entity):
LOADING_TIME = 0.5
def __init__(self, world):
images = pyganim.getImagesFromSpriteSheet(SPARK_IMAGE_FILENAME, rows=1, cols=2, rects=[])
frames = list(zip(images, [100, 100]))
self.animObj = pyganim.PygAnimation(frames)
self.animObj.play()
sprite = pygame.image.load(SPARK_IMAGE_FILENAME).convert_alpha()
super(Spark, self).__init__(world, 'spark', sprite)
self.loading = 0
self.shoot = False
def render(self, surface):
location = self.get_location()
self.animObj.blit(surface, (location.x, location.y))
def flip(self):
if not self._is_flip:
self._is_flip = True
self.animObj.flip(True, False)
def reverse_flip(self):
if self._is_flip:
self._is_flip = False
self.animObj.flip(True, False)
def process(self, time_passed):
self.loading += time_passed
if self.loading > Spark.LOADING_TIME and not self.get_destination():
self.shoot = True
self.set_speed(1200)
x = 0 - self.get_width() if self.is_flip() else SCREEN_SIZE[0] + self.get_width()
self.set_destination(Vector(
x,
self.get_location().y
))
super(Spark, self).process(time_passed)
| juanjosegzl/learningpygame | tank.py | Python | gpl-3.0 | 4,813 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
def test_methods_exist(self):
v = VaultLib('ansible')
slots = ['is_encrypted',
'encrypt',
'decrypt',
'_format_output',
'_split_header',]
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_is_encrypted(self):
v = VaultLib(None)
assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
assert v.is_encrypted(data), "encryption check on headered text failed"
def test_format_output(self):
v = VaultLib('ansible')
v.cipher_name = "TEST"
sensitive_data = "ansible"
data = v._format_output(sensitive_data)
lines = data.split(b'\n')
assert len(lines) > 1, "failed to properly add header"
header = to_unicode(lines[0])
assert header.endswith(';TEST'), "header does not end with cipher name"
header_parts = header.split(';')
assert len(header_parts) == 3, "header has the wrong number of parts"
assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
assert header_parts[1] == v.b_version, "header version is incorrect"
assert header_parts[2] == 'TEST', "header does not end with cipher name"
def test_split_header(self):
v = VaultLib('ansible')
data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
rdata = v._split_header(data)
lines = rdata.split(b'\n')
assert lines[0] == b"ansible"
assert v.cipher_name == 'TEST', "cipher name was not set"
assert v.b_version == "9.9"
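# Note added for clarity (not part of the original test suite): the header being
# exercised here has the shape "$ANSIBLE_VAULT;<version>;<cipher_name>" on the
# first line, with the payload on the following lines, e.g.:
#
#   $ANSIBLE_VAULT;9.9;TEST
#   616e7369626c65
#
# _split_header() is expected to strip that first line and record the cipher name
# and version on the VaultLib instance, which is what the assertions check.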
def test_encrypt_decrypt_aes(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = u'AES'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES256'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to encrypt data with a header"
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
dec_data = v.decrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to decrypt data without a header"
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
| ilya-epifanov/ansible | test/units/parsing/vault/test_vault.py | Python | gpl-3.0 | 5,685 |
#!/usr/bin/python3
'''
This is a demo of itertools.groupby
'''
import itertools # for groupby
def indentation(line):
return len(line) - len(line.lstrip())
data = '''this is no indent 1
this is no indent 2
this is 1 indent 1
this is 1 indent 2
this is no indent 3
this is no indent 4
this is 1 indent 3
this is 1 indent 4'''
for (indent, paragraph) in itertools.groupby(data.split('\n'), key=indentation):
print('%d-spaced paragraph' % indent)
for line in paragraph:
print(line.strip())
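# Illustrative note (added; not in the original demo): groupby() only merges
# *consecutive* items that share a key, so each run of equally indented lines
# becomes its own paragraph. Assuming the indented lines in `data` keep their
# original leading spaces, a minimal sketch of the behaviour is:
#
#   for indent, para in itertools.groupby(['a', ' b', ' c', 'd'], key=indentation):
#       print(indent, list(para))   # -> 0 ['a'], then 1 [' b', ' c'], then 0 ['d']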
| nonZero/demos-python | src/examples/short/iteration/group_by.py | Python | gpl-3.0 | 517 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
from ..auxiliary_classes.Brushes import Brushes
from ..auxiliary_classes.FalloffCurve import FalloffCurve
from ..auxiliary_classes.Octree import Octree
from ..auxiliary_classes.RainbowRamp import RainbowRamp
from ..auxiliary_classes.VertexProperties import VertexProperties
from ..auxiliary_classes.View3DGraphic import View3DGraphic
class MeshBrushProps(bpy.types.PropertyGroup):
data_path =(
"user_preferences.addons['{0}'].preferences.mesh_brush"
).format(__package__.split(".")[0])
# Brush Settings
iterations = bpy.props.IntProperty(
name = "Iterations",
description = "Number of smoothing iterations",
default = 1,
min = 1,
soft_max = 25
)
position_x = bpy.props.IntProperty()
position_y = bpy.props.IntProperty()
radius = bpy.props.IntProperty(
name = "Radius",
description = "Radius, in pixels, of the mesh brush",
default = 75,
min = 1,
soft_max = 250
)
spacing = bpy.props.IntProperty(
name = "Spacing",
description =\
"Distance between dabs as a percentage of the brush's radius",
default = 25,
min = 1,
soft_max = 100,
subtype = 'PERCENTAGE'
)
# Display Properties
brush_is_visible = bpy.props.BoolProperty(
name = "Show Brush",
description = "Show/hide the brush.",
default = True
)
brush_influence_is_visible = bpy.props.BoolProperty(
name = "Show Influence",
description = "Show/hide the brush's influence.",
default = False
)
interior_color = bpy.props.FloatVectorProperty(
name = "Interior Color",
description = "Color of the brush's interior",
default = (1.0, 0.522, 0, 0.1),
min = 0,
max = 1,
subtype = 'COLOR',
size = 4
)
outline_color = bpy.props.FloatVectorProperty(
name = "Outline Color",
description = "Color of the brush's outline",
default = (1.0, 0.522, 0, 1.0),
min = 0,
max = 1,
subtype = 'COLOR',
size = 4
)
outline_thickness = bpy.props.IntProperty(
name = "Outline Thickness",
description = "Thickness of the brush's outline",
default = 1,
min = 1,
soft_max = 10
)
# Falloff
falloff_profile = bpy.props.EnumProperty(
name = "Falloff Curve",
description = "The intensity profile of the brush",
default = 'SMOOTH',
items = [
('SMOOTH', "Profile", "Smooth", 'SMOOTHCURVE', 0),
('ROUND', "Profile", "Round", 'SPHERECURVE', 1),
('ROOT', "Profile", "Root", 'ROOTCURVE', 2),
('SHARP', "Profile", "Sharp", 'SHARPCURVE', 3),
('LINEAR', "Profile", "Linear", 'LINCURVE', 4),
('CONSTANT', "Profile", "Constant", 'NOCURVE', 5),
('RANDOM', "Profile", "Random", 'RNDCURVE', 6)
]
)
# Options
backfacing_are_ignored = bpy.props.BoolProperty(
name = "Ignore Backfacing",
description =\
"Ignore vertices with normals pointing away from the brush.",
default = False
)
boundary_is_locked = bpy.props.BoolProperty(
name = "Lock Boundary",
description = "Lock vertices that are on the boundary of the mesh.",
default = False
)
selection_is_isolated = bpy.props.BoolProperty(
name = "Isolate Selection",
description = (
"Isolate the selected faces from the rest of the mesh object, " +
"and lock the selection border."
),
default = False
)
# Symmetry
radial_count = bpy.props.IntProperty(
name = "Radial Count",
description =\
"Number of radially symmetrical brushes per axis of symmetry",
default = 1,
min = 1,
soft_max = 12
)
symmetry_type = bpy.props.EnumProperty(
name = "Symmetry Type",
description =\
"The type of symmetry to employ in modifying the mesh object",
default = 'MIRROR',
items = [
('MIRROR', "Mirror", "Mirror across planes of symmetry."),
('RADIAL', "Radial", "Rotate around axes of symmetry.")
]
)
x_axis_symmetry_is_enabled = bpy.props.BoolProperty(
name = "X Symmetry",
description =(
"Enable/disable symmetrical modification of the mesh object " +
"using the x-axis of its local space axes."
),
default = False
)
y_axis_symmetry_is_enabled = bpy.props.BoolProperty(
name = "Y Symmetry",
description =(
"Enable/disable symmetrical modification of the mesh object " +
"using the y-axis of its local space axes."
),
default = False
)
z_axis_symmetry_is_enabled = bpy.props.BoolProperty(
name = "Z Symmetry",
description =(
"Enable/disable symmetrical modification of the mesh object " +
"using the z-axis of its local space axes."
),
default = False
)
# UI Visibility
display_props_ui_is_visible = bpy.props.BoolProperty(
name = "Display Properties UI Visibility",
description = "Show/hide the Display Properties UI.",
default = False
)
falloff_ui_is_visible = bpy.props.BoolProperty(
name = "Curve UI Visibility",
description = "Show/hide the Curve UI.",
default = False
)
options_ui_is_visible = bpy.props.BoolProperty(
name = "Options UI Visibility",
description = "Show/hide the Options UI.",
default = False
)
settings_ui_is_visible = bpy.props.BoolProperty(
name = "Settings UI Visibility",
description = "Show/hide the Settings UI.",
default = False
)
symmetry_ui_is_visible = bpy.props.BoolProperty(
name = "Symmetry UI Visibility",
description = "Show/hide the Symmetry UI.",
default = False
)
# Persistent Objects
brushes = Brushes()
color_ramp = RainbowRamp()
falloff_curve = FalloffCurve()
brush_graphic = View3DGraphic()
brush_influence_graphic = View3DGraphic()
brush_strength_graphic = View3DGraphic()
octree = Octree(max_indices_per_leaf = 50)
redo_stack = list()
undo_stack = list() | fedackb/surface-constraint-tools | properties/MeshBrushProps.py | Python | gpl-3.0 | 7,230 |
# -*- coding: utf-8 -*-
#
# IITB Unsupervised Transliterator documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 9 17:29:09 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'IITB Unsupervised Transliterator'
copyright = u'2018, Anoop Kunchukuttan'
author = u'Anoop Kunchukuttan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'IITBUnsupervisedTransliteratordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'IITBUnsupervisedTransliterator.tex', u'IITB Unsupervised Transliterator Documentation',
u'Anoop Kunchukuttan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'iitbunsupervisedtransliterator', u'IITB Unsupervised Transliterator Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'IITBUnsupervisedTransliterator', u'IITB Unsupervised Transliterator Documentation',
author, 'IITBUnsupervisedTransliterator', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| anoopkunchukuttan/transliterator | docs/conf.py | Python | gpl-3.0 | 11,772 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setFixedSize(362, 241)
self.textBrowser = QtGui.QTextBrowser(Dialog)
self.textBrowser.setGeometry(QtCore.QRect(10, 10, 341, 221))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> Author: feihu @ Gu-Lab</p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> Lab URL: <a href=\"http://gu.ee.tsinghua.edu.cn/\"><span style=\" text-decoration: underline; color:#0000ff;\">http://gu.ee.tsinghua.edu.cn/</span></a> </p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> This software is under GPLv3 (Because of pyQt).</p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> 作者2016年1月毕业,本软件需要有人维护,以防Tunet改版。2015年暑假期间已经改过一次了,推测近几年不会再改,但还是希望有人能维护。</p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> 志愿者请联系作者: lpffeihu (at) gmail.com</p></body></html>", None))
| lpffeihu/QTunet | about.py | Python | gpl-3.0 | 2,723 |
import datetime
def change(char):
if char != '0':
return int(char)
return 11
def main(string, size):
length = len(string)
value = -1
maxvalue = -1
for l in range(length - size + 1):
r = l + size - 1
if l == 0:
temp = 1
for i in range(l, r + 1):
temp *= change(string[i])
value = temp
else:
value = value // change(string[l - 1]) * change(string[r])
maxvalue = max(maxvalue, value if value % 11 != 0 else 0)
return maxvalue
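# Explanatory note (added; not in the original solution): change() maps the digit
# '0' to 11 so the rolling window product is never zero and can be updated by
# dividing out the digit that leaves the window. Because real digits only range
# from 1 to 9, a product divisible by 11 can only mean the window contained a
# '0', and main() scores such windows as 0 instead of using the inflated product.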
file = open('p008.in')
string = ''.join([line[:-1] for line in file])
file.close()
try:
para = int(input())
except:
para = 13
beg = datetime.datetime.now()
ans = main(string, para)
end = datetime.datetime.now()
print("answer:", ans)
print("time:", end - beg)
| nowsword/ProjectEuler | p008.py | Python | gpl-3.0 | 829 |
# -*- encoding: utf-8 -*-
# pilas engine - a video game framework.
#
# copyright 2010 - hugo ruscitti
# license: lgplv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# website - http://www.pilas-engine.com.ar
import pilas
class ColisionesFisicas(pilas.escenas.Normal):
def __init__(self):
pilas.escenas.Normal.__init__(self, pilas.colores.grisoscuro)
pilas.avisar("Un ejemplo de colisiones")
pilas.fondos.Pasto()
m = pilas.actores.Mono()
m.aprender(pilas.habilidades.Arrastrable)
m.aprender(pilas.habilidades.ColisionableComoPelota)
b = pilas.actores.Bomba()
b.aprender(pilas.habilidades.RebotarComoPelota)
pilas.atajos.fabricar(pilas.actores.Pelota, 20)
| cristian99garcia/pilas-activity | pilas/ejemplos/fisica.py | Python | gpl-3.0 | 738 |
from .wallet import *
| leofnch/kc | hodl/wallet/__init__.py | Python | gpl-3.0 | 22 |
#!/usr/bin/env python
# encoding: utf-8
""" Time utilities and time zone information
:author: David Hoese (davidh)
:contact: [email protected]
:organization: Space Science and Engineering Center (SSEC)
:copyright: Copyright (c) 2013 University of Wisconsin SSEC. All rights reserved.
:date: Jan 2013
:license: GNU GPLv3
Copyright (C) 2013 Space Science and Engineering Center (SSEC),
University of Wisconsin-Madison.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This file is part of the polar2grid software package. Polar2grid takes
satellite observation data, remaps it, and writes it to a file format for
input into another program.
Documentation: http://www.ssec.wisc.edu/software/polar2grid/
Written by David Hoese January 2013
University of Wisconsin-Madison
Space Science and Engineering Center
1225 West Dayton Street
Madison, WI 53706
[email protected]
"""
__docformat__ = "restructuredtext en"
import os
import logging
import datetime
log = logging.getLogger(__name__)
class UTC(datetime.tzinfo):
"""Time zone class for UTC
"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return self.ZERO
def utc_now():
return datetime.datetime.utcnow().replace(tzinfo=UTC())
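# Usage sketch (added; not part of the original module):
#
#   now = utc_now()
#   assert now.tzinfo is not None                    # timezone-aware datetime
#   assert now.utcoffset() == datetime.timedelta(0)  # pinned to UTC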
| tommyjasmin/polar2grid | py/polar2grid_core/polar2grid/core/time_utils.py | Python | gpl-3.0 | 1,982 |
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""``Rotterdam`` skin package.
$Id: __init__.py 65511 2006-02-27 05:24:24Z philikon $
"""
__docformat__ = "reStructuredText"
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class rotterdam(IBrowserRequest):
"""Layer for registering Rotterdam-specific macros."""
class Rotterdam(rotterdam, IDefaultBrowserLayer):
"""The ``Rotterdam`` skin.
It is available via ``++skin++Rotterdam``.
"""
# BBB 2006/02/18, to be removed after 12 months
import zope.app.skins
zope.app.skins.set('Rotterdam', Rotterdam)
| Donkyhotay/MoonPy | zope/app/rotterdam/__init__.py | Python | gpl-3.0 | 1,238 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# itucsdb documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 18 15:56:05 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'itucsdb1611'
copyright = '2016, AcademicFreelance'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<projects> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'itucsdb1611doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'itucsdb1611.tex', 'itucsdb1611 Documentation',
'AcademicFreelance', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'itucsdb1611', 'itucsdb1611 Documentation',
['AcademicFreelance'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'itucsdb1611', 'itucsdb1611 Documentation',
'AcademicFreelance', 'itucsdb1611', 'One line description of projects.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| itucsdb1611/itucsdb1611 | docs/conf.py | Python | gpl-3.0 | 8,209 |
#!/usr/bin/python2
import sys
import time
import icehms
if __name__ == "__main__":
name = sys.argv[1]
mgr = icehms.IceManager()
mgr.init()
try:
print("Holons are: ", mgr.findHolons())
rob = mgr.getHolon(name)
print("My robot is: ", rob)
rob = rob.ice_timeout(10000)
p = rob.getl()
print("Current pose is: ", rob.getl())
p[2] += 0.10
print("Sending robot to: ", p)
rob.movel(p, 0.1, 0.01)
print("Current pose is: ", rob.getl())
p[1] += 0.10
rob.movel(p, 0.1, 0.01)
print("Current pose is: ", rob.getl())
p[2] -= 0.10
rob.movel(p, 0.1, 0.01)
p[1] -= 0.10
rob.movel(p, 0.1, 0.01)
finally:
mgr.shutdown()
| oroulet/vc2hms | demo/advance_control_system.py | Python | gpl-3.0 | 768 |
# Scrapy settings for WebCrawler project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'WebCrawler'
CONCURRENT_ITEMS = 1
LOG_ENABLED = True
SPIDER_MODULES = ['WebCrawler.spiders', "WebCrawler.utility_spiders"]
NEWSPIDER_MODULE = 'WebCrawler.spiders'
DOWNLOADER_MIDDLEWARES = {
'HTResearch.WebCrawler.WebCrawler.middlewares.UrlQueueMiddleware': 400,
}
ITEM_PIPELINES = {
'HTResearch.WebCrawler.WebCrawler.item_pipeline.item_switches.ItemSwitch': 100,
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'WebCrawler (+http://www.yourdomain.com)'
| mizhgun/HTResearch | HTResearch/WebCrawler/WebCrawler/settings.py | Python | gpl-3.0 | 762 |
# -*- coding: utf-8 -*-
# xVector Engine Server
# Copyright (c) 2011 James Buchwald
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
Server-specific network code.
'''
import logging
import traceback
import asyncore
import socket
import sys
from xVLib import Networking
from . import ServerGlobals, IPBans, ConnectionNegotiation, Login
# stuff we use later
mainlog = logging.getLogger("Server.Main")
class ConnectionClosed(Exception): pass
'''Raised when a connection is closed.'''
class ServerConnection(Networking.BaseConnectionHandler):
'''
Server-side connection handler for a single connection.
'''
##
## State constants.
##
State_Negotiate = 0
'''Connection negotiation state.'''
State_WaitForLogin = 1
'''State in which the server waits for the client to login or register.'''
State_Login = 2
'''State in which the server is processing a challenge-response login.'''
State_CharacterSelect = 3
'''State in which the user is selecting a character.'''
State_CharacterCreate = 4
'''State in which the user is creating a new character.'''
State_Game = 5
'''State in which the user is playing the game.'''
##
## State packet routers.
##
StateRouters = {
State_Negotiate: ConnectionNegotiation.ConnectionNegotiationRouter,
State_WaitForLogin: Login.WaitForLoginRouter,
State_Login: Login.LoginRouter,
State_CharacterSelect: None, # TODO: Implement
State_CharacterCreate: None, # TODO: Implement
State_Game: None, # TODO: Implement
}
'''
Maps connection states to their packet router classes.
'''
def __init__(self, sock=None):
'''
Creates a new server-side connection handler.
@type sock: socket
@param sock: Connected socket to wrap
'''
# Inherit base class behavior
Networking.BaseConnectionHandler.__init__(self, sock)
# Set the initial state.
self.State = self.State_Negotiate
'''Current state.'''
self.Router = self.StateRouters[self.State]()
'''Current packet router.'''
print "[debug] self.Router = %s" % self.Router
# Declare account management attributes.
self._Account = None
'''Account associated with this connection.'''
# Login state tracking attributes.
self.LoginChallenge = None
'''Login challenge for the current login attempt.'''
self.LastLogin = 0
'''Time of the last login attempt.'''
# Register the connection.
msg = "New connection from %s." % self.Address[0]
mainlog.info(msg)
App = ServerGlobals.Application
try:
App.Connections.AddConnection(self)
except ConnectionLimitExceeded:
self.close()
except BannedIPAddress:
self.close()
def SetState(self, newstate):
self.State = newstate
# Adjust the packet router.
self.Router = self.StateRouters[newstate]()
##
## connection information properties
##
def GetAccountName(self):
'''Old-style class getter for the connected account's name.'''
if self.Account:
return self.Account.Username
else:
return None
def GetCharacterName(self):
'''Old-style class getter for the connected character's name.'''
return None # TODO: Implement
def GetAccount(self):
'''Old-style class getter for the associated account.'''
return self._Account
def SetAccount(self, newaccount):
'''Old-style class setter for the associated account.'''
self._Account = newaccount
##
## reimplemented methods from Networking.BaseConnectionHandler
##
def PacketReceived(self, packet):
'''
Reimplemented from xVLib.Networking.BaseConnectionHandler.
Here we pass the packet to the appropriate handler. Again, nothing
special. This is essentially just a routing method.
'''
# Hand it off to the router.
self.Router.HandlePacket(packet)
def OnCorruptPacket(self):
# Log an error message.
msg = "%s - Corrupt packet received." % self.Address[0]
msg += "\n\n%s" % traceback.format_exc()
mainlog.error(msg)
# Close the connection.
self.close()
def OnTimeout(self):
# Log the event.
msg = "%s - Connection timed out." % self.Address[0]
mainlog.info(msg)
# Close the connection.
self.close()
##
## low-level asyncore callbacks
##
def handle_error(self):
'''
Called when an unhandled exception occurs in the handler.
'''
# log the error
msg = self.Address[0] + " - Unhandled exception in connection handler."
msg += "\n%s" % traceback.format_exc()
mainlog.error(msg)
# kill the connection if it's still alive
try:
self.close()
except:
# we don't care
pass
##
## crypto stuff
##
def WrapTLS(self):
'''
Encrypts the connection in a TLS layer.
'''
##
## reimplemented methods from asyncore.dispatcher
##
def close(self):
'''
Closes the socket and cleans up.
This extends the close() method of asyncore.dispatcher.
'''
# Log the close event.
msg = "%s - Connection closed." % self.Address[0]
mainlog.info(msg)
# Deregister the connection.
App = ServerGlobals.Application
try:
App.Connections.RemoveConnection(self)
except UnregisteredConnection:
pass
# Inherit base class behavior.
asyncore.dispatcher_with_send.close(self)
class ConnectionLimitExceeded(Exception): pass
'''Raised if the connection limit is exceeded.'''
class UnregisteredConnection(Exception): pass
'''Raised if a referenced connection is not registered with the manager.'''
class NameAlreadyInUse(Exception): pass
'''Raised if an account or character name is already in use.'''
class BannedIPAddress(Exception): pass
'''Raised if the connection's IP address is banned.'''
class ConnectionManager(object):
'''
Manages the set of server connections.
This class exposes a set of data structures useful for rapid lookup of
connections. The individual connections are responsible for registering
themselves appropriately.
Note that a connection does not have to be registered with all indices at
once. It MUST be registered with ConnectionSet and ByAddress, but the
others are only mandatory once they make sense for the connection.
'''
def __init__(self):
'''Creates an empty connection manager.'''
# Create the master connection set.
self.ConnectionSet = set()
'''Set of all managed connections.'''
# Create the lookup indices.
self.ByAddress = {}
'''Maps network address tuples to their connections.'''
self.ByAccountName = {}
'''Maps account names to their connections.'''
self.ByCharacterName = {}
'''Maps character names to their connections.'''
# Create the tracking lists.
self.AddressConnectCount = {}
'''Maps address strings to the number of connections from them.'''
def AddConnection(self, conn):
'''
Adds a connection.
@type conn: ServerConnection
@param conn: Connection to add.
'''
# check that the connection isn't already added
if conn in self.ConnectionSet:
# already added, continue
return
# Make sure the connection's IP isn't banned.
addr = conn.Address
if IPBans.IsBanned(addr[0]):
msg = "%s - IP is banned, rejecting connection." % addr[0]
mainlog.info(msg)
raise BannedIPAddress
# now register the connection
App = ServerGlobals.Application
maxtotal = App.Config['Network/Connections/Max']
if len(self.ConnectionSet) > maxtotal:
# Total connection limit exceeded
msg = "Too many total connections, rejecting from %s." % addr[0]
mainlog.warning(msg)
raise ConnectionLimitExceeded
self.ConnectionSet.add(conn)
# check the address
maxip = App.Config['Network/Connections/PerIP']
if addr[0] in self.AddressConnectCount:
if self.AddressConnectCount[addr[0]] > maxip:
# Per-IP connection limit exceeded.
msg = "Too many connections from %s, rejecting." % addr[0]
mainlog.info(msg)
raise ConnectionLimitExceeded
self.AddressConnectCount[addr[0]] += 1
else:
self.AddressConnectCount[addr[0]] = 1
self.ByAddress[addr] = conn
def UpdateConnection(self, conn, oldAccount=None, oldChar=None):
'''
Updates a connection that has already been added.
@type conn: ServerConnection
@param conn: Connection object to update in the manager.
@type oldAccount: string
@param oldAccount: If set, the old account name to cancel.
@type oldChar: string
@param oldChar: If set, the old character name to cancel.
@raise NameAlreadyInUse: Raised if the account or character name is
already registered with a different connection.
'''
# ensure that the connection is registered
if conn not in self.ConnectionSet:
# not registered
addr = conn.Address[0]
msg = "Tried to update an unregistered connection from %s." % addr
mainlog.error(msg)
raise UnregisteredConnection
# is there a change of account?
acctname = conn.GetAccountName()
if acctname:
if acctname in self.ByAccountName:
if self.ByAccountName[acctname] != conn:
# account already connected by other connection
raise NameAlreadyInUse
else:
# register
self.ByAccountName[acctname] = conn
else:
if oldAccount:
# does this connection actually own that account?
try:
if self.ByAccountName[oldAccount] == conn:
# deregister
del self.ByAccountName[oldAccount]
except:
# we don't care about any key errors
pass
# is there a change of character?
charname = conn.GetCharacterName()
if charname:
# we don't have to check for it to already be logged in since the
# character is tied to a single account
self.ByCharacterName[charname] = conn
else:
if oldChar:
try:
del self.ByCharacterName[oldChar]
except:
pass
def RemoveConnection(self, conn):
'''
Queues a connection for removal.
@type conn: ServerConnection
@param conn: Connection to unregister
@raise UnregisteredConnection: Raised if conn is not registered.
'''
# make sure the connection is registered
if conn not in self.ConnectionSet:
# not registered; don't report, this might be legitimate
# (premature close() due to IP ban, etc.)
raise UnregisteredConnection
addr = conn.Address[0]
# remove the connection from the lookup tables
charname = conn.GetCharacterName()
if charname:
try:
if self.ByCharacterName[charname] == conn:
del self.ByCharacterName[charname]
else:
args = (addr, charname)
msg = "%s - Tried to deregister character %s not "
msg += "associated with the connection."
msg %= args
mainlog.error(msg)
except:
pass
acctname = conn.GetAccountName()
if acctname:
try:
if self.ByAccountName[acctname] == conn:
del self.ByAccountName[acctname]
else:
args = (addr, acctname)
msg = "%s - Tried to deregister account %s not associated "
msg += "with the connection."
msg %= args
mainlog.error(msg)
except:
pass
# remove the connection entirely
self.ConnectionSet.remove(conn)
try:
self.AddressConnectCount[addr] -= 1
except:
msg = "%s - Failed to decrement per-IP connection count." % addr
mainlog.error(msg)
def ScanForTimeouts(self):
'''
Finds timed-out connections and closes them.
'''
# Scan the connections.
copyset = self.ConnectionSet.copy()
for conn in copyset:
conn.CheckTimeout()
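# Illustrative sketch (added; not in the original file) of the registration flow
# described in the class docstring; in practice ServerConnection drives it:
#
#   manager.AddConnection(conn)        # on accept: ConnectionSet / ByAddress
#   conn.SetAccount(account)
#   manager.UpdateConnection(conn)     # after login: also indexed in ByAccountName
#   manager.RemoveConnection(conn)     # on close: drops every index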
class NetworkStartupError(Exception): pass
'''Raised if an error occurs while initializing the network server.'''
class NetworkServer(asyncore.dispatcher):
'''
Listens for new incoming connections and assigns them to handlers.
Don't directly create instances of this class; instead, use one of its
subclasses, either IPv4Server or IPv6Server.
'''
def __init__(self):
'''Creates the network server and starts listening for connections.'''
# Inherit base class behavior
asyncore.dispatcher.__init__(self)
def handle_accept(self):
'''Called when a client is trying to connect.'''
# accept the connection
try:
pair = self.accept()
except socket.error as err:
# something went wrong
msg = "Failed to accept incoming connection: %s" % err.args[1]
mainlog.error(msg)
return
if not pair:
# apparently they're not trying to connect anymore...
return
# wrap the connection
sock = pair[0]
conn = ServerConnection(sock)
def handle_error(self):
'''Called when an unhandled exception is raised.'''
# show an error
msg = "Unhandled exception in NetworkServer:\n"
msg += traceback.format_exc()
mainlog.error(msg)
class IPv4Server(NetworkServer):
'''
Network server class for IPv4 connections.
'''
def __init__(self):
# inherit base class behavior
NetworkServer.__init__(self)
# create the listening socket
App = ServerGlobals.Application
iface = App.Config['Network/Address/IPv4/Interface']
port = App.Config['Network/Address/IPv4/Port']
addr = (iface, port)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
except socket.error as err:
msg = "Could not create listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
# bind to the network address and start listening
try:
self.bind(addr)
except socket.error as err:
msg = "Could not bind listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
try:
self.listen(5)
except socket.error as err:
msg = "Could not listen on listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
class IPv6Server(NetworkServer):
'''
Network server class for IPv6 connections.
@warn
Due to a bug in current Win32 releases of Python, IPv6 support is unstable
for xVector servers running on Windows. This bug arises because the latest
versions of the Windows SDK support the IPPROTO_IPV6 flag on Windows XP and
above, while the 32-bit releases of Python are built to support Windows
2000 and later. xVector gets around this by self-defining the constant if
Windows is detected; this will be fixed as soon as possible. For more
information, please see http://bugs.python.org/issue6926.
'''
def __init__(self):
# inherit base class behavior
NetworkServer.__init__(self)
# create the listening socket
App = ServerGlobals.Application
iface = App.Config['Network/Address/IPv6/Interface']
port = App.Config['Network/Address/IPv6/Port']
addr = (iface, port)
try:
# Create the IPv6 socket
self.create_socket(socket.AF_INET6, socket.SOCK_STREAM)
self.set_reuse_addr()
# Limit to IPv6 only (this is an easy way to separate IPv4/IPv6 on
# multiple platforms)
##### This next portion is a rather hackish fix.
if sys.platform == "win32":
IPPROTO_IPV6 = 41 # hack!
else:
IPPROTO_IPV6 = socket.IPPROTO_IPV6
self.socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
except socket.error as err:
msg = "Could not create listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
# bind to the network address and start listening
try:
self.bind(addr)
except socket.error as err:
msg = "Could not bind listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
try:
self.listen(5)
except socket.error as err:
msg = "Could not listen on listening socket: %s" % err.args[1]
mainlog.critical(msg)
raise NetworkStartupError
def PollNetwork():
'''
Polls the network and handles any network events that are pending.
This should be called once per cycle of the main loop.
'''
# Figure out what we're doing
App = ServerGlobals.Application
usepoll = App.Config['Network/Engine/UsePoll']
# Poll the network
asyncore.loop(timeout=0, count=1, use_poll=usepoll)
# Check for timed-out connections
App.Connections.ScanForTimeouts()
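# Example (illustrative only, not part of the original module): PollNetwork()
# is meant to be driven by the application's main loop once the listening
# servers exist; a minimal driver might look like the sketch below, where the
# sleep interval is purely hypothetical.
#
#     server = IPv4Server()      # requires ServerGlobals.Application to be configured
#     while True:
#         PollNetwork()
#         time.sleep(0.01)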
| buchwj/xvector | server/xVServer/ServerNetworking.py | Python | gpl-3.0 | 19,620 |
# -*- coding: utf-8 -*-
import re
from pyload.plugin.internal.MultiHook import MultiHook
class DebridItaliaCom(MultiHook):
__name = "DebridItaliaCom"
__type = "hook"
__version = "0.12"
__config = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
("pluginlist" , "str" , "Plugin list (comma separated)" , "" ),
("revertfailed" , "bool" , "Revert to standard download if fails", True ),
("reload" , "bool" , "Reload plugin list" , True ),
("reloadinterval", "int" , "Reload interval in hours" , 12 )]
__description = """Debriditalia.com hook plugin"""
__license = "GPLv3"
__authors = [("stickell" , "[email protected]"),
("Walter Purcaro", "[email protected]" )]
def getHosters(self):
return self.getURL("http://debriditalia.com/api.php", get={'hosts': ""}).replace('"', '').split(',')
| ardi69/pyload-0.4.10 | pyload/plugin/hook/DebridItaliaCom.py | Python | gpl-3.0 | 1,097 |
import numpy as np
def softmax(x):
"""Compute the softmax function for each row of the input x.
It is crucial that this function is optimized for speed because
it will be used frequently in later code. You might find numpy
functions np.exp, np.sum, np.reshape, np.max, and numpy
broadcasting useful for this task.
Numpy broadcasting documentation:
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
You should also make sure that your code works for a single
N-dimensional vector (treat the vector as a single row) and
for M x N matrices. This may be useful for testing later. Also,
make sure that the dimensions of the output match the input.
You must implement the optimization in problem 1(a) of the
written assignment!
Arguments:
x -- A N dimensional vector or M x N dimensional numpy matrix.
Return:
x -- You are allowed to modify x in-place
"""
orig_shape = x.shape
if len(x.shape) > 1:
# Matrix
### YOUR CODE HERE
exp_minmax = lambda x: np.exp(x - np.max(x))
denom = lambda x: 1.0 / np.sum(x)
x = np.apply_along_axis(exp_minmax, 1, x)
denominator = np.apply_along_axis(denom, 1, x)
if len(denominator.shape) == 1:
denominator = denominator.reshape((denominator.shape[0], 1))
x = x * denominator
### END YOUR CODE
else:
# Vector
### YOUR CODE HERE
x_max = np.max(x)
x = x - x_max
numerator = np.exp(x)
denominator = 1.0 / np.sum(numerator)
x = numerator.dot(denominator)
### END YOUR CODE
assert x.shape == orig_shape
return x
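# Illustrative check (not part of the assignment skeleton): subtracting the
# row-wise max above relies on the shift invariance softmax(x) == softmax(x + c),
# which keeps np.exp from overflowing on large inputs. For example:
#
#     >>> np.allclose(softmax(np.array([1.0, 2.0])),
#     ...             softmax(np.array([1001.0, 1002.0])))
#     True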
def test_softmax_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
test1 = softmax(np.array([1, 2]))
print test1
ans1 = np.array([0.26894142, 0.73105858])
assert np.allclose(test1, ans1, rtol=1e-05, atol=1e-06)
test2 = softmax(np.array([[1001, 1002], [3, 4]]))
print test2
ans2 = np.array([
[0.26894142, 0.73105858],
[0.26894142, 0.73105858]])
assert np.allclose(test2, ans2, rtol=1e-05, atol=1e-06)
test3 = softmax(np.array([[-1001, -1002]]))
print test3
ans3 = np.array([0.73105858, 0.26894142])
assert np.allclose(test3, ans3, rtol=1e-05, atol=1e-06)
print "You should be able to verify these results by hand!\n"
def test_softmax():
"""
Use this space to test your softmax implementation by running:
python q1_softmax.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_softmax_basic()
test_softmax()
| hankcs/CS224n | assignment1/q1_softmax.py | Python | gpl-3.0 | 2,883 |
from psrd.stat_block.utils import colon_filter, default_closure
from psrd.stat_block.section import parse_section
from psrd.universal import filter_name
def is_spellbook(sb, book):
fields = dict(sb.keys)
if book == 'Ultimate Magic':
if fields.has_key('Value') or sb.name == 'Preparation Ritual':
return True
return False
def parse_spellbook(sb, book):
newdetails = []
for detail in sb.details:
if hasattr(detail, 'level') and detail.level == 5:
newdetails.append(detail.name)
else:
newdetails.append(detail)
sb.details = newdetails
section = parse_section(sb, book, keys=False)
for key in sb.keys:
newsec = {'type': 'section', 'source': book, 'name': key[0], 'text': key[1]}
sections = section.setdefault('sections', [])
sections.insert(1, newsec)
section['subtype'] = 'spellbook'
return section
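# Example (hypothetical usage sketch): given a stat block `sb` produced by the
# psrd.universal parser, the two entry points are typically chained like this:
#
#     if is_spellbook(sb, 'Ultimate Magic'):
#         section = parse_spellbook(sb, 'Ultimate Magic')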
| devonjones/PSRD-Parser | src/psrd/stat_block/spellbook.py | Python | gpl-3.0 | 829 |
import re, os, tempfile, shutil, StringIO, math, hashlib
from operator import itemgetter
from utils import humanize, hashcheck, torrentdecode
from BeautifulSoup import BeautifulSoup
import exporter
class Migrator:
def __init__(self, output, client = None, boundfolder = None):
self.audioformats = (".flac",".mp3",".ogg",".aac",".ac3",".dts")
self.outputdir = output
self.torrentclient = client
self.boundfolder = boundfolder
def execute(self, torrentinfo, torrentfolder, torrentid = None):
# remove trailing slash
if torrentfolder[-1] == '/': torrentfolder = torrentfolder[:-1]
self.torrentname = torrentinfo[0]
self.torrentdata = torrentinfo[1]
self.torrentinfo = torrentdecode.decode(torrentinfo[1])
self.torrentfolder = torrentfolder
self.mappings = [] # keeps filename mappings and offsets
# Rename folder
torrentinfo_name = unicode(BeautifulSoup(self.torrentinfo['info']['name']).contents[0])
if torrentinfo_name != os.path.basename(torrentfolder):
print " Rename folder %s => %s" % (os.path.basename(torrentfolder), torrentinfo_name)
# Get a list of all old files
oldfiles = []
for item in os.walk(torrentfolder):
if len(item[2]):
for f in item[2]:
oldfiles.append(os.path.normpath(os.path.relpath(os.path.join(item[0],f),torrentfolder)))
# Remove non-audio files unless file with same filesize and extension is present
for oldfile in oldfiles:
extension = os.path.splitext(oldfile)[-1]
if extension not in self.audioformats:
for newfile in self.torrentinfo['info']['files']:
if os.path.splitext(os.path.join(*newfile['path']))[-1] == extension and os.path.getsize(os.path.join(torrentfolder,oldfile)) == newfile['length']:
new = unicode(BeautifulSoup(os.path.join(*newfile['path'])).contents[0])
oldfile = unicode(BeautifulSoup(oldfile).contents[0])
self.mappings.append((oldfile,new,0))
if len(self.mappings) > 0:
print " Rename non-audio files:"
for mapping in self.mappings:
print " %s => %s" % (mapping[0],mapping[1])
# Audio files mapping
print " Rename audio files. Old name => new name (old size => new size)"
originalAudio = []
for oldfile in oldfiles:
if os.path.splitext(oldfile)[-1] in self.audioformats:
oldfile = unicode(BeautifulSoup(oldfile).contents[0])
originalAudio.append((oldfile,os.path.getsize(os.path.join(torrentfolder,oldfile))))
originalAudio = sorted(originalAudio, key=itemgetter(0))
newAudio = []
for newfile in self.torrentinfo['info']['files']:
if os.path.splitext(os.path.join(*newfile['path']))[-1] in self.audioformats:
audioFile = unicode(BeautifulSoup(os.path.join(*newfile['path'])).contents[0])
newAudio.append((audioFile,newfile['length']))
newAudio = sorted(newAudio, key=itemgetter(0))
# Audio file mapping
for i in range(0,len(originalAudio)):
if i > len(newAudio)-1: break
print " #%d: %s => %s (%s B => %s B)" % (i+1, unicode(originalAudio[i][0]).encode('utf-8'), unicode(newAudio[i][0]).encode('utf-8'), humanize.dots(originalAudio[i][1]), humanize.dots(newAudio[i][1]))
userinput = raw_input(" Is this correct? (y/n) [y] ")
if userinput in ("y","yes",""):
for i in range(0,len(originalAudio)):
if i > len(newAudio)-1: break
self.mappings.append((originalAudio[i][0],newAudio[i][0],0))
else:
print " Correct renames (press enter/correct number):"
for i in range(0,len(newAudio)):
if(i > len(originalAudio)-1): break
userinput = raw_input(" %s (%s) [#%d: %s (%s)] " % (unicode(newAudio[i][0]).encode('utf-8'), humanize.dots(newAudio[i][1]), i+1, unicode(originalAudio[i][0]).encode('utf-8'), humanize.dots(originalAudio[i][1])))
if userinput and userinput.isdigit() and int(userinput) in range(1,len(newAudio)+1):
mapto = int(userinput)-1
else:
mapto = i
self.mappings.append((originalAudio[mapto][0],newAudio[i][0],0))
# Check filesize
sumNew = 0
for new in newAudio: sumNew += new[1]
sumOld = 0
for old in originalAudio: sumOld += old[1]
if sumNew != sumOld:
print " Filesizes do not match (original: %s B, new: %s B)" % (humanize.dots(sumOld),humanize.dots(sumNew))
# add padding to files
print " Changing padding on files"
self.simpleRepad(originalAudio,newAudio)
else:
print " Filesizes match"
# Hash check
print " Hash checking migration..."
tempdir, results = self.hashCheck()
# If bad result suggest hash recognition
"""
if float(results[0])/results[1] < 0.20:
# Ask for hash recognition
userinput = raw_input(" Do you want to run experimental hash recognition to try and auto-correct the files? (y/n) [n] ")
if userinput and userinput.lower() in ("y","yes"):
self.hashRecognition()
# Do final hash check
print " Hash checking migration..."
results = self.hashCheck()
"""
# Offer migration
userinput = raw_input(" Execute? (y/n) [n] ")
if userinput and userinput.lower() in ("y","yes"):
            # offer torrent deletion
if torrentid and self.torrentclient:
userinput = raw_input(" Remove torrent from client? (y/n) [n] ")
if userinput and userinput.lower() in ("y","yes"):
self.torrentclient.remove_torrent(torrentid)
# offer data deletion
if len(oldfiles) > 1: # single file torrents might not have a directory, so don't offer deletion in that case
userinput = raw_input(" Remove original data '%s'? (y/n) [n] " % (torrentfolder.encode('utf-8'),))
if userinput and userinput.lower() in ("y","yes"):
shutil.rmtree(torrentfolder)
# export
targetdir = os.path.join(self.outputdir,self.torrentinfo['info']['name'])
print " Exporting to %s" % (targetdir,)
shutil.move(tempdir+'/',targetdir);
# offer adding torrent to client
if self.boundfolder:
userinput = raw_input(" Add torrent? (y/n) [n] ")
if userinput and userinput.lower() in ("y","yes"):
f = open(os.path.join(self.boundfolder,self.torrentname),'w')
f.write(self.torrentdata)
f.close()
print " Done"
return True
else:
shutil.rmtree(tempdir)
return False
def simpleRepad(self,originalAudio,newAudio):
for i in range(len(self.mappings)):
sizeOld = sizeNew = -1
# look for mapping in new and old
for old in originalAudio:
if old[0] == self.mappings[i][0]:
sizeOld = old[1]
break
for new in newAudio:
if new[0] == self.mappings[i][1]:
sizeNew = new[1]
break
# use difference as padding
if sizeNew > -1 and sizeOld > -1:
self.mappings[i] = (self.mappings[i][0],self.mappings[i][1],sizeNew - sizeOld)
def hashCheck(self):
# create temp folder
tempdir = tempfile.mkdtemp("whatmigrate_hashcheck")
# export
exporter.export(self.torrentinfo,self.torrentfolder,self.mappings,tempdir)
# hash check
results = hashcheck.hashcheck(self.torrentinfo,tempdir)
print " %d of %d pieces correct " % (results[0],results[1])+"(%d%%)" % (round(float(results[0])/results[1]*100),)
return (tempdir,results)
# unused / slow
def hashRecognition(self):
print " Executing hash recognition... (may take a while)"
piece_length = self.torrentinfo['info']['piece length']
pieces = StringIO.StringIO(self.torrentinfo['info']['pieces'])
offset = 0
numFound = 0
numFiles = 0
# get each file that is mapped and is an audio format
for check in self.torrentinfo['info']['files']:
if os.path.splitext(os.path.join(*check['path']))[-1] in self.audioformats:
for i in range(len(self.mappings)):
if(self.mappings[i][1] == os.path.join(*check['path'])):
# determine pieces and starting offsets
first_piece = math.floor(offset/piece_length)
middle_piece = round((offset+check['length']/2)/piece_length)
starting_offset = int((middle_piece - first_piece) * piece_length - (offset - (first_piece * piece_length)))
pieces.seek(int(middle_piece*20))
piece = pieces.read(20)
found, fileoffset = self.searchPieceInFile(os.path.join(self.torrentfolder,self.mappings[i][0]),piece,starting_offset,piece_length)
if found:
numFound += 1
                            self.mappings[i] = (self.mappings[i][0], self.mappings[i][1], -fileoffset)
numFiles += 1
break;
offset += check['length']
print " Hash recognition succeeded for %d of %d files" % (numFound,numFiles)
def searchPieceInFile(self,path,piece,starting_offset,piece_length):
# get data from file
f = open(path,'rb')
filedata = StringIO.StringIO(f.read())
f.close()
# init
byteoffset = 0
found = False
# main loop
maxtries = 5000
        while byteoffset < maxtries:
# look left and right from starting offset
limit = 2
# left
if starting_offset+byteoffset <= os.path.getsize(path):
limit -= 1
filedata.seek(starting_offset+byteoffset)
if hashlib.sha1(filedata.read(piece_length)).digest() == piece:
filedata.close()
return True, byteoffset
#right
if starting_offset-byteoffset >= 0:
limit -= 1
filedata.seek(starting_offset-byteoffset)
if hashlib.sha1(filedata.read(piece_length)).digest() == piece:
filedata.close()
return True, -byteoffset
if limit == 2: break
byteoffset += 1
# close iostring
filedata.close()
# nothing found
return False, byteoffset
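# Note on the piece search above (explanatory comment, not original code): a
# torrent's 'pieces' value is a flat run of 20-byte SHA-1 digests, one per
# piece_length bytes of concatenated payload, so a single piece i can be
# checked in isolation roughly like this (the final piece may be shorter, and
# pieces can span file boundaries):
#
#     expected = pieces_blob[i * 20:(i + 1) * 20]
#     actual = hashlib.sha1(payload[i * piece_length:(i + 1) * piece_length]).digest()
#     ok = (actual == expected)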
| ThomasColliers/whatmigrate | migrator.py | Python | gpl-3.0 | 11,146 |
#!/usr/bin/env python
import sys
import os
import fnmatch
def findSource(dir):
fileList = []
for root, subFolders, files in os.walk(dir):
for file in files:
if fnmatch.fnmatch(file, "*.h") or \
fnmatch.fnmatch(file, "*.cxx") or \
fnmatch.fnmatch(file, "*.cpp") or \
fnmatch.fnmatch(file, "CMakeLists.txt") or \
fnmatch.fnmatch(file, "*.ui") or \
fnmatch.fnmatch(file, "*.qrc") or \
fnmatch.fnmatch(file, "*.py") or \
fnmatch.fnmatch(file, "*.png"):
file = os.path.join(root,file)
file = file[len(dir):] # strip common dir
fileList.append(file)
return fileList
def copyAndReplace(inFile, template, target, key, moduleName):
newFile = os.path.join( target, inFile.replace(key, moduleName) )
#newFile = os.path.join(target + "EventTranslatorPlayerTest1.cxx")
print "creating %s" % newFile
path = os.path.dirname(newFile)
if not os.path.exists(path):
os.makedirs(path)
fp = open(os.path.join(template,inFile))
contents = fp.read()
fp.close()
contents = contents.replace(key, moduleName)
contents = contents.replace(key.upper(), moduleName.upper())
fp = open(newFile, "w")
fp.write(contents)
fp.close()
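# Example (hypothetical file names): with templateKey "Template" and moduleName
# "MyModule", a call such as
#
#     copyAndReplace("TemplateWidget.cxx", "../TemplateTranslatorPlayer/",
#                    "../MyModule", "Template", "MyModule")
#
# reads ../TemplateTranslatorPlayer/TemplateWidget.cxx and writes
# ../MyModule/MyModuleWidget.cxx with "Template"/"TEMPLATE" replaced by
# "MyModule"/"MYMODULE".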
def usage():
print ""
print "Usage:"
print "ModuleWizard [--template <dir>] [--templateKey <key>] [--target <dir>] <moduleName>"
print " --template default ../TemplateTranslatorPlayer/"
print " --templateKey default is Template"
print " --target default ../<moduleName>/"
def main(argv):
template = ""
templateKey = ""
target = ""
moduleName = ""
while argv != []:
arg = argv.pop(0)
if arg == "--template":
template = argv.pop(0)
continue
if arg == "--templateKey":
templateKey = argv.pop(0)
continue
if arg == "--target":
target = argv.pop(0)
continue
if arg == "--help":
usage()
exit()
moduleName = arg
if moduleName == "":
print "Please specify module name"
usage()
exit()
if template == "":
template = "../TemplateTranslatorPlayer/"
if template[-1] != '/':
template += '/'
if templateKey == "":
templateKey = "Template"
#templateKey = os.path.split(template[:-1])[-1]
if target == "":
target = "../" + moduleName
if os.path.exists(target):
print target, "exists - delete it first"
exit()
if not os.path.exists(template):
print template, "does not exist - run from source dir or specify with --template"
usage()
exit()
print "\nWill copy \n\t%s \nto \n\t%s \nreplacing \"%s\" with \"%s\"\n" % (template, target, templateKey, moduleName)
sources = findSource( template )
print sources
for file in sources:
copyAndReplace(file, template, target, templateKey, moduleName)
print '\nModule %s created!' % moduleName
if __name__ == "__main__":
main(sys.argv[1:])
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/ThirdParty/QtTesting/vtkqttesting/Utilities/Scripts/TranslatorPlayerWizard.py | Python | gpl-3.0 | 2,960 |
from channels.routing import include
from channels.routing import route
from otree.channels.routing import channel_routing
from both_change_group.consumers import ws_message as both_change_group_ws_message,\
ws_connect as both_change_group_ws_connect, \
ws_disconnect as both_change_group_ws_disconnect
from both_change_group.consumers import ws_winnerpage_connect,\
ws_winnerpage_message, \
ws_winnerpage_disconnect
from both_change_group.consumers import presenterview_ws_connect as bothchangegroup_presenterview_ws_connect, \
presenterview_ws_message as bothchangegroup_presenterview_ws_message, \
presenterview_ws_disconnect as bothchangegroup_presenterview_ws_disconnect
#################################################
# Sockets for both_change_group application
vote_both_change_group_routing = [route("websocket.connect", both_change_group_ws_connect),
route("websocket.receive", both_change_group_ws_message),
route("websocket.disconnect", both_change_group_ws_disconnect), ]
#################################################
# Sockets for both_change_group application, for players on the winner announcement page
vote_both_change_group_winnerpage_routing = [route("websocket.connect", ws_winnerpage_connect),
route("websocket.receive", ws_winnerpage_message),
route("websocket.disconnect", ws_winnerpage_disconnect), ]
#################################################
# Sockets for both_change_group application
both_change_group_presenterview_routing = [route("websocket.connect", bothchangegroup_presenterview_ws_connect),
route("websocket.receive", bothchangegroup_presenterview_ws_message),
route("websocket.disconnect", bothchangegroup_presenterview_ws_disconnect), ]
#################################################
#################################################
channel_routing += [
include(vote_both_change_group_routing, path=r"^/both_change_group/masterofceremony"),
include(vote_both_change_group_winnerpage_routing, path=r"^/both_change_group/winnerpage"),
include(both_change_group_presenterview_routing, path=r"^/both_change_group/presenterview"),
]
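# Note (explanatory, not original code): with the includes above, an incoming
# WebSocket is dispatched by path prefix, roughly:
#
#     # "/both_change_group/masterofceremony" -> both_change_group_ws_* consumers
#     # "/both_change_group/winnerpage"       -> ws_winnerpage_* consumers
#     # "/both_change_group/presenterview"    -> bothchangegroup_presenterview_ws_* consumers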
| anthropo-lab/XP | EPHEMER/TaxEvasion_Project/redirect/routing.py | Python | gpl-3.0 | 2,344 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Koha Manual documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 4 10:07:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys; sys.setrecursionlimit(3000)
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinxcontrib.spelling',
'sphinx.ext.autosectionlabel',
]
# Spelling options
spelling_lang='en_US'
spelling_word_list_filename='koha_spelling_wordlist.txt'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Koha Manual'
copyright = '2020, Koha Community'
author = 'Koha Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '20.05'
# The full version, including alpha/beta/rc tags.
release = '20.05'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Koha Manual v17.05'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = '%Y-%m-%d %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'KohaManualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'KohaManual.tex', 'Koha Manual Documentation',
'Koha Community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kohamanual', 'Koha Manual Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'KohaManual', 'Koha Manual Documentation',
author, 'KohaManual', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
locale_dirs = ['../locales/']
# Remove blank pages
# https://evolvingweb.ca/blog/writing-documentation-restructured-text-and-sphinx
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}'
}
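# Example (illustrative, not part of the original configuration): with this
# conf.py in place the manual is typically built with sphinx-build, e.g.
#
#     sphinx-build -b html source/ build/html
#
# The exact invocation (output formats, -D language=... overrides) is assumed
# to live in the project's own build scripts and may differ.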
| Koha-Community/kohadocs | source/conf.py | Python | gpl-3.0 | 12,552 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 15:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('presence', '0003_auto_20161103_1457'),
]
operations = [
migrations.AddField(
model_name='app',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='app',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='entity',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='entity',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
| RESTfactory/presence | presence/migrations/0004_auto_20161103_1501.py | Python | gpl-3.0 | 1,108 |
# -*- coding: utf-8 -*-
"""Entry point for client resources"""
from restfulcomm.clients.rabbitmqclient import RabbitMqCommClient
from restfulcomm.clients.werkzeugclient import WerkzeugCommClient
class ClientProvider(object):
"""Client provider"""
def __init__(self, client_type, configuration):
"""Configure a client by the given type
Args:
            client_type: str. Supported values are 'rabbitmq' and 'werkzeug'.
configuration: Config client configuration object
Raises:
TypeError if client_type is an unexpected value.
"""
if client_type == 'rabbitmq':
client_class = RabbitMqCommClient
elif client_type == 'werkzeug':
client_class = WerkzeugCommClient
else:
raise TypeError('Unexpected client type')
self._client = self.factory(client_class, configuration)
@property
def client(self):
return self._client
@classmethod
def factory(cls, client_class, configuration):
return client_class(configuration)
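# Example (hypothetical usage sketch): callers only pick a transport type and
# pass the matching Config object; the concrete client class stays hidden.
#
#     provider = ClientProvider('werkzeug', configuration)   # or 'rabbitmq'
#     client = provider.client
#
# Any other client_type value raises TypeError, as documented above.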
| westial/restfulcomm | restfulcomm/providers/clientprovider.py | Python | gpl-3.0 | 1,078 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/TestDlg.ui'
#
# Created: Sun Jun 28 08:12:27 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_TestDlg(object):
def setupUi(self, TestDlg):
TestDlg.setObjectName(_fromUtf8("TestDlg"))
TestDlg.resize(400, 300)
self.buttonBox = QtGui.QDialogButtonBox(TestDlg)
self.buttonBox.setGeometry(QtCore.QRect(290, 20, 81, 241))
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.retranslateUi(TestDlg)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), TestDlg.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), TestDlg.reject)
QtCore.QMetaObject.connectSlotsByName(TestDlg)
def retranslateUi(self, TestDlg):
TestDlg.setWindowTitle(_translate("TestDlg", "Dialog", None))
| ReubenAbrams/Chrubix | src/ui/ui_TestDlg.py | Python | gpl-3.0 | 1,581 |
# ubuntuone.syncdaemon.logger - logging utilities
#
# Author: Guillermo Gonzalez <[email protected]>
#
# Copyright 2009 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
""" SyncDaemon logging utilities and config. """
from __future__ import with_statement
import logging
import sys
import os
import zlib
from ubuntuone.logger import (
_DEBUG_LOG_LEVEL,
basic_formatter,
CustomRotatingFileHandler,
DayRotatingFileHandler,
Logger,
MultiFilter,
)
from ubuntuone.platform.xdg_base_directory import ubuntuone_log_dir
# api compatibility imports
from ubuntuone import logger
from ubuntuone.platform import get_filesystem_logger, setup_filesystem_logging
DebugCapture = logger.DebugCapture
NOTE = logger.NOTE
TRACE = logger.TRACE
# pylint: disable=C0103
class mklog(object):
"""
Create a logger that keeps track of the method where it's being
called from, in order to make more informative messages.
"""
__slots__ = ('logger', 'zipped_desc')
def __init__(self, _logger, _method, _share, _uid, *args, **kwargs):
# args are _-prepended to lower the chances of them
# conflicting with kwargs
all_args = []
for arg in args:
all_args.append(
repr(arg).decode('ascii', 'replace').encode('ascii', 'replace')
)
for k, v in kwargs.items():
v = repr(v).decode('ascii', 'replace').encode('ascii', 'replace')
all_args.append("%s=%r" % (k, v))
args = ", ".join(all_args)
desc = "%-28s share:%-40r node:%-40r %s(%s) " % (_method, _share,
_uid, _method, args)
self.zipped_desc = zlib.compress(desc, 9)
self.logger = _logger
def _log(self, logger_func, *args):
"""Generalized form of the different logging methods."""
desc = zlib.decompress(self.zipped_desc)
text = desc + args[0]
logger_func(text, *args[1:])
def debug(self, *args):
"""Log at level DEBUG"""
self._log(self.logger.debug, *args)
def info(self, *args):
"""Log at level INFO"""
self._log(self.logger.info, *args)
def warn(self, *args):
"""Log at level WARN"""
self._log(self.logger.warn, *args)
def error(self, *args):
"""Log at level ERROR"""
self._log(self.logger.error, *args)
def exception(self, *args):
"""Log an exception"""
self._log(self.logger.exception, *args)
def note(self, *args):
"""Log at NOTE level (high-priority info) """
self._log(self.logger.high, *args)
def trace(self, *args):
"""Log at level TRACE"""
self._log(self.logger.trace, *args)
def callbacks(self, success_message='success', success_arg='',
failure_message='failure'):
"""
Return a callback and an errback that log success or failure
messages.
The callback/errback pair are pass-throughs; they don't
interfere in the callback/errback chain of the deferred you
add them to.
"""
def callback(arg, success_arg=success_arg):
"it worked!"
if callable(success_arg):
success_arg = success_arg(arg)
self.debug(success_message, success_arg)
return arg
def errback(failure):
"it failed!"
self.error(failure_message, failure.getErrorMessage())
self.debug('traceback follows:\n\n' + failure.getTraceback(), '')
return failure
return callback, errback
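# Example (illustrative only; names are placeholders): a per-operation logger
# carries the method name plus share/node ids, and its callbacks() pair can be
# hooked onto a twisted deferred without disturbing the callback chain:
#
#     log = mklog(some_logger, 'download', share_id, node_id, path)
#     log.debug('starting')
#     cb, eb = log.callbacks()
#     d.addCallbacks(cb, eb)    # `d` is an existing deferred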
# pylint: enable=C0103
LOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon.log')
EXLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-exceptions.log')
INVALIDLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-invalid-names.log')
BROKENLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-broken-nodes.log')
root_logger = logging.getLogger("ubuntuone.SyncDaemon")
twisted_logger = logging.getLogger('twisted')
filesystem_logger = get_filesystem_logger()
# now restore our custom logger class
logging.setLoggerClass(Logger)
root_handler = CustomRotatingFileHandler(filename=LOGFILENAME)
exception_handler = CustomRotatingFileHandler(filename=EXLOGFILENAME)
def init():
# root logger
root_logger.propagate = False
root_logger.setLevel(_DEBUG_LOG_LEVEL)
root_handler.addFilter(MultiFilter(['ubuntuone.SyncDaemon',
'twisted', 'pyinotify']))
root_handler.setFormatter(basic_formatter)
root_handler.setLevel(_DEBUG_LOG_LEVEL)
root_logger.addHandler(root_handler)
# exception logs
exception_handler.setFormatter(basic_formatter)
exception_handler.setLevel(logging.ERROR)
# add the exception handler to the root logger
logging.getLogger('').addHandler(exception_handler)
root_logger.addHandler(exception_handler)
# hook twisted.python.log with standard logging
from twisted.python import log
observer = log.PythonLoggingObserver('twisted')
observer.start()
# configure the logger to only show errors
twisted_logger.propagate = False
twisted_logger.setLevel(logging.ERROR)
twisted_logger.addHandler(root_handler)
twisted_logger.addHandler(exception_handler)
# set the filesystem logging
setup_filesystem_logging(filesystem_logger, root_handler)
# invalid filenames log
invnames_logger = logging.getLogger("ubuntuone.SyncDaemon.InvalidNames")
invnames_logger.setLevel(_DEBUG_LOG_LEVEL)
invnames_handler = CustomRotatingFileHandler(filename=INVALIDLOGFILENAME)
invnames_handler.setFormatter(basic_formatter)
invnames_handler.setLevel(logging.INFO)
invnames_logger.addHandler(invnames_handler)
# broken nodes log
brokennodes_logger = logging.getLogger("ubuntuone.SyncDaemon.BrokenNodes")
brokennodes_logger.setLevel(_DEBUG_LOG_LEVEL)
brokennodes_handler = CustomRotatingFileHandler(filename=BROKENLOGFILENAME)
brokennodes_handler.setFormatter(basic_formatter)
brokennodes_handler.setLevel(logging.INFO)
brokennodes_logger.addHandler(brokennodes_handler)
def configure_logging(level, maxBytes, backupCount):
"""configure level, maxBytes and backupCount in all handlers"""
set_level(level)
set_max_bytes(maxBytes)
set_backup_count(backupCount)
def set_level(level):
"""set 'level' as the level for all the logger/handlers"""
root_logger.setLevel(level)
root_handler.setLevel(level)
def set_max_bytes(size):
"""set the maxBytes value in all the handlers"""
root_handler.maxBytes = size
exception_handler.maxBytes = size
def set_backup_count(count):
"""set the backup count in all the handlers"""
root_handler.backupCount = count
exception_handler.backupCount = count
def set_debug(dest):
""" Set the level to debug of all registered loggers, and replace their
handlers. if debug_level is file, syncdaemon-debug.log is used. If it's
stdout, all the logging is redirected to stdout. If it's stderr, to stderr.
@param dest: a string with a one or more of 'file', 'stdout', and 'stderr'
e.g. 'file stdout'
"""
if not [ v for v in ['file', 'stdout', 'stderr'] if v in dest]:
# invalid dest value, let the loggers alone
return
sd_filter = MultiFilter(['ubuntuone.SyncDaemon', 'twisted', 'pyinotify'])
if 'file' in dest:
# setup the existing loggers in debug
root_handler.setLevel(_DEBUG_LOG_LEVEL)
logfile = os.path.join(ubuntuone_log_dir, 'syncdaemon-debug.log')
root_handler.baseFilename = os.path.abspath(logfile)
# don't cap the file size
set_max_bytes(0)
for name in ['ubuntuone.SyncDaemon', 'twisted']:
logger = logging.getLogger(name)
logger.setLevel(_DEBUG_LOG_LEVEL)
if 'stderr' in dest:
stderr_handler = logging.StreamHandler()
stderr_handler.setFormatter(basic_formatter)
stderr_handler.setLevel(_DEBUG_LOG_LEVEL)
stderr_handler.addFilter(sd_filter)
logger.addHandler(stderr_handler)
if 'stdout' in dest:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(basic_formatter)
stdout_handler.setLevel(_DEBUG_LOG_LEVEL)
stdout_handler.addFilter(sd_filter)
logger.addHandler(stdout_handler)
def set_server_debug(dest):
""" Set the level to debug of all registered loggers, and replace their
handlers. if debug_level is file, syncdaemon-debug.log is used. If it's
stdout, all the logging is redirected to stdout.
    @param dest: a string with one or more of 'file', 'stdout' and 'stderr', e.g: 'file stdout'
"""
logger = logging.getLogger("storage.server")
logger.setLevel(5) # this shows server messages
if 'file' in dest:
filename = os.path.join(ubuntuone_log_dir, 'syncdaemon-debug.log')
handler = DayRotatingFileHandler(filename=filename)
handler.setFormatter(basic_formatter)
handler.setLevel(5) # this shows server messages
logger.addHandler(handler)
if 'stdout' in dest:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(basic_formatter)
stdout_handler.setLevel(5) # this shows server messages
logger.addHandler(stdout_handler)
    if 'stderr' in dest:
        stderr_handler = logging.StreamHandler(sys.stderr)
        stderr_handler.setFormatter(basic_formatter)
        stderr_handler.setLevel(5) # this shows server messages
        logger.addHandler(stderr_handler)
# if we are in debug mode, replace/add the handlers
DEBUG = os.environ.get("DEBUG", None)
if DEBUG:
set_debug(DEBUG)
# configure server logging if SERVER_DEBUG != None
SERVER_DEBUG = os.environ.get("SERVER_DEBUG", None)
if SERVER_DEBUG:
set_server_debug(SERVER_DEBUG)
def rotate_logs():
"""do a rollover of the three handlers"""
# ignore the missing file error on a failed rollover
# pylint: disable-msg=W0704
try:
root_handler.doRollover()
except OSError:
pass
try:
exception_handler.doRollover()
except OSError:
pass
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/syncdaemon/logger.py | Python | gpl-3.0 | 10,852 |
# -*- coding: utf-8 -*-
from fabric.api import cd, env, run
env.project_root = "/home/djangobrasil/djangobrasil.org"
env.app_root = env.project_root + "/src/djangobrasil"
env.virtualenv = "/home/djangobrasil/.virtualenvs/djangobrasil"
def update():
with cd(env.project_root):
run("git pull")
def deps():
run("{}/bin/pip install -r {}/requirements.txt".format(
env.virtualenv, env.project_root))
def start():
with cd(env.app_root):
run('%(virtualenv)s/bin/gunicorn_django -p gunicorn.pid \
            --bind=127.0.0.1:7777 --daemon --workers=3' % env)
def stop():
run('kill -TERM `cat %(app_root)s/gunicorn.pid`' % env)
def reload():
run('kill -HUP `cat %(app_root)s/gunicorn.pid`' % env)
def clean():
with cd(env.app_root):
run("find . -name \"*.pyc\" | xargs rm -f ")
def deploy():
update()
deps()
clean()
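# Example (illustrative): with Fabric installed, a full deploy is normally run
# from the command line against the target host, e.g.
#
#     fab -H yourserver deploy
#
# which executes update(), deps() and clean() in that order; start/stop/reload
# remain available as separate tasks.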
| djangobrasil/djangobrasil.org | etc/fabfile.py | Python | gpl-3.0 | 892 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------------------------
esp_new
keeps the information about a holding (espera) procedure
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
-------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "Milton Abrunhosa"
__date__ = "2015/11"
# < imports >--------------------------------------------------------------------------------------
# python library
import logging
import sys
# model
import model.items.prc_model as model
import model.newton.defs_newton as ldefs
# control
import control.events.events_basic as events
# < class CEspNEW >------------------------------------------------------------------------------
class CEspNEW(model.CPrcModel):
"""
    keeps the specific information about a holding (espera) procedure
<espera nEsp="1">
<descricao>Espera MUMOP (274D)</descricao>
<fixo>MUMOP</fixo>
<sentido>D</sentido>
<rumo>274</rumo>
<declmag>-21</declmag>
</espera>
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, f_model, f_data=None, fs_ver="0001"):
"""
@param f_model: model manager
@param f_data: dados da espera
@param fs_ver: versão do formato
"""
# check input
assert f_model
# init super class
super(CEspNEW, self).__init__()
# salva o model manager localmente
self.__model = f_model
# salva o event manager localmente
self.__event = f_model.event
# herados de CPrcModel
# self.v_prc_ok # (bool)
# self.i_prc_id # identificação do procedimento de espera
# self.s_prc_desc # descrição do procedimento de espera
# fixo da espera
self.__ptr_esp_fix = None
# sentido da espera
self.__en_esp_sentido_curva = ldefs.E_MENOR
# rumo magnético da espera
self.__f_esp_rumo = 0.
# rumo verdadeiro da espera
self.__f_esp_true = 0.
# declinação magnética
self.__f_dcl_mag = 0.
# recebeu dados ?
if f_data is not None:
# recebeu uma lista ?
if isinstance(f_data, dict):
# cria uma espera com os dados da lista
self.__load_esp(f_data, fs_ver)
# recebeu uma espera ?
elif isinstance(f_data, CEspNEW):
# copia a espera
self.copy_esp(f_data)
# ---------------------------------------------------------------------------------------------
def copy_esp(self, f_esp):
"""
copy constructor
cria uma nova espera a partir de uma outra espera
@param f_esp: espera a ser copiada
"""
# check input
assert f_esp
# copy super class attributes
super(CEspNEW, self).copy_prc(f_esp)
# fixo
self.__ptr_esp_fix = f_esp.ptr_esp_fix
# rumo
self.__f_esp_rumo = f_esp.f_esp_rumo
# rumo verdadeiro
self.__f_esp_true = f_esp.f_esp_true
# declinação magnética
self.__f_dcl_mag = f_esp.f_dcl_mag
# sentido
self.__en_esp_sentido_curva = f_esp.en_esp_sentido_curva
# ---------------------------------------------------------------------------------------------
def __load_esp(self, fdct_data, fs_ver="0001"):
"""
carrega os dados de espera a partir de um dicionário
@param fdct_data: dicionário com os dados do espera
@param fs_ver: versão do formato dos dados
"""
# formato versão 0.01 ?
if "0001" == fs_ver:
# cria a espera
self.__make_esp(fdct_data)
# senão, formato desconhecido
else:
# logger
l_log = logging.getLogger("CEspNEW::__load_esp")
l_log.setLevel(logging.CRITICAL)
l_log.critical(u"<E01: formato desconhecido.")
# cria um evento de quit
l_evt = events.CQuit()
assert l_evt
# dissemina o evento
self.__event.post(l_evt)
# cai fora...
sys.exit(1)
# ---------------------------------------------------------------------------------------------
def __make_esp(self, fdct_data):
"""
carrega os dados de espera a partir de um dicionário (formato 0001)
@param fdct_data: dicionário com os dados do espera
"""
# identificação da espera
if "nEsp" in fdct_data:
self.i_prc_id = int(fdct_data["nEsp"])
self.s_prc_desc = u"Espera {:02d}".format(fdct_data["nEsp"])
# descrição da espera
if "descricao" in fdct_data:
self.s_prc_desc = fdct_data["descricao"]
# fixo da espera
if "fixo" in fdct_data:
# obtém o dicionário de fixos
ldct_fix = self.__model.coords.dct_fix
# obtém o indicativo do fixo
ls_fix = fdct_data["fixo"]
# obtém o fixo da espera
self.__ptr_esp_fix = ldct_fix.get(ls_fix, None)
# fixo não existe ?
if self.__ptr_esp_fix is None:
# logger
l_log = logging.getLogger("CEspNEW::__make_esp")
l_log.setLevel(logging.ERROR)
l_log.error(u"<E01: espera:[{}]. Fixo [{}] não existe no dicionário".format(self.i_prc_id, ls_fix))
# declinação magnética
if "declmag" in fdct_data:
self.__f_dcl_mag = float(fdct_data["declmag"])
# rumo magnético
if "rumo" in fdct_data:
self.__f_esp_rumo = float(abs(int(fdct_data["rumo"])) % 360)
# rumo verdadeiro
self.__f_esp_true = self.__f_esp_rumo + self.__f_dcl_mag
# normaliza o rumo
if self.__f_esp_true < 0.:
self.__f_esp_true += 360.
elif self.__f_esp_true > 360.:
self.__f_esp_true -= 360.
# sentido
if "sentido" in fdct_data:
# sentido de curva
lc_sentido = fdct_data["sentido"].strip().upper()
# valida o sentido de curva
self.__en_esp_sentido_curva = ldefs.DCT_SENTIDOS_CURVA_INV.get(lc_sentido, ldefs.E_MENOR)
# (bool)
self.v_prc_ok = True
# =============================================================================================
# data
# =============================================================================================
# ---------------------------------------------------------------------------------------------
    @property
    def f_dcl_mag(self):
        """
        get magnetic declination
        """
        return self.__f_dcl_mag
    @f_dcl_mag.setter
    def f_dcl_mag(self, f_val):
        """
        set magnetic declination
        """
        # check input
        # assert 0. <= f_val <= 360.
        # magnetic declination
        self.__f_dcl_mag = f_val
# ---------------------------------------------------------------------------------------------
@property
def ptr_esp_fix(self):
"""
get fixo da espera
"""
return self.__ptr_esp_fix
@ptr_esp_fix.setter
def ptr_esp_fix(self, f_val):
"""
set fixo da espera
"""
self.__ptr_esp_fix = f_val
# ---------------------------------------------------------------------------------------------
@property
def f_esp_rumo(self):
"""
get rumo
"""
return self.__f_esp_rumo
@f_esp_rumo.setter
def f_esp_rumo(self, f_val):
"""
set rumo
"""
# check input
# assert 0. <= f_val <= 360.
# rumo
self.__f_esp_rumo = f_val
# ---------------------------------------------------------------------------------------------
@property
def f_esp_true(self):
"""
get rumo verdadeiro
"""
return self.__f_esp_true
@f_esp_true.setter
def f_esp_true(self, f_val):
"""
set rumo verdadeiro
"""
# check input
# assert 0. <= f_val <= 360.
# rumo
self.__f_esp_true = f_val
# ---------------------------------------------------------------------------------------------
@property
def en_esp_sentido_curva(self):
"""
get sentido de curva
"""
return self.__en_esp_sentido_curva
@en_esp_sentido_curva.setter
def en_esp_sentido_curva(self, f_val):
"""
set sentido de curva
"""
# check input
assert f_val in ldefs.SET_SENTIDOS_CURVA
# sentido de curva
self.__en_esp_sentido_curva = f_val
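# Example (hypothetical sketch): building a holding pattern from a dict that
# mirrors the <espera> XML element shown in the class docstring; f_model is
# assumed to be an initialized model manager whose fix dictionary knows MUMOP.
#
#     l_esp = CEspNEW(f_model, {"nEsp": 1,
#                               "descricao": "Espera MUMOP (274D)",
#                               "fixo": "MUMOP",
#                               "sentido": "D",
#                               "rumo": 274,
#                               "declmag": -21})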
# < the end >--------------------------------------------------------------------------------------
| mlabru/ptracks | model/items/esp_new.py | Python | gpl-3.0 | 9,808 |
#!/usr/bin/env python
# vim: tabstop=4:softtabstop=4:shiftwidth=4:noexpandtab:
# -*- coding: utf-8 -*-
#
# Copyright 2015,2016 Chris Kuethe <[email protected]>
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
from gnuradio import gr
from PIL import Image
from PIL import ImageOps
import pmt
class image_source(gr.sync_block):
"""
Given an image file readable by Python-Imaging, this block produces
monochrome lines suitable for input to spectrum_paint
"""
image_file = None
image_flip = False
bt709_map = True
image_invert = False
autocontrast = False
repeatmode = 1
image_data = None
eof = False
def __init__(self, image_file, image_flip=False, bt709_map=True, image_invert=False, autocontrast=False, repeatmode=1):
gr.sync_block.__init__(self,
name="image_source",
in_sig=None,
out_sig=[numpy.uint8])
self.image_file = image_file
self.image_flip = image_flip
self.bt709_map = bt709_map
self.image_invert = image_invert
self.autocontrast = autocontrast
self.repeatmode = repeatmode
self.load_image()
def load_image(self):
"""decode the image into a buffer"""
self.image_data = Image.open(self.image_file)
self.image_data = ImageOps.grayscale(self.image_data)
if self.autocontrast:
# may or may not improve the look of the transmitted spectrum
self.image_data = ImageOps.autocontrast(self.image_data)
if self.image_invert:
# may or may not improve the look of the transmitted spectrum
self.image_data = ImageOps.invert(self.image_data)
if self.image_flip:
# set to true for waterfalls that scroll from the top
self.image_data = ImageOps.flip(self.image_data)
(self.image_width, self.image_height) = self.image_data.size
max_width = 4096.0
if self.image_width > max_width:
scaling = max_width / self.image_width
newsize = (int(self.image_width * scaling), int(self.image_height * scaling))
(self.image_width, self.image_height) = newsize
self.image_data = self.image_data.resize(newsize)
self.set_output_multiple(self.image_width)
self.image_data = list(self.image_data.getdata())
if self.bt709_map:
# scale brightness according to ITU-R BT.709
self.image_data = map( lambda x: x * 219 / 255 + 16, self.image_data)
self.image_data = list(self.image_data)
self.image_len = len(self.image_data)
if self.repeatmode != 2:
print ("paint.image_source: %d bytes, %dpx width" % (self.image_len, self.image_width))
self.line_num = 0
def work(self, input_items, output_items):
if self.eof:
return -1
out = output_items[0]
self.add_item_tag(0, self.nitems_written(0), pmt.intern("image_width"), pmt.from_long(self.image_width))
self.add_item_tag(0, self.nitems_written(0), pmt.intern("line_num"), pmt.from_long(self.line_num))
out[:self.image_width] = self.image_data[self.image_width*self.line_num: self.image_width*(1+self.line_num)]
self.line_num += 1
if self.line_num >= self.image_height:
self.line_num = 0
if self.repeatmode == 0:
self.eof = True
if self.repeatmode == 2:
self.load_image()
return self.image_width
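# Illustrative usage sketch (not part of the upstream module). It assumes GNU
# Radio is installed and that an image file named "example.png" exists in the
# working directory; the file name and the vector sink are placeholders chosen
# only to show how this block is typically wired into a flowgraph.
if __name__ == '__main__':
    from gnuradio import blocks
    tb = gr.top_block()
    src = image_source("example.png", repeatmode=0)  # stop after one full pass
    sink = blocks.vector_sink_b()                    # collect the produced bytes
    tb.connect(src, sink)
    tb.run()                                         # returns once src signals EOF
    print("produced %d bytes (%d px per line)" % (len(sink.data()), src.image_width))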
| drmpeg/gr-paint | python/image_source.py | Python | gpl-3.0 | 3,745 |
from tasty.types import conversions
from tasty.types import *
from tasty.types.driver import TestDriver
__params__ = {'la': 32, 'lb': 32}
driver = TestDriver()
def protocol(client, server, params):
client.ga = Garbled(val=Unsigned(bitlen=106, dim=[1], signed=False, passive=True, empty=True), passive=True, signed=False, bitlen=106, dim=[1])
client.gb = Garbled(val=Unsigned(bitlen=247, dim=[1], signed=False, passive=True, empty=True), passive=True, signed=False, bitlen=247, dim=[1])
conversions.Garbled_Garbled_receive(client.gc, server.gc, 248, [1], False)
server.c = Unsigned(val=server.gc, signed=False, bitlen=248, dim=[1])
server.c.output(dest=driver, desc='c')
| tastyproject/tasty | tasty/tests/functional/protocols/add/garbled_client_client_server/protocol_online_server.py | Python | gpl-3.0 | 691 |
import reporting.applist as applist
import logging
import os
apps = applist.get_apps()
logger = logging.getLogger(__name__)
def remove_file(fname):
    """
    Quiet deletion of the specified file.
    :param fname: the file to delete
    :type fname: str
    :return: True if successful
    :rtype: bool
    """
    try:
        os.remove(fname)
        return True
    except OSError:
        return False
def remove_files(files):
"""
Removes the specified files.
:param files: the files to remove
:type files: list
"""
for f in files:
remove_file(f)
def close_file(fd):
"""
Closes the file with the specified file descriptor.
:param fd: the file descriptor
:return: True if successful
:rtype: bool
"""
try:
fd.close()
return True
    except Exception:
return False
def close_files(fds):
"""
    Closes the files with the specified file descriptors.
:param fds: the list of file descriptors
"""
for fd in fds:
close_file(fd)
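# Illustrative usage sketch (not part of the upstream module); the temporary
# file created below exists only to demonstrate close_files/remove_files.
if __name__ == '__main__':
    import tempfile
    fd = tempfile.NamedTemporaryFile(delete=False)
    fd.write(b"example")
    close_files([fd])            # close quietly, ignoring errors
    remove_files([fd.name])      # delete quietly, ignoring errors
    print("removed %s: %s" % (fd.name, not os.path.exists(fd.name)))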
| Waikato/automated-reporting | reporting/reporting/os_utils.py | Python | gpl-3.0 | 1,005 |
import re
import datetime
import collections
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from .utils import (bill_abbr, parse_action_date, bill_list_url, history_url,
info_url, vote_url)
import lxml.html
import urlparse
from .actions import Categorizer
class PABillScraper(BillScraper):
jurisdiction = 'pa'
categorizer = Categorizer()
def scrape(self, chamber, session):
self.validate_session(session)
match = re.search("#(\d+)", session)
if match:
self.scrape_session(chamber, session, int(match.group(1)))
else:
self.scrape_session(chamber, session)
def scrape_session(self, chamber, session, special=0):
url = bill_list_url(chamber, session, special)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
for link in page.xpath('//a[contains(@href, "billinfo")]'):
self.parse_bill(chamber, session, special, link)
def parse_bill(self, chamber, session, special, link):
bill_num = link.text.strip()
type_abbr = re.search('type=(B|R|)', link.attrib['href']).group(1)
if type_abbr == 'B':
btype = ['bill']
elif type_abbr == 'R':
btype = ['resolution']
bill_id = "%s%s %s" % (bill_abbr(chamber), type_abbr, bill_num)
url = info_url(chamber, session, special, type_abbr, bill_num)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
xpath = '//div[contains(@class, "BillInfo-ShortTitle")]/div[@class="BillInfo-Section-Data"]'
title = page.xpath(xpath).pop().text_content().strip()
if not title:
return
bill = Bill(session, chamber, bill_id, title, type=btype)
bill.add_source(url)
self.parse_bill_versions(bill, page)
self.parse_history(bill, history_url(chamber, session, special,
type_abbr, bill_num))
# only fetch votes if votes were seen in history
# if vote_count:
self.parse_votes(bill, vote_url(chamber, session, special,
type_abbr, bill_num))
# Dedupe sources.
sources = bill['sources']
for source in sources:
if 1 < sources.count(source):
sources.remove(source)
self.save_bill(bill)
def parse_bill_versions(self, bill, page):
mimetypes = {
'icon-IE': 'text/html',
'icon-file-pdf': 'application/pdf',
'icon-file-word': 'application/msword',
}
for a in page.xpath('//*[contains(@class, "BillInfo-PNTable")]//td/a'):
try:
span = a[0]
except IndexError:
continue
for cls in span.attrib['class'].split():
if cls in mimetypes:
mimetype = mimetypes[cls]
break
href = a.attrib['href']
params = urlparse.parse_qs(href[href.find("?") + 1:])
for key in ('pn', 'PrintersNumber'):
try:
printers_number = params[key][0]
break
except KeyError:
continue
bill.add_version("Printer's No. %s" % printers_number,
href, mimetype=mimetype, on_duplicate='use_old')
def parse_history(self, bill, url):
bill.add_source(url)
html = self.get(url).text
        tries = 0
        while 'There is a problem generating the page you requested.' in html:
            tries += 1
            if tries >= 3:
                self.logger.warning('Internal error')
                return
            html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
self.parse_sponsors(bill, doc)
self.parse_actions(bill, doc)
# vote count
return len(doc.xpath('//a[contains(@href, "rc_view_action1")]/text()'))
def parse_sponsors(self, bill, page):
xpath = ("//div[contains(@class, 'BillInfo-PrimeSponsor')]"
"/div[@class='BillInfo-Section-Data']/a")
sponsors = page.xpath(xpath)
first = True
for sponsor in sponsors:
sponsor = sponsor.text_content()
if first:
sponsor_type = 'primary'
first = False
else:
sponsor_type = 'cosponsor'
if sponsor.find(' and ') != -1:
dual_sponsors = sponsor.split(' and ')
bill.add_sponsor(sponsor_type, dual_sponsors[0].strip().title())
bill.add_sponsor('cosponsor', dual_sponsors[1].strip().title())
else:
name = sponsor.strip().title()
bill.add_sponsor(sponsor_type, name)
def parse_actions(self, bill, page):
chamber = bill['chamber']
for tr in page.xpath("//table[@class='DataTable']//tr"):
action = tr.xpath("string()").replace(u'\xa0', ' ').strip()
if action == 'In the House':
chamber = 'lower'
continue
elif action == 'In the Senate':
chamber = 'upper'
continue
elif action.startswith("(Remarks see"):
continue
match = re.match(
r"(.*),\s+(\w+\.?\s+\d{1,2},\s+\d{4})( \(\d+-\d+\))?", action)
if not match:
continue
action = match.group(1)
attrs = self.categorizer.categorize(action)
date = parse_action_date(match.group(2))
bill.add_action(chamber, action, date, **attrs)
def parse_votes(self, bill, url):
bill.add_source(url)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
for url in page.xpath("//a[contains(., 'Vote')]/@href"):
bill.add_source(url)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
if '/RC/' in url:
self.parse_chamber_votes(bill, url)
elif '/RCC/' in url:
self.parse_committee_votes(bill, url)
else:
msg = 'Unexpected vote url: %r' % url
raise Exception(msg)
def parse_chamber_votes(self, bill, url):
bill.add_source(url)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
xpath = "//a[contains(@href, 'rc_view_action2')]"
        chamber = ('upper' if 'Senate' in page.xpath('string(//h1)') else 'lower')
for link in page.xpath(xpath)[::-1]:
date_str = link.xpath('string(../preceding-sibling::td)').strip()
date = datetime.datetime.strptime(date_str, "%m/%d/%Y")
vote = self.parse_roll_call(link, chamber, date)
bill.add_vote(vote)
def parse_roll_call(self, link, chamber, date):
url = link.attrib['href']
page = self.get(url).text
page = lxml.html.fromstring(page)
xpath = 'string(//div[@class="Column-OneFourth"]/div[3])'
motion = page.xpath(xpath).strip()
motion = re.sub(r'\s+', ' ', motion)
if motion == 'FP':
motion = 'FINAL PASSAGE'
if motion == 'FINAL PASSAGE':
type = 'passage'
elif re.match(r'CONCUR(RENCE)? IN \w+ AMENDMENTS', motion):
type = 'amendment'
else:
type = 'other'
motion = link.text_content()
yeas = int(page.xpath("//div[text() = 'YEAS']")[0].getnext().text)
nays = int(page.xpath("//div[text() = 'NAYS']")[0].getnext().text)
lve = int(page.xpath("//div[text() = 'LVE']")[0].getnext().text)
nv = int(page.xpath("//div[text() = 'N/V']")[0].getnext().text)
other = lve + nv
passed = yeas > (nays + other)
vote = Vote(chamber, date, motion, passed, yeas, nays, other,
type=type)
for div in page.xpath('//*[contains(@class, "RollCalls-Vote")]'):
name = div.text_content().strip()
name = re.sub(r'^[\s,]+', '', name)
name = re.sub(r'[\s,]+$', '', name)
class_attr = div.attrib['class'].lower()
if 'yea' in class_attr:
voteval = 'yes'
elif 'nay' in class_attr:
voteval = 'no'
elif 'nvote' in class_attr:
voteval = 'other'
elif 'lve' in class_attr:
voteval = 'other'
else:
msg = 'Unrecognized vote val: %s' % class_attr
raise Exception(msg)
vote[voteval + '_votes'].append(name)
return vote
def parse_committee_votes(self, bill, url):
bill.add_source(url)
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
        chamber = ('upper' if 'Senate' in doc.xpath('string(//h1)') else 'lower')
committee = tuple(doc.xpath('//h2')[0].itertext())[-2].strip()
for link in doc.xpath("//a[contains(@href, 'listVoteSummary.cfm')]"):
# Date
for fmt in ("%m/%d/%Y", "%m-%d-%Y"):
date = link.xpath('../../td')[0].text_content()
try:
date = datetime.datetime.strptime(date, fmt)
except ValueError:
continue
break
# Motion
motion = link.text_content().split(' - ')[-1].strip()
motion = 'Committee vote (%s): %s' % (committee, motion)
# Roll call.
vote_url = link.attrib['href']
rollcall = self.parse_upper_committee_vote_rollcall(bill, vote_url)
vote = Vote(chamber, date, motion, type='other',
committee=committee, **rollcall)
for voteval in ('yes', 'no', 'other'):
for name in rollcall.get(voteval + '_votes', []):
getattr(vote, voteval)(name)
vote.add_source(url)
vote.add_source(vote_url)
bill.add_vote(vote)
def parse_upper_committee_vote_rollcall(self, bill, url):
bill.add_source(url)
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
rollcall = collections.defaultdict(list)
for div in doc.xpath('//*[contains(@class, "RollCalls-Vote")]'):
name = div.xpath('../preceding-sibling::td/text()')[0]
name = re.sub(r'^[\s,]+', '', name)
name = re.sub(r'[\s,]+$', '', name)
class_attr = div.attrib['class'].lower()
if 'yea' in class_attr:
voteval = 'yes'
elif 'nay' in class_attr:
voteval = 'no'
elif 'nvote' in class_attr:
voteval = 'other'
elif 'lve' in class_attr:
voteval = 'other'
else:
msg = 'Unrecognized vote val: %s' % class_attr
raise Exception(msg)
rollcall[voteval + '_votes'].append(name)
for voteval, xpath in (('yes', '//*[contains(@class, "RollCalls-Vote-Yeas")]'),
('no', '//*[contains(@class, "RollCalls-Vote-Nays")]'),
('other', '//*[contains(@class, "RollCalls-Vote-NV")]')):
count = len(doc.xpath(xpath))
rollcall[voteval + '_count'] = int(count)
rollcall['passed'] = rollcall['yes_count'] > rollcall['no_count']
return dict(rollcall)
| showerst/openstates | openstates/pa/bills.py | Python | gpl-3.0 | 11,845 |
# -*- coding: utf-8 -*-
import re
from module.network.RequestFactory import getURL
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.Plugin import chunks
def getInfo(urls):
result = []
for chunk in chunks(urls, 10):
for url in chunk:
html = getURL(url)
if r'<div class="errorMessage mb10">' in html:
result.append((url, 0, 1, url))
elif r'Page cannot be displayed' in html:
result.append((url, 0, 1, url))
else:
try:
url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
file_name = re.search(url_pattern, html).group(0).split(', event)">')[1].split('</a>')[0]
result.append((file_name, 0, 2, url))
except Exception:
pass
# status 1=OFFLINE, 2=OK, 3=UNKNOWN
# result.append((#name,#size,#status,#url))
yield result
class FilesMailRu(Hoster):
__name__ = "FilesMailRu"
__type__ = "hoster"
__version__ = "0.33"
__pattern__ = r'http://(?:www\.)?files\.mail\.ru/.+'
__description__ = """Files.mail.ru hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("oZiRiz", "[email protected]")]
def setup(self):
self.multiDL = bool(self.account)
def process(self, pyfile):
self.html = self.load(pyfile.url)
self.url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
        # marks the file as "offline" when the pattern was found on the HTML page
if r'<div class="errorMessage mb10">' in self.html:
self.offline()
elif r'Page cannot be displayed' in self.html:
self.offline()
        # the filename that will be shown in the list (e.g. test.part1.rar)
pyfile.name = self.getFileName()
        # prepare and download
if not self.account:
self.prepare()
self.download(self.getFileUrl())
self.myPostProcess()
else:
self.download(self.getFileUrl())
self.myPostProcess()
def prepare(self):
"""You have to wait some seconds. Otherwise you will get a 40Byte HTML Page instead of the file you expected"""
self.setWait(10)
self.wait()
return True
def getFileUrl(self):
"""gives you the URL to the file. Extracted from the Files.mail.ru HTML-page stored in self.html"""
return re.search(self.url_pattern, self.html).group(0).split('<a href="')[1].split('" onclick="return Act')[0]
def getFileName(self):
"""gives you the Name for each file. Also extracted from the HTML-Page"""
return re.search(self.url_pattern, self.html).group(0).split(', event)">')[1].split('</a>')[0]
def myPostProcess(self):
        # Searches the file for HTML code. Sometimes the redirect
        # doesn't work (maybe a curl problem) and you get only a small
        # HTML file and the download is marked as "finished";
        # then the download will be restarted. It's only bad for those
        # who want to download an HTML file (it's one in a million ;-) )
        #
        # The maximum upload size allowed on files.mail.ru at the moment is 100MB,
        # so I set it to check every download, because sometimes there are downloads
        # that contain the HTML text and 60MB of zeros after that in a xyzfile.part1.rar file
        # (loading 100MB into RAM is not an option)
check = self.checkDownload({"html": "<meta name="}, read_size=50000)
if check == "html":
self.logInfo(_(
"There was HTML Code in the Downloaded File (%s)...redirect error? The Download will be restarted." %
self.pyfile.name))
self.retry()
| Zerknechterer/pyload | module/plugins/hoster/FilesMailRu.py | Python | gpl-3.0 | 3,886 |
__author__ = 'j'
import PriceSpider
import Config as config
class CPUCrawler(PriceSpider.PriceSpider):
name = "CPUprice" #Name to craw, gets used to get the start_urls[]
pathName = "CPUpath" #Used to get ConfigFile
arrEanIdentifier = "CPUEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "CPUjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class GPUCrawler(PriceSpider.PriceSpider):
name = "GPUprice" #Name to craw, gets used to get the start_urls[]
pathName = "GPUpath" #Used to get ConfigFile
arrEanIdentifier = "GPUEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "GPUjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class HDDCrawler(PriceSpider.PriceSpider):
name = "HDDprice" #Name to craw, gets used to get the start_urls[]
pathName = "HDDpath" #Used to get ConfigFile
arrEanIdentifier = "HDDEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "HDDjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class SSDCrawler(PriceSpider.PriceSpider):
name = "SSDprice" #Name to craw, gets used to get the start_urls[]
pathName = "SSDpath" #Used to get ConfigFile
arrEanIdentifier = "SSDEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "SSDjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class CaseCrawler(PriceSpider.PriceSpider):
name = "CASEprice" #Name to craw, gets used to get the start_urls[]
pathName = "CASEpath" #Used to get ConfigFile
arrEanIdentifier = "CASEEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "CASEjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class MemoryCrawler(PriceSpider.PriceSpider):
name = "MEMORYprice" #Name to craw, gets used to get the start_urls[]
pathName = "MEMORYpath" #Used to get ConfigFile
arrEanIdentifier = "MEMORYEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "MEMORYjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class SoundcardCrawler(PriceSpider.PriceSpider):
name = "SOUNDCARDprice" #Name to craw, gets used to get the start_urls[]
pathName = "SOUNDCARDpath" #Used to get ConfigFile
arrEanIdentifier = "SOUNDCARDEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "SOUNDCARDjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class OpticaldriveCrawler(PriceSpider.PriceSpider):
name = "OPTICALDRIVEprice" #Name to craw, gets used to get the start_urls[]
pathName = "OPTICALDRIVEpath" #Used to get ConfigFile
arrEanIdentifier = "OPTICALDRIVEEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "OPTICALDRIVEjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class MotherboardCrawler(PriceSpider.PriceSpider):
name = "MOTHERBOARDprice" #Name to craw, gets used to get the start_urls[]
pathName = "MOTHERBOARDpath" #Used to get ConfigFile
arrEanIdentifier = "MOTHERBOARDEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "MOTHERBOARDjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
class PSUCrawler(PriceSpider.PriceSpider):
name = "PSUprice" #Name to craw, gets used to get the start_urls[]
pathName = "PSUpath" #Used to get ConfigFile
arrEanIdentifier = "PSUEAN"
relation = "SELLS" #Name of the relation between the BaseNode and Crawled Node
start_urls = []
allowed_domains = ["tweakers.net"]
path = None
JSONpath = None
JSONpathLocation = "PSUjson"
filteredDict = {}
arrEan = []
y = -1
if name in config.price_configs:
start_urls = config.price_configs[name]
path = config.price_configs[pathName]
arrEan = config.price_configs[arrEanIdentifier]
JSONpath = config.price_configs[JSONpathLocation]
else:
print("ERROR: key does not exist in dictonairy")
def parse(self, response):
PriceSpider.PriceSpider.altParse(self, response)
| Frenesius/CrawlerProject56 | crawler/spiders/PriceCrawler.py | Python | gpl-3.0 | 9,102 |
# -*- coding: utf-8 -*-
# Define self package variable
__version__ = "1.1.3.31"
__all__ = ["pycltools"]
__author__ = "Adrien Leger"
__email__ = "[email protected]"
__url__ = "https://github.com/a-slide/pycoQC"
__licence__ = "GPLv3"
__classifiers__ = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Programming Language :: Python :: 3",
]
__install_requires__ = [
"pysam>=0.14.0",
"pandas>=0.23.0",
"numpy>=1.14.0",
"notebook>=5.6.0",
"tqdm>=4.23.4",
"httplib2",
"matplotlib>=3.0.0",
]
__python_requires__ = ">=3"
__description__ = "pycltools is a package written in python3 containing a collection of generic functions and classes for file parsing, manipulation..."
# Collect info in a dictionary for setup.py
setup_dict = {
"name": __name__,
"version": __version__,
"description": __description__,
"url": __url__,
"author": __author__,
"author_email": __email__,
"license": __licence__,
"classifiers": __classifiers__,
"install_requires": __install_requires__,
"packages": [__name__],
"python_requires": __python_requires__,
}
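# Illustrative note (not part of the original package): a setup.py can consume
# this dictionary directly, e.g.
#     from setuptools import setup
#     from pycltools import setup_dict
#     setup(**setup_dict)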
| a-slide/pycl | pycltools/__init__.py | Python | gpl-3.0 | 1,274 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2004 Sam Hocevar <[email protected]>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
import urllib, urllib2
import json
import logging
class RESTClient(object):
"""
Python client to interact with GLPI webservices plugin
"""
def __init__(self, baseurl="http://localhost/glpi"):
"""
@param baseurl: Base URL of your GLPI instance
@type baseurl: str
"""
self.baseurl = baseurl
self.resturl = self.baseurl + '/plugins/webservices/rest.php?'
self.session = None
self.logger = logging.getLogger()
def connect(self, login_name=None, login_password=None):
"""
Connect to a running GLPI instance with webservices
plugin enabled.
Returns True if connection was successful.
@param login_name: your GLPI username
@type login_name: string
@param login_password: your GLPI password
@type login_password: string
"""
if not None in [login_name, login_password]:
params = {
'method':'glpi.doLogin',
'login_name': login_name,
'login_password': login_password,
}
response = urllib2.urlopen(self.resturl + urllib.urlencode(params))
result = json.loads(response.read())
if 'session' in result:
self.session = result['session']
else:
raise Exception("Login incorrect or server down")
else:
self.logger.warn("Connected anonymously, will only be able to use non-authenticated methods")
return True
def __getattr__(self, attr):
def _get_doc(attr, _help):
"""
Format docstring for wrapped method
"""
ret = "Wrapper for GLPI webservices %s method:\n\n" % attr
ret += "It could be a good idea to see method's reference page:\n"
ret += "https://forge.indepnet.net/projects/webservices/wiki/Glpi%s\n\n" % attr
ret += "@param module: webservices module to call (default: glpi)\n"
ret += "@type module: str\n"
ret += "@param kwargs: options for %s method:\n\n" % attr
for (key, value) in _help.items():
ret += '\t- %s: %s\n' % (key, value)
ret += "\n@type kwargs: dict"
return ret
def treatFields(params):
"""
Format fields for REST API
With REST API, fields must be formatted.
Used for methods such as deleteObjects and updateObjects
"""
fields = params.pop('fields', [])
if attr == 'deleteObjects':
for glpi_type in fields:
for key, value in fields[glpi_type].items():
params['fields[%s][%s]' % (glpi_type, key)] = value
elif attr == 'updateObjects':
for glpi_type in fields:
for elem in fields[glpi_type]:
elem_id = elem['id']
for key, value in elem.items():
params['fields[%s][%s][%s]' % (glpi_type, elem_id, key)] = value
return params
def call(module='glpi', *args, **kwargs):
params = {'method': '.'.join([module, attr])}
if self.session:
params['session'] = self.session
params = dict(params.items() + kwargs.items())
if 'fields' in params:
params = treatFields(params)
response = urllib2.urlopen(self.resturl + urllib.urlencode(params))
return json.loads(response.read())
call.__name__ = attr
call.__doc__ = _get_doc(attr, call(help=True))
return call
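# Illustrative usage sketch (not part of the upstream module). The base URL and
# credentials are placeholders, and "doLogout" is shown only as an example of a
# webservices method name resolved dynamically by __getattr__ above; check the
# plugin documentation for the methods your GLPI server actually exposes.
if __name__ == '__main__':
    client = RESTClient(baseurl="http://localhost/glpi")
    client.connect(login_name="glpi", login_password="glpi")
    print(client.doLogout())  # any glpi.<method> call can be issued the same way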
| x3rus/glpi-nwipe | glpi_client/RESTClient.py | Python | gpl-3.0 | 4,243 |
#!/usr/bin/env python
from optparse import OptionParser
import select
from sys import stderr, stdin, stdout
fromfile=True
if select.select([stdin,],[],[],0.0)[0]:
inp=stdin
fromfile=False
else:
parser = OptionParser()
parser.add_option('-f', '--file', dest='filename',
help='Input file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.filename:
stderr.write('Missing filename')
parser.print_help(stderr)
stderr.write('\n')
exit(1)
inp = open(options.filename, 'r')
fromfile=True
for line in inp.readlines():
line=line.split()
if line==[]:
continue
if line[0]=='e':
stdout.write('edge({},{}).\n'.format(line[1],line[2]))
if fromfile:
inp.close()
try:
stdout.close()
except:
pass
try:
stderr.close()
except:
pass
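# Illustrative note (not part of the original script): an input line in DIMACS
# edge format such as
#     e 1 2
# is rewritten to the fact
#     edge(1,2).
# so the graph can be fed to an ASP/logic-programming solver.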
| daajoe/trellis | converters/dimacs2lp.py | Python | gpl-3.0 | 868 |
# -*- coding: utf-8 -*-
from widgets import widgets
from .inventorybrowser import InventoryBrowserWidget
class ModuleInfo(widgets.ModuleInfoBase):
LABEL = 'inventorybrowser'
NAME = 'Inventory Browser'
@staticmethod
def createWidgets(handle, parent):
return InventoryBrowserWidget(handle, parent)
| matzman666/PyPipboyApp | widgets/inventorybrowser/info.py | Python | gpl-3.0 | 330 |
"""Tests for RH Cloud - Inventory
:Requirement: RH Cloud - Inventory
:CaseAutomation: Automated
:CaseLevel: System
:CaseComponent: RHCloud-Inventory
:Assignee: jpathan
:TestType: Functional
:CaseImportance: Critical
:Upstream: No
"""
from datetime import datetime
import pytest
from nailgun import entities
from robottelo.api.utils import wait_for_tasks
from robottelo.config import settings
from robottelo.constants import DEFAULT_LOC
@pytest.mark.run_in_one_thread
@pytest.mark.tier3
def test_rhcloud_insights_e2e(
session,
rhel8_insights_vm,
fixable_rhel8_vm,
organization_ak_setup,
unset_rh_cloud_token,
):
"""Synchronize hits data from cloud, verify it is displayed in Satellite and run remediation.
:id: d952e83c-3faf-4299-a048-2eb6ccb8c9c2
:Steps:
1. Prepare misconfigured machine and upload its data to Insights.
2. Add Cloud API key in Satellite.
        3. In Satellite UI, Configure -> Insights -> Add RH Cloud token and sync recommendations.
4. Run remediation for dnf.conf recommendation against rhel8 host.
5. Assert that job completed successfully.
6. Sync Insights recommendations.
7. Search for previously remediated issue.
:expectedresults:
1. Insights recommendation related to dnf.conf issue is listed for misconfigured machine.
2. Remediation job finished successfully.
3. Insights recommendation related to dnf.conf issue is not listed.
:CaseAutomation: Automated
"""
org, ak = organization_ak_setup
query = 'dnf.conf'
job_query = (
f'Remote action: Insights remediations for selected issues on {rhel8_insights_vm.hostname}'
)
with session:
session.organization.select(org_name=org.name)
session.location.select(loc_name=DEFAULT_LOC)
session.cloudinsights.save_token_sync_hits(settings.rh_cloud.token)
timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
wait_for_tasks(
search_query=f'Insights full sync and started_at >= "{timestamp}"',
search_rate=15,
max_tries=10,
)
# Workaround for alert message causing search to fail. See airgun issue 584.
session.browser.refresh()
result = session.cloudinsights.search(query)[0]
assert result['Hostname'] == rhel8_insights_vm.hostname
assert (
result['Recommendation'] == 'The dnf installs lower versions of packages when the '
'"best" option is not present in the /etc/dnf/dnf.conf'
)
timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
session.cloudinsights.remediate(query)
result = wait_for_tasks(
search_query=f'{job_query} and started_at >= "{timestamp}"',
search_rate=15,
max_tries=10,
)
task_output = entities.ForemanTask().search(query={'search': result[0].id})
assert task_output[0].result == 'success', f'result: {result}\n task_output: {task_output}'
timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
session.cloudinsights.sync_hits()
wait_for_tasks(
search_query=f'Insights full sync and started_at >= "{timestamp}"',
search_rate=15,
max_tries=10,
)
# Workaround for alert message causing search to fail. See airgun issue 584.
session.browser.refresh()
assert not session.cloudinsights.search(query)
| rplevka/robottelo | tests/foreman/ui/test_rhcloud_insights.py | Python | gpl-3.0 | 3,486 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
if sys.version_info[0] > 2:
basestring = unicode = str
import os
import atexit
import logging
import socket
import time
import warnings
from django.utils.translation import ugettext as _
#bots-modules
from . import botslib
from . import botsinit
from . import botsglobal
from . import router
from . import cleanup
''' Start bots-engine.'''
def start():
''' sysexit codes:
0: OK, no errors
1: (system) errors incl parsing of command line arguments
2: bots ran OK, but there are errors/process errors in the run
3: Database is locked, but "maxruntime" has not been exceeded.
'''
#NOTE: bots directory should always be on PYTHONPATH - otherwise it will not start.
#********command line arguments**************************
usage = '''
This is "%(name)s" version %(version)s, part of Bots open source edi translator (http://bots.sourceforge.net).
Does the actual translations and communications; it's the workhorse. It does not have a fancy interface.
Usage:
%(name)s [run-options] [config-option] [routes]
Run-options (can be combined):
--new receive new edi files (default: if no run-option given: run as new).
--resend resend as indicated by user.
--rereceive rereceive as indicated by user.
--automaticretrycommunication - automatically retry outgoing communication.
--cleanup remove older data from database.
Config-option:
-c<directory> directory for configuration files (default: config).
Routes: list of routes to run. Default: all active routes (in the database)
''' % {'name': os.path.basename(sys.argv[0]), 'version': botsglobal.version}
configdir = 'config'
commandspossible = ['--automaticretrycommunication', '--resend', '--rereceive', '--new']
commandstorun = []
routestorun = [] # list with routes to run
do_cleanup_parameter = False
for arg in sys.argv[1:]:
if arg.startswith('-c'):
configdir = arg[2:]
if not configdir:
print('Error: configuration directory indicated, but no directory name.')
sys.exit(1)
elif arg in commandspossible:
commandstorun.append(arg)
elif arg == '--cleanup':
do_cleanup_parameter = True
elif arg in ['?', '/?', '-h', '--help'] or arg.startswith('-'):
print(usage)
sys.exit(0)
else: # pick up names of routes to run
routestorun.append(arg)
if not commandstorun and not do_cleanup_parameter: # if no command on command line, use new (default)
commandstorun = ['--new']
commandstorun = [command[2:] for command in commandspossible if command in commandstorun] # sort commands
#***********end handling command line arguments**************************
botsinit.generalinit(configdir) # find locating of bots, configfiles, init paths etc.
    # Set working directory to the bots installation. Advantage: when using
    # relative paths it is clear that these point to paths within the bots
    # installation.
os.chdir(botsglobal.ini.get('directories', 'botspath'))
#**************check if another instance of bots-engine is running/if port is free******************************
try:
engine_socket = botslib.check_if_other_engine_is_running()
except socket.error:
sys.exit(3)
else:
atexit.register(engine_socket.close)
#**************initialise logging******************************
process_name = 'engine'
botsglobal.logger = botsinit.initenginelogging(process_name)
atexit.register(logging.shutdown)
if botsglobal.ini.get('settings', 'log_file_number', '') != 'daily':
        for key, value in botslib.botsinfo(): # log info about environment, versions, etc
botsglobal.logger.info('%(key)s: "%(value)s".', {'key': key, 'value': value})
#**************connect to database**********************************
try:
botsinit.connect()
except Exception as msg:
botsglobal.logger.exception(
_('Could not connect to database. Database settings are in bots/config/settings.py. Error: "%(msg)s".'), {'msg': msg})
sys.exit(1)
else:
botsglobal.logger.info(_('Connected to database.'))
atexit.register(botsglobal.db.close)
#************initialise user exits for the whole bots-engine*************************
try:
userscript, scriptname = botslib.botsimport('routescripts', 'botsengine')
    except botslib.BotsImportError: # userscript is not there; other errors like syntax errors are not caught
userscript = scriptname = None
#***acceptance tests: initialiase acceptance user script******************************
acceptance_userscript = acceptance_scriptname = None
if botsglobal.ini.getboolean('acceptance', 'runacceptancetest', False):
botsglobal.logger.info(
_('This run is an acceptance test - as indicated in option "runacceptancetest" in bots.ini.'))
try:
acceptance_userscript, acceptance_scriptname = botslib.botsimport('routescripts', 'bots_acceptancetest')
except botslib.BotsImportError:
botsglobal.logger.info(
_('In acceptance test there is no script file "bots_acceptancetest.py" to check the results of the acceptance test.'))
#**************handle database lock****************************************
# set a lock on the database; if not possible, the database is locked: an
# earlier instance of bots-engine was terminated unexpectedly.
if not botslib.set_database_lock():
#for SQLite: do a integrity check on the database
if botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
cursor = botsglobal.db.execute('''PRAGMA integrity_check''')
result = cursor.fetchone()
if result[0] != 'ok':
warn = _('!Bots database is locked!\n'
'Bots did an integrity check on the database, but database was not OK.\n'
'Manual action is needed!\n'
'Bots has stopped processing EDI files.')
botsglobal.logger.critical(warn)
botslib.sendbotserrorreport(_('[Bots severe error]Database is damaged'), warn)
sys.exit(1)
warn = _('!Bots database is locked!\n'
'Bots-engine has ended in an unexpected way during the last run.\n'
'Most likely causes: sudden power-down, system crash, problems with disk I/O, bots-engine terminated by user, etc.\n'
'Bots will do an automatic crash recovery now.')
botsglobal.logger.critical(warn)
botslib.sendbotserrorreport(_('[Bots severe error]Database is locked'), warn)
# there is a database lock. Add a crashrecovery as first command to run.
commandstorun.insert(0, 'crashrecovery')
atexit.register(botslib.remove_database_lock)
warnings.simplefilter('error', UnicodeWarning)
#**************run the routes**********************************************
#commandstorun determines the type(s) of run. eg: ['automaticretrycommunication','new']
try:
botslib.prepare_confirmrules()
#in acceptance tests: run a user script before running eg to clean output
botslib.tryrunscript(acceptance_userscript, acceptance_scriptname, 'pretest', routestorun=routestorun)
botslib.tryrunscript(userscript, scriptname, 'pre', commandstorun=commandstorun, routestorun=routestorun)
errorinrun = 0 # detect if there has been some error. Only used for correct exit() code
first_command_2_run = True
for command in commandstorun:
# if multiple commands in run: reports etc are based on timestamp; so
# there needs to be at least one second between these runs.
if first_command_2_run:
first_command_2_run = False
else:
time.sleep(1)
botsglobal.logger.info(_('Run "%(command)s".'), {'command': command})
#************get list of routes to run*******************************
if routestorun:
use_routestorun = routestorun[:]
botsglobal.logger.info(_('Run routes from command line: "%(routes)s".'),
{'routes': unicode(use_routestorun)})
elif command == 'new': # fetch all active routes from database unless 'not in default run' or not active.
use_routestorun = []
for row in botslib.query('''SELECT DISTINCT idroute
FROM routes
WHERE active=%(active)s
AND (notindefaultrun=%(notindefaultrun)s OR notindefaultrun IS NULL)
ORDER BY idroute ''',
{'active': True, 'notindefaultrun': False}):
use_routestorun.append(row[str('idroute')])
botsglobal.logger.info(_('Run active routes from database that are in default run: "%(routes)s".'), {
'routes': unicode(use_routestorun)})
else: # for command other than 'new': use all active routes.
use_routestorun = []
for row in botslib.query('''SELECT DISTINCT idroute
FROM routes
WHERE active=%(active)s
ORDER BY idroute ''',
{'active': True}):
use_routestorun.append(row[str('idroute')])
botsglobal.logger.info(_('Run all active routes from database: "%(routes)s".'),
{'routes': unicode(use_routestorun)})
#************run routes for this command******************************
botslib.tryrunscript(userscript, scriptname, 'pre' + command, routestorun=use_routestorun)
errorinrun += router.rundispatcher(command, use_routestorun)
botslib.tryrunscript(userscript, scriptname, 'post' + command, routestorun=use_routestorun)
#*********finished running routes for this command****************************
#*********finished all commands****************************************
botslib.tryrunscript(userscript, scriptname, 'post', commandstorun=commandstorun, routestorun=routestorun)
        try: # in acceptance tests: run a user script. no good reporting of errors/results in post-test script. Reason: this is after automatic maintenance.
botslib.tryrunscript(acceptance_userscript, acceptance_scriptname, 'posttest', routestorun=use_routestorun)
except Exception as msg:
print(unicode(msg))
cleanup.cleanup(do_cleanup_parameter, userscript, scriptname)
except Exception as msg:
# of course this 'should' not happen.
botsglobal.logger.exception(_('Severe error in bots system:\n%(msg)s'), {'msg': unicode(msg)})
sys.exit(1)
else:
if errorinrun:
sys.exit(2) # indicate: error(s) in run(s)
else:
sys.exit(0) # OK
if __name__ == '__main__':
start()
| WouterVH/bots | src/bots/engine.py | Python | gpl-3.0 | 11,626 |
'''
Created on Oct 12, 2011
this code is for reading input parameters.
@author: Ying Jin
@status:
@contact: [email protected]
@version:
'''
# python modules
import sys
import os
import re
import logging
import time
import gzip
import collections
from math import log
from TEToolkit.Constants import *
from TEToolkit.ShortRead.ParseBEDFile import BEDFile,BAMFile,SAMFile
#Taken from HTSeq
class FileOrSequence( object ):
""" The construcutor takes one argument, which may either be a string,
which is interpreted as a file name (possibly with path), or a
connection, by which we mean a text file opened for reading, or
any other object that can provide an iterator over strings
(lines of the file).
The advantage of passing a file name instead of an already opened file
is that if an iterator is requested several times, the file will be
re-opened each time. If the file is already open, its lines can be read
only once, and then, the iterator stays exhausted.
Furthermore, if a file name is passed that end in ".gz" or ".gzip"
(case insensitive), it is transparently gunzipped.
"""
def __init__( self, filename_or_sequence ):
self.fos = filename_or_sequence
self.line_no = None
def __iter__( self ):
self.line_no = 1
if isinstance( self.fos, str ):
if self.fos.lower().endswith( ( ".gz" , ".gzip" ) ):
lines = gzip.open( self.fos )
else:
lines = open( self.fos )
else:
lines = self.fos
for line in lines:
yield line
self.line_no += 1
if isinstance( self.fos, str ):
lines.close()
self.line_no = None
def __repr__( self ):
if isinstance( self.fos, str ):
return "<%s object, connected to file name '%s'>" % (
self.__class__.__name__, self.fos )
else:
return "<%s object, connected to %s >" % (
self.__class__.__name__, repr( self.fos ) )
def get_line_number_string( self ):
if self.line_no is None:
if isinstance( self.fos, str ):
return "file %s closed" % self.fos
else:
return "file closed"
if isinstance( self.fos, str ):
return "line %d of file %s" % ( self.line_no, self.fos )
else:
return "line %d" % self.line_no
#Taken from HTSeq
class SAM_Reader( FileOrSequence ):
"""Parse a SAM File"""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "@" ):
continue
try:
(qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, trash) = line.split("\t", 10)
except ValueError, e:
e.args = e.args + ( self.get_line_number_string(), )
raise
yield (qname, int(flag), rname, int(pos)-1, mapq, cigar, rnext, pnext, seq.upper())
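# Illustrative note (not part of the original module): because SAM_Reader
# re-opens the underlying file each time it is iterated, the same (optionally
# gzipped) alignment file can be scanned several times, e.g.
#     for (qname, flag, rname, pos, mapq, cigar, rnext, pnext, seq) in SAM_Reader("sample.sam.gz"):
#         ...
# The file name above is a placeholder.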
def read_opts(parser):
''' object parser contains parsed options '''
args = parser.parse_args()
#treatment files
for i in range(len(args.tfiles)) :
if not os.path.isfile(args.tfiles[i]) :
logging.error("No such file: %s !\n" % (args.tfiles[i]))
sys.exit(1)
if not os.path.isfile(args.tinputs[0]) :
logging.error("No such file: %s !\n" % (args.tinputs))
sys.exit(1)
#control files
if args.cfiles != None :
for i in range(len(args.cfiles)) :
if not os.path.isfile(args.cfiles[i]) :
logging.error("No such file: %s !\n" % (args.cfiles[i]))
sys.exit(1)
else :
if args.cinputs == None :
logging.error("No input for control samples!\n")
sys.exit(1)
else :
args.cinputs = None
if args.TEmode != 'multi' and args.TEmode != 'uniq' :
logging.error("Does not support TE mode : %s !\n" % (args.TEmode))
# file parser
if args.format == "BAM" :
args.parser = BAMFile
# elif args.format == "SAM" :
# args.parser = SAMFile
elif args.format == "BED" :
args.parser = BEDFile
else :
logging.error("Does not support such file format: %s !\n" %(args.format))
sys.exit(1)
#window size
if args.wsize < 0 :
logging.error("window size should be greater than 0, default value %d was used\n" % (WIN_SIZE))
args.wsize = WIN_SIZE
#step size
if args.step < 0 :
logging.error("step size should be greater than 0, default value %d was used\n" % (STEP))
args.step = STEP
if args.step > args.wsize :
logging.error("step should be smaller than window size,default value %d was used\n" % (STEP))
args.step = STEP
#cutoff
if args.minread < 0 :
args.minread = 0
if args.minread > 20 :
args.minread = 20
#species
if args.species[0] not in ['hg','mm','dm','tm'] :
logging.error("species not found %s \n" %(args.species[0]))
parser.print_help()
sys.exit(1)
args.gsize = efgsize[args.species[0]]
args.gsize = float(args.gsize)
if args.species[0] == 'hg' :
args.chrom = HS_CHROM
args.species[0] = 'hg19'
elif args.species[0] == 'mm' :
args.chrom = MM_CHROM
args.species[0] = 'mm9'
elif args.species[0] == 'dm' :
args.chrom = DM_CHROM
args.species[0] = 'dm3'
elif args.species[0] == 'tm' :
args.chrom = TM_CHROM
#normalization
if args.norm not in ['sd','bc'] :
logging.error("normalization method %s not supported\n" % (args.norm))
parser.print_help()
sys.exit(1)
#p-value
if args.pval < 0 or args.pval > 1 :
logging.error("p-value should be a value in [0,1]\n")
sys.exit(1)
args.log_pvalue = log(args.pval,10)*-10
#gap size
if args.gap < 0 :
logging.error("gap size should be greater than 0, default value was used\n")
args.gap = GAP
#fragment size
if args.fragsize < 0 :
logging.error("fragment size should be greater than 0, default value %d was used\n" % (FRAG_SIZE))
args.fragsize = FRAG_SIZE
#output filenames
args.dfbs = args.prj_name+"_dfbs"
# logging object
logging.basicConfig(level=(4-args.verbose)*10,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
args.error = logging.critical # function alias
args.warn = logging.warning
args.debug = logging.debug
args.info = logging.info
cinput = None
if args.cinputs != None:
cinput = args.cinputs[0]
args.argtxt = "\n".join((
"# ARGUMENTS LIST:",\
"# name = %s" % (args.prj_name),\
"# treatment files = %s" % (args.tfiles),\
"# control files = %s" % (args.cfiles),\
'# treatment input = %s' % (args.tinputs[0]),\
'# control input = %s' % (cinput),\
# "# window size = %d" % (args.wsize),\
"# step = %d" % (args.step),\
# "# gap = %d" % (args.gap),\
"# fragment size = %d" % (args.fragsize),\
"# species = %s (hg:human, rn:rat, mm:mouse)" % (args.species[0]),\
"# min read cutoff = %d" % (args.minread),\
"# statistical model = Poisson distribution" ,\
"# normalization = %s (sd: sequence depth, bc: bin correlation)" % (args.norm),\
"# pvalue cutoff = %.2e" % (args.pval),\
"# TEmode = %s " % (args.TEmode)
# "# TE annotation file = %s \n" % (args.TEannotation)
))
return args
def read_opts2(parser):
args = parser.parse_args()
if not os.path.isfile(args.tefile) :
logging.error("No such file: %s !\n" %(args.tefile))
sys.exit(1)
if not os.path.isfile(args.gtffile) :
logging.error("No such file: %s !\n" % (args.gtffile))
sys.exit(1)
# Obtain & store list of files for group 1 (e.g. treatment/mutant)
for i in range(len(args.tfiles)) :
if not os.path.isfile(args.tfiles[i]) :
logging.error("No such file: %s !\n" % (args.tfiles[i]))
sys.exit(1)
# Obtain & store list of files for group2 (e.g. control/wildtype)
for i in range(len(args.cfiles)) :
if not os.path.isfile(args.cfiles[i]) :
logging.error("No such file: %s !\n" % (args.cfiles[i]))
sys.exit(1)
# Identify file format for subsequent processing (parsing)
if args.format == "BAM" :
args.parser = "BAM"
elif args.format == "SAM" :
args.parser = "SAM"
else :
logging.error("Does not support such file format: %s !\n" % (args.format))
sys.exit(1)
# What type of RNA-Seq experiment (stranded or not)
if args.stranded not in ['yes', 'no', 'reverse'] :
logging.error("Does not support such stranded value: %s !\n" % (args.stranded))
sys.exit(1)
# Method of assigning reads to annotation (gene or TE)
if args.te_mode not in ['uniq', 'multi'] :
logging.error("multi-mapper counting mode %s not supported\n" % (args.te_mode))
parser.print_help()
sys.exit(1)
# Method of normalization (rpm or quantile)
if args.norm not in ['quant','TC','DESeq_default'] :
logging.error("normalization method %s not supported\n" % (args.norm))
parser.print_help()
sys.exit(1)
# Cutoff for adjusted p-value
if args.pval < 0 or args.pval > 1 :
logging.error("p-value should be a value in [0,1]\n")
sys.exit(1)
# Cutoff for fold change
if args.fc == 0:
logging.error("absolute fold change ratio cannot be zero\n")
sys.exit(1)
elif args.fc < 0:
args.fc = -1.0 * args.fc
elif args.fc < 1 :
args.fc = 1.0/args.fc
else:
args.fc = 1.0 * args.fc
if args.sortByPos:
args.sortByPos=True
else:
args.sortByPos=False
if args.min_read < 0 :
args.min_read = 1
if args.numItr < 0 :
args.numItr = 0
if args.fragLength < 0 :
logging.error("the fragment length cannot be negative. \n")
sys.exit(1)
if args.minL < 0 :
logging.error("the minimum fragment length cannot be negative. \n")
sys.exit(1)
if args.maxL < 0 :
logging.error("the maximum fragment length cannot be negative. \n")
sys.exit(1)
# Level of logging for tool
logging.basicConfig(level=(4 - args.verbose) * 10,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr, filemode="w")
args.error = logging.critical # function alias
args.warn = logging.warning
args.debug = logging.debug
args.info = logging.info
args.argtxt = "\n".join(("# ARGUMENTS LIST:", \
"# name = %s" % (args.prj_name), \
"# treatment files = %s" % (args.tfiles), \
"# control files = %s" % (args.cfiles), \
"# GTF file = %s " % (args.gtffile), \
"# TE file = %s " % (args.tefile), \
"# multi-mapper mode = %s " % (args.te_mode), \
"# stranded = %s " % (args.stranded), \
"# normalization = %s (rpm: Reads Per Million mapped; quant: Quantile normalization)" % (args.norm), \
"# FDR cutoff = %.2e" % (args.pval), \
"# fold-change cutoff = %5.2f" % (args.fc), \
"# read count cutoff = %d" % (args.min_read), \
"# number of iteration = %d" % (args.numItr), \
"# Alignments grouped by read ID = %s\n" % (not args.sortByPos)
))
return args
def read_chrlen_tbl(chrfile,error,info):
''' read in chrom_size file '''
if not os.path.isfile(chrfile) :
error("No such file: %s !\n" % (chrfile))
sys.exit(1)
try:
f = open(chrfile,'r')
except IOError :
error("open file %s error !\n" %(chrfile))
sys.exit(1)
else :
chrlen_map = dict() # hash table for chromosome length.
cnt = 0
for line in f :
cnt += 1
line = line.strip()
items = line.split('\t') # skip empty line.
if len(items) < 2 :
info("Insufficient chromosome information at % s, line: %s. Skip!\n" % (chrfile, line))
if re.match('^(c|C)hr', items[0]) and re.match('^[0-9]+$', items[1]) :
chrlen_map[items[0]] = int(items[1])
else :
info("Format error at %s, line %d: %s. Skip!\n" % (chrfile,cnt,line))
f.close()
return chrlen_map
def read_short_reads(samples,parser,TEmode):
    '''read short reads from single or multiple samples and store them in short read objects'''
shortReads = []
# chroms = chrlen_tbl.keys()
for i in range(len(samples)) :
s = samples[i]
if not os.path.isfile(s) :
logging.error("No such file %s !\n" %(s))
sys.exit(1)
logging.info("reading sample file %s ...\n" %(s))
time.sleep(1)
#sbed = __bam2bed(s,0,error)
#b = BEDFile(sbed,chroms)
#b = parser(s,chroms)
b = parser(s)
t = b.build_fwtrack(TEmode)
shortReads.append(t)
return shortReads
def read_short_reads_sameFam(samples,parser,teIdx):
    '''read short reads from single or multiple samples and store them in short read objects'''
shortReads = []
# chroms = chrlen_tbl.keys()
for i in range(len(samples)) :
s = samples[i]
if not os.path.isfile(s) :
logging.error("No such file %s !\n" %(s))
sys.exit(1)
logging.info("reading sample file %s ...\n" %(s))
time.sleep(1)
#sbed = __bam2bed(s,0,error)
#b = BEDFile(sbed,chroms)
#b = parser(s,chroms)
b = parser(s)
t = b.build_fwtrack_v2(teIdx)
shortReads.append(t)
return shortReads
#def read_alignments(samples,chrlen_tbl,parser):
# '''read alignments from single or multple SAM or BAM files '''
# shortReads = []
# chroms = chrlen_tbl.keys()
# for i in range(len(samples)) :
# s = samples[i]
# if not os.path.isfile(s) :
# logging.error("No such file %s !\n" %(s))
# sys.exit(1)
# logging.info("reading treatment sample file %s ...\n" %(s))
# time.sleep(1)
#sbed = __bam2bed(s,0,error)
#b = BEDFile(sbed,chroms)
#b = parser(s,chroms)
# shortReads.append(b)
# return shortReads
def __bam2bed(sample,pairend,error):
res = sample + ".bed"
if pairend == 0 : #single end
try:
os.system("bamToBED -ed -i sample >res")
res = __assignWeight(sample,".bed",error)
except :
error("file format error %s !\n" %(sample))
sys.exit(0)
else :
try:
os.system("bamToBED -bedpe -i sample >res")
res = __assignWeight(sample,".bed",error)
except :
error("file format error %s !\n" %(sample))
sys.exit(0)
return res
def __assignWeight(sample,suffix,error):
src = sample + suffix
dest = sample + ".bal.bed"
lines = []
cur_seqid = "-1"
multi_num = 0
try:
f = open(src,'r')
of = open(dest,'w')
except IOError :
error("open file %s error !\n" %(src))
sys.exit(1)
else :
for line in f :
line = line.strip()
arr = line.split('\t')
if cur_seqid == arr[3] :
lines.append(line)
multi_num += 1
else :
                if multi_num > 0 :
                    val = 1.0 / multi_num
                    for record in lines :
                        of.write(record + "\t" + str(val) + "\n")
                del lines[:]
                lines.append(line)
                cur_seqid = arr[3]
                multi_num = 1
f.close()
        if multi_num > 0 :
            val = 1.0 / multi_num
            for record in lines :
                of.write(record + "\t" + str(val) + "\n")
of.close()
return dest
| wwliao/tetoolkit | TEToolkit/IO/ReadInputs.py | Python | gpl-3.0 | 16,212 |
# Copyright (C) IBM Corp. 2016.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import fnmatch
import logging
import os
import subprocess
import time
from lib import exception
LOG = logging.getLogger(__name__)
def base_directory_not_found_error(err):
if err.errno == errno.ENOENT:
raise exception.FilesToValidateNotFound()
else:
raise err
def retry_on_error(f, error=Exception, failure_handler=None,
max_retries=2, seconds_between_retries=5):
"""
Retries running [f] when an exception occurs.
Returns the result of [f] if it succeeds.
Args:
f: function to execute. Takes no arguments.
Options:
error: exception to watch for retry attempt.
failure_handler: function be run when all retry attempts fail.
Takes an exception as argument.
max_retries: total number of retries to attempt.
seconds_between_retries: time to wait until next retry.
"""
assert max_retries >= 0
def _reraise_exception(exc):
raise exc
failure_handler = failure_handler or _reraise_exception
while True:
try:
return f()
except error as exc:
max_retries -= 1
if max_retries < 0:
return failure_handler(exc)
LOG.debug("Function {function} failed, retrying in {seconds} "
"seconds.".format(function=f,
seconds=seconds_between_retries))
time.sleep(seconds_between_retries)
def retry_on_timeout(f, is_timeout_error_f,
max_retries=2,
seconds_between_retries=5,
initial_timeout=120,
timeout_incr_f=lambda t: t * 2):
"""
Retries running [f] when a timeout error is detected.
Returns the result of [f] when it succeeds.
Args:
f: function to execute. Takes a timeout value as argument.
is_timeout_error_f: function to check if the exception raised by [f]
is a timeout error. Takes an exception as argument.
Options:
max_retries: total number of retries to attempt.
seconds_between_retries: number of seconds to wait before retrying [f].
initial_timeout: timeout value (seconds) of first [f] execution.
timeout_incr_f: function that returns a new timeout value, based on the
current one.
"""
assert max_retries >= 0
timeout = initial_timeout
retries_left = max_retries
while True:
try:
return f(timeout)
except Exception as exc:
if not is_timeout_error_f(exc):
raise exc
retries_left -= 1
if retries_left < 0:
raise exception.TimeoutError(
func_name=f.__name__,
num_attempts=max_retries + 1,
initial_timeout=initial_timeout,
final_timeout=timeout)
timeout = timeout_incr_f(timeout)
time.sleep(seconds_between_retries)
def set_http_proxy_env(proxy):
LOG.info('Setting up http proxy: {}'.format(proxy))
os.environ['https_proxy'] = proxy
os.environ['http_proxy'] = proxy
def run_command(cmd, **kwargs):
"""
Run command, wait for it to finish and read its output.
Args:
cmd (str): command string
kwargs (dict): extra parameters passed to subprocess.Popen
Returns:
str: command standard output
"""
LOG.debug("Command: %s" % cmd)
shell = kwargs.pop('shell', True)
success_return_codes = kwargs.pop('success_return_codes', [0])
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell, **kwargs)
output, error_output = process.communicate()
LOG.debug("stdout: %s" % output)
LOG.debug("stderr: %s" % error_output)
if process.returncode not in success_return_codes:
raise exception.SubprocessError(cmd=cmd, returncode=process.returncode,
stdout=output, stderr=error_output)
return output
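# Illustrative usage sketch (not part of the original module): running a command
# for which a non-zero exit code is acceptable. grep exits with 1 when nothing
# matches, so both 0 and 1 are treated as success here; the path is hypothetical.
def _example_run_command():
    return run_command("grep -c pattern /tmp/some-file",
                       success_return_codes=[0, 1])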
def create_directory(directory):
"""
    Create a directory if it does not exist.
"""
if not os.path.isdir(directory):
os.makedirs(directory)
def is_package_installed(package_name):
"""
Checks if a RPM package is installed
Args:
package_name (str): package name
Returns:
bool: if RPM package is installed
"""
cmd = "rpm -q --whatprovides %s" % package_name
try:
run_command(cmd, shell=True)
except exception.SubprocessError as e:
# rpm returns 1 when search string is not found and other non-zero values
# if an error occurred
#pylint: disable=no-member
if e.returncode == 1:
return False
else:
raise
return True
def recursive_glob(directory, pattern):
"""
Find all files matching a pattern according to the rules used by Linux shell
Args:
directory (str): searched directory
pattern (str): glob pattern
"""
matches = []
for root, _, filenames in os.walk(directory, onerror=base_directory_not_found_error):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
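# Illustrative usage sketch (not part of the original module): collecting every
# .spec file below a hypothetical build tree with recursive_glob.
def _example_recursive_glob():
    return recursive_glob('/tmp/build-tree', '*.spec')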
def replace_str_in_file(file_path, search, replacement):
lines = []
with file(file_path, "r") as f:
lines = f.readlines()
with file(file_path, "w") as f:
for line in lines:
line = line.replace(search, replacement)
f.write(line)
def force_symlink(target_path, link_path):
"""
Create a symbolic link to the target path, deleting it beforehand if it
already exists.
Args:
target_path (str): original source path
link_path (str): linked path
"""
try:
os.symlink(target_path, link_path)
except OSError, e:
if e.errno == errno.EEXIST:
os.remove(link_path)
os.symlink(target_path, link_path)
| open-power-host-os/builds | lib/utils.py | Python | gpl-3.0 | 6,741 |
'''
Copyright 2014 Pierre Cadart
This file is part of Factory Maker.
Factory Maker is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Factory Maker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Factory Maker. If not, see <http://www.gnu.org/licenses/>.
Description:
    This class defines a simple rectangle and an intersect method.
    It can have floating point coordinates, unlike pygame.Rect.
'''
class Rect:
def __init__(self,coord,size):
x,y = coord
w,h = size
if w<0:
x = x+w
w = -w
if h<0:
y = y+h
h = -h
self.left = x
self.top = y
self.right = x+w
self.bottom = y+h
self.w = w
self.h = h
def hasCommonPoint(self,r):
if r.top > self.bottom:
return False
if r.left > self.right:
return False
if r.bottom < self.top:
return False
if r.right < self.left:
return False
return True
def intersect(self,r):
if not self.hasCommonPoint(r):
return Rect((self.left,self.top),(0,0))
top = max( r.top , self.top )
bottom = min( r.bottom , self.bottom )
left = max( r.left , self.left )
right = min( r.right , self.right )
return Rect((left,top),(right-left , bottom-top ))
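# Illustrative usage sketch (not part of the original module): intersecting two
# overlapping rectangles; the result spans only the common area.
def _example_intersect():
    a = Rect((0.0, 0.0), (4.0, 3.0))
    b = Rect((2.5, 1.5), (4.0, 4.0))
    overlap = a.intersect(b)
    return (overlap.left, overlap.top, overlap.w, overlap.h)  # (2.5, 1.5, 1.5, 1.5)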
| redstorm45/factory-maker | rect.py | Python | gpl-3.0 | 1,911 |
from infection_monkey.telemetry.attack.usage_telem import UsageTelem
class T1035Telem(UsageTelem):
def __init__(self, status, usage):
"""
T1035 telemetry.
:param status: ScanStatus of technique
:param usage: Enum of UsageEnum type
"""
super(T1035Telem, self).__init__("T1035", status, usage)
| guardicore/monkey | monkey/infection_monkey/telemetry/attack/t1035_telem.py | Python | gpl-3.0 | 346 |
'''
Utility classes and functions.
These generally need to be split into other packages.
For example, many could go in syr.debug or syr.fs.
But you need time to find and change the callers.
Copyright 2009-2016 GoodCrypto
Last modified: 2016-10-18
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import print_function
from __future__ import unicode_literals
import sys
IS_PY2 = sys.version_info[0] == 2
from contextlib import contextmanager
from datetime import datetime, timedelta
from fnmatch import fnmatch
from functools import wraps
from glob import glob
import bz2, calendar, os, os.path, re, sh, string, sys, tempfile, types, zipfile
import gzip as gz
import threading, trace, traceback
import re, time, types, unicodedata
if IS_PY2:
from cStringIO import StringIO
from urlparse import urljoin, urlparse
else:
from io import StringIO
from urllib.parse import urljoin, urlparse
from syr.lock import locked
from syr.python import is_string, object_name
from syr.times import timedelta_to_human_readable
# we can't use syr.log here because syr.log uses this module
_debug = False
if _debug:
def log(msg):
print(msg)
else:
def log(msg):
pass
Http_Separator = '://'
_synchronized_master_lock = threading.Lock()
_synchronized_locks = {}
_cached_return_values = {}
# linux allows almost any characters, but this is cross platform pathnames
valid_pathname_chars = "-_.()/\\: %s%s" % (string.ascii_letters, string.digits)
class NotImplementedException(Exception):
''' Operation not implemented exception. '''
pass
class MovedPermanentlyException(Exception):
''' Object moved permanently exception.
Always say where it was moved. '''
pass
def get_scheme_netloc(url):
''' Return (scheme, netloc) from url.
If the port is non-standard, the netloc is 'domain:port'. Otherwise
netloc is the domain.
This is used because python 2.4 and 2.5
give slightly different results from urlparse.
>>> get_scheme_netloc('http://goodcrypto.com')
('http', 'goodcrypto.com')
>>> get_scheme_netloc('https://test:8211')
('https', 'test:8211')
'''
parsed_url = urlparse(url)
try:
scheme = parsed_url.scheme
netloc = parsed_url.netloc
except:
scheme = parsed_url[0]
netloc = parsed_url[1]
return (scheme, netloc)
def get_remote_ip(request):
'''Get the remote ip. If there is a forwarder, assume the first IP
address (if there are more than 1) is the original machine's address.
Otherwise, use the remote addr.
Any errors, return 0.0.0.0
'''
Unknown_IP = '0.0.0.0'
if request:
try:
# if we're using a reverse proxy, the ip is the proxy's ip address
remote_addr = request.META.get('REMOTE_ADDR', '')
forwarder = request.META.get('HTTP_X_FORWARDED_FOR', '')
if forwarder and forwarder is not None and len(forwarder) > 0:
m = re.match('(.*?),.*?', forwarder)
if m:
remote_ip = m.group(1)
else:
remote_ip = forwarder
else:
remote_ip = remote_addr
if not remote_ip or remote_ip is None or len(remote_ip) <= 0:
remote_ip = Unknown_IP
except:
log(traceback.format_exc())
remote_ip = Unknown_IP
else:
remote_ip = Unknown_IP
log('no request so returning unknown ip address')
return remote_ip
def stacktrace():
raise MovedPermanentlyException('moved to syr.python')
def last_exception(noisy=False):
raise MovedPermanentlyException('moved to syr.python')
def last_exception_only():
raise MovedPermanentlyException('moved to syr.python')
def get_module(name):
raise MovedPermanentlyException('moved to syr.python')
def caller_dir():
raise MovedPermanentlyException('moved to syr.python')
def caller_file():
raise MovedPermanentlyException('moved to syr.python')
def get_absolute_url(url, home_url, request=None):
''' Return an absolute url from a relative url
adapting for protocol if request included.'''
final_home_url = home_url
if url.startswith('/'):
url = url[1:]
try:
if request is not None and request.META.get('HTTP_REFERER') is not None:
# use the same protocol for the new url
            referer = request.META.get('HTTP_REFERER')
if (referer.find('://' + TOP_LEVEL_DOMAIN) > 0 and
referer.lower().startswith('https')):
index = final_home_url.find('://')
if index >= 0:
                final_home_url = 'https' + final_home_url[index:]
log('final url: {}'.format(final_home_url))
except:
pass
return urljoin(final_home_url, url)
def say(message):
''' Speak a message.
Runs a "say" program, passing the message on the command line.
Because most systems are not set up for speech, it is not an
error if the "say" program is missing or fails.
It is often easy to add a "say" program to a system. For example,
a linux system using festival for speech can use a one line script:
festival --batch "(SayText \"$*\")"
Depending on the underlying 'say' command's implementation, say()
probably does not work unless user is in the 'audio' group.
>>> say('test say')
'''
enabled = True
if enabled:
try:
# the words are unintelligible, and usually all we want is to know something happened
# message = 'tick' # just a sound #DEBUG
# os.system passes successive lines to sh
message = message.split('\n')[0]
sh.say(*message)
except:
pass
def synchronized(function):
''' Decorator to lock a function so each call completes before
another call starts.
If you use both the staticmethod and synchronized decorators,
@staticmethod must come before @synchronized. '''
@wraps(function)
def synchronizer(*args, **kwargs):
''' Lock function access so only one call at a time is active.'''
# get a shared lock for the function
with locked(_synchronized_master_lock):
lock_name = object_name(function)
if lock_name in _synchronized_locks:
lock = _synchronized_locks[lock_name]
else:
lock = threading.Lock()
_synchronized_locks[lock_name] = lock
with locked(lock):
result = function(*args, **kwargs)
return result
return synchronizer
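# Illustrative usage sketch (not part of the original module): serializing access
# to a shared counter with @synchronized; the counter and the function below are
# hypothetical.
_example_counter = {'value': 0}

@synchronized
def _example_increment():
    _example_counter['value'] += 1
    return _example_counter['value']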
def pdb_break():
''' Breakpoint for pdb command line debugger.
Usage:
from syr import pdb_break ; pdb_break()
'''
import pdb
log('breakpointing for pdb')
pdb.set_trace()
def winpdb_break():
''' Breakpoint for winpdb debugger.
Example:
from syr import winpdb_break; winpdb_break() #DEBUG
'''
import rpdb2 #DEBUG
log('breakpointing for winpdb')
rpdb2.start_embedded_debugger("password") #DEBUG
def generate_password(max_length=25, punctuation_chars='-_ .,!+?$#'):
'''
Generate a password.
>>> len(generate_password())
25
'''
# the password must be random, but the characters must be valid for django
password = ''
while len(password) < max_length:
new_char = os.urandom(1)
try:
new_char = new_char.decode()
# the character must be a printable character
if ((new_char >= 'A' and new_char <= 'Z') or
(new_char >= 'a' and new_char <= 'z') or
(new_char >= '0' and new_char <= '9') or
(new_char in punctuation_chars)):
# and the password must not start or end with a punctuation
if (new_char in punctuation_chars and
(len(password) == 0 or (len(password) + 1) == max_length)):
pass
else:
password += new_char
except:
pass
return password
def cache(function):
''' Decorator to cache returned value.
Use @cache for expensive calculations that should only run once.
>>> @cache
... def test():
... import random
... return random.random()
>>> a = test()
>>> b = test()
>>> assert a == b
'''
@wraps(function)
def cacher(*args, **kwargs):
''' Cache returned value.'''
@synchronized
def get_value():
key = object_name(function)
if key in _cached_return_values:
value = _cached_return_values[key]
else:
value = function(*args, **kwargs)
                _cached_return_values[key] = value
            return value
return get_value()
return cacher
def exec_trace(code, ignoredirs=[sys.prefix, sys.exec_prefix], globals=None, locals=None, coverdir='/tmp'):
''' Trace code.
Code must be a string. Code must start at column 1 in the string.
exec_trace() usually requires passing "globals=globals(), locals=locals()".
Example:
from syr import exec_trace
exec_trace("""
from jean.events import log_event
log_event(name, request=request, details=details)
""",
globals=globals(), locals=locals())
'''
tracer = trace.Trace(ignoredirs=ignoredirs)
tracer.runctx(code.strip(), globals=globals, locals=locals)
r = tracer.results()
r.write_results(show_missing=True, coverdir=coverdir)
def clean_pathname(pathname):
''' Clean a pathname by removing all invalid chars.
See http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-pathname-in-python
From that page, roughly:
The unicodedata.normalize call replaces accented characters
with the unaccented equivalent, which is better than simply
stripping them out. After that all disallowed characters are
removed. Doesn't avoid possible disallowed pathnames."
'''
if IS_PY2:
ascii_pathname = unicodedata.normalize('NFKD', unicode(pathname)).encode('ASCII', 'ignore')
else:
ascii_pathname = unicodedata.normalize('NFKD', pathname.decode()).encode('ASCII', 'ignore')
return ''.join(c for c in ascii_pathname if c in valid_pathname_chars)
def strip_input(data):
'''Strip the leading and trailing spaces.'''
try:
if data is not None:
if is_string(data) or isinstance(data, CharField):
data = data.strip()
elif isinstance(data, EmailField):
data = '{}'.format(data)
data = data.strip()
elif isinstance(data, bytes):
data = data.decode().strip()
except:
log(traceback.format_exc())
return data
def trace_func(frame, event, arg):
''' NOT WORKING - Log trace of python code.
Usage:
import sys
old_trace = sys.gettrace()
sys.settrace(trace)
... code to trace ...
sys.settrace(old_trace)
See python tracing a segmentation fault
http://stackoverflow.com/questions/2663841/python-tracing-a-segmentation-fault
>>> def test():
... print("Line 8")
... print("Line 9")
>>> import sys
>>> old_trace = sys.gettrace()
>>> # NOT WORKING - sys.settrace(trace_func)
>>> test()
Line 8
Line 9
>>> sys.settrace(old_trace)
'''
print('trace: %(event)-12s %(filename)s:%(lineno)d' % {
'event': event,
'filename': frame.f_code.co_filename,
'lineno': frame.f_lineno })
    return trace_func
def pipe(value, *fns):
''' Pipe data from functions a() to b() to c() to d() ...
"pipe(x, a, b, c, d)" is more readble than "d(c(b(a(x))))".
See http://news.ycombinator.com/item?id=3349429
pipe() assumes every function in its list will consume and return the data.
If you need more control such as filtering and routing, see
the syr.coroutine package.
>>> def sqr(x):
... return x*x
>>> def half(x):
... return x/2.0
>>> for i in range(5):
... pipe(i, sqr, half)
0.0
0.5
2.0
4.5
8.0
'''
for fn in fns:
value = fn(value)
return value
def ltrim(string, prefix):
''' Trim all prefixes from string. '''
length = len(prefix)
while string.startswith(prefix):
string = string[length:]
return string
def rtrim(string, suffix):
''' Trim all suffixes from string. '''
length = len(suffix)
while string.endswith(suffix):
string = string[:-length]
return string
def trim(string, xfix):
''' Trim all prefixes or suffixes of xfix from string. '''
if is_string(string):
string = string.encode()
if is_string(xfix):
xfix = xfix.encode()
length = len(xfix)
while string.startswith(xfix):
string = string[length:]
while string.endswith(xfix):
string = string[:-length]
return string
def remove_lines(string, count):
''' Remove lines from string.
If count is negative, removes lines from end of string. '''
if count > 0:
string = '\n'.join(string.split('\n')[count:])
elif count < 0:
string = '\n'.join(string.split('\n')[:count])
return string
def pathmatch(path, pattern):
''' Test whether the path matches the pattern.
This is a mess that needs to be replaced with an ant-style path match.
The pattern is a shell-style wildcard, not a regular expression.
fnmatch.fnmatch tests filenames, not paths.
'**' at the beginning of a pattern matches anything at the beginning
of a path, but no other wildcards are allowed in the pattern. '''
def split(path):
path = os.path.expanduser(path)
path = os.path.abspath(path)
return path.split('/')
if pattern.startswith('**'):
result = path.endswith(pattern[2:])
else:
path = split(path)
pattern = split(pattern)
result = (len(path) == len(pattern) and
all(fnmatch(path[i], pattern[i]) for i in range(len(path))))
return result
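# Illustrative usage sketch (not part of the original module): a leading '**'
# pattern only tests the end of the path, while any other pattern is matched
# component by component; the paths below are hypothetical.
def _example_pathmatch():
    ends_ok = pathmatch('/var/log/syslog', '**/log/syslog')   # True
    parts_ok = pathmatch('/var/log/syslog', '/var/*/syslog')  # True
    return ends_ok and parts_ok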
def resolve_path(path):
''' Resolves file path wildcards, links, and relative directories.
To resolve a wildcard path that matches more than one file, use
glob() and pass each result to resolve_path().
Returns None if wildcard does not match any files. Raises
ValueError if wildcard matches more than one file. '''
paths = glob(path)
if paths:
if len(paths) > 1:
raise ValueError('Matches more than one path: %s' % path)
path = os.path.normpath(os.path.realpath(paths[0]))
else:
path = None
return path
def domain_base(domain):
''' Returns base name from domain.
I.e. base.tld or base.co.countrydomain or base.com.countrydomain
all have the same base name.
E.g. google.com, google.bg, google.de, google.co.in all are based
on google.
This can be fooled by domain spoofers or squatters. '''
# regexes might be clearer (or not) but would be slower
parts = domain.split('.')
if len(parts) > 1:
# toss the top level domain
parts = parts[:-1]
if len(parts) > 1:
# toss generic second level domains
if parts[-1] in ['com', 'co', 'org', 'net']:
parts = parts[:-1]
# top level left is the base name
return parts[-1]
class textfile(object):
''' Open possibly gzipped text file as file using contextmanager.
E.g. "with textfile('mytext.gz') as f".
Avoids "AttributeError: GzipFile instance has no attribute '__exit__'"
prior to Python 3.1.
As of Python 2.6 contextlib.closing() doesn't work. It doesn't expose underlying
gzip functions because its __enter__() returns the inner object, and it has no
__getattr__()
to expose the inner gzip.open(). '''
def __init__(self, filename, rwmode='r'):
if filename.endswith('.gz'):
self.f = gz.open(filename, '%sb' % rwmode)
elif filename.endswith('.bz2'):
self.f = bz2.BZ2File(filename, '%sb' % rwmode)
elif filename.endswith('.zip'):
            self.f = zipfile.ZipFile(filename, rwmode)
else:
self.f = open(filename, rwmode)
self.opened = True
def __iter__(self):
return iter(self.f)
def __enter__(self):
return self.f
def __exit__(self, *exc_info):
self.close()
def unused_close(self):
if self.opened:
self.f.close()
self.opened = False
def __getattr__(self, name):
return getattr(self.f, name)
def gzip(uncompressed):
''' Gzip a string '''
compressed_fileobj = StringIO()
with gz.GzipFile(fileobj=compressed_fileobj, mode='w') as f: #, compresslevel=5) as f:
f.write(uncompressed)
return compressed_fileobj.getvalue()
def gunzip(compressed):
''' Gunzip a string '''
compressed_fileobj = StringIO(compressed)
with gz.GzipFile(fileobj=compressed_fileobj, mode='r') as f:
uncompressed = f.read()
return uncompressed
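# Illustrative usage sketch (not part of the original module): round-tripping a
# string through gzip()/gunzip(). This assumes the Python 2 cStringIO path, where
# GzipFile can write str data to the in-memory buffer.
def _example_gzip_roundtrip():
    original = 'some text to compress'
    return gunzip(gzip(original)) == original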
@contextmanager
def chdir(dirname=None):
''' Chdir contextmanager that restores current dir.
From http://www.astropython.org/snippet/2009/10/chdir-context-manager
This context manager restores the value of the current working
directory (cwd) after the enclosed code block completes or
raises an exception. If a directory name is supplied to the
context manager then the cwd is changed prior to running the
code block.
'''
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
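# Illustrative usage sketch (not part of the original module): running a block
# inside /tmp with the chdir context manager and returning to the previous
# working directory afterwards.
def _example_chdir():
    with chdir('/tmp'):
        working_dir = os.getcwd()
    return working_dir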
def different(file1, file2):
''' Returns whether the files are different. '''
# diff succeeds if there is a difference, and fails if no difference
try:
sh.diff(file1, file2, brief=True)
different = False
except sh.ErrorReturnCode:
different = True
return different
def slugify(value):
    ''' Converts string to a form usable in a url without encoding.
Strips white space from ends, converts to lowercase,
converts spaces to hyphens, and removes non-alphanumeric characters.
'''
value = value.strip().lower()
value = re.sub('[\s-]+', '-', value)
newvalue = ''
for c in value:
if (
(c >= 'A' and c <= 'Z') or
(c >= 'a' and c <= 'z') or
(c >= '0' and c <= '9') or
c == '-' or
c == '_'
):
newvalue += c
return newvalue
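# Illustrative usage sketch (not part of the original module): slugify()
# lowercases, collapses whitespace into hyphens and drops other punctuation.
def _example_slugify():
    return slugify('  Hello, World!  ')  # 'hello-world'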
def replace_strings(text, replacements, regexp=False):
""" Replace text. Returns new text.
'replacements' is a dict of {old: new, ...}.
Every occurence of each old string is replaced with the
matching new string.
If regexp=True, the old string is a regular expression.
>>> text = 'ABC DEF 123 456'
>>> replacements = {
... 'ABC': 'abc',
... '456': 'four five six'
... }
>>> replace_strings(text, replacements)
'abc DEF 123 four five six'
"""
for old in replacements:
new = replacements[old]
if regexp:
text = re.sub(old, new, text)
else:
text = text.replace(old, new)
return text
def caller_module_name(ignore=None, syr_utils_valid=False):
raise MovedPermanentlyException('moved to syr.python')
def run(command, expected_output=None, verbose=False, quiet=False, no_stdout=False, raise_exception=False):
''' Runs the command.
Returns True iff:
1. The return code is zero.
2. There was no stderr output.
3. Any expected output appears at the end of stdout.
Otherwise returns False.
Note that any warnings to stderr result in False.
To get the output from a command see get_command_output().
If verbose is True, print the command to stderr.
If quiet is True, don't print details of why command failed, just
print a message with the return code. By default run() prints why a
command failed to stderr. Quiet implies not verbose.
If no_stdout is True, don't print stdout.
If raise_exception is True and the command fails, raise an
exception instead of returning False.
'''
class RunFailed(Exception):
pass
def report_failure(why):
if not quiet:
message = 'command "%s" failed: %s' % (command, why)
print(message, file=sys.stderr)
log(message)
raise Exception('Deprecated. Use the sh module.')
import subprocess
if no_stdout:
verbose = False
if verbose and not quiet:
print(command)
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if not no_stdout:
if stdout:
stdout = stdout.rstrip()
print(stdout)
if stderr:
stderr = stderr.rstrip()
print(stderr)
# a return code of zero is success in linux, anything else is failure
if process.returncode:
msg = 'exit status %d' % process.returncode
report_failure(msg)
success = False
if raise_exception:
raise RunFailed(msg)
elif stderr:
success = False
if raise_exception:
raise RunFailed('stderr: %s' % stderr.rstrip())
elif expected_output and not (
stdout and stdout.endswith(expected_output)):
        msg = 'expected "%s", got "%s"' % (expected_output, stdout)
report_failure(msg)
success = False
if raise_exception:
raise RunFailed(msg)
else:
success = True
return success
def run2(command, check=True, timeout=None, *args, **kwargs):
''' Run a command.
If check=True (the default),
then if return code is not zero or there is stderr output,
raise CalledProcessError. Return any output in the exception.
If timeout (in seconds) is set and command times out, raise TimeoutError. '''
''' Parts from subprocess32.check_output(). '''
raise Exception('Deprecated. Use the sh module.')
# use subprocess32 for timeout
from subprocess32 import Popen, CalledProcessError, TimeoutExpired
process = Popen(command, stdout=stdout, stderr=stderr, *args, **kwargs)
try:
process.wait(timeout=timeout)
except TimeoutExpired:
print('TimeoutExpired') #DEBUG
#print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
#print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
try:
process.kill()
process.wait()
finally:
print('after kill/wait') #DEBUG
#print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
#print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
raise TimeoutExpired(process.args, timeout)
if check:
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args)
def get_command_output(command, quiet=False):
''' Runs the command. Returns stdout.
On linux you can use commands.getoutput() and commands.getstatusoutput().
If quiet is True, don't print stderr.
'''
raise Exception('Deprecated. Use the sh module.')
import subprocess
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if stderr and not quiet:
stderr = stderr.rstrip()
print(stderr, file=sys.stderr)
return stdout
def delete_empty_files(directory):
''' Delete empty files in directory.
Does not delete any subdirectories or files in them.
>>> directory = tempfile.mkdtemp()
>>> assert os.path.isdir(directory)
>>> handle, filename1 = tempfile.mkstemp(dir=directory)
>>> os.close(handle)
>>> assert os.path.exists(filename1)
>>> handle, filename2 = tempfile.mkstemp(dir=directory)
>>> os.close(handle)
>>> assert os.path.exists(filename2)
>>> with open(filename2, 'w') as f2:
... len = f2.write('data')
>>> delete_empty_files(directory)
>>> assert not os.path.exists(filename1)
>>> assert os.path.exists(filename2)
>>> os.remove(filename2)
>>> assert not os.path.exists(filename2)
>>> os.rmdir(directory)
>>> assert not os.path.isdir(directory)
'''
wildcard = os.path.join(directory, '*')
for filename in glob(wildcard):
if os.path.getsize(filename) <= 0:
os.remove(filename)
def dynamically_import_module(name):
raise MovedPermanentlyException('moved to syr.python')
def dynamic_import(name):
raise MovedPermanentlyException('moved to syr.python')
def randint(min=None, max=None):
''' Get a random int.
random.randint() requires that you specify the min and max of
the integer range for a random int. But you almost always want
the min and max to be the system limits for an integer.
If not use random.randint().
'min' defaults to system minimum integer.
'max' defaults to system maximum integer.
'''
import sys, random
if IS_PY2:
maxsize = sys.maxint
else:
maxsize = sys.maxsize
if min is None:
min = -(maxsize-1)
if max is None:
max = maxsize
return random.randint(min, max)
def strip_youtube_hash(filename):
if '.' in filename:
rootname, _, extension = filename.rpartition('.')
youtube_match = re.match(r'(.*)-[a-zA-Z0-9\-_]{11}$', rootname)
if youtube_match:
cleanroot = youtube_match.group(1)
filename = cleanroot + '.' + extension
return filename
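# Illustrative usage sketch (not part of the original module): removing a trailing
# 11-character YouTube id from a downloaded filename; the filename is hypothetical.
def _example_strip_youtube_hash():
    return strip_youtube_hash('lecture-dQw4w9WgXcQ.mp4')  # 'lecture.mp4'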
if __name__ == "__main__":
import doctest
doctest.testmod()
| goodcrypto/goodcrypto-libs | syr/utils.py | Python | gpl-3.0 | 26,822 |
from visual import types
from comm.types import ResultStatus
from snapshots.base import Client
class Controller:
def __init__(self, interface):
self.__interface = interface
self.__reaction_type_relator = {
types.Result.EVENT_FORM_INPUT_DATA: self.receive_event_input,
types.Result.CLIENT_LIST: self.receive_client_list,
types.Result.EVENT_CREATION_REACHED: self.update_client_info,
}
def startup(self):
self.request_contact_list()
def react(self, req_result, *args, **kwargs):
if self.__reaction_type_relator.get(req_result.type) is None:
return
self.__reaction_type_relator[req_result.type](req_result)
def receive_event_input(self, req_result):
self.__interface.model.event = req_result.data.get('snapshot')
def receive_client_list(self, req_result):
clients = []
for c in req_result.data:
client = Client()
client.deserialize(c)
clients.append(client)
self.__interface.model.client_list = clients
self.__interface.view.update_from_model()
def request_contact_list(self):
request = {
'type': types.Request.CLIENT_LIST,
'origin': self.__interface.__str__(),
'data': {},
'targets': [self.__interface.TYPE]
}
self.__interface.place_request(request)
def notify_change_selected_client(self, client):
result = {
'type': types.Result.SELECTED_CLIENT_UPDATE,
'origin': self.__interface.__str__(),
'data': client,
'targets': ['event_payment'],
'status': ResultStatus.OK
}
self.__interface.place_result(result)
def send_update_status(self, client, errors=False):
result = {
'type': types.Result.CLIENT_SELECTION_STATE,
'origin': self.__interface.__str__(),
'data': client,
'targets': ['event_checkout_summary'],
'status': ResultStatus.ERROR if errors else ResultStatus.OK
}
self.__interface.place_result(result)
def update_client_info(self, result):
if not result.data.get('event'):
return
self.__interface.view.update_client(result.data.get('event').client)
| asmateus/event_manager | manager/visual/element/event_contact/controller.py | Python | gpl-3.0 | 2,344 |
from core.moduleguessbase import ModuleGuessBase
from core.moduleexception import ModuleException, ProbeException, ExecutionException, ProbeSucceed
class ModuleGuess(ModuleGuessBase):
'''Generic ModuleGuess class to inherit.
    A ModuleGuess object is a dynamically loaded Weevely extension that automatically guesses the best
    way to accomplish tasks on the remote target. Vector objects contain the code to run on the remote target.
    To create a new module, define an object that inherits ModuleGuess (e.g. 'class MyModule(ModuleGuess)')
    in a python file located in 'modules/mygroup/mymodule.py'. The class needs the same name as the
    python file, with the first letter capitalized.
    At the first run (e.g. running ':mymgroup.mymodule' from the terminal for the first time), the module
    constructor executes the following main tasks:
    A) Defines module arguments (method _set_args(), inheriting is recommended)
    B) Defines module vectors (method _set_vectors(), inheriting is recommended)
    At every call (e.g. at every ':mymgroup.mymodule' run) the run() method parses the passed
    arguments and executes the following main tasks:
    1) Optionally prepares the environment (method _prepare(), inheriting is optional)
    2) Runs every vector to guess the best way to accomplish the task. Guessing stops as soon as
    the first vector returns good results. These three methods are executed for every vector:
    2.1) Formats the passed arguments to simplify the current_vector run
    (method _prepare_vector(), inheriting is recommended)
    2.2) Runs current_vector and saves results (method _execute_vector(), inheriting is optional)
    2.3) Verifies probe execution (method _verify_vector_execution(), inheriting is optional)
    3) Optionally verifies probe execution (method _verify(), inheriting is optional)
    Example of a basic module that downloads files from the web onto the target:
==================================== webdownload.py ===================================
from core.moduleguess import ModuleGuess
from core.moduleexception import ProbeException, ProbeSucceed
WARN_DOWNLOAD_OK = 'Downloaded succeed'
class Webdownload(ModuleGuess):
def _set_args(self):
# Declare accepted module parameters. Let the user choose specific vector to skip guessing with
# '-vector' parameter. Parameters passed at run are stored in self.args dictionary.
self.argparser.add_argument('url')
self.argparser.add_argument('rpath')
self.argparser.add_argument('-vector', choices = self.vectors.keys())
def _set_vectors(self):
# Declare vectors to execute.
# Vectors defined in self.vectors are three diffent ways to accomplish tasks.
# They are execute in succession: the first vector that returns a positive
# results, break the probe.
# Vector defined in self.support_vectors are a support vectors executed manually.
# Payload variable fields '$path' and '$url' are replaced at vector execution.
# Because variable fields '$path' and '$url' corresponds with arguments,
# is not necessary to inherit _prepare_vector() and _execute_vector().
self.vectors.add_vector(name='putcontent', interpreter='shell.php', payloads = [ 'file_put_contents("$rpath", file_get_contents("$url"));' ])
self.vectors.add_vector(name='wget', interpreter='shell.sh', payloads = [ 'wget $url -O $rpath' ])
self.vectors.add_vector(name='curl', interpreter='shell.sh', payloads = [ 'curl -o $rpath $url' ])
self.support_vectors.add_vector(name='check_download', interpreter='file.check', payloads = [ '$rpath', 'exists' ])
def _verify_vector_execution(self):
            # Verify the downloaded file. Save the vector return value in self._result and,
            # on success, raise ProbeSucceed to stop module execution and print a message.
            # If no vector raises a ProbeSucceed/ProbeException to break the flow, the probe
            # ends with an error due to the negative value of self._result.
self._result = self.support_vectors.get('check_download').execute({ 'rpath' : self.args['rpath'] })
if self._result == True:
raise ProbeSucceed(self.name, WARN_DOWNLOAD_OK)
=============================================================================
'''
def _set_vectors(self):
"""Inherit this method to add vectors in self.vectors and self.support_vectors lists, easily
callable in _probe() function. This method is called by module constructor.
Example of vector declaration:
> self.support_vectors.add_vector(name='vector_name', interpreter='module_name', payloads = [ 'module_param1', '$module_param2', .. ])
Template fields like '$rpath' are replaced at vector execution.
"""
pass
def _set_args(self):
"""Inherit this method to set self.argparser arguments. Set new arguments following
        the official python argparse documentation. This method is called by the module constructor.
Arguments passed at module runs are stored in Module.args dictionary.
"""
pass
def _init_module(self):
"""Inherit this method to set eventual additional variables. Called by module constructor.
"""
def _prepare(self):
"""Inherit this method to prepare environment for the probe.
This method is called at every module run. Throws ModuleException, ProbeException.
"""
pass
def _prepare_vector(self):
"""Inherit this method to prepare properly self.formatted_arguments for the
self.current_vector execution.
This method is called for every vector. Throws ProbeException to break module
run with an error, ProbeSucceed to break module run in case of success, and
ExecutionException to skip single self.current_vector execution.
"""
self.formatted_args = self.args
    def _execute_vector(self):
        """This method executes self.current_vector. It is recommended to avoid inheriting it
        and instead to properly prepare the arguments with self.formatted_args in ModuleGuess._prepare_vector().
Vector execution results should be stored in self._result.
This method is called for every vector. Throws ProbeException to break module
run with an error, ProbeSucceed to break module run in case of success, and
ExecutionException to skip single self.current_vector execution.
"""
self._result = self.current_vector.execute(self.formatted_args)
    def _verify_vector_execution(self):
        """This method verifies vector execution results. It is recommended
        not to inherit this method but just to fill self._result properly in
        ModuleGuess._execute_vector().
This method is called for every vector. Throws ProbeException to break module
run with an error, ProbeSucceed to break module run in case of success, and
ExecutionException to skip single self.current_vector execution.
"""
# If self._result is set. False is probably a good return value.
if self._result or self._result == False:
raise ProbeSucceed(self.name,'Command succeeded')
def _verify(self):
"""Inherit this method to check probe result.
        Results to print and return after module execution should be stored in self._result.
It is called at every module run. Throws ModuleException, ProbeException, ProbeSucceed.
"""
        pass
 | amccormack/Weevely | core/moduleguess.py | Python | gpl-3.0 | 8,072 |
class ScraperException(IOError):
"""Raised when the Scraper class encounters an (internal) error."""
pass
class NoSuchElementException(LookupError):
"""Raised when trying to access an element that was not found."""
pass
class QisLoginFailedException(IOError):
"""Raised when logging in to the QIS system failed."""
pass
class QisNotLoggedInException(BaseException):
"""Raised when trying to perform an action that requires a login without
being logged in.
"""
pass
class UnexpectedStateException(BaseException):
    """Raised when trying to perform an action whose prerequisite is not satisfied."""
pass
class PersistenceException(IOError):
"""Raised when a database related process or action failed."""
pass
| scuroworks/qisbot | qisbot/exceptions.py | Python | gpl-3.0 | 771 |
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Set and read for determining the cloud config file location
CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
CLOUD_CONFIG = '/opt/freeware/etc/cloud/cloud.cfg'
# What you get if no config is provided
CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
#'OpenNebula',
#'Azure',
#'AltCloud',
'OVF',
#'MAAS',
#'GCE',
'OpenStack',
'Ec2',
#'CloudSigma',
#'CloudStack',
#'SmartOS',
# At the end to act as a 'catch' when none of the above work...
'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
'syslog_fix_perms': 'root:system',
'system_info': {
'paths': {
'cloud_dir': '/opt/freeware/var/lib/cloud',
'templates_dir': '/opt/freeware/etc/cloud/templates/',
},
'distro': 'aix',
},
'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
PER_INSTANCE = "once-per-instance"
PER_ALWAYS = "always"
PER_ONCE = "once"
# Used to sanity check incoming handlers/modules frequencies
FREQUENCIES = [PER_INSTANCE, PER_ALWAYS, PER_ONCE]
| transt/cloud-init-0.7.5 | cloudinit/settings.py | Python | gpl-3.0 | 2,138 |
"""This module provides example data of varuious isotherms."""
import numpy as np
from . import util
def _sample( descr, Qads, Prel ):
return util.make_touple(
"GasAdsSample",
descr = descr,
Qads = Qads,
Prel = Prel,
)
def carbon_black() :
    """Return carbon black isotherm data"""
descr = "Carbon black - nitrogen : Carbon Reference Material analyzed with N2 at 77 K : Adsorption"
Qads = np.array([4.39005, 4.67017, 4.79068, 4.9767, 5.14414, 5.31144,
5.47106, 5.63297, 5.80559, 5.96663, 6.13574, 6.31214,
6.49764, 6.67154, 6.85255, 7.04053, 7.22571, 7.40778,
7.59634, 7.7832, 7.96568, 8.1623, 8.34863, 8.54383,
8.74695, 8.94871, 9.16214, 9.38208, 9.61289, 9.8577,
10.12, 10.397, 10.6852, 11.0089, 11.3574, 11.7373,
12.1611, 12.6289, 13.1794, 13.819, 14.57, 15.4858,
16.6535, 18.2409])
Prel = np.array([0.0433547, 0.0672921, 0.0796994, 0.0999331, 0.119912,
0.140374, 0.159884, 0.179697, 0.200356, 0.219646,
0.239691, 0.259671, 0.280475, 0.299907, 0.320048,
0.340746, 0.360882, 0.380708, 0.400956, 0.421168,
0.440603, 0.460924, 0.480902, 0.500572, 0.521144,
0.540715, 0.560852, 0.580887, 0.600803, 0.62089,
0.64084, 0.66093, 0.68071, 0.70082, 0.72096, 0.74084,
0.76081, 0.78045, 0.80084, 0.82107, 0.84075, 0.86069,
0.88041, 0.90023])
return _sample( descr, Qads, Prel )
| lowks/micromeritics | micromeritics/isotherm_examples.py | Python | gpl-3.0 | 1,648 |
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file NXSCollect_test.py
# unittests for field Tags running Tango Server
#
import unittest
try:
import NXSCollect_test
except Exception:
from . import NXSCollect_test
# test fixture
class NXSCollectH5PYTest(NXSCollect_test.NXSCollectTest):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
NXSCollect_test.NXSCollectTest.__init__(self, methodName)
self.writer = "h5py"
self.flags = "--h5py"
if __name__ == '__main__':
unittest.main()
| nexdatas/tools | test/NXSCollectH5PY_test.py | Python | gpl-3.0 | 1,375 |
"""
pySSN is available under the GNU licence provided you cite the developers' names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
"""
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from scipy import interpolate
from collections import OrderedDict
from pyssn import log_, config
if config.INSTALLED['PyNeb']:
import pyneb as pn
from ..utils.physics import CST, Planck, make_cont_Ercolano, gff
from ..utils.misc import execution_path, change_size, convol, rebin, is_absorb, no_red_corr, gauss, carre, lorentz, convolgauss
from ..utils.misc import vactoair, airtovac, clean_label, get_parser, read_data, my_execfile as execfile
from ..core.profiles import profil_instr
"""
ToDo:
1) Define the special lines in a table or dictionnary where ref, color, style are set up.
"""
# mvfc: 'save_data' removed because it is never used (and it is similar to 'print_data' in misc.py).
# mvfc: changed to capture the error message
def read_data(filename, NF=True):
def rows(mask):
nMax = 5
rowList = np.array(range(1, len(dd)+1))[mask]
rows = ''.join([ str(i)+', ' for i in rowList[:nMax]]).rstrip(', ')
if len(rowList) > nMax:
rows = rows + ', ...'
return rows
dtype = 'i8, a1, a9, float64, float64, float64, float64, a1, i8, i4, f, a100'
if NF:
delimiter = [14, 1, 9, 11, 6, 10, 7, 1, 14, 4, 7, 100]
else:
delimiter = [ 9, 1, 9, 11, 6, 10, 7, 1, 9, 4, 7, 100]
names = ['num', 'foo', 'id', 'lambda','l_shift', 'i_rel', 'i_cor', 'foo2', 'ref', 'profile',
'vitesse', 'comment']
usecols = (0, 2, 3, 4, 5, 6, 8, 9, 10, 11)
dd = np.genfromtxt(filename, dtype=dtype, delimiter=delimiter, names = names, usecols = usecols)
#mvfc: this is needed to avoid the problem of a cosmetic file with only one line
if dd.size == 1:
dd = np.atleast_1d(dd)
msg = ''
if (dd['num'] == -1).sum() > 0:
        msg = msg + '\nInvalid line number at row: {}'.format(rows(dd['num'] == -1))
    # mvfc: this is not working, because an invalid integer is converted to -1, not to nan. why?
if np.isnan(dd['num']).sum() > 0:
msg = msg + '\nInvalid line number at row: {}'.format(rows(np.isnan(dd['num'])))
if np.isnan(dd['lambda']).sum() > 0:
mask = np.isnan(dd['lambda'])
msg = msg + '\nInvalid wavelength at row: {}'.format(rows(np.isnan(dd['lambda'])))
if np.isnan(dd['l_shift']).sum() > 0:
msg = msg + '\nInvalid wavelength shift at row: {}'.format(rows(np.isnan(dd['l_shift'])))
if np.isnan(dd['i_cor']).sum() > 0:
msg = msg + '\nInvalid intensity correction at row: {}'.format(rows(np.isnan(dd['i_cor'])))
if np.isnan(dd['i_rel']).sum() > 0:
msg = msg + '\nInvalid relative intensity at row: {}'.format(rows(np.isnan(dd['i_rel'])))
if len(msg) > 0:
dd = dd[:0]
return dd.view(np.recarray), msg
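# Illustrative usage sketch (not part of the original module): reading a cosmetik
# file in the new format and reporting parsing problems; the file name below is
# hypothetical.
def _example_read_data():
    lines, msg = read_data('liste_cosmetik.dat', NF=True)
    if msg:
        log_.warn(msg, calling='example')
    return lines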
class spectrum(object):
def __init__(self, config_file=None, phyat_file=None, profil_instr=profil_instr,
do_synth = None, do_read_liste = None, do_cosmetik = None, do_run = True, limit_sp = None,
spectr_obs=None, sp_norm=None, obj_velo=None, post_proc_file=None):
"""
Main pySSN object.
It reads the configuration file given by the config_file parameter.
        It reads the atomic data, model and cosmetik files. It reads the observation. It computes the reddening
        correction and the continuum.
"""
self.cursor = None
self.errorMsg = ''
self.selected_ions_data = None
self.ion_list = []
self.process = { '0' : 'recombination',
'1' : 'recombination',
'2' : 'dielectronic',
'3' : 'collisional',
'4' : 'Bowen',
'5' : 'recombination',
'6' : 'recombination',
                         '7' : 'fluorescence',
'8' : 'charge exchange',
'9' : 'recombination' }
self.process_abbr = {
'0' : 'rec.',
'1' : 'rec.',
'2' : 'die.',
'3' : 'col.',
'4' : 'fl.',
'5' : 'rec.',
'6' : 'rec.',
'7' : 'fl.',
'8' : 'ch.ex.',
'9' : 'rec.' }
self.fields = [ 'num', 'id', 'lambda', 'proc', 'l_shift', 'i_rel', 'i_cor', 'ref', 'profile', 'vitesse', 'comment' ]
self.field_width = { 'num' : 14,
'id' : 9,
'lambda' : 11,
'proc' : 1,
'l_shift' : 6,
'l_tot' : 11,
'i_rel' : 10,
'i_cor' : 7,
'i_tot' : 10,
'ref' : 14,
'profile' : 4,
'vitesse' : 7,
'comment' : 100 }
self.field_align = { 'num' : '>',
'id' : '<',
'lambda' : '>',
'proc' : '<',
'l_shift' : '>',
'l_tot' : '>',
'i_rel' : '>',
'i_cor' : '>',
'i_tot' : '>',
'ref' : '>',
'profile' : '>',
'vitesse' : '>',
'comment' : '<' }
self.field_pos = { 'num' : 0,
'id' : 15,
'lambda' : 24,
'proc' : 5,
'l_shift' : 35,
'i_rel' : 41,
'i_cor' : 51,
'ref' : 59,
'profile' : 73,
'vitesse' : 77,
'comment' : 85 }
self.field_format = { 'num' : '{:>14d}',
'id' : '{:9s}',
'lambda' : '{:11.3f}',
'proc' : '{:1s}',
'l_shift' : '{:6.3f}',
'l_tot' : '{:11.3f}',
'i_rel' : '{:10.3e}',
'i_cor' : '{:7.3f}',
'i_tot' : '{:10.3e}',
'ref' : '{:>14d}',
'profile' : '{:>4d}',
'vitesse' : '{:7.2f}',
'comment' : '{:>s}' }
self.field_tip = { 'num' : 'line code number',
'id' : 'ion',
'lambda' : 'wavelength in air',
'proc' : 'line process',
'l_shift' : 'wavelength additive correction',
'l_tot' : 'corrected wavelength',
'i_rel' : 'relative intensity',
'i_cor' : 'intensity correction factor',
'i_tot' : 'corrected intensity',
'ref' : 'reference line code number',
'profile' : 'line profile code number',
'vitesse' : 'natural line width',
'comment' : 'comment' }
self.field_abbr = { 'num' : 'line number',
'id' : 'ion',
'lambda' : 'wavelength',
'proc' : 'process',
'l_shift' : 'w shift',
'l_tot' : 'corr wave',
'i_rel' : 'intensity',
'i_cor' : 'i factor',
'i_tot' : 'corr int',
'ref' : 'ref line',
'profile' : 'profile',
'vitesse' : 'v factor',
'comment' : 'comment' }
self.calling = 'spectrum'
self.full_config_file = config_file
if '/' in self.full_config_file:
file_name = self.full_config_file.split('/')[-1]
dir_ = self.full_config_file.split(file_name)[0]
if dir_ == '':
dir_ = './'
self.directory = dir_
self.config_file = file_name
else:
self.directory = './'
self.config_file = self.full_config_file
config.addDataFilePath(self.directory, inpySSN=False)
self.init_vars()
self.read_conf(self.config_file)
log_.level = self.get_conf('log_level', 2)
if not self.get_conf('do_synth'):
self.set_conf('plot_residuals', False)
self.set_conf('fic_cosmetik', 'NO_cosmetik.dat')
self.set_conf('fic_modele', 'NO_modele.dat')
self.set_conf('phyat_file', 'NO_phyat.dat')
if self.get_conf('spectr_obs') is None:
self.set_conf('plot_residuals', False)
if do_synth is None:
self.do_synth = self.get_conf('do_synth')
else:
self.do_synth = do_synth
if do_read_liste is None:
do_read_liste = self.get_conf('do_read_liste')
if do_cosmetik is None:
do_cosmetik = self.get_conf('do_cosmetik')
self.profil_instr = profil_instr
self.do_cosmetik = do_cosmetik
self.post_proc_file = post_proc_file
self.init_obs(spectr_obs=spectr_obs, sp_norm=sp_norm, obj_velo=obj_velo, limit_sp=limit_sp)
self.init_red_corr()
self.make_continuum()
if phyat_file is not None:
self.phyat_file = phyat_file
else:
self.phyat_file = self.get_conf('phyat_file', 'liste_phyat.dat')
if do_run:
self.run(do_synth = self.do_synth, do_read_liste = do_read_liste)
self.show_uncor_spec = False
def init_vars(self):
self.fig1 = None
self.fig2 = None
self.fig3 = None
self.ax1 = None
self.ax2 = None
self.ax3 = None
self.cursor_width = 0.02
self.cursor_w0 = None
self.cursor_w1 = None
self.cursor_w2 = None
self.firstClick = True
self.aire_ref = 1.0
self.zoom_fact = 0.1
self._cid = None
self.plot_magenta = None
self.plot_cyan = None
self.label_magenta = None
self.label_cyan = None
self.hr = False
self.split = True
self.do_ax2 = True
self.do_buttons = True
self.do_ax3 = True
self.ax2_fontsize = 12
self.legend_loc = 1
self.legend_fontsize = 'medium'
self.x_plot_lims = None
self.y1_plot_lims = None
self.y2_plot_lims = None
self.y3_plot_lims = None
self.read_obs_error = ''
self.iterpolate_velocity = True
def init_obs(self, spectr_obs=None, sp_norm=None, obj_velo=None, limit_sp=None):
if spectr_obs is not None:
self.set_conf('spectr_obs', spectr_obs)
if sp_norm is not None:
self.set_conf('sp_norm', sp_norm)
if obj_velo is not None:
self.set_conf('obj_velo', obj_velo)
if limit_sp is None:
self.limit_sp = self.get_conf('limit_sp')
else:
self.limit_sp = limit_sp
self.read_obs()
def run(self, do_synth = True, do_read_liste = True, do_profiles=True):
ErrorMsg = ''
if do_profiles:
self.do_profile_dict()
if do_synth:
if do_read_liste:
self.fic_model = self.get_conf('fic_modele', message='error')
self.phyat_arr, ErrorMsg = self.read_phyat(self.phyat_file)
self.errorMsg = ('{}\n\n{}'.format(self.errorMsg,ErrorMsg)).strip()
self.model_arr, ErrorMsg = self.read_model(self.fic_model)
self.errorMsg = ('{}\n\n{}'.format(self.errorMsg,ErrorMsg)).strip()
self.n_models = len(self.model_arr)
self.n_data = len(self.phyat_arr)
if self.n_models > 0 and self.n_data > 0:
self.cosmetik_arr, errorMsg = self.read_cosmetik()
self.n_cosmetik = len(self.cosmetik_arr)
self.sp_theo, self.liste_totale, self.liste_raies = \
self.append_lists(self.phyat_arr, self.model_arr, self.cosmetik_arr)
self.sp_theo, self.sp_synth = self.make_synth(self.liste_raies, self.sp_theo)
self.n_sp_theo = len(self.sp_theo['spectr'])
else:
self.sp_theo = None
self.sp_synth = None
self.n_sp_theo = 0
self.set_conf('do_synth', False)
else:
self.sp_theo = None
self.sp_synth = None
self.n_sp_theo = 0
self.f *= self.aire_ref
self.sp_abs = self.make_sp_abs(self.sp_theo)
self.make_filter_instr()
self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
def get_key_indexes(self, key, prof):
return sorted([indexed_key.replace(key,'') for indexed_key in prof.keys() if key in indexed_key])
def format_instr_prof(self):
def get_indexes(key):
l = self.get_key_indexes(key, self.conf['instr_prof'])
return l
prof = self.conf['instr_prof']
if prof is None:
return 'instrumental profile not defined'
keys = prof.keys()
if not 'width' in keys:
return 'invalid instrumental profile: width parameter is missing'
indexes = get_indexes('Bb')
if not indexes == get_indexes('Br') == get_indexes('alpha') == get_indexes('beta'):
return 'invalid instrumental profile: error in indexes'
w1 = max([len(str(prof[key])) for key in keys if 'Bb' in key])
w2 = max([len(str(prof[key])) for key in keys if 'Br' in key])
w3 = max([len(str(prof[key])) for key in keys if 'beta' in key])
w4 = max([len(str(prof[key])) for key in keys if 'alpha' in key])
s = '\'width\': {}'.format(prof['width'])
for i in indexes:
s += ',\n \'Bb{0}\':{1:{w1}}, \'Br{0}\':{2:{w2}}, \'beta{0}\':{3:{w3}}, \'alpha{0}\':{4:{w4}}'.format(i,
prof['Bb'+i], prof['Br'+i], prof['beta'+i], prof['alpha'+i], w1 = w1, w2 = w2, w3 = w3, w4 = w4)
if 'comment' in keys:
s += ',\n \'comment\': \'{}\''.format(prof['comment'].strip())
s = '{{{}}}'.format(s)
return s
def do_profile_dict(self, return_res=False):
self.fic_profs = self.get_conf('fic_profile', None)
if self.fic_profs is None:
self.fic_profs = execution_path('./')+'../data/default_profiles.dat'
else:
self.fic_profs = self.directory + self.fic_profs
if not os.path.isfile(self.fic_profs):
log_.error('File not found {}'.format(self.fic_profs), calling=self.calling)
emis_profiles = {}
emis_profiles['1'] = {'T4': 1.0, 'vel':0.0, 'params': [['G', 1.0, 0.0, 1.0]]}
prof_params = None
with open(self.fic_profs) as f:
for l in f:
if l[0] not in ('#', 'c', 'C', ';'):
if '#' in l:
l = l.split('#')[0]
if ';' in l:
l = l.split(';')[0]
if ':' in l:
if prof_params is not None:
emis_profiles[key] = {'T4': T4,
'vel': vel,
'params' : prof_params}
key, T4_vel = l.split(':')
T4_vel = T4_vel.split()
T4 = np.float(T4_vel[0].strip())
if len(T4_vel) == 2:
vel = np.float(T4_vel[1].strip())
else:
vel = 0.0
prof_params = []
else:
if l.split() != []:
params = l.split()
params[1::] = [np.float(p.strip()) for p in params[1::]]
prof_params.append(params)
emis_profiles[key] = {'T4': T4,
'vel': vel,
'params' : prof_params}
log_.message('line profiles read from {0}'.format(self.fic_profs),
calling = self.calling)
if return_res:
return emis_profiles
self.emis_profiles = emis_profiles
def compare_profiles(self):
ref_diff = []
new_profile = self.do_profile_dict(return_res=True)
for k in new_profile.keys():
if k not in self.emis_profiles.keys():
ref_diff.append(k)
if new_profile[k]['T4'] != self.emis_profiles[k]['T4']:
ref_diff.append(k)
if new_profile[k]['vel'] != self.emis_profiles[k]['vel']:
ref_diff.append(k)
for lo, ln in zip(self.emis_profiles[k]['params'], new_profile[k]['params']):
for llo,lln in zip(lo, ln):
if llo != lln:
ref_diff.append(k)
return np.unique(ref_diff)
def get_profile(self, raie):
basic_profiles_dic = {'G': (3, gauss),
'C': (3, carre),
'L': (3, lorentz)}
profile_key = str(raie['profile'])
if profile_key not in self.emis_profiles:
profile_key = '1'
T4 = self.emis_profiles[profile_key]['T4']
vel = self.emis_profiles[profile_key]['vel']
params_str = self.emis_profiles[profile_key]['params']
lambda_0 = raie['lambda'] + raie['l_shift'] + self.get_conf('lambda_shift', 0.0)
w_norm = self.w - lambda_0 - vel * lambda_0 / CST.CLIGHT * 1e5
profile = np.zeros_like(self.w)
largeur = raie['vitesse'] * lambda_0 / CST.CLIGHT * 1e5
masse = 2 * (raie['num'] - raie['num'] % 100000000000)/100000000000
if (masse == 2 and (raie['num'] - raie['num'] % 101000000000)/100000000 == 1010) :
masse = 1
for param in params_str:
profile_type = param[0]
if profile_type not in basic_profiles_dic:
log_.error('Wrong number profile reference{}'.format(profile_type), calling=self.calling)
params = param[1::]
if len(params) != basic_profiles_dic[profile_type][0]:
log_.error('Wrong number of parameters {} for profile {}'.format(len(params), profile_type), calling=self.calling)
profile += basic_profiles_dic[profile_type][1](w_norm, params[0], params[1]*largeur, params[2]*largeur)
if T4 > 0.0:
fwhm_therm = 21.4721 * np.sqrt(T4 / masse) * lambda_0 / CST.CLIGHT * 1e5 #km/s
profile = convolgauss(profile, self.w, lambda_0, fwhm_therm)
profile[~np.isfinite(profile)] = 0.0
return profile
def read_conf(self, config_file=None):
if config_file is None:
config_file = self.config_file
else:
self.config_file = config_file
self.conf = {}
init_conf = {}
execfile(execution_path('./')+'init_defaults.py', self.conf)
self.default_keys = list(self.conf.keys())
if self.config_file is not None:
if not os.path.exists(self.directory + self.config_file):
log_.error('File {} not found'.format(self.directory + self.config_file))
try:
execfile(self.directory + self.config_file, init_conf)
log_.message('configuration read from {0}'.format(self.config_file),
calling = self.calling)
except:
log_.warn('configuration NOT read from {0}'.format(self.config_file),
calling = self.calling)
obsolete_keys = list(set(init_conf.keys())-set(self.default_keys))
obsolete_keys.sort()
if len(obsolete_keys) > 0:
log_.message('list of variables read from {} that changed name or are obsolete:\n{}'.format(self.config_file, obsolete_keys),
calling = self.calling)
# to change keys automatically
old_keys = ['allow_editing_lines', 'gcont_pl_alpha', 'index_of_current_ion', 'prof', 'line_field_print', 'line_saved_filename', 'line_saved_header', 'line_saved_ordered_by', 'qt_fig_adjust', 'qt_fig_bottom', 'qt_fig_hspace', 'qt_fig_left', 'qt_fig_right', 'qt_fig_top', 'show_dialogs', 'update_after_editing_lines']
new_keys = ['qt_allow_editing_lines', 'cont_pl_alpha', 'index_of_selected_ions', 'instr_prof', 'save_lines_fields', 'save_lines_filename', 'save_lines_header', 'save_lines_sort', 'fig_adjust', 'fig_bottom', 'fig_hspace', 'fig_left', 'fig_right', 'fig_top', 'qt_show_dialogs', 'qt_update_after_editing_lines']
new_name = dict(zip(old_keys, new_keys))
for key in old_keys:
if key in init_conf.keys():
if new_name[key] not in init_conf.keys():
init_conf[new_name[key]] = init_conf[key]
log_.message('variable \'{}\' get from old name \'{}\' from init file {}'.format(new_name[key], key, self.config_file),
calling = self.calling)
del init_conf[key]
# to get 'cont_user_table' from old {'cont_in_lambda', 'cont_intens', 'cont_lambda'}
if {'cont_in_lambda', 'cont_intens', 'cont_lambda'}.issubset(set(init_conf.keys())) and 'cont_user_table' not in init_conf.keys():
x = init_conf['cont_lambda']
y = init_conf['cont_intens']
if isinstance(x, (list,)) and isinstance(y, (list,)) and len(x) == len(y):
s = ''
for i in range(len(x)):
s += '({}, {}), '.format(x[i], y[i])
s = s.strip(' ,')
if s != '':
                    cmd = 'cont_user_table = [{}]'.format(s)
                    try:
                        user_module = {}
                        exec(cmd, user_module)
                        value = user_module['cont_user_table']
                        self.set_conf('cont_user_table', value)
                        log_.message('\'cont_user_table\' obtained from \'cont_lambda\' and \'cont_intens\'', calling = self.calling)
                    except:
                        log_.warn('Cannot get \'cont_user_table\' from \'cont_lambda\' and \'cont_intens\'', calling = self.calling)
self.conf.update(init_conf)
# Obsolete for qt
self.plot_magenta = self.get_conf('plot_magenta', None)
self.label_magenta = self.get_conf('label_magenta', None)
self.plot_cyan = self.get_conf('plot_cyan', None)
self.label_cyan = self.get_conf('label_cyan', None)
# If you DON'T want an i_cor on a main line to affect the satellites,
# set the following variable to False
self.set_conf('recursive_i_cor', True)
# Is i_cor applied in the atomic physic database AND model database?
# If this is the case, i_cor on phyat_database will be directly
# applied on i_rel and will not appear as i_cor in the printed liste
# of lines or with the cursor.
self.set_conf('do_icor_outside_cosmetik', True)
# If you want to perform cosmetik on reference lines (which have ref = 0):
self.set_conf('do_icor_on_ref', True)
        # Here follow the characteristics of the reference line.
# This line will be assumed to have a flux
# at center of 1.00/A. NO!!!
# Obsolete
# self.set_conf('do_calcul_aire_ref', False)
# self.set_conf('raie_ref ', {"vitesse" : 25.0, "lambda" : 4861.0, "profile" : 1}) # depuis 25/10/01
def get_conf(self, key=None, undefined=None, message=None):
"""
Return the value of the key parameter in the configuration.
If key is not defined, return the value of the undefined keyword, default being None.
"""
if key is None:
for k in self.conf.keys():
self.get_conf(k)
return None
if key not in self.conf:
if message == 'warn':
log_.warn('{0} not defined in configuration file'.format(key), calling=self.calling)
elif message == 'message':
log_.message('{0} not defined in configuration file'.format(key), calling=self.calling)
elif message == 'error':
log_.error('{0} not defined in configuration file'.format(key), calling=self.calling)
else:
pass
return undefined
else:
return self.conf[key]
        # Note: the following check is unreachable, since both branches above return
        # before this point.
        # if 'fic_modele' not in self.conf:
        #     log_.warn('fic_model not defined in configuration file', calling=self.calling)
        #     return None
def set_conf(self, key, value):
"""
Set the value of the configuration "key" parameter to "value".
"""
self.conf[key] = value
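    # Read the atomic line database (phyat file), trying each directory listed in
    # config.DataPaths until read_data succeeds.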
def read_phyat(self, phyat_file):
self.phyat_file = phyat_file
phyat_arr = []
for dir_ in config.DataPaths:
try:
phyat_arr, ErrorMsg = read_data('{0}/{1}'.format(dir_, self.phyat_file))
if ErrorMsg:
ErrorMsg = 'Error in line database file \'{}\':'.format(self.phyat_file) + ErrorMsg
log_.message('phyat data read from {0}/{1}'.format(dir_, self.phyat_file),
calling = self.calling)
break
except:
ErrorMsg = 'Line database file \'{}\' not found.'.format(self.phyat_file)
if len(phyat_arr) == 0:
log_.warn( ErrorMsg, calling = self.calling)
return phyat_arr, ErrorMsg
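    # Read the model line list. The special value 'from phyat' builds the model list from
    # the reference (ref == 999) entries of the atomic database; otherwise the file is
    # read from the working directory with read_data.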
def read_model(self, model_file):
ErrorMsg = ''
model_arr = []
path = self.directory + model_file
        if model_file != 'from phyat' and not os.path.isfile(path):
ErrorMsg = 'Model file \'{}\' not found.'.format(os.path.basename(path))
log_.warn(ErrorMsg, calling=self.calling)
return model_arr, ErrorMsg
if model_file == 'from phyat':
mask = self.phyat_arr['ref'] == 999
model_arr = self.phyat_arr.copy()[mask]
model_arr['num'] -= 90000000000000
model_arr['vitesse'] = 10
log_.message('data initialized from phyat',
calling = self.calling)
else:
try:
model_arr, ErrorMsg = read_data(path)
if ErrorMsg == '':
log_.message('cosmetik read from {0}'.format(os.path.basename(path)), calling = self.calling)
else:
ErrorMsg = 'Error in model file \'{0}\':'.format(os.path.basename(path)) + ErrorMsg
log_.warn(ErrorMsg, calling = self.calling)
log_.message('data read from {0}'.format(path),
calling = self.calling)
except:
ErrorMsg = 'Unable to read from file \'{0}\''.format(os.path.basename(path))
log_.warn(ErrorMsg, calling = self.calling)
        if len(model_arr) > 0:
            model_arr['ref'] = 0
return model_arr, ErrorMsg
def read_cosmetik_old(self):
self.fic_cosmetik = self.get_conf('fic_cosmetik', message='warn')
self.do_cosmetik = self.get_conf('do_cosmetik')
        cosmetik_arr = []
        fic_cosmetik_ok = False
if self.do_cosmetik and self.fic_cosmetik is not None:
try:
cosmetik_arr, msg = read_data(self.fic_cosmetik)
fic_cosmetik_ok = True
log_.message('cosmetik read from {0}'.format(self.directory + self.fic_cosmetik),
calling = self.calling)
except:
fic_cosmetik_ok = False
log_.warn('unable to read from {0}'.format(self.directory + self.fic_cosmetik),
calling = self.calling)
return cosmetik_arr, fic_cosmetik_ok
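    # Read the cosmetic file (if cosmetics are enabled), accepting either an absolute path
    # or a path relative to the working directory; a missing or empty file only triggers
    # a warning.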
def read_cosmetik(self):
self.fic_cosmetik = self.get_conf('fic_cosmetik', message='warn')
self.do_cosmetik = self.get_conf('do_cosmetik')
cosmetik_arr = []
ErrorMsg = ''
if self.do_cosmetik and self.fic_cosmetik is not None:
if os.path.isabs(self.fic_cosmetik):
path = self.fic_cosmetik
else:
path = self.directory + self.fic_cosmetik
if os.path.isfile(path):
if os.path.getsize(path) > 0:
cosmetik_arr, ErrorMsg = read_data(path)
if ErrorMsg == '':
log_.message('cosmetik read from {0}'.format(path), calling = self.calling)
else:
log_.warn('unable to read from {0}'.format(path), calling = self.calling)
else:
log_.warn('empty cosmetic file {0}'.format(path), calling = self.calling)
else:
log_.warn('new cosmetic file {0}'.format(path), calling = self.calling)
return cosmetik_arr, ErrorMsg
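    # Read the observed spectrum (FITS or ASCII), or build a flat dummy spectrum when no
    # observation is available. The wavelength table may come from the file itself or from
    # a pixel calibration ('cal_lambda'/'cal_pix'), is shifted by the object velocity and
    # optional wavelength-shift tables, then cut to limit_sp and rebinned by 'resol'.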
def read_obs(self, k_spline = 1):
self.read_obs_error = ''
if self.get_conf('spectr_obs') is not None:
s = self.conf['spectr_obs'].split('.')
if len(s) == 1:
                comm = '(with extension .fits, .spr, and .spr.gz) '
obs_file = self.directory + s[0] + '.spr'
if not os.path.isfile(obs_file):
obs_file = self.directory + s[0] + '.spr.gz'
if not os.path.isfile(obs_file):
obs_file = self.directory + s[0] + '.fits'
else:
comm = ''
obs_file = self.directory + self.conf['spectr_obs']
if not os.path.isfile(obs_file):
self.read_obs_error = 'Observed spectrum file \'{}\' {}not found'.format(self.conf['spectr_obs'], comm)
log_.warn(self.read_obs_error, calling = self.calling)
else:
if obs_file.split('.')[-1] == 'fits':
from astropy.io import fits
self.f, header = fits.getdata(obs_file, header=True)
if header['NAXIS'] == 1:
dispersion_start = header['CRVAL1'] - (header['CRPIX1'] - 1) * header['CDELT1']
self.w = dispersion_start + np.arange(len(self.f)) * header['CDELT1']
else:
try:
self.f, header = fits.getdata(obs_file, header=True)
self.f = np.mean(self.f, 1)
dispersion_start = header['CRVAL2'] - (header['CRPIX2'] - 1) * header['CDELT2']
self.w = dispersion_start + np.arange(len(self.f)) * header['CDELT2']
except:
self.read_obs_error = 'Observations NOT read from {0}'.format(obs_file)
log_.warn(self.read_obs_error, calling = self.calling)
else:
try:
self.obs = np.loadtxt(obs_file)
log_.message('Observations read from {0}'.format(obs_file),
calling = self.calling)
if bool(self.get_conf('data_incl_w', undefined = False)):
self.w = self.obs[:,0]
self.f = self.obs[:,1]
else:
self.f = self.obs
self.w = None
if bool(self.get_conf('reverse_spectra', undefined=False)):
self.f = self.f[::-1]
except:
self.read_obs_error = 'Observations NOT read from {0}'.format(obs_file)
log_.warn(self.read_obs_error, calling = self.calling)
if self.get_conf('spectr_obs') is None or len(self.read_obs_error) > 0:
n_pix = (self.limit_sp[1] - self.limit_sp[0]) / self.conf['lambda_pix']
self.w = np.linspace(self.limit_sp[0], self.limit_sp[1], int(n_pix))
self.f = np.ones_like(self.w)
self.set_conf('plot_residuals', False)
if self.get_conf('wave_unit') == 'mu':
self.w *= 10000.
self.n_lambda = len(self.f)
self.tab_pix = np.arange(self.n_lambda)
self.f *= self.get_conf('sp_norm', undefined = 1.)
if ("cal_lambda" in self.conf) and ("cal_pix" in self.conf) and (self.w is None):
cal_lambda = np.array(self.conf["cal_lambda"])
cal_pix = np.array(self.conf["cal_pix"])
arg_sort = cal_lambda.argsort()
cal_lambda = cal_lambda[arg_sort]
cal_pix = cal_pix[arg_sort]
interp_lam = interpolate.UnivariateSpline(cal_pix, cal_lambda, k=k_spline)
self.w = interp_lam(self.tab_pix)
log_.message('Wavelength table generated using spline of order {0}'.format(k_spline),
calling = self.calling)
if bool(self.get_conf('reverse_spectra', undefined=False)) :
self.f = self.f[::-1]
if self.limit_sp[0] < 0.01:
self.limit_sp[0] = np.min(self.w) * (1. + self.get_conf("delta_limit_sp")/100.)
if self.limit_sp[1] > 0.9e10:
self.limit_sp[1] = np.max(self.w) * (1. - self.get_conf("delta_limit_sp")/100.)
# mvfc: this is a new feature; obj_velo can be set by interpolation
# obj_velo_table = [(4000,85), (4200,90), (4300,90), (6000,75), (7000,85) ]
if self.get_conf('obj_velo_table') is not None and self.iterpolate_velocity:
try:
x = np.array([i[0] for i in list(self.get_conf('obj_velo_table'))])
y = np.array([i[1] for i in list(self.get_conf('obj_velo_table'))])
f = interpolate.interp1d(x, y)
v = f((float(self.limit_sp[0])+float(self.limit_sp[1]))/2)
v = int(v*100)/100.
self.set_conf('obj_velo', v)
except:
#self.set_conf('obj_velo', 0.0)
log_.warn('Error interpolating radial velocity', calling = self.calling)
self.obj_velo = self.get_conf("obj_velo", undefined=0.)
self.w *= 1 - self.obj_velo/(CST.CLIGHT/1e5)
        log_.message('Wavelengths shifted by Vel = {} km/s'.format(self.conf["obj_velo"]),
calling = self.calling)
lims = ((self.w >= self.limit_sp[0]) & (self.w <= self.limit_sp[1]))
log_.message('Observations resized from {0} to {1}'.format(len(self.w), lims.sum()), calling=self.calling)
self.w_min = self.w[0]
self.w_max = self.w[-1]
self.w = self.w[lims]
self.f = self.f[lims]
do_shift = False
if self.get_conf('lambda_shift_table') is not None:
try:
x = np.array([i[0] for i in list(self.get_conf('lambda_shift_table'))])
y = np.array([i[1] for i in list(self.get_conf('lambda_shift_table'))])
f = interpolate.interp1d(x, y, fill_value=0, bounds_error=False)
w_shift = f(self.w)
do_shift = True
except:
                self.read_obs_error = 'Error interpolating wavelength correction table'
log_.warn(self.read_obs_error, calling = self.calling)
self.w_obs = self.w.copy()
self.f_ori = self.f.copy()
if do_shift:
correction_is_valid = True
for i in range(1,len(self.w)):
if ((self.w[i]+w_shift[i])-(self.w[i-1]+w_shift[i-1]))*(self.w[i]-self.w[i-1]) <= 0:
correction_is_valid = False
if correction_is_valid:
self.w += w_shift
log_.message('Wavelengths shifted', calling = self.calling)
else:
                self.read_obs_error = 'Invalid wavelength correction table.\nThe order of pixels must be preserved.'
log_.warn(self.read_obs_error, calling = self.calling)
self.w_ori = self.w.copy()
resol = self.get_conf('resol', undefined = 1, message=None)
log_.message('Observations resized from {0} by a factor of {1}'.format(len(self.w), resol),
calling=self.calling)
self.w = change_size(self.w, resol)
self.f = change_size(self.f, resol)
self.n_lambda = len(self.f)
self.tab_pix = change_size(self.tab_pix, resol)
self.lambda_pix = (np.max(self.w) - np.min(self.w)) / self.n_lambda
#log_.debug('n_lambda = {}, tab_pix = {}, lambda_pix = {}'.format(self.n_lambda, self.tab_pix, self.lambda_pix),
# calling = self.calling)
def renorm(self, new_norm):
self.f /= self.get_conf('sp_norm', undefined = 1.)
self.f_ori /= self.get_conf('sp_norm', undefined = 1.)
self.set_conf('sp_norm', new_norm)
self.f *= self.get_conf('sp_norm', undefined = 1.)
self.f_ori *= self.get_conf('sp_norm', undefined = 1.)
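    # Set up the reddening correction from E(B-V) and R_V using pn.RedCorr, relative to
    # the 'lambda_ref_rougi' wavelength; with E(B-V) <= 0 the correction is unity.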
def init_red_corr(self):
self.E_BV = self.get_conf('e_bv', 0.)
self.R_V = self.get_conf('r_v', 3.1)
if self.E_BV > 0:
RC = pn.RedCorr(E_BV = self.E_BV, law=self.get_conf('red_corr_law', message='error'), R_V=self.R_V)
self.red_corr = RC.getCorr(self.w, self.get_conf('lambda_ref_rougi', message='error'))
log_.message('Reddening correction set to {0}'.format(self.E_BV), calling=self.calling)
else:
self.red_corr = np.ones_like(self.w)
def update_user_cont(self):
user_cont = np.zeros_like(self.w)
if self.get_conf('cont_user_table') is not None:
try:
                # get 'kind' first so that the except clause below can refer to it safely
                kind = self.get_conf('cont_user_func')
                x = np.array([i[0] for i in list(self.get_conf('cont_user_table'))])
                y = np.array([i[1] for i in list(self.get_conf('cont_user_table'))])
if kind == 'cubic' and len(x) < 4:
kind = 'quadratic'
if kind == 'quadratic' and len(x) < 3:
kind = 'linear'
if kind == 'linear' and len(x) < 2:
kind = 'zero'
user_cont_int = interpolate.interp1d(x, y, kind=kind, fill_value=0, bounds_error=False)
user_cont = user_cont_int(self.w)
except:
self.errorMsg = 'Problem in user-defined continuum interpolation.'
kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
if kind not in kinds:
self.errorMsg += '\nInvalid function'
log_.message(self.errorMsg, calling = self.calling)
self.cont /= self.aire_ref
self.cont *= self.red_corr
self.cont = self.cont - self.conts['user'] + user_cont
self.cont *= self.aire_ref
self.cont /= self.red_corr
self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
self.conts['user'] = user_cont
def cont_at(self, wave, side = '-'):
i_list = [i for i in range(len(self.w)-1) if self.w[i] <= wave <= self.w[i+1] or self.w[i+1] <= wave <= self.w[i]]
if len(i_list) == 1:
i = i_list[0]
if side == '+' and i+1 in range(len(self.w)):
return self.cont[i+1]
else:
return self.cont[i]
else:
return None
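    # Build the continuum as the sum of its components: user-defined table, blackbodies,
    # power laws, H I / He I / He II recombination continua (make_cont_Ercolano), free-free
    # emission and the hydrogen two-photon continuum. The total is scaled by the reference
    # line area and divided by the reddening correction.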
def make_continuum(self):
self.conts = {}
user_cont = np.zeros_like(self.w)
if self.get_conf('cont_user_table') is not None:
try:
                # get 'kind' first so that the except clause below can refer to it safely
                kind = self.get_conf('cont_user_func')
                x = np.array([i[0] for i in list(self.get_conf('cont_user_table'))])
                y = np.array([i[1] for i in list(self.get_conf('cont_user_table'))])
if kind == 'cubic' and len(x) < 4:
kind = 'quadratic'
if kind == 'quadratic' and len(x) < 3:
kind = 'linear'
if kind == 'linear' and len(x) < 2:
kind = 'zero'
user_cont_int = interpolate.interp1d(x, y, kind=kind, fill_value=0, bounds_error=False)
user_cont = user_cont_int(self.w)
except:
self.errorMsg = 'Problem in user-defined continuum interpolation.'
kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
if kind not in kinds:
self.errorMsg += '\nInvalid function'
log_.message(self.errorMsg, calling = self.calling)
cont_pix = self.get_conf("cont_pix", 0.)
if cont_pix != 0:
arg_sort = np.array(cont_pix).argsort()
user_cont_int = interpolate.interp1d(np.array(cont_pix)[arg_sort],
np.array(self.get_conf("cont_intens", message='error'))[arg_sort])
user_cont = user_cont_int(self.tab_pix)
self.conts['user'] = user_cont
bb_cont = np.zeros_like(self.w)
if "cont_bb_t" in self.conf:
if np.ndim(self.conf["cont_bb_t"]) == 0:
tab_T = np.array([self.conf["cont_bb_t"]])
tab_I = np.array([self.conf["cont_bb_i"]])
else:
tab_T = np.array(self.conf["cont_bb_t"])
tab_I = np.array(self.conf["cont_bb_i"])
for I, T in zip(tab_I, tab_T):
bb_cont += I * Planck(self.w, T) / T**4
self.conts['bb'] = bb_cont
pl_cont = np.zeros_like(self.w) # Power law
if "cont_pl_alpha" in self.conf:
if np.ndim(self.conf["cont_pl_alpha"]) == 0:
tab_alpha = np.array([self.conf["cont_pl_alpha"]])
tab_I = np.array([self.conf["cont_pl_i"]])
else:
tab_alpha = np.array(self.conf["cont_pl_alpha"])
tab_I = np.array(self.conf["cont_pl_i"])
for I, alpha in zip(tab_I, tab_alpha):
pl_cont += I * (self.w / 5000.)**alpha
self.conts['pl'] = pl_cont
if self.conf["cont_hi_i"] != 0.:
alfa = 1e-13 * 0.668 * (self.conf["cont_hi_t"]/1e4)**(-0.507) / \
(1. + 1.221*(self.conf["cont_hi_t"]/1e4)**(0.653)) * 1.000
emis_Hi = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4861.3 # erg/s.cm3
H_cont = self.conf["cont_hi_i"] * make_cont_Ercolano(self.conf["cont_hi_t"],'H',airtovac(self.w)) / emis_Hi
H_cont[~np.isfinite(H_cont)] = 0.
self.conts['H'] = H_cont
else:
self.conts['H'] = np.zeros_like(self.w)
if self.conf["cont_hei_i"] != 0.0:
alfa = 1e-13 * 0.331 * (self.conf["cont_hei_t"]/1e4)**(-0.615) / \
(1. + 0.910*(self.conf["cont_hei_t"]/1e4)**(0.780)) * 0.7986
emis_Hei = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4471.5
He1_cont = self.conf["cont_hei_i"] * make_cont_Ercolano(self.conf["cont_hei_t"],'He1',airtovac(self.w)) / emis_Hei
He1_cont[~np.isfinite(He1_cont)] = 0.
self.conts['He1'] = He1_cont
else:
self.conts['He1'] = np.zeros_like(self.w)
if self.conf["cont_heii_i"] != 0.0:
alfa = 2. * 1e-13 * 1.549 * (self.conf["cont_heii_t"]/1e4/4.)**(-0.693) / \
(1. + 2.884*(self.conf["cont_heii_t"]/1e4/4.)**(0.609))*1.000
emis_Heii = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4685.8
He2_cont = self.conf["cont_heii_i"] * make_cont_Ercolano(self.conf["cont_heii_t"],'He2',airtovac(self.w)) / emis_Heii
He2_cont[~np.isfinite(He2_cont)] = 0.
self.conts['He2'] = He2_cont
else:
self.conts['He2'] = np.zeros_like(self.w)
gff_HI = gff(1., self.conf["cont_hi_t"], self.w)
gff_HeI = gff(1., self.conf["cont_hei_t"], self.w)
gff_HeII = gff(4., self.conf["cont_heii_t"], self.w)
#32.d0*!phy.e^4.*!phy.h/3./!phy.m_e^2./!phy.c^3.*sqrt(!dpi*13.6*!phy.erg_s_ev/3./!phy.k)= 6.8391014e-38
if self.conf["cont_hi_i"] != 0 and self.conf["cont_hei_i"] != 0 and self.conf["cont_heii_i"] != 0 :
FF_cont = (6.8391014e-38 * CST.CLIGHT * 1e8 / self.w**2. * (
self.conf["cont_hi_i"] * 1.0**2. / np.sqrt(self.conf["cont_hi_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_hi_t"]) * gff_HI/emis_Hi +
self.conf["cont_hei_i"] * 1.0**2./ np.sqrt(self.conf["cont_hei_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_hei_t"]) * gff_HeI/emis_Hei +
self.conf["cont_heii_i"] * 2.0**2. / np.sqrt(self.conf["cont_heii_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_heii_t"]) * gff_HeII / emis_Heii))
FF_cont[~np.isfinite(FF_cont)] = 0.
self.conts['FF'] = FF_cont
else:
self.conts['FF'] = np.zeros_like(self.w)
# 2-photons
#http://adsabs.harvard.edu/abs/1984A%26A...138..495N
if self.conf["cont_hi_i"] != 0:
y = 1215.7 / self.w
A = 202.0 * (y * (1. - y) * (1. -(4. * y * (1 - y))**0.8) + 0.88 * ( y * (1 - y))**1.53 * (4. * y * (1 - y))**0.8)
alfa_eff = 0.838e-13 * (self.conf["cont_hi_t"] / 1e4)**(-0.728) # fit DP de Osterbrock
q = 5.31e-4 * (self.conf["cont_hi_t"] / 1e4)**(-0.17) # fit DP de Osterbrock
n_crit = 8.226 / q
twophot_cont = self.conf["cont_hi_i"] * CST.HPLANCK * CST.CLIGHT * 1e8 / self.w**3. * 1215.7 * A / 8.226 * alfa_eff / (1. + self.conf["cont_edens"]/n_crit) / emis_Hi
twophot_cont[~np.isfinite(twophot_cont)] = 0.
self.conts['2photons'] = twophot_cont
else:
self.conts['2photons'] = np.zeros_like(self.w)
self.cont = np.zeros_like(self.w)
for key in self.conts:
self.cont += self.conts[key]
self.cont *= self.aire_ref
self.cont /= self.red_corr
def plot_conts(self, ax, keys = None):
if self.sp_synth_lr is None:
return
colors = {'bb': 'cyan', 'pl': 'green', '2photons': 'blue', 'FF': 'red',
'H': 'red', 'He1': 'green', 'He2': 'blue', 'user': 'black'}
labels = {'bb': 'bb', 'pl': 'pl', '2photons': '2q', 'FF': 'ff',
'H': 'H I', 'He1': 'He I', 'He2': 'He II', 'user': 'user cont'}
if keys == None:
keys = self.conts.keys()
for key in keys:
if key[0] == 'H':
style=':'
else:
style = '-'
ax.plot(self.w, self.conts[key], linestyle=style, label = labels[key], color = colors[key])
if 'user' in keys:
if self.get_conf('cont_user_table') is not None:
x = np.array([i[0] for i in self.get_conf('cont_user_table')])
y = np.array([i[1] for i in self.get_conf('cont_user_table')])
ax.plot(x, y, marker='o', ms=6, color = colors['user'], ls = '')
y = [self.cont_at(w) for w in x]
y[0] = self.cont_at(x[0], '+')
ax.plot(x, y, marker='o', ms=8, color = 'green', ls = '')
ax.plot(self.w, self.cont, label = 'total cont', linestyle='--', linewidth = 1.5, color = 'green')
ax.legend()
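    # Merge the atomic database and the model list into a single line list; when
    # 'do_icor_outside_cosmetik' is present in the configuration, i_cor is folded into
    # i_rel up front. Cosmetic corrections (wavelength shifts, intensity corrections,
    # widths, profiles) are then applied and the list is restricted to the observed range.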
def append_lists(self, phyat_arr, model_arr, cosmetik_arr):
n_models = len(model_arr)
liste_totale = phyat_arr.copy()
liste_totale.resize(len(phyat_arr) + len(model_arr))
for i,j in enumerate(np.arange(len(phyat_arr), len(phyat_arr) + len(model_arr))):
liste_totale[j] = model_arr[i]
sp_theo = {}
sp_theo['raie_ref'] = model_arr
sp_theo['correc'] = np.zeros(n_models)
sp_theo['spectr'] = np.zeros((n_models, len(self.w)))
if "do_icor_outside_cosmetik" in self.conf:
liste_totale.i_rel *= liste_totale.i_cor
liste_totale.i_cor = 1.
sp_theo['raie_ref'].i_rel *= sp_theo['raie_ref'].i_cor
sp_theo['raie_ref'].i_cor = 1.
if self.do_cosmetik:
for line_cosmetik in cosmetik_arr:
if (line_cosmetik['ref'] == 0) and not bool(self.conf['do_icor_on_ref']):
log_.warn('No cosmetik on {0}, reference line'.format(line_cosmetik['num']),
calling = self.calling)
else:
to_change = (liste_totale.num == line_cosmetik['num'])
if to_change.sum() == 1:
line_to_change = liste_totale[to_change][0]
if (line_to_change['lambda'] == line_cosmetik['lambda']) or (line_cosmetik['l_shift'] == 0.):
line_to_change['l_shift'] = line_cosmetik['l_shift']
if (line_to_change['i_rel'] == line_cosmetik['i_rel']) or (line_cosmetik['i_cor'] == 1.):
line_to_change['i_cor'] = line_cosmetik['i_cor']
liste_totale[to_change] = line_to_change
log_.debug('Cosmetik on {0}'.format([line_cosmetik]), calling=self.calling)
elif to_change.sum() == 0:
if self.get_conf('warn_on_no_cosmetik'):
log_.warn('No cosmetik on {0}, undefined line'.format(line_cosmetik['num']),
calling = self.calling)
else:
log_.warn('No cosmetik on {0}, multiple defined line'.format(line_cosmetik['num']),
calling = self.calling)
liste_raies = self.restric_liste(liste_totale)
log_.message('Size of the line list: {0}, size of the restricted line list: {1}'.format(len(liste_totale),
len(liste_raies)), calling=self.calling)
if self.do_cosmetik:
for line_cosmetik in cosmetik_arr:
if bool(self.conf['do_icor_on_ref']):
to_change = (liste_raies.num == line_cosmetik['num'])
if to_change.sum() == 1:
line_to_change = liste_raies[to_change][0]
line_to_change['vitesse'] *= line_cosmetik['vitesse']
if line_cosmetik['profile'] != -1:
line_to_change['profile'] = line_cosmetik['profile']
liste_raies[to_change] = line_to_change
return sp_theo, liste_totale, liste_raies
def restric_liste(self, liste_in):
"""
This function changes liste_in
"""
"""
We set ref=999 for all the lines depending on a 999 one.
"""
while True:
the_end = True
non_affich = np.where(liste_in['ref'] == 999)[0]
for i_999 in non_affich:
this_num = liste_in['num'][i_999]
dep_non_affich = ((liste_in['ref'] == this_num) & (liste_in['ref'] != 999))
if dep_non_affich.sum() != 0:
before = liste_in['ref'][dep_non_affich]
liste_in['ref'][dep_non_affich] = 999
log_.message('Before = {0}, After = {1}'.format(before, liste_in['ref'][dep_non_affich]), calling=self.calling)
the_end = False
if the_end:
break
where_restr = (((liste_in['lambda'] + liste_in['l_shift']) < np.max(self.w)) &
((liste_in['lambda'] + liste_in['l_shift']) > np.min(self.w)) &
(liste_in['ref'] != 999))
liste_out = liste_in.copy()[where_restr]
log_.message('Old size = {0}, new_size = {1}'.format(len(liste_in), len(liste_out)), calling=self.calling)
last_loop = 0
the_end = False
while True:
if last_loop == 1:
the_end = True
satellites = np.where(liste_out['ref'] != 0)[0]
for i_satellite in satellites[::-1]:
if liste_out['ref'][i_satellite] != -1:
raie_synth = liste_out[i_satellite].copy()
i_main_line = np.where(liste_in['num'] == raie_synth['ref'])[0]
if len(i_main_line) != 1:
if self.get_conf('warn_on_no_reference'):
log_.warn('Satellite sans raie de reference:{0} looking for {1}'.format(raie_synth['num'], raie_synth['ref']),
calling=self.calling)
raie_synth['i_rel'] = 0.0
raie_synth['comment'] = '!pas de ref' + raie_synth['comment']
else:
main_line = liste_in[i_main_line]
if main_line['ref'] != 0:
raie_synth['i_rel'] *= main_line['i_rel']
raie_synth['ref'] = main_line['ref']
if bool(self.conf['recursive_i_cor']):
raie_synth['i_cor'] *= main_line['i_cor']
raie_synth['profile'] = main_line['profile']
last_loop = 2
log_.debug('filling {0} with {1}'.format(liste_out[i_satellite]['num'],
main_line['num']),
calling = self.calling)
if last_loop == 1:
raie_synth['i_rel'] *= main_line['i_rel']
raie_synth['vitesse'] *= main_line['vitesse']
raie_synth['l_shift'] += main_line['l_shift']
if bool(self.conf['recursive_i_cor']):
raie_synth['i_cor'] *= main_line['i_cor']
raie_synth['profile'] = main_line['profile']
log_.debug('filling {0} with {1}, last loop'.format(liste_out[i_satellite]['num'],
main_line['num']),
calling = self.calling)
liste_out[i_satellite] = raie_synth
if last_loop == 0:
last_loop = 1
else:
last_loop = 0
if the_end:
break
tt = (np.abs(liste_out['i_rel']) > 1e-50)
log_.message('number of lines with i_rel > 1e-50: {0}'.format(tt.sum()), calling=self.calling)
return liste_out[tt]
def make_synth_test(self, liste_raies):
sp_theo = self.sp_theo.copy()
sp_synth = np.zeros_like(self.w)
sp_theo['spectr'] *= 0.0
sp_theo['correc'] *= 0.0
for raie in liste_raies:
#sp_tmp = self.profil_emis(self.w, raie, self.conf['lambda_shift'])
sp_tmp = self.get_profile(raie)
aire = np.trapz(sp_tmp, self.w)
if np.isfinite(aire) and (aire != 0.):
max_sp = np.max(sp_tmp)
if (np.abs(sp_tmp[0]/max_sp) > 1e-3) or (np.abs(sp_tmp[-1]/max_sp) > 1e-3):
log_.message('Area of {0} {1} could be wrong'.format(raie['id'].decode().strip(), raie['lambda']),
calling = self.calling)
intens_pic = raie['i_rel'] * raie['i_cor'] * self.aire_ref / aire
if raie['ref'] == 0:
tab_tmp = (sp_theo['raie_ref'].num == raie['num'])
else:
tab_tmp = (sp_theo['raie_ref'].num == raie['ref'])
this_line = intens_pic * sp_tmp
if not no_red_corr(raie):
this_line /= self.red_corr
if not is_absorb(raie):
sp_synth += this_line
sp_theo['spectr'][tab_tmp] += this_line
sp_theo['correc'][tab_tmp] = 1.0
tt = (sp_theo['correc'] != 0.)
for key in ('correc', 'raie_ref', 'spectr'):
sp_theo[key] = sp_theo[key][tt]
log_.message('Number of theoretical spectra: {0}'.format(len(sp_theo['correc'])), calling=self.calling)
return sp_theo, sp_synth
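    # Compute the synthetic spectrum: each line profile is scaled so that its integrated
    # area equals i_rel * i_cor times the reference line area, divided by the reddening
    # correction unless no_red_corr() applies, and accumulated both in the total emission
    # spectrum (absorption lines excepted) and in the per-reference-line spectra of sp_theo.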
def make_synth(self, liste_raies, sp_theo):
sp_synth = np.zeros_like(self.w)
sp_theo['spectr'] *= 0.0
sp_theo['correc'] *= 0.0
#TODO parallelize this loop
for raie in liste_raies:
#sp_tmp = self.profil_emis(self.w, raie, self.conf['lambda_shift'])
sp_tmp = self.get_profile(raie)
aire = np.trapz(sp_tmp, self.w)
if np.isfinite(aire) and (aire != 0.):
max_sp = np.max(sp_tmp)
if (np.abs(sp_tmp[0]/max_sp) > 1e-3) or (np.abs(sp_tmp[-1]/max_sp) > 1e-3):
log_.message('Area of {0} {1} could be wrong'.format(raie['id'].decode().strip(), raie['lambda']),
calling = self.calling)
intens_pic = raie['i_rel'] * raie['i_cor'] * self.aire_ref / aire
log_.debug('{} aire = {}'.format(raie['num'], aire), calling=self.calling)
if raie['ref'] == 0:
tab_tmp = (sp_theo['raie_ref'].num == raie['num'])
else:
tab_tmp = (sp_theo['raie_ref'].num == raie['ref'])
this_line = intens_pic * sp_tmp
if not no_red_corr(raie):
this_line /= self.red_corr
if not is_absorb(raie):
sp_synth += this_line
sp_theo['spectr'][tab_tmp] += this_line
sp_theo['correc'][tab_tmp] = 1.0
log_.debug('doing line {}'.format(raie['num']), calling=self.calling)
tt = (sp_theo['correc'] != 0.)
for key in ('correc', 'raie_ref', 'spectr'):
sp_theo[key] = sp_theo[key][tt]
log_.message('Number of theoretical spectra: {0}'.format(len(sp_theo['correc'])), calling=self.calling)
return sp_theo, sp_synth
def make_sp_abs_original(self, sp_theo):
if sp_theo is None:
return None
sp_tau = np.zeros_like(self.w)
"""
WARNING check also misc.is_absorb(raie)
"""
index_abs = is_absorb(self.sp_theo['raie_ref'])
for i_abs in index_abs:
sp_tau += self.sp_theo['spectr'][i_abs] * self.sp_theo['correc'][i_abs]
sp_abs = np.exp(sp_tau)
if self.get_conf('fic_atm') is not None:
if type(self.get_conf('fic_atm')) not in (list, tuple):
self.conf['fic_atm'] = (self.conf['fic_atm'],)
self.conf['coeff_atm'] = (self.conf['coeff_atm'],)
self.conf['shift_atm'] = (self.conf['shift_atm'],)
if len(self.get_conf('fic_atm')) != len(self.get_conf('coeff_atm')):
log_.error('fic_atm number {} != coeff_atm number {}'.format(len(self.get_conf('fic_atm')), len(self.get_conf('coeff_atm'))),
calling = self.calling)
for fic_atm, coeff_atm, shift_atm in zip(self.get_conf('fic_atm'), self.get_conf('coeff_atm'), self.get_conf('shift_atm')):
try:
d = np.genfromtxt(fic_atm, dtype=None, names=('wl', 'abs'))
d['wl'] = vactoair(d['wl'], self.conf['vactoair_inf'], self.conf['vactoair_sup'])
if type(coeff_atm) not in (list, tuple):
coeff_atm = (coeff_atm, )
if type(shift_atm) not in (list, tuple):
shift_atm = (shift_atm, )
for c_atm, s_atm in zip(coeff_atm, shift_atm):
abs_interp = interpolate.interp1d(d['wl']*(1+s_atm/CST.CLIGHT*1e5), d['abs'])
sp_abs *= np.exp(np.log(abs_interp(self.w)) * c_atm)
except:
log_.warn('Problem in using data from {}'.format(fic_atm),
calling = self.calling)
# sp_abs /= self.red_corr
return sp_abs
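    # Build the absorption factor as exp() of the summed profiles of the reference lines
    # flagged by is_absorb, optionally multiplied by absorption read from the 'fic_atm'
    # file(s) (presumably telluric/atmospheric), each scaled by coeff_atm and shifted in
    # velocity by shift_atm.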
def make_sp_abs(self, sp_theo, index_abs=None):
if sp_theo is None:
return None
sp_tau = np.zeros_like(self.w)
"""
WARNING check also misc.is_absorb(raie)
"""
if index_abs is None:
index_abs = is_absorb(self.sp_theo['raie_ref'])
for i_abs in index_abs:
sp_tau += self.sp_theo['spectr'][i_abs] * self.sp_theo['correc'][i_abs]
sp_abs = np.exp(sp_tau)
if self.get_conf('fic_atm') is not None:
if type(self.get_conf('fic_atm')) not in (list, tuple):
self.conf['fic_atm'] = (self.conf['fic_atm'],)
self.conf['coeff_atm'] = (self.conf['coeff_atm'],)
self.conf['shift_atm'] = (self.conf['shift_atm'],)
if len(self.get_conf('fic_atm')) != len(self.get_conf('coeff_atm')):
log_.error('fic_atm number {} != coeff_atm number {}'.format(len(self.get_conf('fic_atm')), len(self.get_conf('coeff_atm'))),
calling = self.calling)
for fic_atm, coeff_atm, shift_atm in zip(self.get_conf('fic_atm'), self.get_conf('coeff_atm'), self.get_conf('shift_atm')):
try:
d = np.genfromtxt(fic_atm, dtype=None, names=('wl', 'abs'))
d['wl'] = vactoair(d['wl'], self.conf['vactoair_inf'], self.conf['vactoair_sup'])
if type(coeff_atm) not in (list, tuple):
coeff_atm = (coeff_atm, )
if type(shift_atm) not in (list, tuple):
shift_atm = (shift_atm, )
for c_atm, s_atm in zip(coeff_atm, shift_atm):
abs_interp = interpolate.interp1d(d['wl']*(1+s_atm/CST.CLIGHT*1e5), d['abs'])
sp_abs *= np.exp(np.log(abs_interp(self.w)) * c_atm)
except:
log_.warn('Problem in using data from {}'.format(fic_atm),
calling = self.calling)
# sp_abs /= self.red_corr
return sp_abs
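    # Build the instrumental profile kernel: the filter size is grown geometrically until
    # its wings fall below a detection limit (or the kernel spans the whole spectrum),
    # then the kernel is normalized to unit sum.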
def make_filter_instr(self):
if self.sp_synth is None:
self.filter_ = None
return None
filter_size = 11
increm = 1.1
detect_limit = 1e-3 / np.max(self.sp_synth)
while True:
filter_size = int(filter_size * increm)
# if filter_size/2*2 == filter_size:
if filter_size%2 == 0:
filter_size += 1
if filter_size > self.n_lambda:
break
self.filter_ = self.profil_instr(filter_size, self.conf['instr_prof'], self.lambda_pix)
if (abs(self.filter_[0]) < detect_limit) and (abs(self.filter_[-1]) < detect_limit):
break
self.filter_ /= self.filter_.sum()
def convol_synth(self, cont, sp_synth):
if sp_synth is None:
return None
input_arr = (cont + sp_synth) * self.sp_abs
kernel = self.filter_
sp_synth_tot = convol(input_arr, kernel)
return sp_synth_tot
def rebin_on_obs(self):
if self.sp_synth_tot is None:
return None, None
resol = self.get_conf('resol', undefined = 1, message=None)
cont_lr = rebin(self.cont, resol)
sp_synth_lr = rebin(self.sp_synth_tot, resol)
return cont_lr, sp_synth_lr
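    # Incremental update: re-read the model and cosmetic files, find the lines and
    # profiles that differ from the current state, recompute only those lines (old and new
    # versions) and patch sp_theo, sp_synth and the absorption accordingly, before
    # re-convolving and re-binning. Returns the number of changed lines (or -1 on a read
    # error) and an error message.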
def adjust(self):
spectr0 = self.sp_theo['spectr'].copy()
new_model_arr, errorMsg = self.read_model(self.fic_model)
if len(errorMsg) > 0:
return -1, errorMsg
new_cosmetik_arr, errorMsg = self.read_cosmetik()
if len(errorMsg) > 0:
return -1, errorMsg
new_sp_theo, new_liste_totale, new_liste_raies = self.append_lists(self.phyat_arr, new_model_arr, new_cosmetik_arr)
mask_diff = np.zeros(len(new_liste_raies), dtype=bool)
for key in ('lambda', 'l_shift', 'i_rel', 'i_cor', 'vitesse', 'profile'):
mask_diff = mask_diff | (new_liste_raies[key] != self.liste_raies[key])
log_.debug('{} differences in lines from files'.format(mask_diff.sum()),
calling=self.calling + ' adjust')
ref_diff = self.compare_profiles()
log_.debug('{} differences in profile'.format(len(ref_diff)),
calling=self.calling + ' adjust')
for im, l in enumerate(new_liste_raies.profile):
if np.str(l) in ref_diff:
mask_diff[im] = True
if mask_diff.sum() > 0 and len(mask_diff) == len(self.liste_raies):
old_sp_theo = self.sp_theo.copy()
if len(new_sp_theo) != len(old_sp_theo):
log_.error('The new list has different number of elements',
calling = self.calling+'.adjust')
liste_old_diff = self.liste_raies[mask_diff]
old_sp_theo, old_sp_synth = self.make_synth(liste_old_diff, old_sp_theo)
if len(ref_diff) > 0:
self.do_profile_dict()
liste_new_diff = new_liste_raies[mask_diff]
new_sp_theo, new_sp_synth = self.make_synth(liste_new_diff, new_sp_theo)
if log_.level >= 3:
print('Old values:')
self.print_line(liste_old_diff)
print('New values:')
self.print_line(liste_new_diff)
do_abs = False
self.sp_theo['spectr'] = spectr0
old_sp_abs = self.make_sp_abs(old_sp_theo)
for i_change in np.arange(len(new_sp_theo['raie_ref'])):
to_change = (self.sp_theo['raie_ref']['num'] == new_sp_theo['raie_ref'][i_change]['num'])
new_sp_theo['correc'][i_change] = self.sp_theo['correc'][to_change].copy()
old_sp_theo['correc'][i_change] = self.sp_theo['correc'][to_change].copy()
if (new_sp_theo['raie_ref'][i_change]['i_rel'] != old_sp_theo['raie_ref'][i_change]['i_rel']):
new_sp_theo['correc'][i_change] = 1.0
self.sp_theo['spectr'][to_change] += new_sp_theo['spectr'][i_change] - old_sp_theo['spectr'][i_change]
self.sp_theo['raie_ref'][to_change] = new_sp_theo['raie_ref'][i_change]
self.sp_theo['correc'][to_change] = new_sp_theo['correc'][i_change]
if is_absorb(new_sp_theo['raie_ref'][i_change]):
do_abs = True
else:
self.sp_synth += (new_sp_theo['correc'][i_change] * new_sp_theo['spectr'][i_change] -
old_sp_theo['correc'][i_change] * old_sp_theo['spectr'][i_change])
log_.message('change line {0}'.format(new_sp_theo['raie_ref'][i_change]['num']),
calling=self.calling + ' adjust')
if do_abs:
self.sp_abs = self.sp_abs/old_sp_abs*self.make_sp_abs(self.sp_theo)
self.liste_raies = new_liste_raies
self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
log_.message('{} differences'.format(mask_diff.sum()), calling=self.calling + ' adjust')
return mask_diff.sum(), errorMsg
#self.update_plot2()
# def modif_intens(self, raie_num, fact):
#
# if fact <= 0.:
# log_.error('fact must be >0. {0}'.format(fact))
# return None
# a_changer = (self.sp_theo['raie_ref']['num'] == raie_num)
# if a_changer.sum() == 1:
# old_correc = self.sp_theo['correc'][a_changer][0]
# if is_absorb(self.sp_theo['raie_ref'][a_changer]):
# sp_abs_old = self.make_sp_abs(self.sp_theo[a_changer])
# self.sp_theo['correc'][a_changer] = fact
# sp_abs_new = self.make_sp_abs(self.sp_theo[a_changer])
# self.sp_abs = self.sp_abs - sp_abs_old + sp_abs_new
# else:
# self.sp_synth += (fact-old_correc) * self.sp_theo['spectr'][a_changer][0]
# self.sp_theo['correc'][a_changer] = fact
# #self.sp_theo['spectr'][a_changer] *= fact/old_correc # adding this break the tool
# self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
# self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
# self.update_plot2()
#
def print_line(self, line, sort='lambda', reverse=False):
if type(line) == np.core.records.recarray:
sorts = np.argsort(line[sort])
if reverse:
sorts = sorts[::-1]
for isort in sorts:
self.print_line(line[isort])
return
print('{0[num]:>14d} {1:9s}{0[lambda]:11.3f}{0[l_shift]:6.3f}{0[i_rel]:10.3e}{0[i_cor]:7.3f}'\
' {0[ref]:>14d}{0[profile]:5d}{0[vitesse]:7.2f}{2:1s}'.format(line, line['id'].decode(), line['comment'].decode().strip()))
def get_line_info(self, line_num, sort='lambda', reverse=False):
line = None
refline = None
satellites = None
refline_num = -1
to_select = (self.liste_raies['num'] == line_num)
if to_select.sum() > 0:
line = self.liste_raies[to_select][0]
refline_num = line['ref']
if line is None:
refline_num = line_num
to_select = (self.sp_theo['raie_ref']['num'] == refline_num)
if to_select.sum() > 0:
refline = self.sp_theo['raie_ref'][to_select][0]
to_select = (self.liste_raies['ref'] == refline_num)
satellites = self.liste_raies[to_select]
order = np.argsort(satellites[sort])
satellites = np.array(satellites)[order]
return line, refline, satellites
def read_satellites(self, filename, refline_num):
with open(filename, 'r') as f:
satellites = []
for eachline in f:
if int(self.fieldStrFromLine(eachline,'ref')) == refline_num:
satellites.append(eachline)
return satellites
def cosmetic_line_unchanged_old(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.read_line(self.phyat_file, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line = line.rstrip()
keys = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse']
v0 = {i: np.float(self.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0 == v1:
return True
else:
return False
def cosmetic_line_unchanged(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.get_line(self.phyat_arr, line_num)
        if line is None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
keys = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse']
v0 = {i: np.float(line[i]) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0 == v1:
return True
else:
return False
def cosmetic_line_ok_old(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.read_line(self.phyat_file, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line = line.rstrip()
keys = [ 'lambda', 'i_rel' ]
v0 = {i: np.float(self.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0['i_rel'] != v1['i_rel'] or v0['lambda'] != v1['lambda']:
log_.warn('Error in cosmetic file for line {}\n'.format(str(line_num)), calling=self.calling)
log_.warn('(cosmetic) ' + line_c, calling=self.calling)
log_.warn('(database) ' + line, calling=self.calling)
return False
else:
return True
def cosmetic_line_ok(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.get_line(self.phyat_arr, line_num)
        if line is None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line_c_i_rel = np.float(self.fieldStrFromLine(line_c, 'i_rel'))
line_c_lambda = np.float(self.fieldStrFromLine(line_c, 'lambda'))
if line['i_rel'] != line_c_i_rel or line['lambda'] != line_c_lambda:
log_.warn('Error in cosmetic file for line {}\n'.format(str(line_num)), calling=self.calling)
log_.warn('(cosmetic) {}'.format(line_c), calling=self.calling)
log_.warn('(database) {}'.format(line), calling=self.calling)
                log_.warn('lambda rel error {}'.format((line['lambda'] - line_c_lambda)/line_c_lambda), calling=self.calling)
return False
else:
return True
def read_line(self, filename, line_num):
line = None
line_num_str = str(line_num)
k = len(line_num_str)
if not os.path.isfile(filename):
return None
else:
with open(filename, 'r') as f:
line = None
for eachline in f:
s = self.fieldStrFromLine(eachline,'num')
s = str(int(s))
if (int(s) == line_num) or (s[:k] == line_num_str and s[k:].strip('0') == ''):
line = eachline
break
log_.debug('Reading line {} from {}'.format(line_num, filename), calling=self.calling+'.read_line')
return line
def get_line(self, arr, line_num):
mask = arr['num'] == int(line_num)
if mask.sum() == 1:
line = arr[mask][0]
else:
line = None
return line
def fmt(self, field, value):
fmt = self.field_format[field]
return fmt.format(value)
def replace_field(self, line, field, value):
w = self.field_width[field]
if len(value) > w:
return None
elif len(value) < w:
a = self.field_align[field]
            value = '{:{a}{w}}'.format(value, a=a, w=w)
j = self.field_pos[field]
k = j + w
line = line[:j] + value + line[k:]
return line
def remove_line(self, filename, line_num):
line = self.read_line(filename, line_num)
if line == None:
return False
if not os.path.isfile(filename):
return False
else:
f = open(filename, 'r')
lines = f.readlines()
f.close()
i = lines.index(line)
if i >= 0:
del lines[i]
with open(filename, 'w') as f:
f.writelines(lines)
return True
else:
return False
def replace_line(self, filename, line):
line_num = int(self.fieldStrFromLine(line,'num'))
if os.path.isfile(filename):
lineNotFound = True
with open(filename, 'r') as f:
lines = f.read().splitlines()
for i in range(0, len(lines)):
curr_line = lines[i]
if int(self.fieldStrFromLine(curr_line,'num')) == line_num:
lines[i] = line + '\n'
lineNotFound = False
else:
lines[i] = lines[i] + '\n'
if lineNotFound:
lines.append(line)
else:
lines = [line]
with open(filename, 'w') as f:
f.writelines(lines)
def fieldStrFromLine(self, lineOfFile, field):
if lineOfFile == None:
return None
fieldStr = None
if field in self.fields:
i = self.field_pos[field]
j = i+self.field_width[field]
fieldStr = lineOfFile[i:j]
return fieldStr
def line_info(self, line_num, sat_info=True, print_header=True, sort='lambda', reverse=False):
if print_header:
print('\n{0:-^45}'.format(' INFO LINES '))
if type(line_num) == type(()) or type(line_num) == type([]):
for line in line_num:
self.line_info(line, sat_info=sat_info, print_header=False)
return
to_print = (self.liste_raies['num'] == line_num)
if to_print.sum() == 1:
raie = self.liste_raies[to_print][0]
self.print_line(raie)
if raie['ref'] != 0 and sat_info:
print('\nSatellite line of:')
self.line_info(raie['ref'], print_header=False)
to_print = (self.sp_theo['raie_ref']['num'] == line_num)
if to_print.sum() > 0 and sat_info:
raie = self.sp_theo['raie_ref'][to_print][0]
self.print_line(raie)
print('')
satellites_tab = (self.liste_raies['ref'] == raie['num'])
Nsat = satellites_tab.sum()
if Nsat > 0:
print('{0} satellites'.format(Nsat))
self.print_line(self.liste_raies[satellites_tab], sort=sort, reverse=reverse)
if self.sp_theo['correc'][to_print][0] != 1.0:
print('Intensity corrected by {0}'.format(self.sp_theo['correc'][to_print][0]))
if print_header:
print('-'*45)
def get_ref_list(self, ions):
ref_list = []
for ion in ions:
i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9).encode())[0]
if len(i_ion) == 0:
ref_list.append(-1)
for i in i_ion:
ref_list.append(self.sp_theo['raie_ref'][i][0])
return ref_list
def set_ion_list(self):
l = list(set(self.sp_theo['raie_ref']['id']))
self.ion_list = [i.decode('utf-8') for i in list(set(self.liste_raies['id']))]
self.true_ion_list = list(set([self.true_ion(ion) for ion in self.ion_list]))
def get_element_and_int_ion(self, ion):
ion = self.true_ion(ion)
k = ion.find('_')
if k > -1 and self.isRoman(ion[k+1:]):
element = ion[:k]
int_ion = self.roman_to_int(ion[k+1:])
else:
element = ion
int_ion = 999
return element, int_ion
def get_ion_int_from_ion_str(self, ion_str):
k_list = np.where(self.sp_theo['raie_ref']['id'] == self.fmt('id', ion_str).encode())[0]
if len(k_list) > 0:
return self.sp_theo['raie_ref'][k_list][0]['num']/1000000000
else:
return -1
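    # Build self.selected_ions_data for plotting: one entry per selected ion (or group of
    # ions, depending on 'diff_lines_by') holding [label, ions, reference lines, indexes
    # in sp_theo, process codes, color, linestyle], optionally sorted by element and
    # ionization stage.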
def set_selected_ions_data(self):
color = 'dummy'
linestyle = 'dummy'
pos_label = 0
pos_ion = 1
pos_ref = 2
pos_i_ion = 3
pos_proc = 4
pos_color = 5
pos_linestyle = 6
ions = []
selected_ions = self.get_conf('selected_ions')
for ion in selected_ions:
if ion not in ions:
ions.append(ion)
selected_ions = ions
colors = self.get_conf('color_selected_ions')
linestyles = [ 'solid', 'dashed', 'dashdot', 'dotted' ]
label_list = []
ref_list = []
proc_type = self.get_conf('process_code_format')
label_list = []
if self.get_conf('diff_lines_by') == 1:
for ion in selected_ions:
if ion == self.true_ion(ion) and not self.isPseudoIon(ion):
ion_int = self.get_ion_int_from_ion_str(ion)
for i in range(len(proc_type)):
ref_set = set()
for j in proc_type[i][0]:
proc = ion_int*10+j
i_list = np.where(self.liste_raies['num']/100000000 == proc)
                            if len(i_list[0]) > 0:
ref_set = ref_set.union(self.liste_raies['ref'][[i_list][0]])
if len(ref_set) > 0:
ref_list = list(ref_set)
i_ion = self.get_ref_index_from_ref_list(ref_list)
label = proc_type[i][1].format(ion)
label_list.append([label, [ion], ref_list, i_ion, proc_type[i][0], color, linestyle])
else:
i_list = np.where(self.liste_raies['id'] == self.fmt('id', ion).encode())
                    if len(i_list[0]) > 0:
ref_list = list(set(self.liste_raies['ref'][[i_list][0]]))
proc_set = set()
"""
for line in ref_list:
proc = int(str(line)[-9])
proc_set.add(proc)
"""
proc_set = {ion}
i_ion = self.get_ref_index_from_ref_list(ref_list)
label_list.append([ion, [self.true_ion(ion)], ref_list, i_ion, list(proc_set), color, linestyle])
else:
for ion in selected_ions:
ion = self.true_ion(ion)
all_ions = self.get_all_ions_from_ion(ion)
i_ion = set()
for subion in all_ions:
i_ion = i_ion.union(np.where(self.sp_theo['raie_ref']['id'] == subion.ljust(9).encode())[0])
i_ion = list(i_ion)
ref_list = []
for i in range(0, len(i_ion)):
ref_line = self.sp_theo['raie_ref'][i_ion[i]]['num']
ref_list.append(ref_line)
if self.get_conf('diff_lines_by') == 0:
for i in range(0, len(i_ion)):
ref_line = ref_list[i]
refline_str = str(ref_line).strip('0')
label = ion + ' (' + refline_str + ')'
label_list.append([label, [ion], [ref_line], list(i_ion[i:i+1]), [], color, linestyle])
else:
label_list.append([ion, [ion], ref_list, list(i_ion), [], color, linestyle])
# sorting
if self.get_conf('selected_ions_sort'):
for i in range(0,len(label_list)-1):
label1 = label_list[i][pos_label]
true_ion1 = self.true_ion(label1)
element1, int_ion1 = self.get_element_and_int_ion(true_ion1)
for j in range(i+1, len(label_list)):
label2 = label_list[j][pos_label]
true_ion2 = self.true_ion(label2)
element2, int_ion2 = self.get_element_and_int_ion(true_ion2)
if (element2 < element1) or ((element2 == element1) and (int_ion2 < int_ion1)) or ((true_ion2 == true_ion1) and (label2 < label1)):
prov = label_list[i]
label_list[i] = label_list[j]
label_list[j] = prov
label1 = label2
true_ion1 = true_ion2
element1 = element2
int_ion1 = int_ion2
if self.get_conf('diff_lines_by') == 3:
i = 0
while i < len(label_list)-1:
ion = label_list[i][pos_ion][0]
ion_set = set()
ion_set.add(ion)
element = self.element(ion)
j = i+1
while j < len(label_list):
ion2 = label_list[j][pos_ion][0]
element2 = self.element(ion2)
if element2 == element:
ion_set.add(str(ion2))
ref_list = list(set(label_list[i][pos_ref] + label_list[j][pos_ref]))
i_ion = list(set(label_list[i][pos_i_ion] + label_list[j][pos_i_ion]))
label_list.pop(j)
label_list[i][pos_ion] = list(ion_set)
label_list[i][pos_ref] = ref_list
label_list[i][pos_i_ion] = i_ion
else:
j += 1
i += 1
for i in range(len(label_list)):
ion_list = label_list[i][pos_ion]
ion_label = label_list[i][pos_label]
ion_list.sort()
ion = ion_list[0]
for j in range(1, len(ion_list)):
s = ion_list[j]
k = s.index('_')
if k > -1:
s = s[k+1:]
ion = ion + '+' + s
label_list[i][pos_label] = ion
for k in range(0, len(label_list)):
color = colors[k%len(colors)]
linestyle = linestyles[(k//len(colors))%len(linestyles)]
label_list[k][pos_color] = color
label_list[k][pos_linestyle] = linestyle
self.selected_ions_data = label_list
return
def get_refline_lists(self, ions):
ref_code_list = []
ref_index_list = []
ref_label_list = []
for ion in ions:
ion = self.true_ion(ion)
i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9).encode())[0]
if len(i_ion) == 0:
ref_code_list.append(-1)
ref_index_list.append(-1)
ref_label_list.append(ion.replace('_',' ') + ' (no lines)')
for i in i_ion:
ref_code_list.append(self.sp_theo['raie_ref'][i][0])
ref_index_list.append(i)
if len(i_ion) == 1:
ref_label_list.append(ion.replace('_',' '))
else:
ref_label_list.append(ion.replace('_',' ')+ ' - ' + str(np.where(i_ion==i)[0][0]))
# ref_label_list.append(ion.replace('_',' ')+ ' - ' + str(self.sp_theo['raie_ref'][i][0])[:-8])
return ref_code_list, ref_index_list, ref_label_list
def get_ref_index_list(self, ions):
ref_index_list = []
for ion in ions:
ion = self.true_ion(ion)
i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9).encode())[0]
for i in i_ion:
ref_index_list.append(i)
return ref_index_list
def get_ref_index_from_ref_list(self, ref_list):
ref_index_list = []
for ref_num in ref_list:
i_ion = np.where(self.sp_theo['raie_ref']['num'] == ref_num)[0]
for i in i_ion:
ref_index_list.append(i)
return ref_index_list
def get_line_from_reduce_code(self, code_str):
if not code_str.isdigit():
line = None
else:
line = self.read_line(self.phyat_file, int(code_str))
if line is None:
line = self.read_line(self.fic_model, int(code_str))
return line
def get_refline_from_code(self, code_str):
s = ''
for line in self.liste_raies:
if code_str == str(line[0]):
s = line['ref']
return s
def get_ion_from_code(self,code_str):
s = ''
for line in self.liste_raies:
if code_str == str(line[0]):
s = line['id']
return s
def isRoman(self, s):
isRom = True
if len(s.strip()) == 0:
isRom = False
else:
for ch in s:
if ch not in ['I', 'V', 'X', 'L']:
isRom = False
return isRom
def roman_to_int(self, s):
x = {'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
s = s.strip()
if len(s) == 0:
return -1
n = x[s[-1]]
n += sum([x[i] if x[i] >= x[j] else -x[i] for i, j in zip(s, s[1:])])
return n
def isPseudoIon(self, ion):
ion = self.true_ion(ion)
k = ion.rfind('_')
if k > 0 and self.isRoman(ion[k+1:]):
return False
else:
return True
def true_ion(self, ion):
k = ion.find('_')
if k > 0:
s = ion[k+1:]
while len(s) > 0 and s[-1] not in [ 'I', 'V', 'X' ]:
s = s[:-1]
if self.isRoman(s):
ion = ion[:k+1] + s
return ion.strip()
def get_all_ions_from_ion(self, ion):
ion = self.true_ion(ion)
ion_list = [ion]
k = len(ion)
for s in self.ion_list:
if len(s) > k and s[:k] == ion and s[k] not in [ 'I', 'V', 'X' ]:
ion_list.append(s.strip())
return list(set(ion_list))
def element(self, ion_str):
k = ion_str.find('_')
if k > -1:
return ion_str[:k]
else:
return ion_str.strip()
def get_ions_from_element(self, elem):
def charge(ion):
s = ion[ion.index('_')+1:]
if self.isRoman(s):
return self.roman_to_int(s)
else:
return s
ion_list = []
for line in self.liste_raies:
ion = str(line['id'])
ion = line['id'].decode()
if elem == self.element(ion):
ion_list.append(self.true_ion(ion))
ion_list = list(set(ion_list))
ion_list.sort(key=charge)
return ion_list
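    # Write the (optionally filtered and sorted) line list to 'save_lines_filename'; the
    # field separator and line ending are chosen from the file extension (plain text,
    # .tex or .csv).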
def save_lines(self):
if self.get_conf('show_selected_intensities_only'):
cut = self.get_conf('cut_plot2')
else:
cut = 0.0
ref_list = self.get_ref_list(self.get_conf('selected_ions'))
sort_list = [ 'lambda', 'i_rel', 'id' ]
k = self.get_conf('save_lines_sort')
sort = sort_list[k//2]
filename = self.get_conf('save_lines_filename')
extension = os.path.splitext(filename)[1][1:].lower()
sep = ' '
end = '\n'
if extension == 'tex':
sep = ' & '
end = ' {0}{0}{1}'.format('\\', '\n')
elif extension == 'csv':
sep = ' ; '
end = '\n'
sorts = np.argsort(self.liste_raies[sort])
if k%2 == 1:
sorts = sorts[::-1]
with open(filename, 'w') as f:
field_print = self.get_conf('save_lines_fields')
n = len(field_print)
if self.get_conf('save_lines_header'):
s = ''
for item in field_print:
f.write('{0:9s} : {1:>}\n'.format(item, self.field_tip[item]))
for item in field_print:
width = self.field_width[item]
align = '<'
if ( item == field_print[n-1] ):
add_s = end
else:
add_s = sep
s = s + str('{:{a}{w}s}{}'.format(item, add_s, a=align, w=width))
f.write('\n'+s+'\n')
for i_sort in sorts:
line = self.liste_raies[i_sort]
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
i_tot = line['i_rel'] * line['i_cor']
#if (abs(i_rel) > cut) and ( not self.get_conf('show_selected_ions_only') or line['ref'] in ref_list):
if (abs(i_tot) > cut) and ( not self.get_conf('show_selected_ions_only') or line['ref'] in ref_list):
s = ''
n = len(field_print)
for item in field_print:
thisformat = self.field_format[item]
if item == 'l_tot':
r = wl
elif item == 'i_tot':
r = i_tot
else:
r = line[item]
if item == 'l_shift':
s = s + ' '
s = s + str(thisformat.format(r))
if ( item == field_print[n-1] ):
s = s + end
else:
s = s + sep
f.write(s)
def plot1(self):
f, ax = plt.subplots()
ax.step(self.w_ori, self.f_ori, where='mid', label='Obs')
ax.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth')
ax.legend()
return f, ax
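    # Main plotting routine: draws the observed and synthetic spectra (ax1), the line
    # ticks (ax2) and the residuals (ax3), either stacked in one figure or split into
    # separate figures, with optional control buttons.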
def plot2(self, hr=False, cut=None, split=False, do_ax2 = True, do_ax3 = True,
do_buttons=True, xlims=None, fontsize=12, legend_loc=1, fig=None,
magenta_ref=None, magenta_lab=None,
cyan_ref = None, cyan_lab=None, call_init_axes=True):
        log_.message('entering plots, ID(fig1) {}'.format(id(self.fig1)), calling=self.calling)
self.hr = hr
self.split = split
self.do_ax2 = do_ax2
self.do_buttons = do_buttons
self.do_ax3 = do_ax3
if cut is not None:
self.set_conf('cut_plot2', cut)
self.ax2_fontsize = fontsize
self.legend_loc = legend_loc
if magenta_ref is not None:
self.plot_magenta = magenta_ref
self.label_magenta = magenta_lab
else:
self.plot_magenta = self.get_conf('plot_magenta')
self.label_magenta = self.get_conf('label_magenta')
if cyan_ref is not None:
self.plot_cyan = cyan_ref
self.label_cyan = cyan_lab
else:
self.plot_cyan = self.get_conf('plot_cyan')
self.label_cyan = self.get_conf('label_cyan')
if fig is None:
self.fig1 = plt.figure()
log_.message('creating new figure ID {}'.format(id(self.fig1)), calling=self.calling)
else:
self.fig1 = fig
self.fig1.clf()
log_.message('using argument figure ID: {} {}'.format(id(self.fig1), id(fig)), calling=self.calling)
if split:
if do_ax2:
self.fig2 = plt.figure()
if do_ax3:
self.fig3 = plt.figure()
self.ax1 = self.fig1.add_subplot(111)
if do_ax2:
self.ax2 = self.fig2.add_subplot(111, sharex=self.ax1)
if do_ax3:
self.ax3 = self.fig3.add_subplot(111, sharex=self.ax1)
else:
n_subplots = 1
i_ax2 = 2
i_ax3 = 2
if do_ax2:
n_subplots += 1
i_ax3 += 1
if do_ax3:
n_subplots += 1
self.ax1 = self.fig1.add_subplot(n_subplots, 1, 1)
if do_ax2:
self.ax2 = self.fig1.add_subplot(n_subplots, 1, i_ax2, sharex=self.ax1)
if do_ax3:
self.ax3 = self.fig1.add_subplot(n_subplots, 1, i_ax3, sharex=self.ax1)
self.plot_ax1(self.ax1, xlims=xlims)
if do_ax2:
self.plot_ax2(self.ax2)
if do_ax3:
self.plot_ax3(self.ax3)
if do_buttons:
self._make_buttons(split=split)
if call_init_axes:
self.init_axes()
self.restore_axes()
plt.subplots_adjust(hspace=0.0)
def plot_ax1(self, ax, xlims=None, show_legend=True):
if self.show_uncor_spec:
ax.step(self.w_obs, self.f_ori, where='mid', label='Uncorr', c='yellow', linewidth=1.5)
ax.step(self.w_ori, self.f_ori, where='mid', label='Obs', c='red', linewidth=1.5)
if self.sp_synth_lr is None:
return
self.ax1_line_synth = ax.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth', c='blue', linewidth=1.5)[0]
if self.hr:
ax.step(self.w, self.sp_synth, where='mid', c='green')
selected_ions = self.get_conf('selected_ions')
self.set_selected_ions_data()
label_list = self.selected_ions_data
pos_label = 0
pos_ion = 1
pos_ref = 2
pos_i_ion = 3
pos_proc = 4
pos_color = 5
pos_linestyle = 6
if selected_ions != [] and self.get_conf('plot_lines_of_selected_ions'):
j = self.get_conf('index_of_current_ion')
if j in range(0, len(selected_ions)):
ions = []
ions.append( selected_ions[j] )
else:
ions = selected_ions
if j in range(0, len(label_list)):
label_list = label_list[j:j+1]
for item in label_list:
label = item[pos_label].replace('_',' ')
i_ion = item[pos_i_ion]
color = item[pos_color]
linestyle = item[pos_linestyle]
y = 0
for i in range(0,len(i_ion)):
y = y + self.sp_theo['spectr'][i_ion][i]
ax.step(self.w, self.cont+y, where='mid', c=color, label=label, linestyle=linestyle )[0]
if show_legend:
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
else:
ax.legend().set_visible(False)
log_.debug('ax1 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
# mvfc: old routine, still needed to run without Qt4 or Qt5
def plot_ax2(self, ax):
if self.sp_synth_lr is None:
return
for line in self.liste_raies:
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
if (abs(i_rel) > self.get_conf('cut_plot2')):
ax.axvline( wl, ymin=0.2, ymax=0.8, color = 'blue', linestyle = 'solid', linewidth = 1.5 )
# ax.plot([wl, wl], [0, 1], color='blue')
# ax.text(wl, -0.2, '{0} {1:7.4f}'.format(line['id'], i_rel),
# rotation='vertical', fontsize=self.ax2_fontsize).set_clip_on(True)
log_.debug('ax2 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
def plot_line_ticks(self, ax, y1, y2, wmin=0., wmax=20000., show_legend=True):
pos_label = 0
pos_ion = 1
pos_ref = 2
pos_i_ion = 3
pos_proc = 4
pos_color = 5
pos_linestyle = 6
dy = (y2-y1)*0.15
#dy = (y2-y1)/2
if self.sp_synth_lr is None:
return
lcolor = self.get_conf('line_tick_color')
label_list = self.selected_ions_data
j = self.get_conf('index_of_current_ion')
if j in range(0, len(label_list)):
label_list = label_list[j:j+1]
for line in self.liste_raies:
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
i_tot = line['i_rel'] * line['i_cor']
if (wmin < wl) and (wl < wmax) and ((abs(i_tot) > self.get_conf('cut_plot2')) or ( not self.get_conf('show_selected_intensities_only'))):
if not self.get_conf('show_selected_ions_only'):
ax.axvline( wl, ymin=y1+dy, ymax=y2-dy, color = lcolor, linestyle = 'solid' )
#ax.axvline( wl, ymin=y1+dy, ymax=y2, color = lcolor, linestyle = 'solid' )
refline = line['ref']
ion = line['id'].decode().strip()
proc = int(str(line['num'])[-9])
for item in label_list:
label = item[pos_label]
ion_list = item[pos_ion]
ref_list = item[pos_ref]
color = item[pos_color]
linestyle = item[pos_linestyle]
proc_list = item[pos_proc]
if refline in ref_list and self.true_ion(ion) in ion_list and ( self.get_conf('diff_lines_by') != 1 or proc in proc_list or ion in proc_list ):
ax.axvline( wl, ymin=y1, ymax=y2, color = color, linestyle = linestyle, linewidth = 1.5 )
        # Add ticks to the figure legend when the spectra of the selected ions are not plotted
if show_legend:
if not self.get_conf('plot_lines_of_selected_ions') or self.get_conf('line_tick_ax') == 1:
for item in label_list:
label = item[pos_label].replace('_',' ')
color = item[pos_color]
linestyle = item[pos_linestyle]
ax.step( [0,0], [0,100], color = color, linestyle = linestyle, label = label )
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
else:
ax.legend().set_visible(False)
log_.debug('Line ticks drawn on ax ID {}'.format(id(ax)), calling=self.calling)
def plot_line_ticks_for(self, satellites, ion, line_num, refline, ax, y1, y2, wmin=0., wmax=20000., addGreenTickToLegend=True):
if self.sp_synth_lr is None:
return
l_shift_refline = np.float(self.fieldStrFromLine(refline,'l_shift'))
ion = ion.replace('_',' ').strip()
line_num = line_num.strip().strip('0')
label = ion + ' (' + line_num + ')'
color = 'green'
for line in satellites:
wl = np.float(self.fieldStrFromLine(line,'lambda')) + \
np.float(self.fieldStrFromLine(line,'l_shift')) + \
self.conf['lambda_shift'] + l_shift_refline
if (wmin < wl) and (wl < wmax):
ax.axvline( wl, ymin=y1, ymax=y2, color = color, linestyle = 'solid', linewidth = 2.5 )
if addGreenTickToLegend:
ax.step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
log_.debug('Line ticks drawn on ax ID {} for line {}'.format(id(ax), line_num), calling=self.calling)
def plot_ax3(self, ax, show_legend=True):
if self.sp_synth_lr is not None:
ax.plot((0, 1e10), (0.0, 0.0), c='green')
#ax.step(self.w, self.f - self.cont, where='mid', c = 'red', linestyle='--')
#ax.step(self.w_ori, self.f_ori - self.cont_lr, where='mid', label='Obs-Cont', c='red', linewidth=2.0, alpha=0.5)
#ax.step(self.w, self.sp_abs*5, where='mid', label='Abs', c='magenta')
ax.step(self.w_ori, self.f_ori - self.cont_lr, where='mid', label='Obs-Cont', c=(1.0, 0.0, 0.0, 0.5), linewidth=1.0)
ax.step(self.w_ori, self.f_ori - self.sp_synth_lr, where='mid', label='Obs-Synth', c='blue', linewidth=1.5)[0]
if show_legend:
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
else:
ax.legend().set_visible(False)
log_.debug('ax3 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
def update_plot2(self):
if self.ax1 is None:
return
if self.sp_synth_lr is None:
return
self.ax1_line_synth.remove()
self.ax1_line_synth = self.ax1.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth', c='blue', linewidth=1.5)[0]
self.ax1.legend(loc=self.legend_loc)
if self.plot_magenta is not None:
try:
self.ax1_line_magenta.remove()
except:
pass
i_magenta = np.where(self.sp_theo['raie_ref']['num'] == self.plot_magenta)[0]
if self.label_magenta is None:
self.label_magenta = self.sp_theo['raie_ref'][i_magenta]['id']
if len(i_magenta) == 1:
self.ax1_line_magenta = self.ax1.step(self.w, self.cont+self.sp_theo['spectr'][i_magenta][0], where='mid', c='magenta',
label=self.label_magenta, linestyle='--')[0]
if self.plot_cyan is not None:
try:
self.ax1_line_cyan.remove()
except:
pass
i_cyan = np.where(self.sp_theo['raie_ref']['num'] == self.plot_cyan)[0]
if self.label_cyan is None:
self.label_cyan = self.sp_theo['raie_ref'][i_cyan]['id']
if len(i_cyan) == 1:
self.ax1_line_cyan = self.ax1.step(self.w, self.cont+self.sp_theo['spectr'][i_cyan][0], where='mid', c='cyan',
label=self.label_cyan, linestyle='-')[0]
for i in np.arange(len(self.ax2.texts)):
self.ax2.texts.pop()
for i in np.arange(len(self.ax2.lines)):
self.ax2.lines.pop()
i_max = np.max(self.liste_raies['i_rel'])
for line in self.liste_raies:
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
if (abs(i_rel) > self.get_conf('cut_plot2')) & (wl > self.ax2.get_xlim()[0]) & (wl < self.ax2.get_xlim()[1]):
self.ax2.axvline( wl, ymin=0.2, ymax=0.8, color = 'blue', linestyle = 'solid', linewidth = 1.5 )
#self.ax2.plot([wl, wl], [0, 1], color='blue')
#self.ax2.text(wl, -0.2, '{0} {1:7.4f}'.format(line['id'], i_rel),
# rotation='vertical', fontsize=self.ax2_fontsize)
#self.ax2.set_ylim((-1.5, 1))
if self.do_ax3:
#self.ax3_line_diff.remove()
self.ax3_line_diff = self.ax3.step(self.w_ori, self.f_ori - self.sp_synth_lr, where='mid', c='blue')[0]
self.fig1.canvas.draw()
def init_axes(self):
self.x_plot_lims = self.get_conf('x_plot_lims')
if self.x_plot_lims is None:
self.x_plot_lims = (np.min(self.w), np.max(self.w))
self.y1_plot_lims = self.get_conf('y1_plot_lims')
if self.y1_plot_lims is None:
if self.sp_synth_lr is None:
self.y1_plot_lims = (np.min(self.f), np.max(self.f))
else:
mask = (self.w_ori > self.x_plot_lims[0]) & (self.w_ori < self.x_plot_lims[1])
self.y1_plot_lims = (np.min(self.sp_synth_lr[mask]), np.max(self.sp_synth_lr[mask]))
self.y2_plot_lims = self.get_conf('y2_plot_lims')
if self.y2_plot_lims is None:
            self.y2_plot_lims = (-0.5, 1.5)
self.y3_plot_lims = self.get_conf('y3_plot_lims')
if self.y3_plot_lims is None:
mask = (self.w_ori > self.x_plot_lims[0]) & (self.w_ori < self.x_plot_lims[1])
self.y3_plot_lims = (np.min((self.f - self.cont)[mask]), np.max((self.f - self.cont)[mask]))
log_.message('Axes initialized', calling=self.calling)
self.print_axes()
def save_axes(self):
if self.ax1 is not None:
self.x_plot_lims = self.ax1.get_xlim()
self.y1_plot_lims = self.ax1.get_ylim()
else:
self.x_plot_lims = None
self.y1_plot_lims = None
if self.ax2 is not None:
self.y2_plot_lims = self.ax2.get_ylim()
else:
self.y2_plot_lims = None
if self.ax3 is not None:
self.y3_plot_lims = self.ax3.get_ylim()
else:
self.y3_plot_lims = None
#log_.message('Axes saved', calling=self.calling)
self.print_axes()
def restore_axes(self):
if self.x_plot_lims is not None:
if self.ax1 is not None:
self.ax1.set_xlim(self.x_plot_lims)
log_.message('X-axes restored to {}'.format(self.ax1.get_xlim()), calling=self.calling)
else:
log_.message('ax1 is None', calling=self.calling)
else:
log_.message('x_plot_lims is None', calling=self.calling)
if self.y1_plot_lims is not None:
if self.ax1 is not None:
self.ax1.set_ylim(self.y1_plot_lims)
if self.y2_plot_lims is not None:
if self.ax2 is not None:
self.ax2.set_ylim(self.y2_plot_lims)
if self.y3_plot_lims is not None:
if self.ax3 is not None:
self.ax3.set_ylim(self.y3_plot_lims)
log_.message('Axes restored', calling=self.calling)
self.print_axes()
def print_axes(self):
log_.debug('{} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
def apply_post_proc(self):
if self.post_proc_file is not None and self.post_proc_file != "":
try:
user_module = {}
execfile(os.path.abspath(self.directory)+'/'+self.post_proc_file, user_module)
self.post_proc = user_module['post_proc']
log_.message('function post_proc read from {}'.format(self.post_proc_file), calling=self.calling)
except:
self.post_proc = None
log_.warn('function post_proc NOT read from {}'.format(self.post_proc_file), calling=self.calling)
if self.post_proc is not None:
self.post_proc(self.fig1)
def rerun(self):
self.run(do_synth = True, do_read_liste = True, do_profiles=True)
def replot2(self):
self.save_axes()
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.plot2(hr=self.hr, cut=self.get_conf('cut_plot2'), split=self.split,
do_ax2=self.do_ax2, do_ax3=self.do_ax3, do_buttons=self.do_buttons,
fontsize=self.ax2_fontsize, legend_loc=self.legend_loc, fig=self.fig1, call_init_axes=False)
self.fig1.canvas.draw()
def _make_buttons(self, split):
if split:
self.fig1.subplots_adjust(bottom=0.3)
else:
self.fig1.subplots_adjust(bottom=0.2)
self.buttons = {}
b_w = 0.06
b_h = 0.06
b_x0 = 0.05
b_y0 = 0.02
ax_zxm = self.fig1.add_axes([b_x0, b_y0, b_w, b_h])
self.buttons['ZX-'] = Button(ax_zxm, 'ZX-')
self.buttons['ZX-'].on_clicked(self._ZoomZXm)
ax_zxp = self.fig1.add_axes([b_x0, b_y0 + b_h, b_w, b_h])
self.buttons['ZX+'] = Button(ax_zxp, 'ZX+')
self.buttons['ZX+'].on_clicked(self._ZoomZXp)
ax_zym = self.fig1.add_axes([b_x0 + b_w, b_y0, b_w, b_h])
self.buttons['Zy-'] = Button(ax_zym, 'ZY-')
self.buttons['Zy-'].on_clicked(self._ZoomZYm)
ax_zyp = self.fig1.add_axes([b_x0 + b_w, b_y0 + b_h, b_w, b_h])
self.buttons['Zy+'] = Button(ax_zyp, 'ZY+')
self.buttons['Zy+'].on_clicked(self._ZoomZYp)
ax_sxm = self.fig1.add_axes([b_x0 + 2*b_w, b_y0, b_w, b_h])
self.buttons['SX-'] = Button(ax_sxm, 'SX-')
self.buttons['SX-'].on_clicked(self._ZoomSXm)
ax_sxp = self.fig1.add_axes([b_x0 + 2*b_w, b_y0 + b_h, b_w, b_h])
self.buttons['SX+'] = Button(ax_sxp, 'SX+')
self.buttons['SX+'].on_clicked(self._ZoomSXp)
ax_sym = self.fig1.add_axes([b_x0 + 3*b_w, b_y0, b_w, b_h])
self.buttons['Sy-'] = Button(ax_sym, 'SY-')
self.buttons['Sy-'].on_clicked(self._ZoomSYm)
ax_syp = self.fig1.add_axes([b_x0 + 3*b_w, b_y0 + b_h, b_w, b_h])
self.buttons['Sy+'] = Button(ax_syp, 'SY+')
self.buttons['Sy+'].on_clicked(self._ZoomSYp)
ax_curson = self.fig1.add_axes([b_x0 + 5*b_w, b_y0, 2*b_w, b_h])
self.buttons['CursOn'] = Button(ax_curson, 'CursOn')
self.buttons['CursOn'].on_clicked(self._cursOn)
ax_curson = self.fig1.add_axes([b_x0 + 5*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['CursOff'] = Button(ax_curson, 'CursOff')
self.buttons['CursOff'].on_clicked(self._cursOff)
ax_rerun = self.fig1.add_axes([b_x0 + 7*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['Rerun'] = Button(ax_rerun, 'Rerun')
self.buttons['Rerun'].on_clicked(self._call_rerun)
ax_adjust = self.fig1.add_axes([b_x0 + 7*b_w, b_y0, 2*b_w, b_h])
self.buttons['Adjust'] = Button(ax_adjust, 'Adjust')
self.buttons['Adjust'].on_clicked(self._call_adjust)
ax_readobs = self.fig1.add_axes([b_x0 + 9*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['ReadObs'] = Button(ax_readobs, 'ReadObs')
self.buttons['ReadObs'].on_clicked(self._call_readobs)
ax_replot = self.fig1.add_axes([b_x0 + 9*b_w, b_y0, 2*b_w, b_h])
self.buttons['RePlot'] = Button(ax_replot, 'RePlot')
self.buttons['RePlot'].on_clicked(self._call_replot)
def _ZoomZXm(self, event=None):
self._Zoom('ZX-')
def _ZoomZXp(self, event=None):
self._Zoom('ZX+')
def _ZoomZYm(self, event=None):
self._Zoom('ZY-')
def _ZoomZYp(self, event=None):
self._Zoom('ZY+')
def _ZoomSXm(self, event=None):
self._Zoom('SX-')
def _ZoomSXp(self, event=None):
self._Zoom('SX+')
def _ZoomSYm(self, event=None):
self._Zoom('SY-')
def _ZoomSYp(self, event=None):
self._Zoom('SY+')
def _Zoom(self, zoom_direction):
"""
zoom_direction = 'ABC', with A in ['S', 'Z'], B in ['X', 'Y'], and C in ['+', '-']
"""
xmin, xmax = self.ax1.get_xlim()
dx = xmax - xmin
ymin, ymax = self.ax1.get_ylim()
dy = ymax - ymin
if zoom_direction[0] == 'S':
if zoom_direction[2] == '+':
coeff = self.zoom_fact
elif zoom_direction[2] == '-':
coeff = -self.zoom_fact
if zoom_direction[1] == 'X':
xmin += coeff * dx
xmax += coeff * dx
elif zoom_direction[1] == 'Y':
ymin += coeff * dy
ymax += coeff * dy
elif zoom_direction[0] == 'Z':
if zoom_direction[2] == '+':
coeff = self.zoom_fact
elif zoom_direction[2] == '-':
coeff = -self.zoom_fact
if zoom_direction[1] == 'X':
xmin += coeff * dx
xmax -= coeff * dx
elif zoom_direction[1] == 'Y':
ymin += coeff * dy
ymax -= coeff * dy
self.ax1.set_xlim((xmin, xmax))
self.ax1.set_ylim((ymin, ymax))
self.fig1.canvas.draw()
def _cursOn(self, event=None):
self._cid = self.fig1.canvas.mpl_connect('button_press_event', self._curs_onclick)
log_.message('Cursor ON', calling=self.calling)
def _cursOff(self, event=None):
if self._cid is not None:
self.fig1.canvas.mpl_disconnect(self._cid)
log_.message('Cursor OFF', calling=self.calling)
def get_nearby_lines(self, w1, w2, do_print=True, sort='i_tot', reverse=True):
        if w1 is None or w2 is None:
return None
w = (w1 + w2)/2
w_lim = abs(w2 - w1)/2
tt = (np.abs(self.liste_raies['lambda'] + self.liste_raies['l_shift'] + self.conf['lambda_shift'] - w) < w_lim)
nearby_lines = self.liste_raies[tt]
i_tot = nearby_lines['i_rel']*nearby_lines['i_cor']
if sort == 'i_tot':
sorts = np.argsort(i_tot)
else:
sorts = np.argsort(nearby_lines[sort])
if reverse:
sorts = sorts[::-1]
nearby_lines = np.array(nearby_lines)[sorts]
if tt.sum() > 0:
if do_print:
print('\n{0:-^45}'.format(' CURSOR on {0:.3f} '.format(w)))
self.print_line(self.liste_raies[tt])
print('-'*45)
return nearby_lines
def nearby_lines(self, event, do_print=True, sort='i_tot', reverse=True):
nearby_lines = None
w = event.xdata
try:
if (w > self.ax1.get_xlim()[1]) or (w < self.ax1.get_xlim()[0]) or (event.button == 2):
self._cursOff()
return None
except AttributeError:
log_.warn('ax1 not defined', calling=self.calling)
return None
try:
if event.button in (1,3):
if self.firstClick:
self.cursor_w0 = w
self.firstClick = False
else:
self.cursor_w1 = self.cursor_w0
self.cursor_w2 = w
self.firstClick = True
w = (self.cursor_w1+self.cursor_w2)/2
w_lim = self.cursor_width * (self.ax1.get_xlim()[1] - self.ax1.get_xlim()[0])
if abs(self.cursor_w2-w) < w_lim/10:
self.cursor_w1 = self.limit_sp[0]
self.cursor_w2 = self.limit_sp[1]
else:
if abs(self.cursor_w2-w) > w_lim:
w_lim = abs(self.cursor_w2-w)
w_lim = abs(self.cursor_w2-w)
self.cursor_w1 = w - w_lim
self.cursor_w2 = w + w_lim
nearby_lines = self.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=do_print)
return nearby_lines
except AttributeError:
log_.warn('ax1 not defined', calling=self.calling)
return None
def _curs_onclick(self, event):
wl = event.xdata
try:
if (wl > self.ax1.get_xlim()[1]) or (wl < self.ax1.get_xlim()[0]) or (event.button == 2):
self._cursOff()
return None
except AttributeError:
log_.warn('ax1 not defined', calling=self.calling)
return None
try:
if event.button in (1,3):
wl_lim = self.cursor_width * (self.ax1.get_xlim()[1] - self.ax1.get_xlim()[0])
tt = (np.abs(self.liste_raies['lambda'] + self.liste_raies['l_shift'] + self.conf['lambda_shift'] - wl) < wl_lim)
if tt.sum() > 0:
print('\n{0:-^45}'.format(' CURSOR on {0:.3f} '.format(wl)))
self.print_line(self.liste_raies[tt])
print('-'*45)
except AttributeError:
log_.warn('ax1 not defined', calling=self.calling)
return None
def _call_adjust(self, event=None):
self.adjust()
self.update_plot2()
def _call_rerun(self, event=None):
self.rerun()
self.replot2()
def _call_readobs(self, event=None):
self.init_obs(spectr_obs=None, sp_norm=None, obj_velo=None, limit_sp=self.limit_sp)
self.init_red_corr()
self.make_continuum()
self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
self.replot2()
def _call_replot(self, event=None):
self.replot2()
def plot_indiv_sp(self, y_shift_coeff=None, legend_zoom=.115):
"""
        Seems buggy, losing ax1.xlim when plotted.
"""
if y_shift_coeff is None:
y_shift_coeff = np.max(self.sp_theo['spectr'])/100.
fig_indiv_spectra = plt.figure()
if self.ax1 is not None:
ax_is = fig_indiv_spectra.add_subplot(111, sharex=self.ax1)
else:
ax_is = fig_indiv_spectra.add_subplot(111)
for i in np.arange(self.n_sp_theo):
label = self.sp_theo['raie_ref']['id'][i]
ax_is.plot(self.w, self.sp_theo['spectr'][i] + y_shift_coeff*(self.n_sp_theo - i), label=label)
ax_is.set_ylim((0, y_shift_coeff*(i+2)))
ax_is.legend(fontsize= i * legend_zoom )
self.fig_indiv_spectra = fig_indiv_spectra
self.ax_indiv_spectra = ax_is
def plot_profile(self):
self.fig_prof = plt.figure()
self.ax_prof = plt.semilogy(self.filter_)
def main_loc(config_file):
"""
    For use when Qt4 is not available.
Usage:
from pyssn.core.spectrum import main_loc
sp = main_loc('./s6302_n_c_init.py')
"""
sp = spectrum(config_file=config_file)
fig = plt.figure(figsize=(20, 7))
sp.plot2(fig=fig)
sp.save_axes()
plt.show()
return sp
def main():
"""
"""
parser = get_parser()
args = parser.parse_args()
if args.file is None:
log_.error('A file name is needed, use option -f')
log_.level = args.verbosity
sp = spectrum(config_file=args.file, post_proc_file=args.post_proc)
fig = plt.figure(figsize=(20, 7))
sp.plot2(fig=fig)
sp.apply_post_proc()
plt.show()
| Morisset/pySSN | pyssn/core/spectrum.py | Python | gpl-3.0 | 124,663 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
def factors(b):
for i in range(1, b+1):
if (b % i == 0):
print(i)
if __name__ == '__main__':
b = input('Your Number Please: ')
b = float(b)
if (b > 0) and b.is_integer():
factors(int(b))
else:
print('Please enter a positive integer')
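# Sample run (illustrative): an input of 12 prints the divisors 1, 2, 3, 4, 6 and 12, one per line.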
| davidam/python-examples | basics/factors.py | Python | gpl-3.0 | 1,210 |
#! /usr/bin/env python
import gflex
import numpy as np
from matplotlib import pyplot as plt
flex = gflex.F1D()
flex.Quiet = True
flex.Method = 'FD' # Solution method: * FD (finite difference)
# * SAS (superposition of analytical solutions)
# * SAS_NG (ungridded SAS)
flex.Solver = 'direct' # direct or iterative
# convergence = 1E-3 # convergence between iterations, if an iterative solution
# method is chosen
flex.g = 9.8 # acceleration due to gravity
flex.E = 65E9 # Young's Modulus
flex.nu = 0.25 # Poisson's Ratio
flex.rho_m = 3300. # MantleDensity
flex.rho_fill = 1000. # InfillMaterialDensity
flex.Te = 30000.#*np.ones(500) # Elastic thickness -- scalar but may be an array
#flex.Te[-3:] = 0
flex.qs = np.zeros(300); flex.qs[100:200] += 1E6 # surface load stresses
flex.dx = 4000. # grid cell size [m]
flex.BC_W = '0Displacement0Slope' # west boundary condition
flex.BC_E = '0Moment0Shear' # east boundary condition
flex.sigma_xx = 100. # Normal stress on the edge of the plate
flex.initialize()
flex.run()
flex.finalize()
# If you want to plot the output
flex.plotChoice='combo'
# An output file for deflections could also be defined here
# flex.wOutFile =
flex.output() # Plots and/or saves output, or does nothing, depending on
# whether flex.plotChoice and/or flex.wOutFile have been set
# TO OBTAIN OUTPUT DIRECTLY IN PYTHON, you can assign the internal variable,
# flex.w, to another variable -- or as an element in a list if you are looping
# over many runs of gFlex:
deflection = flex.w
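# Purely illustrative follow-up (not part of the original script): the deflection array
# can be plotted against a coordinate rebuilt from the grid spacing, e.g.
#   x_km = np.arange(len(deflection)) * flex.dx / 1000.
#   plt.plot(x_km, deflection)
#   plt.xlabel('Distance [km]'); plt.ylabel('Deflection [m]')
#   plt.show()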
| awickert/gFlex | input/run_in_script_1D.py | Python | gpl-3.0 | 1,622 |
'''
Created on Feb 19, 2015
@author: rch
'''
import types
from traits.api import \
provides, \
Array
import numpy as np
from oricreate.opt import \
IGu
from oricreate.viz3d import \
Visual3D
from .gu import Gu
from .gu_psi_constraints_viz3d2 import \
GuPsiConstraintsViz3D
@provides(IGu)
class GuPsiConstraints(Gu, Visual3D):
    '''Explicit constraints for selected degrees of freedom.
'''
psi_constraints = Array
'''Specification of explicit constraint for particular dihedral psis.
psi constraints are specified as a list of equations with values
to be inserted on the left- and the right-hand-side of the equation system.
The dof is identified with the number of the node and the direction (0,1,2)
for x,y,z values::
[([(line1, coefficient1), ... ], value1 ),
([(line2, coefficient2), ... ], value2 )
... ]
    Convenience constructors for the most common cases are provided:
:func:`oricrete.fix_psis` and :func:`oricrete.link_psis`.
'''
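    # A minimal, purely illustrative value (the line indices are hypothetical):
    # psi_constraints = [([(3, 1.0)], 0.0),             # force psi of line 3 to zero
    #                    ([(4, 1.0), (7, -1.0)], 0.0)]  # link psi of lines 4 and 7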
def validate_input(self):
cp = self.formed_object
for i, psi_cnstr in enumerate(self.psi_constraints): # @UnusedVariable
lhs, rhs = psi_cnstr # @UnusedVariable
for l, c in lhs: # @UnusedVariable
if cp.L_iL[l] < 0:
raise IndexError('GuPsiConstraint: line index %d does '
'not refer to an interior line: '
'must be one of %s' % (l, cp.iL))
def __str__(self):
s = 'Gu: %s - %d\n' % (self.label, len(self.psi_constraints))
cp = self.formed_object
iL_psi = cp.iL_psi
for i, psi_cnstr in enumerate(self.psi_constraints):
s += '#:%3d;\n' % i
lhs, rhs = psi_cnstr
for l, c in lhs: # @UnusedVariable
s += '\t+ l:%3d; c:%g; ' % (l, c)
il = cp.L_iL[l]
s += ' il: %d = %g\n' % (il, c * iL_psi[il])
s += '\t= r: %s\n' % str(rhs)
return s
def get_G(self, t=0):
''' Calculate the residue for given constraint equations
'''
cp = self.formed_object
iL_psi = cp.iL_psi
G = np.zeros((len(self.psi_constraints)), dtype='float_')
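        # One residual per constraint equation: G[i] = sum_l c_l * psi_l - rhs,
        # where psi_l is the current dihedral angle of interior line l and rhs
        # may be a constant or a function of t.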
for i, psi_cnstr in enumerate(self.psi_constraints):
lhs, rhs = psi_cnstr
for l, c in lhs: # @UnusedVariable
il = cp.L_iL[l]
G[i] += c * iL_psi[il]
G[i] -= rhs(t) if isinstance(rhs, types.FunctionType) else rhs
return G
def get_G_du(self, t=0.0):
''' Calculate the residue for given constraint equations
'''
cp = self.formed_object
iL_psi_du = cp.iL_psi_du
G_du = np.zeros((len(self.psi_constraints), cp.n_dofs),
dtype='float_')
for i, psi_cnstr in enumerate(self.psi_constraints):
lhs, rhs = psi_cnstr # @UnusedVariable
for l, c in lhs: # @UnusedVariable
il = cp.L_iL[l]
G_du[i, :] += c * iL_psi_du[il, :].flatten()
return G_du
viz3d_classes = dict(psi_constraints=GuPsiConstraintsViz3D)
| simvisage/oricreate | oricreate/gu/gu_psi_constraints.py | Python | gpl-3.0 | 3,250 |
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='django_linotp',
version='0.1',
description='linotp authentication for django',
author='Cornelius Koelbel',
license='GPL v3, (C) Cornelius Koelbel',
author_email='[email protected]',
url='http://www.github.com/cornelinux/django-linotp-auth',
install_requires=[
"django",
],
classifiers=[
"License :: OSI Approved :: GNU General Public License v3",
"Programming Language :: Python",
"Topic :: Internet",
"Topic :: Security",
],
packages = [ 'django_linotp' ]
)
| cornelinux/django-linotp-auth | setup.py | Python | gpl-3.0 | 703 |
#!/usr/bin/env python
# -*- coding:Utf-8 -*-
import os
import sys
import django
from django.core.management import call_command
HERE = os.path.realpath(os.path.dirname(__file__) + '')
sys.path.insert(0, os.path.abspath('./'))
# setup Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'remotecare.settings'
django.setup()
try:
# Get the list of applications from the settings
from django.conf import settings
except ImportError:
raise ImportError("The script should be run from the project root")
class Modules(object):
"""
auto generate template directory structure
"""
def __init__(self):
self.internal_apps = {}
self.fname = settings.DS_FILENAME
def create_dir(self, directory_path):
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def write_file(self, path_and_file_name, lines):
f = open(path_and_file_name, 'w')
f.writelines(lines)
f.close()
def write(self):
"""Write the created list in the new file"""
app_lines = ['Remote Care\'s documentation']
app_lines.append('=' * len(app_lines[0]))
app_lines.append('')
app_lines.append('Contents:')
app_lines.append('')
app_lines.append('.. toctree::')
app_lines.append(' :maxdepth: 1')
app_lines.append('')
for internal_app in sorted(self.internal_apps):
app_lines.append(' ' + internal_app + '/index.rst')
if '.' in internal_app:
internal_app_dir = '/'.join(internal_app.split('.'))
else:
internal_app_dir = internal_app
self.create_dir(settings.DS_ROOT + '/' + internal_app_dir)
module_lines = []
module_lines.append(internal_app)
module_lines.append('=' * len(internal_app))
module_lines.append('.. automodule:: ' + internal_app)
module_lines.append('')
module_lines.append('Contents:')
module_lines.append('')
module_lines.append('.. toctree::')
module_lines.append(' :maxdepth: 2')
module_lines.append('')
for module in sorted(self.internal_apps[internal_app]):
module_lines.append(' ' + module + '.rst')
self.write_file(
settings.DS_ROOT + '/' +
internal_app_dir + '/' + module + '.rst',
self.internal_apps[internal_app][module])
self.write_file(
settings.DS_ROOT + '/' +
internal_app_dir + '/index.rst',
self.add_lf(module_lines))
app_lines.append('')
app_lines.append('Indices and tables')
app_lines.append('==================')
app_lines.append('')
app_lines.append('* :ref:`genindex`')
app_lines.append('* :ref:`modindex`')
app_lines.append('* :ref:`search`')
self.write_file(settings.DS_ROOT + '/index.rst',
self.add_lf(app_lines))
def process_app_dict(self, app_dict, path):
"""recursively process the app_dict"""
index_rst = []
if path == '':
index_rst.append('Remote Care\'s documentation')
index_rst.append('=' * len(index_rst[0]))
index_rst.append('')
index_rst.append('.. toctree::')
index_rst.append('')
index_rst.append(' main.rst')
index_rst.append(' install.rst')
index_rst.append('')
else:
app = path.split('/')[-2]
index_rst.append(app)
index_rst.append('=' * len(app))
index_rst.append('.. automodule:: ' + path.replace('/', '.')[:-1])
index_rst.append('')
self.create_dir(settings.DS_ROOT + '/' + path)
if app_dict != {}:
index_rst.append(':subtitle:`Packages:`')
index_rst.append('')
index_rst.append('.. toctree::')
index_rst.append(' :maxdepth: 2')
index_rst.append('')
for app in app_dict:
if path:
app_name = path.replace('/', '.') + app
else:
app_name = app
index_rst.append(' ' + app + '/index.rst')
self.process_app_dict(app_dict[app], path + app + '/')
app_name = path.replace('/', '.')
if app_name != '':
app_instance = App(app_name)
if len(app_instance.modules) > 0:
index_rst.append('')
index_rst.append(':subtitle:`Modules:`')
index_rst.append('')
index_rst.append('.. toctree::')
index_rst.append(' :maxdepth: 1')
index_rst.append('')
self.create_dir(settings.DS_ROOT + '/' + path)
for module in app_instance.modules:
index_rst.append(' ' + module + '.rst')
module_lines = []
module_lines.append(module + '.py')
module_lines.append("-" * len(module_lines[0])),
module_lines.append('')
module_lines.append('.. toctree::')
module_lines.append(' :maxdepth: 2')
module_lines.append('')
module_lines.append('')
module_lines.append(".. automodule:: %s%s" %
(app_name, module))
module_lines.append(" :members:")
module_lines.append(" :show-inheritance:")
self.write_file(
settings.DS_ROOT + '/' +
path + module + '.rst',
self.add_lf(module_lines))
if path == '':
index_rst.append('')
index_rst.append('Indices and tables')
index_rst.append('==================')
index_rst.append('')
index_rst.append('* :ref:`genindex`')
index_rst.append('* :ref:`modindex`')
index_rst.append('* :ref:`search`')
self.write_file(settings.DS_ROOT + '/' +
path + 'index.rst',
self.add_lf(index_rst))
def add_lf(self, l):
"""Append line feed \n to all elements of the given list"""
return ["%s\n" % line for line in l]
class App(object):
"""Application with its name and the list of python files it contains"""
def __init__(self, name):
self.name = name
self.is_internal = self.name in os.listdir(HERE)
self.path = self.get_path()
self.modules = self.get_modules()
def get_path(self):
"""return absolute path for this application"""
try:
path = __import__(self.name).__path__[0]
splitedName = self.name.split(".")
if len(splitedName) > 1:
path = os.path.join(path, *splitedName[1:])
return path
except ImportError:
print(("The application %s couldn't" +
" be autodocumented" % self.name))
return False
def get_modules(self):
"""Scan the repository for any python files"""
if not self.path:
return []
# Move inside the application
os.chdir(self.path)
modules = [f.split(".py")[0] for f in os.listdir(".") if f not
in settings.DS_EXCLUDED_MODULES and f.endswith(".py")]
        # Remove all irrelevant modules. A module is relevant if it
        # contains a function or class.
not_relevant = []
for module in modules:
f_module = open("%s.py" % module, "r")
content = f_module.read()
f_module.close()
keywords = ["def", "class"]
relevant = sum([value in content for value in keywords])
if not relevant:
not_relevant.append(module)
# print "%s.%s not relevant, removed" % (self.name, module)
[modules.remove(module) for module in not_relevant]
return modules
def has_description(self):
"""Get the application docstring from __init__.py if it exists"""
f_init = open("%s/__init__.py" % self.path, "r")
content = f_init.read()
if '"""' in content or "'''" in content:
return True
return False
def create_app_dict(l_apps):
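    # Turn dotted app labels into a nested dict; illustrative example:
    # ['apps.patient', 'apps.account', 'core'] ->
    # {'apps': {'patient': {}, 'account': {}}, 'core': {}}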
app_dict = {}
for app in l_apps:
if '.' in app:
pointer = app_dict
splitted = app.split('.')
for index, app_name in enumerate(splitted):
if app_name in pointer:
pointer = pointer[app_name]
else:
pointer.update({app_name: {}})
pointer = pointer[app_name]
else:
if app not in app_dict:
app_dict.update({app: {}})
return app_dict
def main():
# Define some variables
settings.DS_ROOT = getattr(settings, "DS_ROOT",
os.path.join(HERE, "doc/source"))
settings.DS_MASTER_DOC = getattr(settings, "DS_MASTER_DOC", "index.rst")
settings.DS_FILENAME = getattr(settings, "DS_FILENAME", "auto_modules")
settings.DS_EXCLUDED_APPS = getattr(settings, "DS_EXCLUDED_APPS", [])
settings.DS_EXCLUDED_MODULES = getattr(
settings, "DS_EXCLUDED_MODULES",
["__init__.py", ])
# Create a file for new modules
f_modules = Modules()
# Write all the apps autodoc in the newly created file
l_apps = set(settings.PROJECT_APPS + settings.EXTRA_DOC_APPS) -\
set(settings.DS_EXCLUDED_APPS)
app_dict = create_app_dict(l_apps)
f_modules.process_app_dict(app_dict, '')
# Create dot files
call_command('graph_models',
'account', 'healthperson', 'healthprofessional',
'secretariat', 'patient', 'management',
exclude_models='LoginSMSCode,OldPassword,' +
'PasswordChangeRequest,AgreedwithRules,' +
'PolymorphicModel,LoginAttempt,' +
'AbstractBaseUser,EncryptModel,' +
'PermissionsMixin,Hospital,Group,Permission',
disable_fields=True,
inheritance=True,
outputfile=settings.DS_ROOT +
'/_static/user_healthperson.dot')
include_list = ['QuestionnaireRequest',
'RequestStep',
'WizardDatabaseStorage',
'QuestionnaireBase']
exclude = []
import inspect
from apps.questionnaire import models as models_module
for name, obj in inspect.getmembers(models_module):
if inspect.isclass(obj) and name not in include_list:
exclude.append(name)
# Create dot files
call_command('graph_models',
'questionnaire',
exclude_models=','.join(exclude),
disable_fields=True,
inheritance=True,
outputfile=settings.DS_ROOT + '/_static/questionnaire.dot')
if __name__ == '__main__':
main()
| acesonl/remotecare | remotecare/generate_autodoc.py | Python | gpl-3.0 | 11,308 |
#!/usr/bin/env python
fvcf = open(r'/home/Lab/homo/Neandertal/vindija/simulationchr21/AltaiNea.hg19_1000g.21.mod.vcf','r')
f2 = open(r'/home/Lab/homo/Neandertal/vindija/simulationchr21/AltaiNea.hg19_1000g.21.indel_removed.vcf','w')
f3 = open(r'/home/Lab/homo/Neandertal/vindija/simulationchr21/AltaiNea.hg19_1000g.21.indel.vcf','w')
indel = 0
for line in fvcf:
if line[0] == '#':
f2.write(line)
continue
col = line.split('\t')
chromosome = col[0]
pos = int(col[1])
ref = col[3]
alt = col[4]
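    # Records whose REF or ALT is not a single base are excluded from the
    # indel-removed file; of those, only records without a multi-allelic ALT
    # (no ',') are counted and written to the indel file.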
if len(ref) != 1 or len(alt) != 1:
if not (',' in alt):
indel += 1
f3.write(line)
continue
f2.write(line)
print(indel)
fvcf.close()
f2.close()
| BoyanZhou/AntCaller | scripts used in study/Simulation/diploid simulation/indel-remove.py | Python | gpl-3.0 | 755 |
# cc-turtle-bot
# Copyright (C) 2015 Collin Eggert
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ccturtle.sqlitestorageitem import SQLiteStorageItem
class Building(SQLiteStorageItem):
def __init__(self, x, y, z, building_type, mk=1, id=None, plotids=None):
self.x = x
self.y = y
self.z = z
self.building_type = building_type
self.mk = mk
self.id = id
self.plotids = plotids
def sql(self):
return (self.id, self.x, self.y, self.z, self.building_type, self.mk)
@staticmethod
def args():
return "id x y z building_type mk".split(" ") | AHelper/cc-turtle-bot | ccturtle/building.py | Python | gpl-3.0 | 1,183 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.status_in_project'
db.add_column('pledger_profile', 'status_in_project',
self.gf('django.db.models.fields.CharField')(default='sole_developer', max_length=80),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.status_in_project'
db.delete_column('pledger_profile', 'status_in_project')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pledger.donationsubscription': {
'Meta': {'object_name': 'DonationSubscription'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'datetime_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'needs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['project.ProjectNeed']", 'through': "orm['pledger.DonationSubscriptionNeeds']", 'symmetrical': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'pledger.donationsubscriptionneeds': {
'Meta': {'object_name': 'DonationSubscriptionNeeds'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'donation_subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pledger.DonationSubscription']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'need': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.ProjectNeed']"})
},
'pledger.donationtransaction': {
'Meta': {'object_name': 'DonationTransaction'},
'accepting_goal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.ProjectGoal']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'accepting_goal_datetime_ending': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'accepting_goal_key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'accepting_goal_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'accepting_need': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.ProjectNeed']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'accepting_need_key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'accepting_need_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'accepting_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'accepting_project'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['project.Project']"}),
'accepting_project_key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'accepting_project_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.ProjectOtherSource']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'other_source_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pledger_donation_subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pledger.DonationSubscription']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'pledger_donation_type': ('django.db.models.fields.CharField', [], {'default': "'onetime'", 'max_length': '7', 'null': 'True', 'blank': 'True'}),
'pledger_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pledger_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'pledger_username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'redonation_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redonation_project'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['project.Project']"}),
'redonation_project_key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'redonation_project_title': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'redonation_transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pledger.DonationTransaction']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'transaction_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'transaction_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'transaction_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'transaction_status': ('django.db.models.fields.CharField', [], {'default': "'unpaid'", 'max_length': '64'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'default': "'pledge'", 'max_length': '64'})
},
'pledger.profile': {
'Meta': {'object_name': 'Profile', '_ormbases': ['auth.User']},
'api_token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'donation_amount_is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'projects_list_is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_in_project': ('django.db.models.fields.CharField', [], {'default': "'sole_developer'", 'max_length': '80'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['auth.User']"})
},
'project.project': {
'Meta': {'object_name': 'Project'},
'brief': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['project.ProjectCategory']", 'symmetrical': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_refused_to_give_to_bitfund': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unclaimed'", 'max_length': '80'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'project.projectcategory': {
'Meta': {'object_name': 'ProjectCategory'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'project.projectgoal': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectGoal'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '0'}),
'brief': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'date_ending': ('django.db.models.fields.DateTimeField', [], {}),
'date_starting': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'long_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}),
'short_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vimeo_video_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'youtube_video_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'project.projectneed': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectNeed'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '0', 'blank': 'True'}),
'brief': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'date_ending': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starting': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'project.projectothersource': {
'Meta': {'object_name': 'ProjectOtherSource'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'brief': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 14, 0, 0)'}),
'date_received': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_monthly': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['pledger'] | alexykot/bitfund | bitfund/pledger/migrations/0007_auto__add_field_profile_status_in_project.py | Python | gpl-3.0 | 16,425 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { u'attendance': { 'clipInput': True,
'fieldname': u'attendance',
'maxval': 36067,
'minval': 0,
'n': 50,
'name': u'attendance',
'type': 'AdaptiveScalarEncoder',
'w': 7},
u'daynight': { 'fieldname': u'daynight',
'n': 100,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 7},
u'home_winloss': { 'clipInput': True,
'fieldname': u'home_winloss',
'maxval': 0.69999999999999996,
'minval': 0.0,
'n': 50,
'name': u'home_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 7},
u'precip': { 'fieldname': u'precip',
'n': 100,
'name': u'precip',
'type': 'SDRCategoryEncoder',
'w': 7},
u'timestamp_dayOfWeek': { 'dayOfWeek': (7, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 1),
'type': 'DateEncoder'},
u'visitor_winloss': { 'clipInput': True,
'fieldname': u'visitor_winloss',
'maxval': 0.78600000000000003,
'minval': 0.0,
'n': 50,
'name': u'visitor_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 7}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
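# Worked example (illustrative): with a predictAheadTime of 24 hours and an
# aggregation period of 1 hour, aggregationDivide yields 24.0 and the
# classifier ends up configured with 'steps': '24'.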
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
################################################################################
control = {
# The environment that the current model is being run in
"environment": 'grok',
# Input stream specification per py/grokengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'baseball benchmark test',
u'streams': [ { u'columns': [ u'daynight',
u'precip',
u'home_winloss',
u'visitor_winloss',
u'attendance',
u'timestamp'],
u'info': u'OAK01.csv',
u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.prediction,
metric='aae', params={'window': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.prediction,
metric='trivial_aae', params={'window': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.encodings,
metric='grokScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.encodings,
metric='grokScore_scalar',
params={'frequencyWindow': 1000})
],
'inferenceArgs':dict(testFields=['attendance']),
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*grokScore.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| jkthompson/nupic | tests/integration/py2/nupic/swarming/experiments/max_branching_temporal/description.py | Python | gpl-3.0 | 17,499 |
# -*- coding: utf-8 -*-
'''
Created on 12 May 2012
'''
class Robot:
    '''The goal is to be able to read any Robot.attribute from any other
    class, without that class needing to know whether PetitRobot or
    GrosRobot should be used.
    '''
    _vrille = 0
    @classmethod
    def copy_from(cls, robot):
        '''Drawback of this approach: it is a copy of robot taken at a given
        instant. If a robot instance is modified by mistake afterwards, the
        Robot class attributes are no longer up to date.'''
        for attr in robot.__dict__:
            if attr[0] != "_":
                setattr(cls, attr, robot.__dict__[attr])
        cls._vrille = robot.__dict__['_vrille']
    @staticmethod
    def vrille():
        if Robot.side == "red":
            return -Robot._vrille
        else:
            return Robot._vrille
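# Illustrative usage (a sketch; GrosRobot stands for one of the concrete robot
# classes mentioned in the docstring and is assumed to be defined elsewhere):
#   gros = GrosRobot()
#   Robot.copy_from(gros)   # snapshot the instance attributes onto Robot
#   Robot.side              # now readable from any module without knowing the subclass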
| 7Robot/cerveau | ia/robots/robot.py | Python | gpl-3.0 | 875 |
# -*- coding: utf-8 -*-
# Scrapy settings for Jobspider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
from Jobspider.spiders.SpiderHelper import FilePath
FilePath.__new__(FilePath)
filePath = FilePath._instance
BOT_NAME = 'Jobspider'
SPIDER_MODULES = ['Jobspider.spiders']
NEWSPIDER_MODULE = 'Jobspider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Jobspider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Jobspider.middlewares.JobspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# for anti_crawl
DOWNLOADER_MIDDLEWARES = {
'Jobspider.JobSpiderUtils.AntiCrawl.RotateHttpProxyMiddleware': 542,
'Jobspider.JobSpiderUtils.AntiCrawl.RotateUserAgentMiddleware':543
}
# disable cookies
COOKIES_ENABLED = False
# set download delay
DOWNLOAD_DELAY = 2
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'Jobspider.pipelines.JobspiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# log setting
LOG_STDOUT = True
LOG_FILE = filePath.logPath
LOG_LEVEL = 'WARNING'
# item pipeline setting
# FEED_URI = 'file:///D:/Scrapy/item.json'
# FEED_FORMAT = 'json'
# FEED_EXPORTERS = 'JsonItemExporter'
# order field when output
# FEED_EXPORT_FIELDS
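# For reference, a minimal sketch of what a user-agent rotating downloader
# middleware (like the RotateUserAgentMiddleware registered above) might look
# like; the real implementation lives in Jobspider.JobSpiderUtils.AntiCrawl and
# the USER_AGENTS list here is only an assumption:
#   import random
#   class RotateUserAgentMiddleware(object):
#       USER_AGENTS = ['Mozilla/5.0 (Windows NT 10.0; ...)', 'Mozilla/5.0 (X11; Linux x86_64; ...)']
#       def process_request(self, request, spider):
#           request.headers['User-Agent'] = random.choice(self.USER_AGENTS)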
| everywan/Everywan.Spider | Lagou/Jobspider/settings.py | Python | gpl-3.0 | 3,681 |
import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import pytest
from tools.wpt import wpt
here = os.path.abspath(os.path.dirname(__file__))
def is_port_8000_in_use():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", 8000))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
return True
else:
raise e
finally:
s.close()
return False
@pytest.fixture(scope="module")
def manifest_dir():
def update_manifest():
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["manifest", "--no-download", "--path", os.path.join(path, "MANIFEST.json")])
assert excinfo.value.code == 0
if os.environ.get('TRAVIS') == "true":
path = "~/meta"
update_manifest()
yield path
else:
try:
path = tempfile.mkdtemp()
old_path = os.path.join(wpt.localpaths.repo_root, "MANIFEST.json")
if os.path.exists(os.path.join(wpt.localpaths.repo_root, "MANIFEST.json")):
shutil.copyfile(old_path, os.path.join(path, "MANIFEST.json"))
update_manifest()
yield path
finally:
shutil.rmtree(path)
@pytest.fixture
def temp_test():
os.makedirs("../../.tools-tests")
test_count = {"value": 0}
def make_test(body):
test_count["value"] += 1
test_name = ".tools-tests/%s.html" % test_count["value"]
test_path = "../../%s" % test_name
with open(test_path, "w") as handle:
handle.write("""
<!DOCTYPE html>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>%s</script>
""" % body)
return test_name
yield make_test
shutil.rmtree("../../.tools-tests")
def test_missing():
with pytest.raises(SystemExit):
wpt.main(argv=["#missing-command"])
def test_help():
# TODO: It seems like there's a bug in argparse that makes this argument order required
# should try to work around that
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["--help"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests(manifest_dir):
"""The `--list-tests` option should not produce an error under normal
conditions."""
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--metadata", manifest_dir, "--list-tests",
"--yes", "chrome", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_missing_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the absence of
a test manifest file."""
os.remove(os.path.join(manifest_dir, "MANIFEST.json"))
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_invalid_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the presence of
a malformed test manifest file."""
manifest_filename = os.path.join(manifest_dir, "MANIFEST.json")
assert os.path.isfile(manifest_filename)
with open(manifest_filename, "a+") as handle:
handle.write("extra text which invalidates the file")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_firefox(manifest_dir):
# TODO: It seems like there's a bug in argparse that makes this argument order required
# should try to work around that
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
if sys.platform == "darwin":
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "Firefox Nightly.app")
else:
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "firefox")
if os.path.exists(fx_path):
shutil.rmtree(fx_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--no-pause", "--install-browser", "--yes",
# The use of `--binary-args` is intentional: it
# demonstrates that internally-managed command-line
# arguments are properly merged with those specified by
# the user. See
# https://github.com/web-platform-tests/wpt/pull/13154
"--binary-arg=-headless",
"--metadata", manifest_dir,
"firefox", "/dom/nodes/Element-tagName.html"])
assert os.path.exists(fx_path)
shutil.rmtree(fx_path)
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_chrome(manifest_dir):
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--metadata", manifest_dir,
"chrome", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_zero_tests():
"""A test execution describing zero tests should be reported as an error
even in the presence of the `--no-fail-on-unexpected` option."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"chrome", "/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected",
"chrome", "/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_failing_test():
"""Failing tests should be reported with a non-zero exit status unless the
`--no-fail-on-unexpected` option has been specified."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
failing_test = "/infrastructure/expected-fail/failing-test.html"
assert os.path.isfile("../../%s" % failing_test)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"chrome", failing_test])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected",
"chrome", failing_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_verify_unstable(temp_test):
"""Unstable tests should be reported with a non-zero exit status. Stable
tests should be reported with a zero exit status."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
unstable_test = temp_test("""
test(function() {
if (localStorage.getItem('wpt-unstable-test-flag')) {
throw new Error();
}
localStorage.setItem('wpt-unstable-test-flag', 'x');
}, 'my test');
""")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"chrome", unstable_test])
assert excinfo.value.code != 0
stable_test = temp_test("test(function() {}, 'my test');")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"chrome", stable_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_chromedriver():
chromedriver_path = os.path.join(wpt.localpaths.repo_root, "_venv", "bin", "chromedriver")
if os.path.exists(chromedriver_path):
os.unlink(chromedriver_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "chrome", "webdriver"])
assert excinfo.value.code == 0
assert os.path.exists(chromedriver_path)
os.unlink(chromedriver_path)
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_firefox():
if sys.platform == "darwin":
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "Firefox Nightly.app")
else:
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "firefox")
if os.path.exists(fx_path):
shutil.rmtree(fx_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "firefox", "browser", "--channel=nightly"])
assert excinfo.value.code == 0
assert os.path.exists(fx_path)
shutil.rmtree(fx_path)
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == """html/browsers/offline/appcache/workers/appcache-worker.html
html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-worker-data.py
html/browsers/offline/appcache/workers/resources/appcache-worker-import.py
html/browsers/offline/appcache/workers/resources/appcache-worker.manifest
html/browsers/offline/appcache/workers/resources/appcache-worker.py
"""
assert err == ""
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed_null(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "--null", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == "\0".join(["html/browsers/offline/appcache/workers/appcache-worker.html",
"html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-worker-data.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker-import.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker.manifest",
"html/browsers/offline/appcache/workers/resources/appcache-worker.py",
""])
assert err == ""
def test_files_changed_ignore():
from tools.wpt.testfiles import exclude_ignored
files = ["resources/testharness.js", "resources/webidl2/index.js", "test/test.js"]
changed, ignored = exclude_ignored(files, ignore_rules=["resources/testharness*"])
assert changed == [os.path.join(wpt.wpt_root, item) for item in
["resources/webidl2/index.js", "test/test.js"]]
assert ignored == [os.path.join(wpt.wpt_root, item) for item in
["resources/testharness.js"]]
def test_files_changed_ignore_rules():
from tools.wpt.testfiles import compile_ignore_rule
assert compile_ignore_rule("foo*bar*/baz").pattern == "^foo\*bar[^/]*/baz$"
assert compile_ignore_rule("foo**bar**/baz").pattern == "^foo\*\*bar.*/baz$"
assert compile_ignore_rule("foobar/baz/*").pattern == "^foobar/baz/[^/]*$"
assert compile_ignore_rule("foobar/baz/**").pattern == "^foobar/baz/.*$"
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "3a055e818218f548db240c316654f3cc1aeeb733"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "infrastructure/reftest-wait.html" in out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_idlharness(capsys, manifest_dir):
commit = "47cea8c38b88c0ddd3854e4edec0c5b6f2697e62"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "webrtc/idlharness.https.window.js\n" == out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_null(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "9bf1daa3d8b4425f2354c3ca92c4cf0398d329dd"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--null", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
tests = out.split("\0")
assert "dom/interfaces.html" in tests
assert "html/dom/interfaces.https.html" in tests
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_serve():
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
p = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"), "serve"],
preexec_fn=os.setsid)
start = time.time()
try:
while True:
if p.poll() is not None:
assert False, "server not running"
if time.time() - start > 60:
assert False, "server did not start responding within 60s"
try:
resp = urllib2.urlopen("http://web-platform.test:8000")
print resp
except urllib2.URLError:
print "URLError"
time.sleep(1)
else:
assert resp.code == 200
break
finally:
os.killpg(p.pid, 15)
# The following commands are slow running and used implicitly in other CI
# jobs, so we skip them here:
# wpt check-stability
# wpt manifest
# wpt lint
| sadmansk/servo | tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py | Python | mpl-2.0 | 18,672 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-03 10:00
from __future__ import unicode_literals
from django.db import migrations
def ensure_ldap_groups(apps, schema_editor):
from lib.auth.backends import GROUP_MAPPINGS
Group = apps.get_model('auth', 'Group')
for django_group in GROUP_MAPPINGS:
Group.objects.get_or_create(name=django_group)
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length')
]
operations = [
migrations.RunPython(ensure_ldap_groups),
]
| Pike/elmo | apps/accounts/migrations/0001_initial.py | Python | mpl-2.0 | 584 |
from flask import Flask, session, url_for, redirect, json, request
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.script import Manager
from flask.ext.marshmallow import Marshmallow
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.security import Security, SQLAlchemyUserDatastore, current_user
from flask.ext.security.utils import encrypt_password
from flask.ext.cache import Cache
from flask_mail import Mail
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_admin import Admin, AdminIndexView
from flask_social_blueprint.core import SocialBlueprint as SocialBp
from beavy.utils.deepmerge import deepmerge
from flask_environments import Environments
from pprint import pprint
from celery import Celery
import os
import yaml
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_FOLDER = os.path.join(BASE_DIR, '..', 'assets')
# The app
app = Flask(__name__,
static_url_path='/assets',
static_folder=STATIC_FOLDER)
# --------- helpers for setup ----------------------------
def make_env(app):
# environment-based configuration loading
env = Environments(app, var_name="BEAVY_ENV")
env.from_yaml(os.path.join(BASE_DIR, 'config.yml'))
# env.from_yaml(os.path.join(os.getcwd(), 'config.yml'))
with open(os.path.join(os.getcwd(), 'config.yml'), "r") as r:
deepmerge(app.config, yaml.load(r))
# allow for environment variables to update items
if os.environ.get("BEAVY_CONFIG_FROM_ENV", False):
app.config.update(os.environ)
if "DATABASE_URL" in os.environ:
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ["DATABASE_URL"]
if "RABBITMQ_URL" in os.environ:
app.config["CELERY_BROKER_URL"] = os.environ["RABBITMQ_URL"]
if "REDIS_URL" in os.environ:
app.config["RATELIMIT_STORAGE_URL"] = os.environ["REDIS_URL"]
app.config["CACHE_REDIS_URL"] = os.environ["REDIS_URL"]
# update social buttons
_FLBLPRE = "flask_social_blueprint.providers.{}"
if "SOCIAL_BLUEPRINT" not in app.config:
app.config["SOCIAL_BLUEPRINT"] = dict([
("." in name and name or _FLBLPRE.format(name), values)
for name, values in app.config.get("SOCIAL_LOGINS").items()])
return env
def setup_statics(app):
files = dict(main_js="main.js", main_css="main.css")
if not app.debug:
with open(os.path.join(STATIC_FOLDER, "manifest.json")) as r:
manifest = json.load(r)
files = dict([(key.replace(".", "_"), value)
for (key, value) in manifest.items()])
@app.context_processor
    def inject_manifest():
return dict(static_files=files)
def make_celery(app):
celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
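# Illustrative use of the factory above (the actual instance is created further
# down as `celery = make_celery(app)`); `add` is a hypothetical task, not part
# of this codebase:
#   @celery.task
#   def add(x, y):
#       return x + y   # runs inside app.app_context() thanks to ContextTask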
class SocialBlueprint(SocialBp):
# our own wrapper around the SocialBlueprint
# to forward for registring
def login_failed_redirect(self, profile, provider):
if not app.config.get("SECURITY_REGISTERABLE"):
return redirect("/")
session["_social_profile"] = profile.data
# keep the stuff around, so we can set it up after
# the user provided us with a nice email address
return redirect(url_for('security.register',
name="{} {}".format(profile.data.get("first_name"),
profile.data.get("last_name"))))
class BeavyAdminIndexView(AdminIndexView):
def is_accessible(self):
if not current_user.is_active or not current_user.is_authenticated:
return False
if current_user.has_role('admin'):
return True
return False
def _limiter_key():
if current_user.is_authenticated:
return "u_{}".format(current_user.id)
return "ip_{}".format(get_ipaddr())
# --------------------------- Setting stuff up in order ----------
# load the environment and configuration
env = make_env(app)
# initialize the celery task queue
celery = make_celery(app)
# start database
db = SQLAlchemy(app)
# and database migrations
migrate = Migrate(app, db)
# initialize Resource-Based API-System
ma = Marshmallow(app)
# scripts manager
manager = Manager(app)
# add DB+migrations commands
manager.add_command('db', MigrateCommand)
# initialize email support
mail = Mail(app)
# limit access to the app
limiter = Limiter(app, key_func=_limiter_key)
# configure logging for limiter
for handler in app.logger.handlers:
limiter.logger.addHandler(handler)
# add caching support
cache = Cache(app)
# -------------- initialize i18n --------------
from flask.ext.icu import ICU, get_messages # noqa
icu = ICU(app, app.config.get("DEFAULT_LANGUAGE"))
# Inject ICU messages for delivery to client via _preload.html template
@app.context_processor
def inject_messages():
return dict(MESSAGES=json.dumps(get_messages()))
@icu.localeselector
def get_locale():
locale = None
if current_user is not None and current_user.is_authenticated:
locale = current_user.language_preference
elif app.config.get("LANGUAGES") is not None:
languages = app.config.get("LANGUAGES")
locale = request.accept_languages.best_match(languages)
return locale # If no locale, Flask-ICU uses the default setting.
# ------ Database setup is done after here ----------
from beavy.models.user import User # noqa
from beavy.models.role import Role # noqa
from beavy.models.social_connection import SocialConnection # noqa
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# add social authentication
SocialBlueprint.init_bp(app, SocialConnection, url_prefix="/_social")
# initialize admin backend
admin = Admin(app,
'{} Admin'.format(app.config.get("NAME")),
index_view=BeavyAdminIndexView(),
# base_template='my_master.html',
template_mode='bootstrap3',)
from beavy.common.admin_model_view import AdminModelView # noqa
# setup admin UI stuff
admin.add_view(AdminModelView(User, db.session,
name="Users",
menu_icon_type='glyph',
menu_icon_value='glyphicon-user'))
# ----- finally, load all configured modules ---------
from .setup import replaceHomeEndpoint, generate_capability_maps # noqa
# set up static files loading using the manifest in production
setup_statics(app)
# and set the home endpoint
replaceHomeEndpoint(app)
from .models.object import Object # noqa
generate_capability_maps(Object)
from .models.activity import Activity # noqa
generate_capability_maps(Activity)
# ----- some debug features
if app.debug:
@app.before_first_request
def ensure_users():
from datetime import datetime # noqa
admin_role = user_datastore.find_or_create_role('admin')
pw = encrypt_password("password")
if not user_datastore.find_user(email="[email protected]"):
user_datastore.create_user(email="[email protected]",
confirmed_at=datetime.now(),
active=True,
password=pw)
if not user_datastore.find_user(email="[email protected]"):
user_datastore.add_role_to_user(
user_datastore.create_user(email="[email protected]",
confirmed_at=datetime.now(),
active=True,
password=pw),
admin_role)
user_datastore.commit()
@app.before_first_request
def print_routes():
pprint(["{} -> {}".format(rule.rule, rule.endpoint)
for rule in app.url_map.iter_rules()
if rule.endpoint != 'static'])
| beavyHQ/beavy | beavy/app.py | Python | mpl-2.0 | 8,289 |
import urllib
from xml.etree import ElementTree
import sqlite3
import logging
import base64
import inspect
from datetime import datetime, timedelta
import time
from config import configuration
logger = logging.getLogger('temperature')
logger.setLevel(10)
noaa_url = "http://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php?lat=%s&lon=%s&product=time-series&Unit=e"
def convert_to_utc(datestr):
    hours = int(datestr[-6:-3]) + time.localtime().tm_isdst  # The NOAA feed doesn't account for daylight saving time, so adjust for it here
    sign = hours / abs(hours)  # Determine whether we are subtracting or adding time (assumes a non-zero total offset)
minutes = sign * int(datestr[-2:])
dttime = datetime.strptime(datestr[:-6], "%Y-%m-%dT%H:%M:%S")
delta = timedelta(hours=hours, minutes=minutes)
return (dttime - delta).strftime("%Y-%m-%dT%H:%M:%S+00:00") # Force this to be UTC
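# Worked example (illustrative, assuming the local zone reports tm_isdst == 0):
#   convert_to_utc("2014-07-01T14:00:00-05:00") -> "2014-07-01T19:00:00+00:00"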
class Forecast(object):
name = 'forecast'
keyword = 'forecast'
def __init__(self):
lat = configuration.get('latitude')
lon = configuration.get('longitude')
self.url = noaa_url % (lat, lon)
# This is invoked when installed as a Bottle plugin
def setup(self, app):
self.routes = app
for other in app.plugins:
if not isinstance(other, Forecast):
continue
if other.keyword == self.keyword:
raise PluginError("Found another instance of Forecast running!")
self.update()
# This is invoked within Bottle as part of each route when installed
def apply(self, callback, route):
args = inspect.getargspec(callback)[0]
if self.keyword not in args:
return callback
def wrapper(*args, **kwargs):
kwargs[self.keyword] = self
rv = callback(*args, **kwargs)
return rv
return wrapper
# De-installation from Bottle as a plugin
def close(self):
logger.info("Closing Forecast")
def __load_timeseries(self, tree):
logger.debug("Refreshing timeseries layouts")
self.timespans = {}
for layout in tree.getroot().iter(tag="time-layout"):
key = layout.find("layout-key").text
start_times = map(lambda time: convert_to_utc(time.text), layout.iter(tag="start-valid-time"))
end_times = map(lambda time: convert_to_utc(time.text), layout.iter(tag="end-valid-time"))
self.timespans[key] = zip(start_times, end_times) if any(end_times) else start_times
def __load_liquid_precipitation(self, tree):
logger.debug("Refreshing precipitation data")
self.precipitations = []
for precipitation in tree.getroot().iter(tag="precipitation"):
time_layout = precipitation.attrib['time-layout']
precip_type = precipitation.attrib['type']
if precip_type == "liquid":
times = iter(self.timespans[time_layout])
for value in precipitation.iter(tag="value"):
starttime, endtime = times.next()
if value.attrib['type'] == "NDFD":
self.precipitations.append((starttime, float(value.text)))
def __load_hourly_temperature(self, tree):
logger.debug("Refreshing temperature data")
self.temperatures = []
for temperature in tree.getroot().iter(tag="temperature"):
time_layout = temperature.attrib['time-layout']
temp_type = temperature.attrib['type']
if temp_type == "hourly":
times = iter(self.timespans[time_layout])
for value in temperature.iter(tag="value"):
starttime = times.next()
if value.attrib['type'] == "NDFD":
self.temperatures.append((starttime, int(value.text)))
def __load_dew_point(self, tree):
logger.debug("Refreshing dewpoint data")
self.dewpoints = []
for temperature in tree.getroot().iter(tag="temperature"):
time_layout = temperature.attrib['time-layout']
temp_type = temperature.attrib['type']
if temp_type == "dew point":
times = iter(self.timespans[time_layout])
for value in temperature.iter(tag="value"):
starttime = times.next()
if value.attrib['type'] == "NDFD":
self.dewpoints.append((starttime, int(value.text)))
def __load_hourly_wind(self, tree):
logger.debug("Refreshing wind data")
self.winds = []
for speed in tree.getroot().iter(tag="wind-speed"):
time_layout = speed.attrib['time-layout']
wind_type = speed.attrib['type']
if wind_type == "sustained":
times = iter(self.timespans[time_layout])
for value in speed.iter(tag="value"):
starttime = times.next()
if value.attrib['type'] == "NDFD":
self.winds.append((starttime, int(value.text)))
for direction in tree.getroot().iter(tag="direction"):
table = direction.attrib['time-layout']
wind_type = direction.attrib['type']
if wind_type == "wind":
sequence = -1
for value in direction.iter(tag="value"):
if value.attrib['type'] == "NDFD":
sequence += 1
starttime, speed = self.winds[sequence]
self.winds[sequence] = (starttime, speed, int(value.text))
def __load_hourly_cloudcover(self, tree):
logger.debug("Refreshing cloud cover data")
self.clouds = []
for cover in tree.getroot().iter(tag="cloud-amount"):
time_layout = cover.attrib['time-layout']
cloud_type = cover.attrib['type']
if cloud_type == "total":
times = iter(self.timespans[time_layout])
for value in cover.iter(tag="value"):
starttime = times.next()
if value.attrib['type'] == "NDFD":
self.clouds.append((starttime, int(value.text) if value.text else 0))
def __load_apparent_temperature(self, tree):
logger.debug("Refreshing apparent temperature data")
self.apparent_temps = []
for apparent_temp in tree.getroot().iter(tag="temperature"):
time_layout = apparent_temp.attrib['time-layout']
temperature_type = apparent_temp.attrib['type']
if temperature_type == "apparent":
times = iter(self.timespans[time_layout])
for value in apparent_temp.iter(tag="value"):
starttime = times.next()
self.apparent_temps.append((starttime, int(value.text)))
def __load_hourly_humidity(self, tree):
logger.debug("Refreshing humidity data")
self.humidities = []
for relative_humidity in tree.getroot().iter(tag="humidity"):
time_layout = relative_humidity.attrib['time-layout']
humidity_type = relative_humidity.attrib['type']
if humidity_type == "relative":
times = iter(self.timespans[time_layout])
for value in relative_humidity.iter(tag="value"):
starttime = times.next()
self.humidities.append((starttime, int(value.text)))
def update(self):
logger.info("Refreshing data from NOAA")
resp = urllib.urlopen(self.url)
tree = ElementTree.parse(resp)
self.__load_timeseries(tree)
self.__load_liquid_precipitation(tree)
self.__load_hourly_temperature(tree)
self.__load_dew_point(tree)
self.__load_hourly_wind(tree)
self.__load_hourly_cloudcover(tree)
self.__load_apparent_temperature(tree)
self.__load_hourly_humidity(tree)
self.updated_datetime = datetime.now()
def temperature(self):
return self.temperatures
def dewpoint(self):
return self.dewpoints
def precipitation(self):
return self.precipitations
def wind(self):
return self.winds
def cloudcover(self):
return self.clouds
def apparent_temperature(self):
return self.apparent_temps
def humidity(self):
return self.humidities
def last_updated(self):
return self.updated_datetime.isoformat()
class PluginError(Exception):
pass
Plugin = Forecast
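# Illustrative wiring (a sketch; assumes a Bottle application object named `app`):
#   from bottle import Bottle
#   app = Bottle()
#   app.install(Plugin())
#   @app.route('/temperature')
#   def temperature(forecast):        # the 'forecast' keyword is injected by apply()
#       return {'hourly': forecast.temperature()}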
| deckerego/SprinklerSwitch | app/noaa.py | Python | mpl-2.0 | 8,563 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import random
import string
from bot.tweepy_api import get_api
logger = logging.getLogger(__name__)
if __name__ == '__main__':
hashtag = "#doctoctoctest"
randomchars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
status = hashtag + " " + randomchars
logger.debug(status)
api = get_api()
api.update_status(status)
| jeromecc/doctoctocbot | src/bot/bin/randomstatus.py | Python | mpl-2.0 | 425 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.send_to_device import SendToDevice
class FirefoxWelcomePage4(BasePage):
_URL_TEMPLATE = '/{locale}/firefox/welcome/4/'
_get_firefox_qr_code_locator = (By.ID, 'firefox-qr')
@property
def send_to_device(self):
return SendToDevice(self)
@property
def is_firefox_qr_code_displayed(self):
return self.is_element_displayed(*self._get_firefox_qr_code_locator)
| MichaelKohler/bedrock | tests/pages/firefox/welcome/page4.py | Python | mpl-2.0 | 695 |
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Universal functions (ufuncs) for ODL-wrapped arrays.
These functions are internal and should only be used as methods on
`Tensor`-like classes.
See `numpy.ufuncs
<http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
for more information.
Notes
-----
The default implementation of these methods uses the ``__array_ufunc__``
dispatch machinery `introduced in Numpy 1.13
<https://github.com/charris/numpy/blob/master/doc/source/reference/\
arrays.classes.rst#special-attributes-and-methods>`_.
"""
from __future__ import print_function, division, absolute_import
from builtins import object
import numpy as np
import re
__all__ = ('TensorSpaceUfuncs', 'ProductSpaceUfuncs')
# Some are ignored since they don't cooperate with dtypes, needs fix
RAW_UFUNCS = ['absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_or',
'bitwise_xor', 'ceil', 'conj', 'copysign', 'cos', 'cosh',
'deg2rad', 'divide', 'equal', 'exp', 'exp2', 'expm1', 'floor',
'floor_divide', 'fmax', 'fmin', 'fmod', 'greater',
'greater_equal', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan',
'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p',
'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not',
'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf',
'multiply', 'negative', 'not_equal', 'power',
'rad2deg', 'reciprocal', 'remainder', 'right_shift', 'rint',
'sign', 'signbit', 'sin', 'sinh', 'sqrt', 'square', 'subtract',
'tan', 'tanh', 'true_divide', 'trunc']
# ,'isreal', 'iscomplex', 'ldexp', 'frexp'
# Add some standardized information
UFUNCS = []
for name in RAW_UFUNCS:
ufunc = getattr(np, name)
n_in, n_out = ufunc.nin, ufunc.nout
descr = ufunc.__doc__.splitlines()[2]
# Numpy occasionally uses single ticks for doc, we only use them for links
descr = re.sub('`+', '``', descr)
doc = descr + """
See Also
--------
numpy.{}
""".format(name)
UFUNCS.append((name, n_in, n_out, doc))
# TODO: add the following reductions (to the CUDA implementation):
# ['var', 'trace', 'tensordot', 'std', 'ptp', 'mean', 'diff', 'cumsum',
# 'cumprod', 'average']
# --- Wrappers for `Tensor` --- #
def wrap_ufunc_base(name, n_in, n_out, doc):
"""Return ufunc wrapper for implementation-agnostic ufunc classes."""
ufunc = getattr(np, name)
if n_in == 1:
if n_out == 1:
def wrapper(self, out=None, **kwargs):
if out is None or isinstance(out, (type(self.elem),
type(self.elem.data))):
out = (out,)
return self.elem.__array_ufunc__(
ufunc, '__call__', self.elem, out=out, **kwargs)
elif n_out == 2:
def wrapper(self, out=None, **kwargs):
if out is None:
out = (None, None)
return self.elem.__array_ufunc__(
ufunc, '__call__', self.elem, out=out, **kwargs)
else:
raise NotImplementedError
elif n_in == 2:
if n_out == 1:
def wrapper(self, x2, out=None, **kwargs):
return self.elem.__array_ufunc__(
ufunc, '__call__', self.elem, x2, out=(out,), **kwargs)
else:
raise NotImplementedError
else:
raise NotImplementedError
wrapper.__name__ = wrapper.__qualname__ = name
wrapper.__doc__ = doc
return wrapper
class TensorSpaceUfuncs(object):
"""Ufuncs for `Tensor` objects.
Internal object, should not be created except in `Tensor`.
"""
def __init__(self, elem):
"""Create ufunc wrapper for elem."""
self.elem = elem
# Reductions for backwards compatibility
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
"""Return the sum of ``self``.
See Also
--------
numpy.sum
prod
"""
return self.elem.__array_ufunc__(
np.add, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
"""Return the product of ``self``.
See Also
--------
numpy.prod
sum
"""
return self.elem.__array_ufunc__(
np.multiply, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
def min(self, axis=None, dtype=None, out=None, keepdims=False):
"""Return the minimum of ``self``.
See Also
--------
numpy.amin
max
"""
return self.elem.__array_ufunc__(
np.minimum, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
def max(self, axis=None, dtype=None, out=None, keepdims=False):
"""Return the maximum of ``self``.
See Also
--------
numpy.amax
min
"""
return self.elem.__array_ufunc__(
np.maximum, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
# Add ufunc methods to ufunc class
for name, n_in, n_out, doc in UFUNCS:
method = wrap_ufunc_base(name, n_in, n_out, doc)
setattr(TensorSpaceUfuncs, name, method)
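# Illustrative usage (a sketch; assumes odl is importable and that rn(3)
# elements expose this class via their `.ufuncs` attribute):
#   import odl
#   x = odl.rn(3).element([1.0, 2.0, 3.0])
#   x.ufuncs.sin()   # dispatches np.sin through __array_ufunc__
#   x.ufuncs.sum()   # reduction, backed by np.add.reduce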
# --- Wrappers for `ProductSpaceElement` --- #
def wrap_ufunc_productspace(name, n_in, n_out, doc):
"""Return ufunc wrapper for `ProductSpaceUfuncs`."""
if n_in == 1:
if n_out == 1:
def wrapper(self, out=None, **kwargs):
if out is None:
result = [getattr(x.ufuncs, name)(**kwargs)
for x in self.elem]
return self.elem.space.element(result)
else:
for x, out_x in zip(self.elem, out):
getattr(x.ufuncs, name)(out=out_x, **kwargs)
return out
elif n_out == 2:
def wrapper(self, out1=None, out2=None, **kwargs):
if out1 is None:
out1 = self.elem.space.element()
if out2 is None:
out2 = self.elem.space.element()
for x, out1_x, out2_x in zip(self.elem, out1, out2):
getattr(x.ufuncs, name)(out1=out1_x, out2=out2_x, **kwargs)
return out1, out2
else:
raise NotImplementedError
elif n_in == 2:
if n_out == 1:
def wrapper(self, x2, out=None, **kwargs):
if x2 in self.elem.space:
if out is None:
result = [getattr(x.ufuncs, name)(x2p, **kwargs)
for x, x2p in zip(self.elem, x2)]
return self.elem.space.element(result)
else:
for x, x2p, outp in zip(self.elem, x2, out):
getattr(x.ufuncs, name)(x2p, out=outp, **kwargs)
return out
else:
if out is None:
result = [getattr(x.ufuncs, name)(x2, **kwargs)
for x in self.elem]
return self.elem.space.element(result)
else:
for x, outp in zip(self.elem, out):
getattr(x.ufuncs, name)(x2, out=outp, **kwargs)
return out
else:
raise NotImplementedError
else:
raise NotImplementedError
wrapper.__name__ = wrapper.__qualname__ = name
wrapper.__doc__ = doc
return wrapper
class ProductSpaceUfuncs(object):
"""Ufuncs for `ProductSpaceElement` objects.
Internal object, should not be created except in `ProductSpaceElement`.
"""
def __init__(self, elem):
"""Create ufunc wrapper for ``elem``."""
self.elem = elem
def sum(self):
"""Return the sum of ``self``.
See Also
--------
numpy.sum
prod
"""
results = [x.ufuncs.sum() for x in self.elem]
return np.sum(results)
def prod(self):
"""Return the product of ``self``.
See Also
--------
numpy.prod
sum
"""
results = [x.ufuncs.prod() for x in self.elem]
return np.prod(results)
def min(self):
"""Return the minimum of ``self``.
See Also
--------
numpy.amin
max
"""
results = [x.ufuncs.min() for x in self.elem]
return np.min(results)
def max(self):
"""Return the maximum of ``self``.
See Also
--------
numpy.amax
min
"""
results = [x.ufuncs.max() for x in self.elem]
return np.max(results)
# Add ufunc methods to ufunc class
for name, n_in, n_out, doc in UFUNCS:
method = wrap_ufunc_productspace(name, n_in, n_out, doc)
setattr(ProductSpaceUfuncs, name, method)
| aringh/odl | odl/util/ufuncs.py | Python | mpl-2.0 | 9,467 |
import re
from urllib.parse import ParseResult, urlencode, urlparse
import requests
from recommendation.memorize import memorize
from recommendation.search.classification.base import BaseClassifier
from recommendation.util import image_url
class MovieClassifier(BaseClassifier):
"""
Classifier that is applied if the returned result is a Wikipedia article.
Adds:
abstract - an excerpt from the Wikipedia article.
slug - the article's URL slug.
title - the article's title.
"""
api_url = 'http://www.omdbapi.com/'
type = 'movie'
title_match = '\/title\/([A-Za-z0-9]+)\/'
def _get_imdb_id(self, all_results):
"""
Gets an appropriate IMDb ID for the search result set.
"""
for result in all_results:
url = urlparse(result['url'])
if self._url_is_imdb(url):
return re.search(self.title_match, url.path).group(1)
return None
    def _url_is_imdb(self, url):
        """
        Passed a ParseResult instance (or a URL string), returns True if the
        URL is that of a movie or TV show on IMDB.
        """
        if not isinstance(url, ParseResult):
            url = urlparse(url)
        # re.search returns None for non-title IMDB pages, so guard against it
        # instead of calling .groups() on None.
        return (url.netloc.endswith('imdb.com') and
                re.search(self.title_match, url.path) is not None)
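    # Worked examples (illustrative):
    #   self._url_is_imdb('http://www.imdb.com/title/tt0111161/')  -> True
    #   self._url_is_imdb('http://www.imdb.com/chart/top')         -> False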
def is_match(self, best_result, all_results):
"""
Matches if any result in the full set is an IMDB detail page.
"""
for result in all_results:
if self._url_is_imdb(result['url']):
return True
return False
def _api_url(self, all_results):
"""
Passed a set of results, determines the appropriate API URL.
"""
return '%s?%s' % (self.api_url, urlencode({
'i': self._get_imdb_id(all_results),
'plot': 'short',
'r': 'json',
}))
@memorize(prefix='omdb')
def _api_response(self, all_results):
"""
Passed a set of results, returns the parsed JSON for an appropriate API
request for those results.
"""
return requests.get(self._api_url(all_results)).json()
def _stars(self, score, max_score):
"""
Passed a score and maximum score, normalizes to a 0-5 scale.
"""
return float(score) / float(max_score) * 5
def _score(self, score, max_score):
"""
Passed a score and maximum score, returns a dict containing both the
raw score and a normalized one.
"""
if score == 'N/A':
return None
return {
'raw': float(score),
'stars': self._stars(score, max_score)
}
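    # Worked example (illustrative): _score('7.5', 10) returns
    # {'raw': 7.5, 'stars': 3.75}, and _score('N/A', 10) returns None.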
def enhance(self):
data = self._api_response(self.all_results)
return {
'is_movie': data.get('Type') == 'movie',
'is_series': data.get('Type') == 'series',
'title': data.get('Title'),
'year': data.get('Year'),
'plot': data.get('Plot'),
'poster': image_url(data.get('Poster')),
'rating': {
'imdb': self._score(data.get('imdbRating'), 10),
'metacritic': self._score(data.get('Metascore'), 100)
},
'imdb_url': 'http://www.imdb.com/title/%s/' % data.get('imdbID'),
'genre': data.get('Genre'),
'runtime': data.get('Runtime')
}
| mozilla/universal-search-recommendation | recommendation/search/classification/movies.py | Python | mpl-2.0 | 3,414 |
"""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import random
import re
import urllib
import urllib2
from commander.deploy import commands, task, hostgroups
import commander_settings as settings
PYTHON = getattr(settings, 'PYTHON_PATH', 'python2.6')
NEW_RELIC_API_KEY = getattr(settings, 'NEW_RELIC_API_KEY', None)
NEW_RELIC_APP_ID = getattr(settings, 'NEW_RELIC_APP_ID', None)
NEW_RELIC_URL = 'https://rpm.newrelic.com/deployments.xml'
GITHUB_URL = 'https://github.com/mozilla/bedrock/compare/{oldrev}...{newrev}'
def management_cmd(ctx, cmd):
"""Run a Django management command correctly."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('LANG=en_US.UTF-8 {0} manage.py {1}'.format(PYTHON, cmd))
@task
def reload_crond(ctx):
ctx.local("killall -SIGHUP crond")
@task
def update_code(ctx, tag):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git fetch --all")
ctx.local("git checkout -f %s" % tag)
ctx.local("git submodule sync")
ctx.local("git submodule update --init --recursive")
ctx.local("find . -name '*.pyc' -delete")
@task
def update_locales(ctx):
with ctx.lcd(os.path.join(settings.SRC_DIR, 'locale')):
ctx.local("svn up")
@task
def update_assets(ctx):
management_cmd(ctx, 'compress_assets')
management_cmd(ctx, 'update_product_details')
management_cmd(ctx, 'update_externalfiles')
management_cmd(ctx, 'collectstatic --noinput')
@task
def update_revision_file(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("mv media/revision.txt media/prev-revision.txt")
ctx.local("git rev-parse HEAD > media/revision.txt")
@task
def database(ctx):
management_cmd(ctx, 'syncdb --migrate --noinput')
@task
def checkin_changes(ctx):
ctx.local(settings.DEPLOY_SCRIPT)
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy_app(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
# ctx.remote("/bin/touch %s" % settings.REMOTE_WSGI)
ctx.remote("service httpd graceful")
@task
def update_info(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("date")
ctx.local("git branch")
ctx.local("git log -3")
ctx.local("git status")
ctx.local("git submodule status")
with ctx.lcd("locale"):
ctx.local("svn info")
ctx.local("svn status")
management_cmd(ctx, 'migrate --list')
@task
def ping_newrelic(ctx):
if NEW_RELIC_API_KEY and NEW_RELIC_APP_ID:
with ctx.lcd(settings.SRC_DIR):
oldrev = ctx.local('cat media/prev-revision.txt').out.strip()
newrev = ctx.local('cat media/revision.txt').out.strip()
log_cmd = 'git log --oneline {0}..{1}'.format(oldrev, newrev)
changelog = ctx.local(log_cmd).out.strip()
print 'Post deployment to New Relic'
desc = generate_desc(oldrev, newrev, changelog)
if changelog:
github_url = GITHUB_URL.format(oldrev=oldrev, newrev=newrev)
changelog = '{0}\n\n{1}'.format(changelog, github_url)
data = urllib.urlencode({
'deployment[description]': desc,
'deployment[revision]': newrev,
'deployment[app_id]': NEW_RELIC_APP_ID,
'deployment[changelog]': changelog,
})
headers = {'x-api-key': NEW_RELIC_API_KEY}
try:
request = urllib2.Request(NEW_RELIC_URL, data, headers)
urllib2.urlopen(request)
except urllib2.URLError as exp:
print 'Error notifying New Relic: {0}'.format(exp)
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
commands['update_code'](ref)
commands['update_info']()
@task
def update(ctx):
commands['database']()
commands['update_assets']()
commands['update_locales']()
commands['update_revision_file']()
commands['reload_crond']()
@task
def deploy(ctx):
if 'update_cron' in commands:
commands['update_cron']()
commands['checkin_changes']()
commands['deploy_app']()
commands['ping_newrelic']()
@task
def update_bedrock(ctx, tag):
"""Do typical bedrock update"""
commands['pre_update'](tag)
commands['update']()
# utility functions #
# shamelessly stolen from https://github.com/mythmon/chief-james/
def get_random_desc():
return random.choice([
'No bugfixes--must be adding infinite loops.',
'No bugfixes--must be rot13ing function names for code security.',
'No bugfixes--must be demonstrating our elite push technology.',
'No bugfixes--must be testing james.',
])
def extract_bugs(changelog):
"""Takes output from git log --oneline and extracts bug numbers"""
bug_regexp = re.compile(r'\bbug (\d+)\b', re.I)
bugs = set()
for line in changelog:
for bug in bug_regexp.findall(line):
bugs.add(bug)
return sorted(list(bugs))
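# Worked example (illustrative):
#   extract_bugs(['Fix bug 123 in nav', 'bug 456: typo', 'cleanup'])
#   -> ['123', '456']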
def generate_desc(from_commit, to_commit, changelog):
"""Figures out a good description based on what we're pushing out."""
if from_commit.startswith(to_commit):
desc = 'Pushing {0} again'.format(to_commit)
else:
bugs = extract_bugs(changelog.split('\n'))
if bugs:
bugs = ['bug #{0}'.format(bug) for bug in bugs]
desc = 'Fixing: {0}'.format(', '.join(bugs))
else:
desc = get_random_desc()
return desc
def generate_cron_file(ctx, tmpl_name):
with ctx.lcd(settings.SRC_DIR):
ctx.local("{python} bin/gen-crons.py -p {python} -s {src_dir} -w {www_dir} "
"-t {template}".format(python=PYTHON,
src_dir=settings.SRC_DIR,
www_dir=settings.WWW_DIR,
template=tmpl_name))
| amjadm61/bedrock | bin/update/deploy_base.py | Python | mpl-2.0 | 5,911 |
import pytest
import os
import escpos.printer
from random import choice
from unittest.mock import MagicMock
import app.views.core
import app.printer
import app.database
import app.utils
from .. import NAMES, TEST_REPEATS, fill_tickets, do_until_truthy
from app.middleware import db
from app.utils import absolute_path
from app.database import (Task, Office, Serial, Settings, Touch_store, Display_store,
Printer, Aliases)
@pytest.mark.usefixtures('c')
def test_welcome_root_and_login(c):
response = c.post('/log/a', follow_redirects=True)
page_content = response.data.decode('utf-8')
assert response.status == '200 OK'
assert 'Free Queue Manager' in page_content
@pytest.mark.usefixtures('c')
def test_new_registered_ticket(c):
touch_screen_settings = Touch_store.query.first()
touch_screen_settings.n = True
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING REGISTERED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
@pytest.mark.usefixtures('c')
def test_new_noisy_registered_ticket(c):
touch_screen_settings = Touch_store.query.first()
touch_screen_settings.n = True
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = '0002020000'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name[3:]
@pytest.mark.usefixtures('c')
def test_new_printed_ticket(c, monkeypatch):
last_ticket = None
mock_printer = MagicMock()
monkeypatch.setattr(escpos.printer, 'Usb', mock_printer)
# NOTE: set ticket setting to printed
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
touch_screen_settings.n = False
printer_settings.vendor = 150
printer_settings.product = 3
printer_settings.in_ep = 170
printer_settings.out_ep = 170
header = printer_settings.header = 'testing header'
sub = printer_settings.sub = 'testing sub-header'
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
office = new_ticket.office
tickets = Serial.all_office_tickets(office.id, desc=False)\
.filter(Serial.number != new_ticket.number)
cur_ticket = tickets.first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
assert mock_printer().text.call_count == 12
assert mock_printer().set.call_count == 7
mock_printer().set.assert_called_with(align='left', height=1, width=1)
mock_printer().cut.assert_called_once()
mock_printer().text.assert_any_call(f'{header}\n')
mock_printer().text.assert_any_call(f'\n{sub}\n')
mock_printer().text.assert_any_call(f'\nOffice : {office.prefix}{office.name}\n')
mock_printer().text.assert_any_call(f'\n{office.prefix}.{new_ticket.number}\n')
mock_printer().text.assert_any_call(f'\nTickets ahead : {tickets.count()}\n')
mock_printer().text.assert_any_call(f'\nTask : {new_ticket.task.name}\n')
mock_printer().text.assert_any_call(
f'\nCurrent ticket : {office.prefix}.{cur_ticket and cur_ticket.number}\n')
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_with_aliases(c, monkeypatch):
last_ticket = None
mock_printer = MagicMock()
monkeypatch.setattr(escpos.printer, 'Usb', mock_printer)
# NOTE: set ticket setting to printed
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
touch_screen_settings.n = False
printer_settings.vendor = 150
printer_settings.product = 3
printer_settings.in_ep = 170
printer_settings.out_ep = 170
# NOTE: setting aliases
office_alt = 'Department'
task_alt = 'Mission'
ticket_alt = 'Token'
aliases = Aliases.get()
aliases.office = office_alt
aliases.task = task_alt
aliases.ticket = ticket_alt
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
office = new_ticket.office
tickets = Serial.all_office_tickets(office.id, desc=False)\
.filter(Serial.number != new_ticket.number)
cur_ticket = tickets.first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
assert mock_printer().text.call_count == 12
assert mock_printer().set.call_count == 7
mock_printer().set.assert_called_with(align='left', height=1, width=1)
mock_printer().cut.assert_called_once()
mock_printer().text.assert_any_call(f'\n{office_alt} : {office.prefix}{office.name}\n')
mock_printer().text.assert_any_call(f'\n{office.prefix}.{new_ticket.number}\n')
mock_printer().text.assert_any_call(f'\n{ticket_alt}s ahead : {tickets.count()}\n')
mock_printer().text.assert_any_call(f'\n{task_alt} : {new_ticket.task.name}\n')
mock_printer().text.assert_any_call(
f'\nCurrent {ticket_alt.lower()} : {office.prefix}.{cur_ticket and cur_ticket.number}\n')
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_windows(c, monkeypatch):
last_ticket = None
printer_name = 'testing_printer'
printer_path = 'testing_path'
printer_full_path = os.path.join(os.getcwd(), f'{printer_path}.txt')
mock_uuid = MagicMock()
mock_uuid.uuid4 = MagicMock(return_value=printer_path)
mock_os = MagicMock()
mock_os.name = 'nt'
mock_system = MagicMock()
monkeypatch.setattr(app.database, 'os', mock_os)
monkeypatch.setattr(app.printer, 'name', 'nt')
monkeypatch.setattr(app.printer, 'uuid', mock_uuid)
monkeypatch.setattr(app.printer, 'system', mock_system)
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
touch_screen_settings.n = False
printer_settings.name = printer_name
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
mock_system.assert_called_once_with(
f'print /D:\\\localhost\\"{printer_name}" "{printer_full_path}"') # noqa
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_lp(c, monkeypatch):
last_ticket = None
printer_name = 'testing_printer'
printer_path = 'testing_path'
printer_full_path = os.path.join(os.getcwd(), f'{printer_path}.txt')
mock_uuid = MagicMock()
mock_uuid.uuid4 = MagicMock(return_value=printer_path)
mock_os = MagicMock()
mock_os.name = 'linux'
mock_system = MagicMock()
monkeypatch.setattr(app.views.core, 'os', mock_os)
monkeypatch.setattr(app.printer, 'name', 'linux')
monkeypatch.setattr(app.printer, 'uuid', mock_uuid)
monkeypatch.setattr(app.printer, 'system', mock_system)
settings = Settings.get()
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
settings.lp_printing = True
touch_screen_settings.n = False
printer_settings.name = printer_name
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
mock_system.assert_called_once_with(
f'lp -d "{printer_name}" -o raw "{printer_full_path}"')
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_arabic(c, monkeypatch):
last_ticket = None
mock_printer = MagicMock()
image_path = os.path.join(os.getcwd(), 'dummy.jpg')
mock_pil = MagicMock()
mock_pil.truetype().getsize.return_value = (0, 0)
mock_pos = MagicMock()
mock_pos().output = b''
monkeypatch.setattr(escpos.printer, 'Usb', mock_printer)
monkeypatch.setattr(app.printer, 'ImageDraw', mock_pil)
monkeypatch.setattr(app.printer, 'Image', mock_pil)
monkeypatch.setattr(app.printer, 'ImageFont', mock_pil)
monkeypatch.setattr(app.printer, 'Dummy', mock_pos)
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
touch_screen_settings.n = False
printer_settings.vendor = 150
printer_settings.product = 3
printer_settings.in_ep = 170
printer_settings.out_ep = 170
printer_settings.langu = 'ar'
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
assert mock_printer().text.call_count == 1
mock_printer().cut.assert_called_once()
mock_printer().close.assert_called_once()
mock_printer().image.assert_called_once_with(image_path,
fragment_height=580,
high_density_vertical=True)
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_windows_arabic(c, monkeypatch):
last_ticket = None
printer_name = 'testing_printer'
printer_path = 'testing_path'
printer_full_path = os.path.join(os.getcwd(), f'{printer_path}.txt')
mock_uuid = MagicMock()
mock_uuid.uuid4 = MagicMock(return_value=printer_path)
mock_os = MagicMock()
mock_os.name = 'nt'
mock_system = MagicMock()
mock_pil = MagicMock()
mock_pil.truetype().getsize.return_value = (0, 0)
mock_pos = MagicMock()
mock_pos().output = b''
monkeypatch.setattr(app.database, 'os', mock_os)
monkeypatch.setattr(app.printer, 'name', 'nt')
monkeypatch.setattr(app.printer, 'uuid', mock_uuid)
monkeypatch.setattr(app.printer, 'system', mock_system)
monkeypatch.setattr(app.printer, 'ImageDraw', mock_pil)
monkeypatch.setattr(app.printer, 'Image', mock_pil)
monkeypatch.setattr(app.printer, 'ImageFont', mock_pil)
monkeypatch.setattr(app.printer, 'Dummy', mock_pos)
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
touch_screen_settings.n = False
printer_settings.name = printer_name
printer_settings.langu = 'ar'
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
mock_system.assert_called_once_with(
f'print /D:\\\localhost\\"{printer_name}" "{printer_full_path}"') # noqa
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_lp_arabic(c, monkeypatch):
last_ticket = None
printer_name = 'testing_printer'
printer_path = 'testing_path'
printer_full_path = os.path.join(os.getcwd(), f'{printer_path}.txt')
mock_uuid = MagicMock()
mock_uuid.uuid4 = MagicMock(return_value=printer_path)
mock_os = MagicMock()
mock_os.name = 'linux'
mock_system = MagicMock()
mock_pil = MagicMock()
mock_pil.truetype().getsize.return_value = (0, 0)
mock_pos = MagicMock()
mock_pos().output = b''
monkeypatch.setattr(app.views.core, 'os', mock_os)
monkeypatch.setattr(app.printer, 'name', 'linux')
monkeypatch.setattr(app.printer, 'uuid', mock_uuid)
monkeypatch.setattr(app.printer, 'system', mock_system)
monkeypatch.setattr(app.printer, 'ImageDraw', mock_pil)
monkeypatch.setattr(app.printer, 'Image', mock_pil)
monkeypatch.setattr(app.printer, 'ImageFont', mock_pil)
monkeypatch.setattr(app.printer, 'Dummy', mock_pos)
settings = Settings.get()
printer_settings = Printer.get()
touch_screen_settings = Touch_store.get()
settings.lp_printing = True
touch_screen_settings.n = False
printer_settings.name = printer_name
printer_settings.langu = 'ar'
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
name = 'TESTING PRINTED TICKET'
response = c.post(f'/serial/{task.id}', data={
'name': name
}, follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
assert response.status == '200 OK'
assert last_ticket.number != new_ticket.number
assert new_ticket.name == name
mock_system.assert_called_once_with(
f'lp -d "{printer_name}" -o raw "{printer_full_path}"')
@pytest.mark.usefixtures('c')
def test_new_printed_ticket_fail(c):
touch_screen_settings = Touch_store.query.first()
touch_screen_settings.n = False
db.session.commit()
task = choice(Task.query.all())
last_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
response = c.post(f'/serial/{task.id}', follow_redirects=True)
new_ticket = Serial.query.filter_by(task_id=task.id)\
.order_by(Serial.number.desc()).first()
with open(absolute_path('errors.log'), 'r') as errors_log:
errors_log_content = errors_log.read()
assert response.status == '200 OK'
assert new_ticket.id == last_ticket.id
assert 'escpos.exceptions.USBNotFoundError: USB device not found' in errors_log_content
@pytest.mark.usefixtures('c')
def test_reset_office(c):
ticket = Serial.query.order_by(Serial.number.desc()).first()
office = Office.get(ticket.office_id)
tickets = Serial.query.filter_by(office_id=office.id).all()
response = c.get(f'/serial_r/{office.id}', follow_redirects=True)
assert response.status == '200 OK'
assert Serial.query.filter_by(office_id=office.id).count() != len(tickets)
assert Serial.query.filter(Serial.office_id == office.id, Serial.number != 100)\
.count() == 0
@pytest.mark.usefixtures('c')
def test_reset_task(c):
task = Task.query.first()
office = choice(task.offices)
def getter():
tickets = Serial.query.filter_by(office_id=office.id,
task_id=task.id)\
.all()
return len(tickets) > 1 and tickets
tickets = do_until_truthy(fill_tickets, getter)
response = c.get(f'/serial_rt/{task.id}/{office.id}', follow_redirects=True)
assert response.status == '200 OK'
assert Serial.query.filter_by(task_id=task.id).count() != len(tickets)
assert Serial.query.filter(Serial.task_id == task.id, Serial.number != 100)\
.count() == 0
@pytest.mark.usefixtures('c')
def test_reset_all(c):
all_tickets = Serial.query.all()
response = c.get('/serial_ra', follow_redirects=True)
assert response.status == '200 OK'
assert Serial.query.count() != len(all_tickets)
assert Serial.query.count() == Task.query.count()
@pytest.mark.usefixtures('c')
@pytest.mark.parametrize('_', range(TEST_REPEATS))
def test_generate_new_tickets(_, c):
touch_screen_settings = Touch_store.query.first()
touch_screen_settings.n = True
db.session.commit()
tickets_before = Serial.query.order_by(Serial.number.desc()).all()
last_ticket = Serial.query.order_by(Serial.number.desc()).first()
random_task = choice(Task.query.all())
name = choice(NAMES)
response = c.post(f'/serial/{random_task.id}', data={
'name': name
}, follow_redirects=True)
assert response.status == '200 OK'
assert Serial.query.count() > len(tickets_before)
assert Serial.query.order_by(Serial.number.desc())\
.first()\
.number == (last_ticket.number + 1)
@pytest.mark.parametrize('_', range(TEST_REPEATS))
@pytest.mark.usefixtures('c')
def test_pull_tickets_from_all(_, c):
ticket_to_be_pulled = do_until_truthy(
fill_tickets,
lambda: Serial.query.order_by(Serial.number)
.filter(Serial.number != 100, Serial.p != True)
.first())
    response = c.get('/pull', follow_redirects=True)
assert response.status == '200 OK'
assert ticket_to_be_pulled is not None
assert ticket_to_be_pulled.p is False
assert Serial.get(ticket_to_be_pulled.id).number == ticket_to_be_pulled.number
assert Serial.get(ticket_to_be_pulled.id).p is True
@pytest.mark.parametrize('_', range(TEST_REPEATS))
@pytest.mark.usefixtures('c')
def test_pull_random_ticket(_, c):
ticket = choice(Serial.query.filter_by(n=False)
.limit(10)
.all())
office = choice(ticket.task.offices)
c.get(f'/pull_unordered/{ticket.id}/testing/{office.id}')
assert Serial.query.filter_by(id=ticket.id).first().p is True
@pytest.mark.parametrize('_', range(TEST_REPEATS))
@pytest.mark.usefixtures('c')
def test_pull_tickets_from_common_task(_, c):
settings = Settings.get()
settings.strict_pulling = False
db.session.commit()
task = Task.get_first_common()
office = choice(task.offices)
ticket_to_be_pulled = do_until_truthy(
fill_tickets,
lambda: Serial.query.order_by(Serial.number)
.filter(Serial.number != 100,
Serial.p != True,
Serial.task_id == task.id)
.first())
response = c.get(f'/pull/{task.id}/{office.id}', follow_redirects=True)
pulled_ticket = Serial.get(ticket_to_be_pulled.id)
assert response.status == '200 OK'
assert ticket_to_be_pulled is not None
assert ticket_to_be_pulled.p is False
assert pulled_ticket is not None
assert pulled_ticket.task_id == task.id
assert pulled_ticket.office_id == office.id
@pytest.mark.parametrize('_', range(TEST_REPEATS))
@pytest.mark.usefixtures('c')
def test_pull_common_task_strict_pulling(_, c):
def getter():
tickets = Serial.query.order_by(Serial.number)\
.filter(Serial.number != 100, Serial.p != True)\
.all()
for ticket in tickets:
task = Task.get(ticket.task_id)
office = Office.get(ticket.office_id)
if task.common:
return ticket, office, task
ticket_to_be_pulled, office, task = do_until_truthy(fill_tickets, getter)
response = c.get(f'/pull/{task.id}/{office.id}', follow_redirects=True)
pulled_ticket = Serial.query.filter_by(number=ticket_to_be_pulled.number,
office_id=office.id,
task_id=task.id,
p=True)\
.order_by(Serial.number)\
.first()
assert response.status == '200 OK'
assert pulled_ticket is not None
assert pulled_ticket.task_id == task.id
assert pulled_ticket.office_id == office.id
@pytest.mark.usefixtures('c')
def test_pull_ticket_on_hold(c):
ticket_to_be_pulled = None
ticket_to_be_pulled = Serial.query.order_by(Serial.number)\
.filter(Serial.number != 100, Serial.p != True)\
.first()
c.get(f'/on_hold/{ticket_to_be_pulled.id}/testing')
    response = c.get('/pull', follow_redirects=True)
assert response.status == '200 OK'
assert ticket_to_be_pulled is not None
assert ticket_to_be_pulled.p is False
assert Serial.query.filter_by(number=ticket_to_be_pulled.number,
office_id=ticket_to_be_pulled.office_id,
task_id=ticket_to_be_pulled.task_id,
p=True)\
.order_by(Serial.number)\
.first() is None
@pytest.mark.usefixtures('c')
def test_feed_stream_tickets_preferences_enabled(c):
c.get('/pull', follow_redirects=True) # NOTE: initial pull to fill stacks
display_settings = Display_store.query.first()
display_settings.prefix = True
display_settings.always_show_ticket_number = True
display_settings.tmp = 3
db.session.commit()
tickets = Serial.get_waiting_list_tickets(limit=8)
processed_tickets = Serial.get_processed_tickets(limit=8, offset=1)
current_ticket = Serial.get_last_pulled_ticket()
response = c.get('/feed', follow_redirects=True)
assert response.status == '200 OK'
assert response.json.get('con') == current_ticket.office.display_text
assert response.json.get('cott') == current_ticket.task.name
assert response.json.get('cot') == current_ticket.display_text
for i, ticket in enumerate(tickets):
assert ticket.name in response.json.get(f'w{i + 1}')
assert f'{ticket.office.prefix}{ticket.number}' in response.json.get(f'w{i + 1}')
for i, ticket in enumerate(processed_tickets):
assert ticket.name in response.json.get(f'p{i + 1}')
assert f'{ticket.office.prefix}{ticket.number}' in response.json.get(f'p{i + 1}')
@pytest.mark.usefixtures('c')
def test_feed_office_with_preferences_enabled(c):
c.get('/pull', follow_redirects=True) # NOTE: initial pull to fill stacks
display_settings = Display_store.query.first()
display_settings.prefix = True
display_settings.always_show_ticket_number = True
db.session.commit()
current_ticket = Serial.get_last_pulled_ticket()
tickets = Serial.get_waiting_list_tickets(office_id=current_ticket.office.id,
limit=8)
response = c.get(f'/feed/{current_ticket.office.id}', follow_redirects=True)
assert response.status == '200 OK'
assert response.json.get('con') == current_ticket.office.display_text
assert response.json.get('cott') == current_ticket.task.name
assert response.json.get('cot') == current_ticket.display_text
for i, ticket in enumerate(tickets):
assert ticket.name in response.json.get(f'w{i + 1}')
assert f'{ticket.office.prefix}{ticket.number}' in response.json.get(f'w{i + 1}')
@pytest.mark.usefixtures('c')
def test_feed_stream_tickets_preferences_disabled(c):
c.get('/pull', follow_redirects=True) # NOTE: initial pull to fill stacks
display_settings = Display_store.query.first()
display_settings.prefix = False
display_settings.always_show_ticket_number = False
db.session.commit()
tickets = Serial.get_waiting_list_tickets(limit=8)
current_ticket = Serial.get_last_pulled_ticket()
response = c.get('/feed', follow_redirects=True)
assert response.status == '200 OK'
assert response.json.get('con') == current_ticket.office.display_text
assert response.json.get('cott') == current_ticket.task.name
assert response.json.get('cot') == current_ticket.display_text
for i, ticket in enumerate(tickets):
assert ticket.name in response.json.get(f'w{i + 1}')
assert f'{ticket.office.prefix}{ticket.number}' not in response.json.get(f'w{i + 1}')
@pytest.mark.usefixtures('c')
def test_display_screen(c):
display_settings = Display_store.query.first()
response = c.get('/display', follow_redirects=True)
page_content = response.data.decode('utf-8')
assert display_settings.title in page_content
@pytest.mark.usefixtures('c')
def test_touch_screen(c):
touch_screen_settings = Touch_store.query.first()
tasks = Task.query.all()
response = c.get('/touch/0', follow_redirects=True)
page_content = response.data.decode('utf-8')
assert touch_screen_settings.title in page_content
for task in tasks:
assert task.name in page_content
@pytest.mark.usefixtures('c')
def test_touch_screen_office(c):
office = choice(Office.query.all())
touch_screen_settings = Touch_store.query.first()
tasks = Task.query.filter(Task.offices.contains(office))
response = c.get(f'/touch/0/{office.id}', follow_redirects=True)
page_content = response.data.decode('utf-8')
assert touch_screen_settings.title in page_content
for task in tasks:
assert task.name in page_content
@pytest.mark.usefixtures('c')
def test_toggle_setting(c):
setting = 'visual_effects'
setting_value = getattr(Settings.get(), setting)
c.get(f'/settings/{setting}/testing')
assert getattr(Settings.get(), setting) == (not setting_value)
@pytest.mark.usefixtures('c')
def test_repeat_announcement(c):
c.get('/set_repeat_announcement/0')
assert Display_store.get().r_announcement is False
assert c.get('/repeat_announcement').json.get('status') is False
c.get('/set_repeat_announcement/1')
assert Display_store.get().r_announcement is True
assert c.get('/repeat_announcement').json.get('status') is True
| mrf345/FQM | tests/views/core.py | Python | mpl-2.0 | 27,596 |
#!/usr/bin/env python
# Copyright (C) 2008, Vincent Povirk for CodeWeavers
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
try:
from sugar.activity import bundlebuilder
bundlebuilder.start()
except ImportError:
import os
info = {}
f = open(os.path.join('activity', 'activity.info'), 'U')
for line in f.readlines():
if '=' in line:
key, value = line.rstrip('\n').split('=')
info[key.strip()] = value.strip()
f.close()
name = info['name']
version = int(info['activity_version'])
archive_name = '%s-%s.xo' % (name, version)
activity_dir = '%s.activity' % name
f = open('MANIFEST', 'w')
for path, dirs, files in os.walk('.'):
if path.startswith('./'): path = path[2:]
elif path == '.': path = ''
for filename in files:
if filename == 'MANIFEST':
continue
f.write('%s\n' % os.path.join(path, filename))
f.close()
# we can't use zipfile because it doesn't preserve permissions *grumble grumble*
os.chdir('..')
os.system('zip -r %s %s' % (archive_name, activity_dir))
os.system('mv %s ./%s' % (archive_name, activity_dir))
os.chdir(activity_dir)
| cristian99garcia/Wine.activity | setup.py | Python | mpl-2.0 | 1,904 |
# Copyright 2021 Tecnativa - Carlos Roca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class Company(models.Model):
_inherit = "res.company"
barcode_default_format = fields.Selection(
[("gs1_128", "Display GS1_128 format for barcodes")],
string="Method to choose the barcode formating",
)
barcode_default_report = fields.Many2one(
comodel_name="ir.actions.report",
string="Default template for barcode labels",
domain=[("is_barcode_label", "=", True)],
)
| OCA/stock-logistics-barcode | stock_picking_product_barcode_report/models/res_company.py | Python | agpl-3.0 | 574 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2014 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm, fields
import re
class FinancingSource(orm.Model):
_name = 'financing.source'
_description = 'Financing Source'
def _calc_grant(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for data in self.browse(cr, uid, ids, context=context):
res[data.id] = (data.grant_without_overheads + data.overheads +
data.transfered)
return res
def _calc_transfered(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for data in self.browse(cr, uid, ids, context=context):
total = 0
for line in data.transfer_fund_origin_ids:
total = total - line.amount
for line in data.transfer_fund_target_ids:
total = total + line.amount
res[data.id] = total
return res
def _calc_total_allocated(self, cr, uid, ids, field_names, arg=None,
context=None):
res = {}
analytic_line_obj = self.pool['account.analytic.line']
for data in self.browse(cr, uid, ids, context=context):
cond = [('financing_source_id', '=', data.id)]
account_analytic_line_parent_ids = analytic_line_obj.search(
cr, uid, cond, context=context)
cond = [('account_analytic_line_financing_source_id', 'in',
account_analytic_line_parent_ids),
('type', 'in', ['initial_financial_source',
'modif_financial_source'])]
analytic_line_ids = analytic_line_obj.search(cr, uid, cond,
context=context)
assigned = 0
for line in analytic_line_obj.browse(cr, uid, analytic_line_ids,
context):
assigned = assigned + line.assigned
res[data.id] = assigned
return res
def _calc_total_allocated_percent(self, cr, uid, ids, field_names,
arg=None, context=None):
res = {}
for data in self.browse(cr, uid, ids, context=context):
res[data.id] = 0
if data.grant != 0:
res[data.id] = data.total_allocated / data.grant * 100
return res
def _calc_pending_allocation(self, cr, uid, ids, field_names, arg=None,
context=None):
res = {}
for data in self.browse(cr, uid, ids, context=context):
res[data.id] = data.grant - data.total_allocated
return res
def _check_pending_allocation(self, cr, uid, ids, context=None):
for data in self.browse(cr, uid, ids, context=context):
if data.pending_allocation < 0:
return False
return True
def _calc_pending_allocation_percent(self, cr, uid, ids, field_names,
arg=None, context=None):
res = {}
for data in self.browse(cr, uid, ids, context=context):
res[data.id] = 0
if data.grant != 0:
res[data.id] = data.pending_allocation / data.grant * 100
return res
_columns = {
        # BASE FIELDS (TASK 2.1.A)
        # Name
'name': fields.char('Name', size=128, required=True),
        # Funding organization
'res_partner_id': fields.many2one('res.partner', 'Financing Organism',
required=True,
domain="[('funder', '=', '1'), "
"('customer', '=', '1')]"),
        # Legal support
'legal_support_id': fields.many2one('legal.support', 'Legal Support'),
        # Call code
'code_call_id': fields.many2one('code.call', 'Code Call'),
        # Funder record code
'funder_record_code': fields.char('Funder Record Code', size=64),
        # Financing type
'financing_type_id': fields.many2one('financing.type',
'Financing Type'),
        # Fund availability scheme
'availability_fund': fields.selection([('granted', 'Granted'),
('accepted', 'Accepted'),
('charged', 'Charged')],
string="Availability Fund",
required=True),
        # Grant date
'grant_date': fields.date('Grant Date'),
        # Projects
'project_ids':
fields.one2many('project.financing', 'financing_source_id',
'Projects', readonly=True),
        # Eligibility date from
'eligibility_date_from': fields.date('Eligibility Date From'),
        # Eligibility date to
'eligibility_date_to': fields.date('Eligibility Date To'),
        # Justification dates
'justification_date_ids':
fields.one2many('justification.date', 'financing_source_id',
'Justification Dates'),
        # Granted amount = grant_without_overheads + overheads + transfered
'grant': fields.function(_calc_grant, method=True, string='Grant',
type="float", store=False),
        # Granted without overheads
'grant_without_overheads': fields.integer('Grant Without Overheads'),
# Overheads
'overheads': fields.integer('Overheads'),
        # Transferred = sum of transfers between financing sources
'transfered':
fields.function(_calc_transfered, method=True, string='Transfered',
type="float", store=False),
        # Total allocated = sum of allocated amounts from analytic lines
'total_allocated':
fields.function(_calc_total_allocated, method=True,
string='Total Allocated', type="float",
store=False),
        # Total allocated % = total_allocated / grant * 100
'total_allocated_percent':
fields.function(_calc_total_allocated_percent, method=True,
string='%', type="float",
store=False),
        # Pending allocation = grant - total_allocated
'pending_allocation':
fields.function(_calc_pending_allocation, method=True,
string='Pending Allocation', type="float",
store=False),
        # Pending allocation % = pending_allocation / grant * 100
'pending_allocation_percent':
fields.function(_calc_pending_allocation_percent, method=True,
string='%', type="float",
store=False),
        # Right recognitions
'right_recognition_ids':
fields.one2many('right.recognition', 'financing_source_id',
'Right Recognitions'),
        # Income financier fund
'financier_fund_income_id':
fields.many2one('financier.fund', 'Financier Fund Income'),
        # Expense financier fund
'financier_fund_expense_id':
fields.many2one('financier.fund', 'Financier Fund Expense'),
        # History of extensions for a given financing source
'historical_extension_ids':
fields.one2many('historical.extension', 'historical_extension_id',
'Historical_Extension', readonly=False),
        # Observations field
'observations': fields.text('observations'),
        # Transfers between sources - origin
'transfer_fund_origin_ids':
fields.one2many('transfer.fund', 'financing_source_origin_id',
'Transfer Funds Origin', readonly=True),
        # Transfers between sources - target
'transfer_fund_target_ids':
fields.one2many('transfer.fund', 'financing_source_target_id',
'Transfer Funds Target', readonly=True),
}
_constraints = [
(_check_pending_allocation,
'Field Pending Allocation must be positive', ['pending_allocation']),
]
    # OnChange triggered when the funding organization changes
def onchange_res_partner_id(self, cr, uid, ids, name, res_partner_id):
partner_obj = self.pool['res.partner']
data = {}
if res_partner_id:
ref = ""
res_partner = partner_obj.browse(cr, uid, res_partner_id)
ref = res_partner.ref
if name:
                match = re.search(r'\[([\w]+)\]\s*([\w\s\W]*)', name)
if match:
name = match.group(2).strip()
if ref:
name = '[' + ref + '] ' + name
else:
if ref:
name = '[' + ref + ']'
data = {'name': name
}
return {'value': data}
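    # Illustrative example (hypothetical values): for a partner whose ref is
    # 'FIN001' and a current name of '[OLD] Research grant', the regex drops the
    # old '[...]' prefix and the method returns
    # {'value': {'name': '[FIN001] Research grant'}}.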
| avanzosc/UPV | onglibre_financial_source/models/financing_source.py | Python | agpl-3.0 | 10,092 |
import json
from sopn_parsing.helpers.text_helpers import NoTextInDocumentError, clean_text
from sopn_parsing.models import ParsedSOPN
def extract_ballot_table(ballot, parse_flavor="lattice"):
"""
    Given a Ballot, update or create a ParsedSOPN model for its SOPN document with the
contents of the table as a JSON string.
:type ballot: candidates.models.Ballot
"""
import camelot # import here to avoid import error running tests without pdf deps installed
document = ballot.sopn
if not document.relevant_pages:
raise ValueError(
"Pages for table not known for document, extract page numbers first"
)
try:
tables = camelot.read_pdf(
document.uploaded_file.path,
pages=document.relevant_pages,
flavor=parse_flavor,
)
except (NotImplementedError, AttributeError):
# * NotImplementedError is thrown if the PDF is an image or generally
# unreadable.
# * AttributeError is thrown on some PDFs saying they need a password.
# Assume this is a bug in camelot, and ignore these PDFs
raise NoTextInDocumentError()
# Tables can span pages, camelot assumes they're different tables, so we
# need to join them back together
table_list = []
for table in tables:
table_list.append(table)
table_list.sort(key=lambda t: (t.page, t.order))
if not table_list:
return
table_data = table_list.pop(0).df
for table in table_list:
# It's possible to have the "situation of poll" document on the SOPN
# Ignore any table that contains "polling station" (SOPNs tables don't)
first_row = table.df.iloc[0].to_string()
if "polling station" in clean_text(first_row):
break
# Append the continuation table to the first one in the document.
# ignore_index is needed so the e.g table 2 row 1 doesn't replace
# table 1 row 1
table_data = table_data.append(table.df, ignore_index=True)
if not table_data.empty:
parsed, _ = ParsedSOPN.objects.update_or_create(
sopn=document,
defaults={"raw_data": json.dumps(table_data.to_dict())},
)
return parsed
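# Minimal usage sketch (assumes camelot's PDF dependencies are installed; the
# ballot lookup below is illustrative, only extract_ballot_table comes from this
# module):
#
#     ballot = Ballot.objects.get(ballot_paper_id="local.example.2021-05-06")
#     parsed = extract_ballot_table(ballot, parse_flavor="stream")
#     if parsed:
#         print(parsed.raw_data)  # JSON-encoded dict of the concatenated table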
| DemocracyClub/yournextrepresentative | ynr/apps/sopn_parsing/helpers/extract_tables.py | Python | agpl-3.0 | 2,256 |
# -*- coding: utf-8 -*-
#
# This file is part of agora-tools.
# Copyright (C) 2014-2016 Agora Voting SL <[email protected]>
# agora-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora-tools. If not, see <http://www.gnu.org/licenses/>.
import json
from json import *
def serialize(data):
return json.dumps(data,
indent=4, ensure_ascii=False, sort_keys=True, separators=(',', ': '))
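# Illustrative example (not part of the original module):
# serialize({'b': 1, 'a': [1, 2]}) returns a key-sorted, 4-space indented string,
# leaving non-ASCII characters unescaped because ensure_ascii=False:
# {
#     "a": [
#         1,
#         2
#     ],
#     "b": 1
# }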
| agoravoting/agora-tools | utils/json_serialize.py | Python | agpl-3.0 | 900 |
#!/usr/bin/env python
"""
Script run by upload.py when a file is received.
Given an uploaded file and its comment file,
1. Writes a summary file and archives it with the upload and a random noise file
2. Encrypts the archive
3. Uploads the encrypted archive to S3
4. Shreds everything
"""
import os
import sys
import subprocess
import hashlib
from datetime import datetime
import zipfile
import random
import gnupg
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import logwrapper as LOG
# Settings
# To store deployment settings, save them in a local file settings.py
# settings.py is in .gitignore, so you can safely use git
PUBLIC_KEY_ID = '' # public key to encrypt files, must be in gpg keyring
AWS_ACCESS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = 'upload-dev'
TEMPORARY_DIR = "/tmp"
MAX_NOISE_BYTES = 5 * (1024**2)
try:
from settings import *
except ImportError:
pass
# Make sure we have write access to TEMPORARY_DIR
# This is bad - drops files. At least not silently.
try:
testfn = os.path.join(TEMPORARY_DIR, "tempdirtest")
f = open(testfn, "w")
f.write("This is a test")
f.close()
os.remove(testfn)
except Exception, e:
LOG.critical(
"Failed performing file operations in TEMPORARY_DIR %s: %s"
% (TEMPORARY_DIR, e)
)
sys.exit(1)
def sha256(path):
"""
Returns the SHA256 sum of the file given by path
"""
sha256 = hashlib.sha256()
block_size = 8192
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(block_size), ''):
sha256.update(chunk)
return sha256.hexdigest()
def write_summary_file(upload, comment):
"""
Upload and comment are paths to the uploaded file and the comment file
Returns the path to the summary file
"""
# Extract filename from upload file path
filename = upload.split("/")[-1]
# Get comment
with open(comment, 'r') as f:
comment = f.read()
summary_filename = "%s-summary" % filename
sf_path = os.path.join(TEMPORARY_DIR, summary_filename)
sf = open(sf_path, 'w')
sf.write("Filename: %s\n" % filename)
sf.write(" SHA256: %s\n" % sha256(upload))
sf.write(" Comment: %s\n" % comment)
sf.close()
return sf_path
def write_noise_file():
"""
Writes a NOISE file with a random amount of random bytes
to obfuscate file size correlation
"""
noise_path = os.path.join(TEMPORARY_DIR, "NOISE")
num_bytes = random.randint(1, MAX_NOISE_BYTES)
with open(noise_path, 'w') as f:
for byte in xrange(num_bytes):
f.write('%c' % random.randint(0, 255))
return noise_path
def archive(*paths):
"""
*paths is an arbitrary number of absolute paths to files
Returns the path to the archive file
"""
# Name based on the SHA256 sum of the first file
archive_name = sha256(paths[0])[:16] + ".zip"
archive_path = os.path.join(TEMPORARY_DIR, archive_name)
zf = zipfile.ZipFile(archive_path, mode='w')
try:
for p in paths:
if os.path.isfile(p):
zf.write(p, arcname=p.split("/")[-1])
else:
LOG.warning(
"Tried to archive %s, which is not a file. Skipping."
% p )
except Exception, err:
LOG.error("Error from archive(): %s", err)
finally:
zf.close()
return archive_path
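# Illustrative example (hypothetical paths): archive('/tmp/upload.bin',
# '/tmp/upload.bin-summary', '/tmp/NOISE') zips the three files and returns a
# path such as '/tmp/3a7bd3e2360a3d29.zip', named after the first 16 hex digits
# of the first file's SHA256 sum.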
def encrypt(source_file,
destination_dir=TEMPORARY_DIR, key=PUBLIC_KEY_ID):
'''
GPG-encrypts source_file with key, saving encrypted file to destination_dir
source_file -- absolute path of file to encrypt
destination_dir -- absolute path of directory to save encrypted file in
key -- keyid of public key to use; must be in gpg keyring
Returns path to the encrypted file
'''
# Init GPG
gpg = gnupg.GPG() # Defaults to current user's $HOME/.gnupg
public_keys = gpg.list_keys()
assert key in [k['keyid'] for k in public_keys], \
"Could not find PUBLIC_KEY_ID in keyring"
# Build encrypted filename and path
e_filename = source_file.split("/")[-1] + ".gpg"
ef_path = os.path.join(destination_dir, e_filename)
# Might be easier just to do this with subprocess
# p = subprocess.Popen(
# ["gpg", "--output", ef_path, "--recipient", key, source_file],
# shell=False
# )
# if p.wait() == 0: ...
# or use subprocess.call, .check_call, .check_output, etc
try:
fp = open(source_file, 'rb')
encrypted_data = gpg.encrypt_file(
fp, # file object to encrypt
key, # public key of recipient
output=ef_path # path to encrypted file
)
fp.close()
except IOError as e:
LOG.error(e)
# Hack - unfortunately, when GPG fails to encrypt a file, it prints an
# error message to the console but does not provide a specific error
# that python-gnupg can use. So we need to double check.
assert os.path.exists(ef_path), \
"GPG encryption failed -- check the public key."
LOG.info("Encrypted %s -> %s" % (source_file, ef_path))
return ef_path
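# Minimal usage sketch (hypothetical paths; PUBLIC_KEY_ID must reference a key
# already imported into the local GPG keyring):
#
#     encrypted_path = encrypt('/tmp/3a7bd3e2360a3d29.zip')
#     upload_to_s3(encrypted_path)
#     shred(encrypted_path)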
def upload_to_s3(local_file,
bucket_name=AWS_BUCKET, key_name=None, acl='private'):
'''
Uploads local_file to bucket on Amazon S3
key_name is the "filename" on Amazon S3, defaults to the local file's name
'''
# Connect to Amazon S3
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
bucket = conn.create_bucket(bucket_name)
k = Key(bucket)
# Set key, defaulting to local file's name
if key_name:
k.key = key_name
else:
k.key = local_file.split("/")[-1]
# encrypt_key=True for AES-256 encryption while at rest on Amazon's servers
k.set_contents_from_filename(local_file, encrypt_key=True)
k.set_acl(acl)
LOG.info("Uploaded %s to S3 bucket %s" % (local_file, bucket_name))
def shred(f):
'''
Securely erases f with shred
'''
process = subprocess.Popen(['shred', '-fuz', f], shell=False)
if process.wait() == 0: # wait for shred to complete, check return code
LOG.info("Shredded %s" % f)
else: # some kind of error occurred; log
LOG.error("Shredding %s failed: shred returned %s"
% (f, process.returncode))
def main():
'''
Takes command line args as absolute paths to files to handle
'''
if len(sys.argv) < 2: # program name is first arg
sys.exit("Must provide at least one file to process")
filenames = sys.argv[1:]
try:
# Write summary file
sf_path = write_summary_file(filenames[0], filenames[1])
# Write noise file
nf_path = write_noise_file()
# Archive the files
archive_path = archive(filenames[0], sf_path, nf_path)
# Encrypt the archive
ea_path = encrypt(archive_path)
# Upload to S3
upload_to_s3(ea_path)
# Shred everything
for fn in filenames:
shred(fn)
shred(sf_path)
shred(nf_path)
shred(archive_path)
shred(ea_path)
except Exception, err:
LOG.error(err)
if __name__ == "__main__": main()
| honestappalachia/haps-hidserv | htdocs/cgi-bin/dhandler.py | Python | agpl-3.0 | 7,179 |
from __future__ import annotations
import logging
import warnings
import ckan.model as model
import ckan.plugins as plugins
import ckan.plugins.toolkit as tk
from ckan.model.domain_object import DomainObjectOperation
import ckanext.syndicate.cli as cli
import ckanext.syndicate.utils as utils
from .interfaces import ISyndicate
from .types import Topic
log = logging.getLogger(__name__)
def get_syndicate_flag():
return tk.config.get("ckan.syndicate.flag", "syndicate")
class SyndicatePlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IDomainObjectModification, inherit=True)
plugins.implements(plugins.IClick)
plugins.implements(plugins.IConfigurable)
plugins.implements(ISyndicate, inherit=True)
# IConfigurable
def configure(self, config):
if tk.asbool(config.get("debug")):
warnings.filterwarnings(
"default", category=utils.SyndicationDeprecationWarning
)
# IClick
def get_commands(self):
return cli.get_commands()
# Based on ckanext-webhooks plugin
    # IDomainObjectModification & IResourceURLChange
def notify(self, entity, operation=None):
if not operation:
# This happens on IResourceURLChange
return
if not isinstance(entity, model.Package):
return
_syndicate_dataset(entity, operation)
def _get_topic(operation: str) -> Topic:
if operation == DomainObjectOperation.new:
return Topic.create
if operation == DomainObjectOperation.changed:
return Topic.update
return Topic.unknown
def _syndicate_dataset(package, operation):
topic = _get_topic(operation)
if topic is Topic.unknown:
log.debug(
"Notification topic for operation [%s] is not defined",
operation,
)
return
implementations = plugins.PluginImplementations(ISyndicate)
skipper: ISyndicate = next(iter(implementations))
for profile in utils.get_syndicate_profiles():
if skipper.skip_syndication(package, profile):
log.debug(
"Plugin %s decided to skip syndication of %s for profile %s",
skipper.name,
package.id,
profile.id,
)
continue
log.debug("Syndicate <{}> to {}".format(package.id, profile.ckan_url))
utils.syndicate_dataset(package.id, topic, profile)
| aptivate/ckanext-syndicate | ckanext/syndicate/plugin.py | Python | agpl-3.0 | 2,443 |
# -*- coding: utf-8 -*-
# © 2015-2017 Elico corp (https://www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import timesheet_reminder
| Elico-Corp/odoo-addons | timesheet_automatic_reminder/__init__.py | Python | agpl-3.0 | 182 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.db import models
from application.fields import IntegerListField
from application.models import Applicant, Major
class ReportCategory(models.Model):
result_set_id = models.IntegerField(null=True)
name = models.CharField(max_length=5)
order = models.IntegerField()
_categories = None
@staticmethod
def is_cons(c):
u"""
>>> ReportCategory.is_cons(u'ก')
True
>>> ReportCategory.is_cons(u'ฮ')
True
>>> ReportCategory.is_cons(u'เ')
False
>>> ReportCategory.is_cons(u'แ')
False
"""
return (c >= u'ก') and (c <= u'ฮ')
@staticmethod
def get_category_name_from_first_name(first_name):
u"""
>>> ReportCategory.get_category_name_from_first_name('John')
'J'
>>> ReportCategory.get_category_name_from_first_name('john')
'J'
>>> print ReportCategory.get_category_name_from_first_name(u'สมชาย')
ส
>>> print ReportCategory.get_category_name_from_first_name(u'เกียรติ')
ก
>>> print ReportCategory.get_category_name_from_first_name(u'ใจดี')
จ
"""
if (((first_name[0] >= u'a') and (first_name[0] <= u'z')) or
((first_name[0] >= u'A') and (first_name[0] <= u'Z'))): # roman?
return first_name[0].upper()
for c in first_name:
if ReportCategory.is_cons(c):
return c
return ''
@staticmethod
def get_category_by_name(result_set_id, name):
if ReportCategory._categories==None:
cat = {}
for category in ReportCategory.objects.all():
cat[(category.result_set_id, category.name)] = category
ReportCategory._categories = cat
return ReportCategory._categories[(result_set_id, name)]
@staticmethod
def get_category_by_app_first_name(result_set_id, first_name):
return ReportCategory.get_category_by_name(
result_set_id,
ReportCategory.get_category_name_from_first_name(
first_name))
class Meta:
ordering = ['order']
class QualifiedApplicant(models.Model):
ticket_number = models.CharField(max_length=15)
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=300)
order = models.IntegerField()
category = models.ForeignKey(ReportCategory)
applicant = models.ForeignKey(Applicant)
class Meta:
ordering = ['order']
def __unicode__(self):
return u'%s %s %s' % (
self.ticket_number,
self.first_name,
self.last_name)
class AdmissionRound(models.Model):
number = models.IntegerField(unique=True)
start_date = models.DateField()
last_date = models.DateField()
is_available = models.BooleanField(default=False)
_recent_round = None
_cache_timestamp = None
class Meta:
ordering = ['-number']
@staticmethod
def get_recent():
from datetime import datetime, timedelta
now = datetime.now()
if ((not AdmissionRound._cache_timestamp) or
(AdmissionRound._cache_timestamp + timedelta(minutes=1) < now)):
rounds = AdmissionRound.objects.filter(is_available=True)
if len(rounds)!=0:
AdmissionRound._recent_round = rounds[0]
else:
AdmissionRound._recent_round = None
AdmissionRound._cache_timestamp = now
return AdmissionRound._recent_round
@staticmethod
def time_to_recent_round_deadline(now=None):
adm_round = AdmissionRound.get_recent()
if adm_round:
if now==None:
now = datetime.now()
last = adm_round.last_date
deadline = datetime(last.year, last.month, last.day)
return deadline - now + timedelta(1)
else:
return timedelta.max
def __unicode__(self):
return "Round %d" % self.number
class AdmissionResult(models.Model):
applicant = models.ForeignKey(Applicant,
related_name='admission_results')
round_number = models.IntegerField(default=0)
is_admitted = models.BooleanField()
is_waitlist = models.BooleanField()
admitted_major = models.ForeignKey(Major, null=True)
additional_info = models.TextField(null=True)
class Meta:
ordering = ['round_number']
@staticmethod
def new_for_applicant(applicant):
res = AdmissionResult(applicant=applicant,
is_admitted=False,
is_waitlist=False)
return res
class ScoreStat:
SUPER_ZMAX = 13
def __init__(self, mean, sd, max_score):
self.mean = mean
self.sd = sd
self.max_score = max_score
def cal_score(self, x):
if x==-1:
return 0
z = (x - self.mean) / self.sd
return 0.5 + 0.5 * z / ScoreStat.SUPER_ZMAX
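# Worked example for cal_score (values chosen for illustration): with
# mean=130.78 and sd=58.32 (the first 'gat' entry below), a raw score of 189.10
# sits exactly one standard deviation above the mean, so the result is
# 0.5 + 0.5 * 1 / 13 ~= 0.5385; a raw score of -1 (test not taken) maps to 0.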
SCORE_STATS = [
#{ 'gat': ScoreStat(78.09, 44.32, 290),
# 'pat1': ScoreStat(88.33, 30.63, 300),
# 'pat3': ScoreStat(108.66, 26.17, 240) },
#{ 'gat': ScoreStat(93.10, 51.13, 287.5),
# 'pat1': ScoreStat(87.11, 31.14, 300),
# 'pat3': ScoreStat(97.86, 28.56, 260) },
#{ 'gat': ScoreStat(106.78, 55.59, 292.5),
# 'pat1': ScoreStat(63.56, 25.90, 270),
# 'pat3': ScoreStat(86.73, 24.64, 237) },
# mar53
{ 'gat': ScoreStat(130.78, 58.32, 295),
'pat1': ScoreStat(64.00, 30.88, 294),
'pat3': ScoreStat(103.20, 42.47, 276) },
# jul53
{ 'gat': ScoreStat(128.43, 61.32, 300),
'pat1': ScoreStat(56.26, 25.92, 300),
'pat3': ScoreStat(83.54, 35.78, 300) },
# oct53
{ 'gat': ScoreStat(139.38, 67.85, 300),
'pat1': ScoreStat(48.34, 23.45, 300),
'pat3': ScoreStat(121.25, 41.56, 300) },
# mar54
{ 'gat': ScoreStat(171.89, 48.04, 297.5),
'pat1': ScoreStat(64.22, 18.08, 274),
'pat3': ScoreStat(101.95, 40.68, 270) },
# dec54
{ 'gat': ScoreStat(130.59, 68.04, 300),
'pat1': ScoreStat(39.64, 20.07, 288),
'pat3': ScoreStat(83.45, 32.44, 267) },
]
EXAM_COUNT = len(SCORE_STATS)
class NIETSScores(models.Model):
applicant = models.OneToOneField(Applicant,
related_name='NIETS_scores')
is_request_successful = models.NullBooleanField()
requested_at = models.DateTimeField(null=True)
score_list = models.CharField(max_length=200)
@staticmethod
def extract_gatpat_scores(score_list):
scores = {'gat': [-1] * EXAM_COUNT,
'pat1': [-1] * EXAM_COUNT,
'pat3': [-1] * EXAM_COUNT}
i = 0
for e in range(EXAM_COUNT):
for exam in ['gat','pat1','pat3']:
scores[exam][e] = score_list[i]
i += 1
return scores
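    # Illustrative example (shown with EXAM_COUNT == 2 for brevity): the flat
    # list [g1, p1_1, p3_1, g2, p1_2, p3_2] is regrouped per exam as
    # {'gat': [g1, g2], 'pat1': [p1_1, p1_2], 'pat3': [p3_1, p3_2]}.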
def as_list(self):
if self.score_list!='':
return [float(s) for s in self.score_list.split(',')]
else:
return None
def as_list_by_exam_round(self):
if self.score_list=='':
return None
else:
l = self.as_list()
out = []
while len(l)!=0:
out.append(l[:3])
l = l[3:]
return out
def as_calculated_list_by_exam_round(self):
all_scores = self.as_list_by_exam_round()
if not all_scores:
return []
exams = ['gat','pat1','pat3']
scores = []
best_scores = dict([(ex,(0,None)) for ex in exams])
for e in range(EXAM_COUNT):
rscores = {}
i = 0
for exam_name in exams:
x = all_scores[e][i]
n = SCORE_STATS[e][exam_name].cal_score(x) * 10000
if x==-1:
x = None
rscores[exam_name] = {
'raw': x,
'normalized': n,
'selected': False
}
if n > best_scores[exam_name][0]:
best_scores[exam_name] = (n, rscores[exam_name])
i+=1
scores.append(rscores)
for ex in exams:
if best_scores[ex][1]:
best_scores[ex][1]['selected'] = True
return scores
def get_best_normalized_score(self, test_name):
all_scores = self.as_list()
scores = NIETSScores.extract_gatpat_scores(all_scores)
best_score = 0
raw_score = 0
for i in range(EXAM_COUNT):
x = scores[test_name][i]
score = SCORE_STATS[i][test_name].cal_score(x)
if score > best_score:
best_score = score
raw_score = x
return best_score, raw_score
def get_score(self):
gat, gs = self.get_best_normalized_score('gat')
pat1, p1s = self.get_best_normalized_score('pat1')
pat3, p3s = self.get_best_normalized_score('pat3')
score = (gat * 0.25 +
pat1 * 0.25 +
pat3 * 0.5)
return 10000.0 * score
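    # Worked example (hypothetical normalized scores): if the best normalized
    # GAT, PAT1 and PAT3 scores were 0.60, 0.50 and 0.70, the weighted sum is
    # 0.60*0.25 + 0.50*0.25 + 0.70*0.5 = 0.625 and get_score returns 6250.0.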
def get_best_test_scores(self):
gat, gs = self.get_best_normalized_score('gat')
pat1, p1s = self.get_best_normalized_score('pat1')
pat3, p3s = self.get_best_normalized_score('pat3')
return [gs, p1s, p3s]
class AdditionalResult(models.Model):
applicant = models.OneToOneField(Applicant,
related_name='additional_result')
name = models.CharField(max_length=200)
round_number = models.IntegerField()
is_waived = models.BooleanField(default=False)
waived_at = models.DateTimeField(blank=True,
null=True,
default=None)
class Meta:
ordering = ['round_number', 'name']
def __unicode__(self):
return self.name
class ClearingHouseResult(models.Model):
applicant = models.OneToOneField(Applicant,
related_name='clearing_house_result')
admitted_major = models.ForeignKey(Major, null=True)
is_additional_result = models.BooleanField(default=False)
password = models.CharField(max_length=20)
| jittat/adm2 | result/models.py | Python | agpl-3.0 | 10,463 |
#!/usr/bin/env python
# bpworker.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (bpworker.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
from __future__ import absolute_import
if __name__ == "__main__":
import os
import sys
sys.path.append(os.path.abspath(os.path.join('.', 'parallelp', 'pp')))
from parallelp.pp.ppworker import _WorkerProcess
wp = _WorkerProcess()
wp.run()
| vesellov/bitdust.devel | bpworker.py | Python | agpl-3.0 | 1,158 |
from nose import tools as nosetools
from nose import SkipTest
import ckan.plugins.toolkit as toolkit
import ckan.new_tests.factories as factories
import ckan.new_tests.helpers as helpers
class TestShowcaseShow(helpers.FunctionalTestBase):
def test_showcase_show_no_args(self):
'''
Calling showcase show with no args raises a ValidationError.
'''
nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
'ckanext_showcase_show')
def test_showcase_show_with_id(self):
'''
Calling showcase show with id arg returns showcase dict.
'''
my_showcase = factories.Dataset(type='showcase', name='my-showcase')
showcase_shown = helpers.call_action('ckanext_showcase_show', id=my_showcase['id'])
nosetools.assert_equal(my_showcase['name'], showcase_shown['name'])
def test_showcase_show_with_name(self):
'''
Calling showcase show with name arg returns showcase dict.
'''
my_showcase = factories.Dataset(type='showcase', name='my-showcase')
showcase_shown = helpers.call_action('ckanext_showcase_show', id=my_showcase['name'])
nosetools.assert_equal(my_showcase['id'], showcase_shown['id'])
def test_showcase_show_with_nonexisting_name(self):
'''
Calling showcase show with bad name arg returns ObjectNotFound.
'''
factories.Dataset(type='showcase', name='my-showcase')
nosetools.assert_raises(toolkit.ObjectNotFound, helpers.call_action,
'ckanext_showcase_show', id='my-bad-name')
def test_showcase_show_num_datasets_added(self):
'''
num_datasets property returned with showcase dict.
'''
my_showcase = factories.Dataset(type='showcase', name='my-showcase')
showcase_shown = helpers.call_action('ckanext_showcase_show', id=my_showcase['name'])
nosetools.assert_true('num_datasets' in showcase_shown)
nosetools.assert_equal(showcase_shown['num_datasets'], 0)
def test_showcase_show_num_datasets_correct_value(self):
'''
num_datasets property has correct value.
'''
sysadmin = factories.User(sysadmin=True)
my_showcase = factories.Dataset(type='showcase', name='my-showcase')
package_one = factories.Dataset()
package_two = factories.Dataset()
context = {'user': sysadmin['name']}
# create an association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_one['id'],
showcase_id=my_showcase['id'])
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_two['id'],
showcase_id=my_showcase['id'])
showcase_shown = helpers.call_action('ckanext_showcase_show', id=my_showcase['name'])
nosetools.assert_equal(showcase_shown['num_datasets'], 2)
def test_showcase_show_num_datasets_correct_only_count_active_datasets(self):
'''
        num_datasets property has the correct value when some previously
        associated datasets have since been deleted.
'''
sysadmin = factories.User(sysadmin=True)
my_showcase = factories.Dataset(type='showcase', name='my-showcase')
package_one = factories.Dataset()
package_two = factories.Dataset()
package_three = factories.Dataset()
context = {'user': sysadmin['name']}
# create the associations
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_one['id'],
showcase_id=my_showcase['id'])
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_two['id'],
showcase_id=my_showcase['id'])
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_three['id'],
showcase_id=my_showcase['id'])
# delete the first package
helpers.call_action('package_delete', context=context, id=package_one['id'])
showcase_shown = helpers.call_action('ckanext_showcase_show', id=my_showcase['name'])
# the num_datasets should only include active datasets
nosetools.assert_equal(showcase_shown['num_datasets'], 2)
class TestShowcaseList(helpers.FunctionalTestBase):
def test_showcase_list(self):
'''Showcase list action returns names of showcases in site.'''
showcase_one = factories.Dataset(type='showcase')
showcase_two = factories.Dataset(type='showcase')
showcase_three = factories.Dataset(type='showcase')
showcase_list = helpers.call_action('ckanext_showcase_list')
showcase_list_name_id = [(sc['name'], sc['id']) for sc in showcase_list]
nosetools.assert_equal(len(showcase_list), 3)
nosetools.assert_true(sorted(showcase_list_name_id) ==
sorted([(showcase['name'], showcase['id'])
for showcase in [showcase_one,
showcase_two,
showcase_three]]))
def test_showcase_list_no_datasets(self):
'''
Showcase list action doesn't return normal datasets (of type
'dataset').
'''
showcase_one = factories.Dataset(type='showcase')
dataset_one = factories.Dataset()
dataset_two = factories.Dataset()
showcase_list = helpers.call_action('ckanext_showcase_list')
showcase_list_name_id = [(sc['name'], sc['id']) for sc in showcase_list]
nosetools.assert_equal(len(showcase_list), 1)
nosetools.assert_true((showcase_one['name'], showcase_one['id']) in showcase_list_name_id)
nosetools.assert_true((dataset_one['name'], dataset_one['id']) not in showcase_list_name_id)
nosetools.assert_true((dataset_two['name'], dataset_two['id']) not in showcase_list_name_id)
class TestShowcasePackageList(helpers.FunctionalTestBase):
'''Tests for ckanext_showcase_package_list'''
def test_showcase_package_list_no_packages(self):
'''
Calling ckanext_showcase_package_list with a showcase that has no
packages returns an empty list.
'''
showcase_id = factories.Dataset(type='showcase')['id']
pkg_list = helpers.call_action('ckanext_showcase_package_list',
showcase_id=showcase_id)
nosetools.assert_equal(pkg_list, [])
def test_showcase_package_list_works_with_name(self):
'''
Calling ckanext_showcase_package_list with a showcase name doesn't
raise a ValidationError.
'''
showcase_name = factories.Dataset(type='showcase')['name']
pkg_list = helpers.call_action('ckanext_showcase_package_list',
showcase_id=showcase_name)
nosetools.assert_equal(pkg_list, [])
def test_showcase_package_list_wrong_showcase_id(self):
'''
Calling ckanext_showcase_package_list with a bad showcase id raises a
ValidationError.
'''
factories.Dataset(type='showcase')['id']
nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
'ckanext_showcase_package_list',
showcase_id='a-bad-id')
def test_showcase_package_list_showcase_has_package(self):
'''
Calling ckanext_showcase_package_list with a showcase that has a
package should return that package.
'''
sysadmin = factories.User(sysadmin=True)
package = factories.Dataset()
showcase_id = factories.Dataset(type='showcase')['id']
context = {'user': sysadmin['name']}
# create an association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase_id)
pkg_list = helpers.call_action('ckanext_showcase_package_list',
showcase_id=showcase_id)
# We've got an item in the pkg_list
nosetools.assert_equal(len(pkg_list), 1)
# The list item should have the correct name property
nosetools.assert_equal(pkg_list[0]['name'], package['name'])
def test_showcase_package_list_showcase_has_two_packages(self):
'''
Calling ckanext_showcase_package_list with a showcase that has two
packages should return the packages.
'''
sysadmin = factories.User(sysadmin=True)
package_one = factories.Dataset()
package_two = factories.Dataset()
showcase_id = factories.Dataset(type='showcase')['id']
context = {'user': sysadmin['name']}
# create first association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_one['id'],
showcase_id=showcase_id)
# create second association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_two['id'],
showcase_id=showcase_id)
pkg_list = helpers.call_action('ckanext_showcase_package_list',
showcase_id=showcase_id)
# We've got two items in the pkg_list
nosetools.assert_equal(len(pkg_list), 2)
def test_showcase_package_list_showcase_only_contains_active_datasets(self):
'''
Calling ckanext_showcase_package_list will only return active datasets
(not deleted ones).
'''
sysadmin = factories.User(sysadmin=True)
package_one = factories.Dataset()
package_two = factories.Dataset()
package_three = factories.Dataset()
showcase_id = factories.Dataset(type='showcase')['id']
context = {'user': sysadmin['name']}
# create first association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_one['id'],
showcase_id=showcase_id)
# create second association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_two['id'],
showcase_id=showcase_id)
# create third association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package_three['id'],
showcase_id=showcase_id)
# delete the first package
helpers.call_action('package_delete', context=context, id=package_one['id'])
pkg_list = helpers.call_action('ckanext_showcase_package_list',
showcase_id=showcase_id)
# We've got two items in the pkg_list
nosetools.assert_equal(len(pkg_list), 2)
pkg_list_ids = [pkg['id'] for pkg in pkg_list]
nosetools.assert_true(package_two['id'] in pkg_list_ids)
nosetools.assert_true(package_three['id'] in pkg_list_ids)
nosetools.assert_false(package_one['id'] in pkg_list_ids)
def test_showcase_package_list_package_isnot_a_showcase(self):
'''
Calling ckanext_showcase_package_list with a package id should raise a
ValidationError.
Since Showcases are Packages under the hood, make sure we treat them
differently.
'''
sysadmin = factories.User(sysadmin=True)
package = factories.Dataset()
showcase_id = factories.Dataset(type='showcase')['id']
context = {'user': sysadmin['name']}
# create an association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase_id)
nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
'ckanext_showcase_package_list',
showcase_id=package['id'])
class TestPackageShowcaseList(helpers.FunctionalTestBase):
'''Tests for ckanext_package_showcase_list'''
def test_package_showcase_list_no_showcases(self):
'''
Calling ckanext_package_showcase_list with a package that has no
showcases returns an empty list.
'''
package_id = factories.Dataset()['id']
showcase_list = helpers.call_action('ckanext_package_showcase_list',
package_id=package_id)
nosetools.assert_equal(showcase_list, [])
def test_package_showcase_list_works_with_name(self):
'''
Calling ckanext_package_showcase_list with a package name doesn't
raise a ValidationError.
'''
package_name = factories.Dataset()['name']
showcase_list = helpers.call_action('ckanext_package_showcase_list',
package_id=package_name)
nosetools.assert_equal(showcase_list, [])
    def test_package_showcase_list_wrong_package_id(self):
'''
Calling ckanext_package_showcase_list with a bad package id raises a
ValidationError.
'''
factories.Dataset()['id']
nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
'ckanext_package_showcase_list',
                                package_id='a-bad-id')
def test_package_showcase_list_showcase_has_package(self):
'''
Calling ckanext_package_showcase_list with a package that has a
showcase should return that showcase.
'''
sysadmin = factories.User(sysadmin=True)
package = factories.Dataset()
showcase = factories.Dataset(type='showcase')
context = {'user': sysadmin['name']}
# create an association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase['id'])
showcase_list = helpers.call_action('ckanext_package_showcase_list',
package_id=package['id'])
# We've got an item in the showcase_list
nosetools.assert_equal(len(showcase_list), 1)
# The list item should have the correct name property
nosetools.assert_equal(showcase_list[0]['name'], showcase['name'])
    def test_package_showcase_list_package_has_two_showcases(self):
'''
Calling ckanext_package_showcase_list with a package that has two
showcases should return the showcases.
'''
sysadmin = factories.User(sysadmin=True)
package = factories.Dataset()
showcase_one = factories.Dataset(type='showcase')
showcase_two = factories.Dataset(type='showcase')
context = {'user': sysadmin['name']}
# create first association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase_one['id'])
# create second association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase_two['id'])
showcase_list = helpers.call_action('ckanext_package_showcase_list',
package_id=package['id'])
# We've got two items in the showcase_list
nosetools.assert_equal(len(showcase_list), 2)
def test_package_showcase_list_package_isnot_a_showcase(self):
'''
Calling ckanext_package_showcase_list with a showcase id should raise a
ValidationError.
Since Showcases are Packages under the hood, make sure we treat them
differently.
'''
sysadmin = factories.User(sysadmin=True)
package = factories.Dataset()
showcase = factories.Dataset(type='showcase')
context = {'user': sysadmin['name']}
# create an association
helpers.call_action('ckanext_showcase_package_association_create',
context=context, package_id=package['id'],
showcase_id=showcase['id'])
nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
'ckanext_package_showcase_list',
package_id=showcase['id'])
class TestShowcaseAdminList(helpers.FunctionalTestBase):
'''Tests for ckanext_showcase_admin_list'''
def test_showcase_admin_list_no_showcase_admins(self):
'''
        Calling ckanext_showcase_admin_list on a site that has no showcase
admins returns an empty list.
'''
showcase_admin_list = helpers.call_action('ckanext_showcase_admin_list')
nosetools.assert_equal(showcase_admin_list, [])
def test_showcase_admin_list_users(self):
'''
Calling ckanext_showcase_admin_list will return users who are showcase
admins.
'''
user_one = factories.User()
user_two = factories.User()
user_three = factories.User()
helpers.call_action('ckanext_showcase_admin_add', context={},
username=user_one['name'])
helpers.call_action('ckanext_showcase_admin_add', context={},
username=user_two['name'])
helpers.call_action('ckanext_showcase_admin_add', context={},
username=user_three['name'])
showcase_admin_list = helpers.call_action('ckanext_showcase_admin_list', context={})
nosetools.assert_equal(len(showcase_admin_list), 3)
for user in [user_one, user_two, user_three]:
nosetools.assert_true({'name': user['name'], 'id': user['id']} in showcase_admin_list)
def test_showcase_admin_only_lists_admin_users(self):
'''
Calling ckanext_showcase_admin_list will only return users who are
showcase admins.
'''
user_one = factories.User()
user_two = factories.User()
user_three = factories.User()
helpers.call_action('ckanext_showcase_admin_add', context={},
username=user_one['name'])
helpers.call_action('ckanext_showcase_admin_add', context={},
username=user_two['name'])
showcase_admin_list = helpers.call_action('ckanext_showcase_admin_list', context={})
nosetools.assert_equal(len(showcase_admin_list), 2)
# user three isn't in list
nosetools.assert_true({'name': user_three['name'], 'id': user_three['id']} not in showcase_admin_list)
class TestPackageSearchBeforeSearch(helpers.FunctionalTestBase):
'''
Extension uses the `before_search` method to alter search parameters.
'''
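    # A plausible reading of the tests below: the plugin's before_search hook
    # appends something like fq += ' -dataset_type:showcase' whenever the
    # caller has not filtered on dataset_type itself, so showcases stay out of
    # ordinary searches but can still be requested explicitly. The exact fq
    # string is an assumption; only the observable behaviour is asserted here.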
def test_package_search_no_additional_filters(self):
'''
        A package_search with no additional filters should not include
showcases.
'''
factories.Dataset()
factories.Dataset()
factories.Dataset(type='showcase')
factories.Dataset(type='custom')
search_results = helpers.call_action('package_search', context={})['results']
types = [result['type'] for result in search_results]
nosetools.assert_equal(len(search_results), 3)
nosetools.assert_true('showcase' not in types)
nosetools.assert_true('custom' in types)
def test_package_search_filter_include_showcase(self):
'''
package_search filtered to include datasets of type showcase should
only include showcases.
'''
factories.Dataset()
factories.Dataset()
factories.Dataset(type='showcase')
factories.Dataset(type='custom')
search_results = helpers.call_action('package_search', context={},
fq='dataset_type:showcase')['results']
types = [result['type'] for result in search_results]
nosetools.assert_equal(len(search_results), 1)
nosetools.assert_true('showcase' in types)
nosetools.assert_true('custom' not in types)
nosetools.assert_true('dataset' not in types)
class TestUserShowBeforeSearch(helpers.FunctionalTestBase):
'''
Extension uses the `before_search` method to alter results of user_show
(via package_search).
'''
def test_user_show_no_additional_filters(self):
'''
        A package_search with no additional filters should not include
showcases.
'''
if not toolkit.check_ckan_version(min_version='2.4'):
raise SkipTest('Filtering out showcases requires CKAN 2.4+ (ckan/ckan/issues/2380)')
user = factories.User()
factories.Dataset(user=user)
factories.Dataset(user=user)
factories.Dataset(user=user, type='showcase')
factories.Dataset(user=user, type='custom')
search_results = helpers.call_action('user_show', context={},
include_datasets=True,
id=user['name'])['datasets']
types = [result['type'] for result in search_results]
nosetools.assert_equal(len(search_results), 3)
nosetools.assert_true('showcase' not in types)
nosetools.assert_true('custom' in types)
| deniszgonjanin/ckanext-showcase | ckanext/showcase/tests/action/test_get.py | Python | agpl-3.0 | 22,173 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
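# SyncingError carries a printf-style message plus its arguments, e.g.
#   raise SyncingError("account %s rejected video %s", account_id, video_id)
# (account_id/video_id are illustrative names, not identifiers from this
# codebase).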
class SyncingError(StandardError):
def __init__(self, msg, *args):
StandardError.__init__(self)
self.msg = msg % args
def __str__(self):
return "SyncingError: %s" % (self.msg,)
| ujdhesa/unisubs | apps/externalsites/exceptions.py | Python | agpl-3.0 | 970 |
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import logging
# Third Party
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin
)
from django.core.cache import cache
from django.core.urlresolvers import (
reverse,
reverse_lazy
)
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect
)
from django.shortcuts import (
get_object_or_404,
render
)
from django.utils.translation import (
ugettext as _,
ugettext_lazy
)
from django.views.generic import (
CreateView,
DeleteView,
ListView,
UpdateView
)
# wger
from wger.nutrition.forms import UnitChooserForm
from wger.nutrition.models import Ingredient
from wger.utils.cache import cache_mapper
from wger.utils.constants import PAGINATION_OBJECTS_PER_PAGE
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin
)
from wger.utils.language import (
load_ingredient_languages,
load_language
)
logger = logging.getLogger(__name__)
# ************************
# Ingredient functions
# ************************
class IngredientListView(ListView):
'''
Show an overview of all ingredients
'''
model = Ingredient
template_name = 'ingredient/overview.html'
context_object_name = 'ingredients_list'
paginate_by = PAGINATION_OBJECTS_PER_PAGE
def get_queryset(self):
'''
        Filter the ingredients the user will see by their language
        (the user may also want to see ingredients in English in addition to
        their native language; see load_ingredient_languages)
'''
languages = load_ingredient_languages(self.request)
return (Ingredient.objects.filter(language__in=languages)
.filter(status=Ingredient.STATUS_ACCEPTED)
.only('id', 'name'))
def get_context_data(self, **kwargs):
'''
Pass additional data to the template
'''
context = super(IngredientListView, self).get_context_data(**kwargs)
context['show_shariff'] = True
return context
def view(request, id, slug=None):
template_data = {}
ingredient = cache.get(cache_mapper.get_ingredient_key(int(id)))
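    # Cache-aside lookup: fall back to the database on a miss and prime the
    # cache so subsequent requests for the same ingredient skip the query.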
if not ingredient:
ingredient = get_object_or_404(Ingredient, pk=id)
cache.set(cache_mapper.get_ingredient_key(ingredient), ingredient)
template_data['ingredient'] = ingredient
template_data['form'] = UnitChooserForm(data={'ingredient_id': ingredient.id,
'amount': 100,
'unit': None})
template_data['show_shariff'] = True
return render(request, 'ingredient/view.html', template_data)
class IngredientDeleteView(WgerDeleteMixin,
LoginRequiredMixin,
PermissionRequiredMixin,
DeleteView):
'''
Generic view to delete an existing ingredient
'''
model = Ingredient
fields = ('name',
'energy',
'protein',
'carbohydrates',
'carbohydrates_sugar',
'fat',
'fat_saturated',
'fibres',
'sodium')
template_name = 'delete.html'
success_url = reverse_lazy('nutrition:ingredient:list')
messages = ugettext_lazy('Successfully deleted')
permission_required = 'nutrition.delete_ingredient'
# Send some additional data to the template
def get_context_data(self, **kwargs):
context = super(IngredientDeleteView, self).get_context_data(**kwargs)
context['title'] = _(u'Delete {0}?').format(self.object)
context['form_action'] = reverse('nutrition:ingredient:delete',
kwargs={'pk': self.object.id})
return context
class IngredientMixin(WgerFormMixin):
'''
Manually set the order of the fields
'''
fields = ['name',
'energy',
'protein',
'carbohydrates',
'carbohydrates_sugar',
'fat',
'fat_saturated',
'fibres',
'sodium',
'license',
'license_author']
class IngredientEditView(IngredientMixin, LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
'''
Generic view to update an existing ingredient
'''
model = Ingredient
form_action_urlname = 'nutrition:ingredient:edit'
permission_required = 'nutrition.change_ingredient'
def get_context_data(self, **kwargs):
'''
Send some additional data to the template
'''
context = super(IngredientEditView, self).get_context_data(**kwargs)
context['title'] = _(u'Edit {0}').format(self.object)
return context
class IngredientCreateView(IngredientMixin, CreateView):
'''
Generic view to add a new ingredient
'''
model = Ingredient
title = ugettext_lazy('Add a new ingredient')
form_action = reverse_lazy('nutrition:ingredient:add')
sidebar = 'ingredient/form.html'
def form_valid(self, form):
form.instance.language = load_language()
form.instance.set_author(self.request)
return super(IngredientCreateView, self).form_valid(form)
def dispatch(self, request, *args, **kwargs):
'''
Demo users can't submit ingredients
'''
if request.user.userprofile.is_temporary:
return HttpResponseForbidden()
return super(IngredientCreateView, self).dispatch(request, *args, **kwargs)
class PendingIngredientListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
'''
List all ingredients pending review
'''
model = Ingredient
template_name = 'ingredient/pending.html'
context_object_name = 'ingredient_list'
permission_required = 'nutrition.change_ingredient'
def get_queryset(self):
'''
Only show ingredients pending review
'''
return Ingredient.objects.filter(status=Ingredient.STATUS_PENDING) \
.order_by('-creation_date')
@permission_required('nutrition.add_ingredient')
def accept(request, pk):
'''
Accepts a pending user submitted ingredient
'''
ingredient = get_object_or_404(Ingredient, pk=pk)
ingredient.status = Ingredient.STATUS_ACCEPTED
ingredient.save()
ingredient.send_email(request)
messages.success(request, _('Ingredient was successfully added to the general database'))
return HttpResponseRedirect(ingredient.get_absolute_url())
@permission_required('nutrition.add_ingredient')
def decline(request, pk):
'''
Declines and deletes a pending user submitted ingredient
'''
ingredient = get_object_or_404(Ingredient, pk=pk)
ingredient.status = Ingredient.STATUS_DECLINED
ingredient.save()
messages.success(request, _('Ingredient was successfully marked as rejected'))
return HttpResponseRedirect(ingredient.get_absolute_url())
| petervanderdoes/wger | wger/nutrition/views/ingredient.py | Python | agpl-3.0 | 7,787 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import copy
import json
import string
from collections import namedtuple
from mock import patch, Mock
from nose.tools import assert_raises
from pybossa.importers import (_BulkTaskDropboxImport, _BulkTaskFlickrImport,
_BulkTaskCSVImport, _BulkTaskGDImport, _BulkTaskEpiCollectPlusImport,
BulkImportException, Importer)
from default import Test, FakeResponse
from factories import AppFactory, TaskFactory
from pybossa.repositories import TaskRepository
from pybossa.core import db
task_repo = TaskRepository(db)
@patch.object(Importer, '_create_importer_for')
class TestImporterPublicMethods(Test):
importer = Importer()
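    # Importer delegates the actual parsing to a type-specific bulk importer
    # chosen by _create_importer_for(type); that factory is patched on this
    # class so the tests can focus on task creation and deduplication alone.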
def test_create_tasks_creates_them_correctly(self, importer_factory):
mock_importer = Mock()
mock_importer.tasks.return_value = [{'info': {'question': 'question',
'url': 'url'},
'n_answers': 20}]
importer_factory.return_value = mock_importer
app = AppFactory.create()
form_data = dict(type='csv', csv_url='http://fakecsv.com')
self.importer.create_tasks(task_repo, app.id, **form_data)
task = task_repo.get_task(1)
assert task is not None
assert task.app_id == app.id, task.app_id
assert task.n_answers == 20, task.n_answers
assert task.info == {'question': 'question', 'url': 'url'}, task.info
importer_factory.assert_called_with('csv')
mock_importer.tasks.assert_called_with(**form_data)
def test_create_tasks_creates_many_tasks(self, importer_factory):
mock_importer = Mock()
mock_importer.tasks.return_value = [{'info': {'question': 'question1'}},
{'info': {'question': 'question2'}}]
importer_factory.return_value = mock_importer
app = AppFactory.create()
form_data = dict(type='gdocs', googledocs_url='http://ggl.com')
result = self.importer.create_tasks(task_repo, app.id, **form_data)
tasks = task_repo.filter_tasks_by(app_id=app.id)
assert len(tasks) == 2, len(tasks)
assert result == '2 new tasks were imported successfully', result
importer_factory.assert_called_with('gdocs')
def test_create_tasks_not_creates_duplicated_tasks(self, importer_factory):
mock_importer = Mock()
mock_importer.tasks.return_value = [{'info': {'question': 'question'}}]
importer_factory.return_value = mock_importer
app = AppFactory.create()
TaskFactory.create(app=app, info={'question': 'question'})
form_data = dict(type='flickr', album_id='1234')
result = self.importer.create_tasks(task_repo, app.id, **form_data)
tasks = task_repo.filter_tasks_by(app_id=app.id)
assert len(tasks) == 1, len(tasks)
assert result == 'It looks like there were no new records to import', result
importer_factory.assert_called_with('flickr')
def test_count_tasks_to_import_returns_what_expected(self, importer_factory):
mock_importer = Mock()
mock_importer.count_tasks.return_value = 2
importer_factory.return_value = mock_importer
form_data = dict(type='epicollect', epicollect_project='project',
epicollect_form='form')
number_of_tasks = self.importer.count_tasks_to_import(**form_data)
assert number_of_tasks == 2, number_of_tasks
importer_factory.assert_called_with('epicollect')
def test_get_all_importer_names_returns_default_importer_names(self, create):
importers = self.importer.get_all_importer_names()
expected_importers = ['csv', 'gdocs', 'epicollect']
assert set(importers) == set(expected_importers)
def test_get_all_importers_returns_configured_importers(self, create):
importer_params = {'api_key': self.flask_app.config['FLICKR_API_KEY']}
importer = Importer()
importer.register_flickr_importer(importer_params)
importer.register_dropbox_importer()
assert 'flickr' in importer.get_all_importer_names()
assert 'dropbox' in importer.get_all_importer_names()
def test_get_autoimporter_names_returns_default_autoimporter_names(self, create):
importers = self.importer.get_autoimporter_names()
expected_importers = ['csv', 'gdocs', 'epicollect']
assert set(importers) == set(expected_importers)
def test_get_autoimporter_names_returns_configured_autoimporters(self, create):
importer_params = {'api_key': self.flask_app.config['FLICKR_API_KEY']}
importer = Importer()
importer.register_flickr_importer(importer_params)
assert 'flickr' in importer.get_autoimporter_names()
class Test_BulkTaskDropboxImport(object):
dropbox_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.txt?dl=0",'
u'"name":"test.txt",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
importer = _BulkTaskDropboxImport()
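    # The expectations below suggest how the importer derives its fields from
    # the Dropbox chooser payload: 'link_raw' swaps the trailing '?dl=0' for
    # '?raw=1', while pdf/video/audio URLs point at dl.dropboxusercontent.com
    # without a query string. This is inferred from the assertions, not from
    # the importer implementation itself.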
def test_count_tasks_returns_0_if_no_files_to_import(self):
form_data = {'files': [], 'type': 'dropbox'}
number_of_tasks = self.importer.count_tasks(**form_data)
assert number_of_tasks == 0, number_of_tasks
def test_count_tasks_returns_1_if_1_file_to_import(self):
form_data = {'files': [self.dropbox_file_data],
'type': 'dropbox'}
number_of_tasks = self.importer.count_tasks(**form_data)
assert number_of_tasks == 1, number_of_tasks
def test_tasks_return_emtpy_list_if_no_files_to_import(self):
form_data = {'files': [], 'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks == [], tasks
def test_tasks_returns_list_with_1_file_data_if_1_file_to_import(self):
form_data = {'files': [self.dropbox_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert len(tasks) == 1, tasks
def test_tasks_returns_tasks_with_fields_for_generic_files(self):
#For generic file extensions: link, filename, link_raw
form_data = {'files': [self.dropbox_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.txt"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.txt?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.txt?raw=1"
def test_tasks_attributes_for_png_image_files(self):
#For image file extensions: link, filename, link_raw, url_m, url_b, title
png_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.png?dl=0",'
u'"name":"test.png",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
form_data = {'files': [png_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.png"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.png?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.png?raw=1"
assert tasks[0]['info']['url_m'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.png?raw=1"
assert tasks[0]['info']['url_b'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.png?raw=1"
assert tasks[0]['info']['title'] == "test.png"
def test_tasks_attributes_for_jpg_image_files(self):
#For image file extensions: link, filename, link_raw, url_m, url_b, title
jpg_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpg?dl=0",'
u'"name":"test.jpg",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
form_data = {'files': [jpg_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.jpg"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpg?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpg?raw=1"
assert tasks[0]['info']['url_m'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpg?raw=1"
assert tasks[0]['info']['url_b'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpg?raw=1"
assert tasks[0]['info']['title'] == "test.jpg"
def test_tasks_attributes_for_jpeg_image_files(self):
#For image file extensions: link, filename, link_raw, url_m, url_b, title
jpeg_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpeg?dl=0",'
u'"name":"test.jpeg",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
form_data = {'files': [jpeg_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.jpeg"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpeg?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpeg?raw=1"
assert tasks[0]['info']['url_m'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpeg?raw=1"
assert tasks[0]['info']['url_b'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.jpeg?raw=1"
assert tasks[0]['info']['title'] == "test.jpeg"
def test_tasks_attributes_for_gif_image_files(self):
#For image file extensions: link, filename, link_raw, url_m, url_b, title
gif_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.gif?dl=0",'
u'"name":"test.gif",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
form_data = {'files': [gif_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.gif"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.gif?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.gif?raw=1"
assert tasks[0]['info']['url_m'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.gif?raw=1"
assert tasks[0]['info']['url_b'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.gif?raw=1"
assert tasks[0]['info']['title'] == "test.gif"
def test_tasks_attributes_for_pdf_files(self):
#For pdf file extension: link, filename, link_raw, pdf_url
pdf_file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.pdf?dl=0",'
u'"name":"test.pdf",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
form_data = {'files': [pdf_file_data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.pdf"
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.pdf?dl=0"
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.pdf?raw=1"
assert tasks[0]['info']['pdf_url'] == "https://dl.dropboxusercontent.com/s/l2b77qvlrequ6gl/test.pdf"
def test_tasks_attributes_for_video_files(self):
#For video file extension: link, filename, link_raw, video_url
video_ext = ['mp4', 'm4v', 'ogg', 'ogv', 'webm', 'avi']
file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.extension?dl=0",'
u'"name":"test.extension",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
for ext in video_ext:
data = string.replace(file_data,'extension', ext)
form_data = {'files': [data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.%s" % ext
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.%s?dl=0" % ext
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.%s?raw=1" % ext
assert tasks[0]['info']['video_url'] == "https://dl.dropboxusercontent.com/s/l2b77qvlrequ6gl/test.%s" % ext
def test_tasks_attributes_for_audio_files(self):
#For audio file extension: link, filename, link_raw, audio_url
audio_ext = ['mp4', 'm4a', 'mp3', 'ogg', 'oga', 'webm', 'wav']
file_data = (u'{"bytes":286,'
u'"link":"https://www.dropbox.com/s/l2b77qvlrequ6gl/test.extension?dl=0",'
u'"name":"test.extension",'
u'"icon":"https://www.dropbox.com/static/images/icons64/page_white_text.png"}')
for ext in audio_ext:
data = string.replace(file_data,'extension', ext)
form_data = {'files': [data],
'type': 'dropbox'}
tasks = self.importer.tasks(**form_data)
assert tasks[0]['info']['filename'] == "test.%s" % ext
assert tasks[0]['info']['link'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.%s?dl=0" % ext
assert tasks[0]['info']['link_raw'] == "https://www.dropbox.com/s/l2b77qvlrequ6gl/test.%s?raw=1" % ext
assert tasks[0]['info']['audio_url'] == "https://dl.dropboxusercontent.com/s/l2b77qvlrequ6gl/test.%s" % ext
@patch('pybossa.importers.requests')
class Test_BulkTaskFlickrImport(object):
invalid_response = {u'stat': u'fail',
u'code': 1, u'message': u'Photoset not found'}
response = {
u'stat': u'ok',
u'photoset': {
u'perpage': 500,
u'title': u'Science Hack Day Balloon Mapping Workshop',
u'photo': [
{u'isfamily': 0, u'title': u'Inflating the balloon', u'farm': 6,
u'ispublic': 1, u'server': u'5441', u'isfriend': 0,
u'secret': u'00e2301a0d', u'isprimary': u'0', u'id': u'8947115130'},
{u'isfamily': 0, u'title': u'Inflating the balloon', u'farm': 4,
u'ispublic': 1, u'server': u'3763', u'isfriend': 0,
u'secret': u'70d482fc68', u'isprimary': u'0', u'id': u'8946490553'},
{u'isfamily': 0, u'title': u'Inflating the balloon', u'farm': 3,
u'ispublic': 1, u'server': u'2810', u'isfriend': 0,
u'secret': u'99cae13d87', u'isprimary': u'0', u'id': u'8947113960'}],
u'pages': 1,
u'primary': u'8947113500',
u'id': u'72157633923521788',
u'ownername': u'Teleyinex',
u'owner': u'32985084@N00',
u'per_page': 500,
u'total': u'3',
u'page': 1}}
photo = {u'isfamily': 0, u'title': u'Inflating the balloon', u'farm': 6,
u'ispublic': 1, u'server': u'5441', u'isfriend': 0,
u'secret': u'00e2301a0d', u'isprimary': u'0', u'id': u'8947115130'}
importer = _BulkTaskFlickrImport(api_key='fake-key')
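    # The URL assertions further down follow Flickr's static photo URL scheme,
    # https://farm{farm}.staticflickr.com/{server}/{id}_{secret}[_m|_b].jpg,
    # built from the farm/server/id/secret fields of each photo entry above.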
def make_response(self, text, status_code=200):
fake_response = Mock()
fake_response.text = text
fake_response.status_code = status_code
return fake_response
def test_call_to_flickr_api_endpoint(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.response))
self.importer._get_album_info('72157633923521788')
url = 'https://api.flickr.com/services/rest/'
payload = {'method': 'flickr.photosets.getPhotos',
'api_key': 'fake-key',
'photoset_id': '72157633923521788',
'format': 'json',
'nojsoncallback': '1'}
requests.get.assert_called_with(url, params=payload)
def test_call_to_flickr_api_uses_no_credentials(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.response))
self.importer._get_album_info('72157633923521788')
# The request MUST NOT include user credentials, to avoid private photos
url_call_params = requests.get.call_args_list[0][1]['params'].keys()
assert 'auth_token' not in url_call_params
def test_count_tasks_returns_number_of_photos_in_album(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.response))
number_of_tasks = self.importer.count_tasks(album_id='72157633923521788')
        assert number_of_tasks == 3, number_of_tasks
def test_count_tasks_raises_exception_if_invalid_album(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.invalid_response))
assert_raises(BulkImportException, self.importer.count_tasks, album_id='bad')
def test_count_tasks_raises_exception_on_non_200_flickr_response(self, requests):
requests.get.return_value = self.make_response('Not Found', 404)
assert_raises(BulkImportException, self.importer.count_tasks,
album_id='72157633923521788')
def test_tasks_returns_list_of_all_photos(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.response))
photos = self.importer.tasks(album_id='72157633923521788')
assert len(photos) == 3, len(photos)
def test_tasks_returns_tasks_with_title_and_url_info_fields(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.response))
url = 'https://farm6.staticflickr.com/5441/8947115130_00e2301a0d.jpg'
url_m = 'https://farm6.staticflickr.com/5441/8947115130_00e2301a0d_m.jpg'
url_b = 'https://farm6.staticflickr.com/5441/8947115130_00e2301a0d_b.jpg'
link = 'https://www.flickr.com/photos/32985084@N00/8947115130'
title = self.response['photoset']['photo'][0]['title']
photo = self.importer.tasks(album_id='72157633923521788')[0]
assert photo['info'].get('title') == title
assert photo['info'].get('url') == url, photo['info'].get('url')
assert photo['info'].get('url_m') == url_m, photo['info'].get('url_m')
assert photo['info'].get('url_b') == url_b, photo['info'].get('url_b')
assert photo['info'].get('link') == link, photo['info'].get('link')
def test_tasks_raises_exception_if_invalid_album(self, requests):
requests.get.return_value = self.make_response(json.dumps(self.invalid_response))
assert_raises(BulkImportException, self.importer.tasks, album_id='bad')
def test_tasks_raises_exception_on_non_200_flickr_response(self, requests):
requests.get.return_value = self.make_response('Not Found', 404)
assert_raises(BulkImportException, self.importer.tasks,
album_id='72157633923521788')
def test_tasks_returns_all_for_sets_with_more_than_500_photos(self, requests):
# Deep-copy the object, as we will be modifying it and we don't want
# these modifications to affect other tests
first_response = copy.deepcopy(self.response)
first_response['photoset']['pages'] = 2
first_response['photoset']['total'] = u'600'
first_response['photoset']['page'] = 1
first_response['photoset']['photo'] = [self.photo for i in range(500)]
second_response = copy.deepcopy(self.response)
second_response['photoset']['pages'] = 2
second_response['photoset']['total'] = u'600'
second_response['photoset']['page'] = 2
second_response['photoset']['photo'] = [self.photo for i in range(100)]
fake_first_response = self.make_response(json.dumps(first_response))
fake_second_response = self.make_response(json.dumps(second_response))
responses = [fake_first_response, fake_second_response]
requests.get.side_effect = lambda *args, **kwargs: responses.pop(0)
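        # Each call to requests.get pops the next canned response, so the
        # importer sees page 1 and then page 2 as if it were paginating.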
photos = self.importer.tasks(album_id='72157633923521788')
assert len(photos) == 600, len(photos)
def test_tasks_returns_all_for_sets_with_more_than_1000_photos(self, requests):
# Deep-copy the object, as we will be modifying it and we don't want
# these modifications to affect other tests
first_response = copy.deepcopy(self.response)
first_response['photoset']['pages'] = 3
first_response['photoset']['total'] = u'1100'
first_response['photoset']['page'] = 1
first_response['photoset']['photo'] = [self.photo for i in range(500)]
second_response = copy.deepcopy(self.response)
second_response['photoset']['pages'] = 3
second_response['photoset']['total'] = u'1100'
second_response['photoset']['page'] = 2
second_response['photoset']['photo'] = [self.photo for i in range(500)]
third_response = copy.deepcopy(self.response)
third_response['photoset']['pages'] = 3
third_response['photoset']['total'] = u'1100'
third_response['photoset']['page'] = 3
third_response['photoset']['photo'] = [self.photo for i in range(100)]
fake_first_response = self.make_response(json.dumps(first_response))
fake_second_response = self.make_response(json.dumps(second_response))
fake_third_response = self.make_response(json.dumps(third_response))
responses = [fake_first_response, fake_second_response, fake_third_response]
requests.get.side_effect = lambda *args, **kwargs: responses.pop(0)
photos = self.importer.tasks(album_id='72157633923521788')
assert len(photos) == 1100, len(photos)
@patch('pybossa.importers.requests.get')
class Test_BulkTaskCSVImport(object):
url = 'http://myfakecsvurl.com'
importer = _BulkTaskCSVImport()
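    # As the tests below show, the first CSV row is treated as the header:
    # ordinary columns end up inside task['info'], while recognised task
    # fields such as 'priority_0' are lifted to the top level of the task
    # dict. Which field names are recognised beyond 'priority_0' is not
    # visible from these tests.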
def test_count_tasks_returns_0_if_no_rows_other_than_header(self, request):
empty_file = FakeResponse(text='CSV,with,no,content\n', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
number_of_tasks = self.importer.count_tasks(csv_url=self.url)
        assert number_of_tasks == 0, number_of_tasks
def test_count_tasks_returns_1_for_CSV_with_one_valid_row(self, request):
empty_file = FakeResponse(text='Foo,Bar,Baz\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
number_of_tasks = self.importer.count_tasks(csv_url=self.url)
        assert number_of_tasks == 1, number_of_tasks
def test_count_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/csv'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access that file"
assert_raises(BulkImportException, self.importer.count_tasks, csv_url=self.url)
try:
self.importer.count_tasks(csv_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_raises_exception_if_not_CSV_file(self, request):
html_request = FakeResponse(text='Not a CSV', status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That file doesn't look like the right file."
assert_raises(BulkImportException, self.importer.count_tasks, csv_url=self.url)
try:
self.importer.count_tasks(csv_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_raises_exception_if_dup_header(self, request):
empty_file = FakeResponse(text='Foo,Bar,Foo\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
msg = "The file you uploaded has two headers with the same name."
assert_raises(BulkImportException, self.importer.count_tasks, csv_url=self.url)
try:
self.importer.count_tasks(csv_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/csv'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access that file"
assert_raises(BulkImportException, self.importer.tasks, csv_url=self.url)
try:
self.importer.tasks(csv_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_not_CSV_file(self, request):
html_request = FakeResponse(text='Not a CSV', status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That file doesn't look like the right file."
assert_raises(BulkImportException, self.importer.tasks, csv_url=self.url)
try:
self.importer.tasks(csv_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_dup_header(self, request):
empty_file = FakeResponse(text='Foo,Bar,Foo\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
msg = "The file you uploaded has two headers with the same name."
raised = False
try:
self.importer.tasks(csv_url=self.url).next()
except BulkImportException as e:
assert e[0] == msg, e
raised = True
finally:
assert raised, "Exception not raised"
def test_tasks_return_tasks_with_only_info_fields(self, request):
empty_file = FakeResponse(text='Foo,Bar,Baz\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
tasks = self.importer.tasks(csv_url=self.url)
task = tasks.next()
assert task == {"info": {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'}}, task
def test_tasks_return_tasks_with_non_info_fields_too(self, request):
empty_file = FakeResponse(text='Foo,Bar,priority_0\n1,2,3',
status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
tasks = self.importer.tasks(csv_url=self.url)
task = tasks.next()
assert task == {'info': {u'Foo': u'1', u'Bar': u'2'},
u'priority_0': u'3'}, task
def test_tasks_works_with_encodings_other_than_utf8(self, request):
empty_file = FakeResponse(text=u'Foo\nM\xc3\xbcnchen', status_code=200,
headers={'content-type': 'text/plain'},
encoding='ISO-8859-1')
request.return_value = empty_file
tasks = self.importer.tasks(csv_url=self.url)
task = tasks.next()
assert empty_file.encoding == 'utf-8'
@patch('pybossa.importers.requests.get')
class Test_BulkTaskGDImport(object):
url = 'http://drive.google.com'
importer = _BulkTaskGDImport()
def test_count_tasks_returns_0_if_no_rows_other_than_header(self, request):
empty_file = FakeResponse(text='CSV,with,no,content\n', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
number_of_tasks = self.importer.count_tasks(googledocs_url=self.url)
        assert number_of_tasks == 0, number_of_tasks
def test_count_tasks_returns_1_for_CSV_with_one_valid_row(self, request):
valid_file = FakeResponse(text='Foo,Bar,Baz\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = valid_file
number_of_tasks = self.importer.count_tasks(googledocs_url=self.url)
        assert number_of_tasks == 1, number_of_tasks
def test_count_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access that file"
assert_raises(BulkImportException, self.importer.count_tasks, googledocs_url=self.url)
try:
self.importer.count_tasks(googledocs_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_raises_exception_if_not_CSV_file(self, request):
html_request = FakeResponse(text='Not a CSV', status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That file doesn't look like the right file."
assert_raises(BulkImportException, self.importer.count_tasks, googledocs_url=self.url)
try:
self.importer.count_tasks(googledocs_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_raises_exception_if_dup_header(self, request):
empty_file = FakeResponse(text='Foo,Bar,Foo\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
msg = "The file you uploaded has two headers with the same name."
assert_raises(BulkImportException, self.importer.count_tasks, googledocs_url=self.url)
try:
self.importer.count_tasks(googledocs_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access that file"
assert_raises(BulkImportException, self.importer.tasks, googledocs_url=self.url)
try:
self.importer.tasks(googledocs_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_not_CSV_file(self, request):
html_request = FakeResponse(text='Not a CSV', status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That file doesn't look like the right file."
assert_raises(BulkImportException, self.importer.tasks, googledocs_url=self.url)
try:
self.importer.tasks(googledocs_url=self.url)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_dup_header(self, request):
empty_file = FakeResponse(text='Foo,Bar,Foo\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
msg = "The file you uploaded has two headers with the same name."
raised = False
try:
self.importer.tasks(googledocs_url=self.url).next()
except BulkImportException as e:
assert e[0] == msg, e
raised = True
finally:
assert raised, "Exception not raised"
def test_tasks_return_tasks_with_only_info_fields(self, request):
empty_file = FakeResponse(text='Foo,Bar,Baz\n1,2,3', status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
tasks = self.importer.tasks(googledocs_url=self.url)
task = tasks.next()
assert task == {"info": {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'}}, task
def test_tasks_return_tasks_with_non_info_fields_too(self, request):
empty_file = FakeResponse(text='Foo,Bar,priority_0\n1,2,3',
status_code=200,
headers={'content-type': 'text/plain'},
encoding='utf-8')
request.return_value = empty_file
tasks = self.importer.tasks(googledocs_url=self.url)
task = tasks.next()
assert task == {'info': {u'Foo': u'1', u'Bar': u'2'},
u'priority_0': u'3'}, task
def test_tasks_works_with_encodings_other_than_utf8(self, request):
empty_file = FakeResponse(text=u'Foo\nM\xc3\xbcnchen', status_code=200,
headers={'content-type': 'text/plain'},
encoding='ISO-8859-1')
request.return_value = empty_file
tasks = self.importer.tasks(googledocs_url=self.url)
task = tasks.next()
assert empty_file.encoding == 'utf-8'
@patch('pybossa.importers.requests.get')
class Test_BulkTaskEpiCollectPlusImport(object):
epicollect = {'epicollect_project': 'fakeproject',
'epicollect_form': 'fakeform'}
importer = _BulkTaskEpiCollectPlusImport()
def test_count_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/json'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access the " \
"EpiCollect Plus project"
assert_raises(BulkImportException, self.importer.count_tasks, **self.epicollect)
try:
self.importer.count_tasks(**self.epicollect)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_file_forbidden(self, request):
forbidden_request = FakeResponse(text='Forbidden', status_code=403,
headers={'content-type': 'text/json'},
encoding='utf-8')
request.return_value = forbidden_request
msg = "Oops! It looks like you don't have permission to access the " \
"EpiCollect Plus project"
assert_raises(BulkImportException, self.importer.tasks, **self.epicollect)
try:
self.importer.tasks(**self.epicollect)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_raises_exception_if_not_json(self, request):
html_request = FakeResponse(text='Not an application/json',
status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That project and form do not look like the right one."
assert_raises(BulkImportException, self.importer.count_tasks, **self.epicollect)
try:
self.importer.count_tasks(**self.epicollect)
except BulkImportException as e:
assert e[0] == msg, e
def test_tasks_raises_exception_if_not_json(self, request):
html_request = FakeResponse(text='Not an application/json',
status_code=200,
headers={'content-type': 'text/html'},
encoding='utf-8')
request.return_value = html_request
msg = "Oops! That project and form do not look like the right one."
assert_raises(BulkImportException, self.importer.tasks, **self.epicollect)
try:
self.importer.tasks(**self.epicollect)
except BulkImportException as e:
assert e[0] == msg, e
def test_count_tasks_returns_number_of_tasks_in_project(self, request):
data = [dict(DeviceID=23), dict(DeviceID=24)]
response = FakeResponse(text=json.dumps(data), status_code=200,
headers={'content-type': 'application/json'},
encoding='utf-8')
request.return_value = response
number_of_tasks = self.importer.count_tasks(**self.epicollect)
        assert number_of_tasks == 2, number_of_tasks
def test_tasks_returns_tasks_in_project(self, request):
data = [dict(DeviceID=23), dict(DeviceID=24)]
response = FakeResponse(text=json.dumps(data), status_code=200,
headers={'content-type': 'application/json'},
encoding='utf-8')
request.return_value = response
task = self.importer.tasks(**self.epicollect).next()
assert task == {'info': {u'DeviceID': 23}}, task
| stefanhahmann/pybossa | test/test_importers.py | Python | agpl-3.0 | 39,427 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-26 13:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
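# This migration tightens deletion behaviour on the tracking models: absence
# types and report users/tasks become PROTECTed (deleting them is blocked
# while references exist), whereas an activity merely loses its task
# reference (SET_NULL) when the task goes away.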
class Migration(migrations.Migration):
dependencies = [("tracking", "0010_auto_20180904_0818")]
operations = [
migrations.AlterField(
model_name="absence",
name="type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="absences",
to="employment.AbsenceType",
),
),
migrations.AlterField(
model_name="activity",
name="task",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="activities",
to="projects.Task",
),
),
migrations.AlterField(
model_name="report",
name="task",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="reports",
to="projects.Task",
),
),
migrations.AlterField(
model_name="report",
name="user",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="reports",
to=settings.AUTH_USER_MODEL,
),
),
]
| adfinis-sygroup/timed-backend | timed/tracking/migrations/0011_auto_20181026_1528.py | Python | agpl-3.0 | 1,588 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
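# Module-level default settings (locale, webserver host/port, lure handling).
# Whether and how these defaults get overridden at startup is not shown here.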
config = {
'LOCALE': 'en',
'LOCALES_DIR': 'locales',
'ROOT_PATH': '',
'HOST':'127.0.0.1',
'PORT':'4000',
'SKIP_LURED':False
}
from utils import *
| simonhashadenough/PokemonGo-Map | PokeAlarm/alarms/__init__.py | Python | agpl-3.0 | 197 |
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from django.core.cache import cache
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from datetime import datetime, timedelta
from stationspinner.accounting.models import Capsuler
import pytz, pycrest, hashlib, os, time, logging
log = logging.getLogger('crest')
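# Rough shape of the login flow implemented below (inferred from this module):
#   1. get_authorization_token() hands the browser a random state token.
#   2. After the EVE SSO redirect, login()/deny_login() stash the outcome in
#      the cache under that state token.
#   3. The client polls check_login(state_token) until it receives the real
#      API token, which it then sends as 'Authorization: Token <token>' and
#      which CrestAuthentication resolves back to a Capsuler.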
def _set_crest_data(auth_con, capsuler):
expires_in_seconds = (datetime.fromtimestamp(auth_con.expires, pytz.UTC) -
datetime.now(tz=pytz.UTC)).total_seconds()
# This sets the token for communication between armada, stationspinner and
# crest.
cache.set(auth_con.token,
{
'username': capsuler.username,
'refresh_token': auth_con.refresh_token,
'expires': auth_con.expires
},
timeout=expires_in_seconds)
return auth_con.token
def login(capsuler, auth_con, state_token):
key = _set_crest_data(auth_con, capsuler)
# This sets the state_token which we've sent through the sso and back to us
# and ties it to the sso token. We only keep this for a short period of
# time, so it is hard to bruteforce and intercept a connection.
cache.set(state_token, key, timeout=60)
def deny_login(state_token, reason):
'''
    If the user is denied access even though they successfully authenticated
    with the sso, we need to send a message back. We'll stick it into our state token.
'''
cache.set(state_token, reason, timeout=60)
def check_login(state_token):
'''
Gets messages or token from the cache.
'''
auth_data = cache.get(state_token, None)
if not auth_data:
        # User polled a non-existent token. Very suspicious.
return None, 403
if auth_data == 'waiting for sso':
return {'waiting': True}, 200
if type(auth_data) is dict and 'error' in auth_data:
# Login has failed for some reason
return auth_data['error'], 400
crest_data = cache.get(auth_data, None)
if crest_data and 'username' in crest_data:
# Login has been successful. Delete the state token and send the new
# token back to the user
crest_token = auth_data
cache.delete(state_token)
log.debug("".join((crest_data['username'], ' logged in, token: ', crest_token)))
return {'token': crest_token,
'expires': crest_data['expires']}, 200
return 'Stuck in warp tunnel!', 400
def logout(request):
auth = get_authorization_header(request).split()
try:
token = auth[1].decode()
except UnicodeError:
return
cache.delete(token)
def get_authorized_connection(code):
eve_sso = pycrest.EVE(client_id=settings.CREST_CLIENTID,
api_key=settings.CREST_SECRET_KEY)
return eve_sso.authorize(code)
def refresh_token(token, capsuler):
auth_data = cache.get(token, None)
if not auth_data:
raise exceptions.AuthenticationFailed('Token has expired.')
eve_sso = pycrest.EVE(client_id=settings.CREST_CLIENTID,
api_key=settings.CREST_SECRET_KEY)
cache.delete(token)
# If we're using authentication only and no crest scopes,
# the refresh token will be empty. Generate a new non-crest
# token instead.
if auth_data['refresh_token'] is None:
new_token = hashlib.sha1(os.urandom(256)).hexdigest()
expires = datetime.now(tz=pytz.UTC) + timedelta(minutes=20)
cache.set(new_token,
{
'username': auth_data['username'],
'refresh_token': None,
'expires': None
},
timeout=(expires - datetime.now(tz=pytz.UTC)).total_seconds()) # Use 20 minutes, same as crest
expires = time.mktime(expires.timetuple())
else:
auth_con = eve_sso.refr_authorize(auth_data['refresh_token'])
new_token = _set_crest_data(auth_con, capsuler)
expires = auth_con.expires
log.debug(" ".join(('Refreshed token for', auth_data['username'], new_token)))
return new_token, expires
def get_authorization_token():
token = hashlib.sha1(os.urandom(128)).hexdigest()
cache.set(token, 'waiting for sso', timeout=60*60*8)
return token
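# A rough sketch of how these helpers fit together (illustrative only; the
# variable names and the polling step are assumptions, not code from this module):
#
#   state_token = get_authorization_token()       # handed out before the SSO redirect
#   # ...user completes the EVE SSO flow, the callback receives 'code'...
#   auth_con = get_authorized_connection(code)
#   login(capsuler, auth_con, state_token)         # or deny_login(state_token, reason)
#
#   # The client polls with its state token until a real token is issued:
#   payload, status = check_login(state_token)     # {'waiting': True} -> {'token': ...}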
class CrestAuthentication(BaseAuthentication):
def authenticate(self, request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b'token':
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. Token string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
try:
token = auth[1].decode()
except UnicodeError:
msg = _('Invalid token header. Token string should not contain invalid characters.')
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(token)
def authenticate_credentials(self, token):
token_data = cache.get(token, None)
if not token_data:
raise exceptions.AuthenticationFailed('Invalid token.')
try:
capsuler = Capsuler.objects.get(username=token_data['username'])
except Capsuler.DoesNotExist:
raise exceptions.AuthenticationFailed(_('User inactive or deleted.'))
if not capsuler.is_active:
raise exceptions.AuthenticationFailed(_('User inactive or deleted.'))
return (capsuler, token)
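    # To activate this class it would typically be listed in the project's DRF
    # settings; the snippet below is an assumption, not taken from this repository:
    #   REST_FRAMEWORK = {
    #       'DEFAULT_AUTHENTICATION_CLASSES': (
    #           'stationspinner.accounting.authentication.CrestAuthentication',
    #       ),
    #   }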
def authenticate_header(self, request):
return 'Token' | kriberg/stationspinner | stationspinner/accounting/authentication.py | Python | agpl-3.0 | 5,821 |
# This file is part of Booktype.
# Copyright (c) 2013 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url, include
from .views import RegisterPageView
urlpatterns = patterns(
'',
url(r'^signin/$', 'booki.account.views.signin', name='signin'),
url(r'^register/$', RegisterPageView.as_view(), name='register'),
)
| btat/Booktype | lib/booktype/apps/accounts/urls.py | Python | agpl-3.0 | 1,024 |
# The Hazard Library
# Copyright (C) 2012-2022 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from openquake.hazardlib.scalerel import (
get_available_scalerel,
get_available_magnitude_scalerel,
get_available_sigma_magnitude_scalerel,
get_available_area_scalerel,
get_available_sigma_area_scalerel)
from openquake.hazardlib.scalerel.peer import PeerMSR
from openquake.hazardlib.scalerel.wc1994 import WC1994
class AvailableMSRTestCase(unittest.TestCase):
"""
    The scalerel module contains methods to discover the scaling relations it
    contains. This class tests that they all execute, without necessarily
    verifying the number or set of scaling relations available.
"""
def _test_get_available_scalerel(self):
self.assertGreater(len(get_available_scalerel()), 0)
def _test_get_available_magnitude_scalerel(self):
self.assertGreater(len(get_available_magnitude_scalerel()), 0)
def _test_get_available_sigma_magnitude_scalerel(self):
self.assertGreater(len(get_available_sigma_magnitude_scalerel()), 0)
def _test_get_available_area_scalerel(self):
self.assertGreater(len(get_available_area_scalerel()), 0)
def _test_get_available_sigma_area_scalerel(self):
self.assertGreater(len(get_available_sigma_area_scalerel()), 0)
class BaseMSRTestCase(unittest.TestCase):
MSR_CLASS = None
def setUp(self):
super().setUp()
self.msr = self.MSR_CLASS()
def _test_get_median_area(self, mag, rake, expected_value, places=7):
self.assertAlmostEqual(self.msr.get_median_area(mag, rake),
expected_value, places=places)
def _test_get_median_mag(self, area, rake, expected_value, places=7):
self.assertAlmostEqual(self.msr.get_median_mag(area, rake),
expected_value, places=places)
class PeerMSRMSRTestCase(BaseMSRTestCase):
MSR_CLASS = PeerMSR
def test_median_area(self):
self._test_get_median_area(4.3, None, 1.9952623)
self._test_get_median_area(5.1, 0, 12.5892541)
class WC1994MSRTestCase(BaseMSRTestCase):
MSR_CLASS = WC1994
def test_median_area_all(self):
self._test_get_median_area(2.2, None, 0.0325087)
self._test_get_median_area(1.3, None, 0.0049317)
def test_median_area_strike_slip(self):
self._test_get_median_area(3.9, -28.22, 1.2302688)
self._test_get_median_area(3.9, -45, 1.2302688)
self._test_get_median_area(3.9, 0, 1.2302688)
self._test_get_median_area(3.9, 45, 1.2302688)
def test_median_area_thrust(self):
self._test_get_median_area(4.1, 50, 1.0665961)
self._test_get_median_area(4.1, 95, 1.0665961)
def test_median_area_normal(self):
self._test_get_median_area(5.9, -59, 92.8966387)
self._test_get_median_area(5.9, -125, 92.8966387)
def test_get_std_dev_area(self):
self.assertEqual(self.msr.get_std_dev_area(None, None), 0.24)
self.assertEqual(self.msr.get_std_dev_area(20, 4), 0.22)
self.assertEqual(self.msr.get_std_dev_area(None, 138), 0.22)
self.assertEqual(self.msr.get_std_dev_area(None, -136), 0.22)
self.assertEqual(self.msr.get_std_dev_area(None, 50), 0.26)
self.assertEqual(self.msr.get_std_dev_area(None, -130), 0.22)
def test_string(self):
self.assertEqual(str(self.msr), "<WC1994>")
| gem/oq-engine | openquake/hazardlib/tests/scalerel/msr_test.py | Python | agpl-3.0 | 4,059 |
# coding: utf-8
# Copyright (C) 2014 - Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, fields, _
from openerp.exceptions import Warning as UserError
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
intercompany_trade = fields.Boolean(
string='Intercompany Trade', related='partner_id.intercompany_trade')
@api.multi
def action_invoice_create(self):
orders = self.filtered(lambda x: not x.intercompany_trade)
return super(PurchaseOrder, orders).action_invoice_create()
@api.multi
def view_invoice(self):
orders = self.filtered(lambda x: x.intercompany_trade)
if orders:
raise UserError(_(
"In Intercompany Trade context, The supplier invoices will"
" be created by your supplier"))
        return super(PurchaseOrder, self).view_invoice()
@api.model
def _prepare_order_line_move(
self, order, order_line, picking_id, group_id):
res = super(PurchaseOrder, self)._prepare_order_line_move(
order, order_line, picking_id, group_id)
if order.intercompany_trade:
for item in res:
item['invoice_state'] = 'none'
return res
| grap/odoo-addons-it | intercompany_trade_purchase/models/purchase_order.py | Python | agpl-3.0 | 1,356 |
#!/usr/bin/env python
# This is a visualizer which pulls TPC-C benchmark results from the MySQL
# databases and visualizes them. Four graphs will be generated: latency graphs
# for single-node and multi-node runs, and throughput graphs for single-node
# and multi-node runs.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
from operator import itemgetter, attrgetter
import numpy as np
STATS_SERVER = 'volt2'
COLORS = ['b','g','c','m','k']
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
mc = {}
def get_stats(hostname, port, days):
"""Get most recent run statistics of all apps within the last 'days'
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'CenterAverageOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
    # keyed on (app name, node count); value is a list of runs sorted chronologically
maxdate = datetime.datetime(1970,1,1,0,0,0)
mindate = datetime.datetime(2038,1,19,0,0,0)
stats = dict()
run_stat_keys = ['app', 'nodes', 'branch', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
group = (row[0],row[1])
app_stats = []
maxdate = max(maxdate, row[3])
mindate = min(mindate, row[3])
if group not in stats:
stats[group] = app_stats
else:
app_stats = stats[group]
run_stats = dict(zip(run_stat_keys, row))
app_stats.append(run_stats)
return (stats, mindate, maxdate)
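# Illustrative shape of the structure returned by get_stats(); the keys come from
# run_stat_keys above, but the concrete values here are made up, not real data:
#
#   stats = {
#       ('tpcc', 3): [
#           {'app': 'tpcc', 'nodes': 3, 'branch': 'master',
#            'date': datetime.datetime(2014, 1, 1), 'tps': 12345.0,
#            'lat95': 20.1, 'lat99': 35.7},
#           # ... one dict per run ...
#       ],
#   }
#   # returned together with the earliest and latest run dates as (stats, mindate, maxdate)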
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h, xmin, xmax, series):
self.filename = filename
self.legends = {}
        w = 2000 if w is None else w
        h = 1000 if h is None else h
self.xmax = xmax
self.xmin = xmin
self.series = series
self.fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = self.fig.add_subplot(111)
self.ax.set_title(title)
plt.tick_params(axis='x', which='major', labelsize=16)
plt.tick_params(axis='y', labelright=True, labelleft=False, labelsize=16)
plt.grid(True)
self.fig.autofmt_xdate()
plt.ylabel(ylabel)
plt.xlabel(xlabel)
def plot(self, x, y, color, marker_shape, legend, linestyle):
self.ax.plot(x, y, linestyle, label=legend, color=color,
marker=marker_shape, markerfacecolor=color, markersize=8)
def close(self):
x_formatter = matplotlib.dates.DateFormatter("%b %d %y")
self.ax.xaxis.set_major_formatter(x_formatter)
loc = matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO, interval=1)
self.ax.xaxis.set_major_locator(loc)
self.ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(n=7))
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
self.ax.yaxis.set_major_formatter(y_formatter)
ymin, ymax = plt.ylim()
plt.xlim((self.xmin.toordinal(), (self.xmax+datetime.timedelta(1)).replace(minute=0, hour=0, second=0, microsecond=0).toordinal()))
if self.series.startswith('lat'):
lloc = 2
else:
lloc = 3
plt.legend(prop={'size': 12}, loc=lloc)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
plt.close('all')
def plot(title, xlabel, ylabel, filename, width, height, app, data, series, mindate, maxdate):
global mc
plot_data = dict()
for run in data:
if run['branch'] not in plot_data:
plot_data[run['branch']] = {series: []}
if series == 'tppn':
value = run['tps']/run['nodes']
else:
value = run[series]
datenum = matplotlib.dates.date2num(run['date'])
plot_data[run['branch']][series].append((datenum,value))
if len(plot_data) == 0:
return
pl = Plot(title, xlabel, ylabel, filename, width, height, mindate, maxdate, series)
flag = dict()
for b,bd in plot_data.items():
for k,v in bd.items():
if k not in flag.keys():
flag[k] = []
v = sorted(v, key=lambda x: x[0])
u = zip(*v)
if b not in mc:
mc[b] = (COLORS[len(mc.keys())%len(COLORS)], MARKERS[len(mc.keys())%len(MARKERS)])
pl.plot(u[0], u[1], mc[b][0], mc[b][1], b, '-')
ma = [None]
if len(u[0]) >= 10:
(ma,mstd) = moving_average(u[1], 10)
pl.plot(u[0], ma, mc[b][0], None, None, ":")
failed = 0
if k.startswith('lat'):
polarity = 1
cv = np.nanmin(ma)
rp = (u[0][np.nanargmin(ma)], cv)
if ma[-1] > cv * 1.05:
failed = 1
else:
polarity = -1
cv = np.nanmax(ma)
rp = (u[0][np.nanargmax(ma)], cv)
if ma[-1] < cv * 0.95:
failed = 1
twosigma = np.sum([np.convolve(mstd, polarity*2), ma], axis=0)
pl.plot(u[0], twosigma, mc[b][0], None, None, '-.')
pl.ax.annotate(r"$2\sigma$", xy=(u[0][-1], twosigma[-1]), xycoords='data', xytext=(20,0), textcoords='offset points', ha='right')
twntypercent = np.sum([np.convolve(ma, polarity*0.2), ma], axis=0)
pl.plot(u[0], twntypercent, mc[b][0], None, None, '-.')
pl.ax.annotate(r"20%", xy=(u[0][-1], twntypercent[-1]), xycoords='data', xytext=(20,0), textcoords='offset points', ha='right')
p = (ma[-1]-rp[1])/rp[1]*100.
if failed != 0:
if p<10:
color = 'yellow'
else:
color = 'red'
flag[k].append((b, p))
for pos in ['top', 'bottom', 'right', 'left']:
pl.ax.spines[pos].set_edgecolor(color)
pl.ax.set_axis_bgcolor(color)
pl.ax.set_alpha(0.2)
pl.ax.annotate("%.2f" % cv, xy=rp, xycoords='data', xytext=(0,-10*polarity),
textcoords='offset points', ha='center')
pl.ax.annotate("%.2f" % ma[-1], xy=(u[0][-1],ma[-1]), xycoords='data', xytext=(5,+5),
textcoords='offset points', ha='left')
pl.ax.annotate("(%+.2f%%)" % p, xy=(u[0][-1],ma[-1]), xycoords='data', xytext=(5,-5),
textcoords='offset points', ha='left')
"""
#pl.ax.annotate(b, xy=(u[0][-1],u[1][-1]), xycoords='data',
# xytext=(0, 0), textcoords='offset points') #, arrowprops=dict(arrowstyle="->"))
x = u[0][-1]
y = u[1][-1]
pl.ax.annotate(str(y), xy=(x,y), xycoords='data', xytext=(5,0),
textcoords='offset points', ha='left')
xmin, ymin = [(u[0][i],y) for i,y in enumerate(u[1]) if y == min(u[1])][-1]
xmax, ymax= [(u[0][i],y) for i,y in enumerate(u[1]) if y == max(u[1])][-1]
if ymax != ymin:
if xmax != x:
pl.ax.annotate(str(ymax), xy=(xmax,ymax),
textcoords='offset points', ha='center', va='bottom', xytext=(0,5))
if xmin != x:
pl.ax.annotate(str(ymin), xy=(xmin,ymin),
textcoords='offset points', ha='center', va='top', xytext=(0,-5))
"""
pl.close()
return flag
def generate_index_file(filenames):
row = """
<tr>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
</tr>
"""
sep = """
</table>
<table frame="box">
<tr>
<th colspan="3"><a name="%s">%s</a></th>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
Generated on %s
<table frame="box">
%s
</table>
</body>
</html>
"""
hrow = """
<tr>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
</tr>
"""
#h = map(lambda x:(x[0].replace(' ','%20'), x[0]), filenames)
h = []
for x in filenames:
tdattr = "<span></span>" #"bgcolor=green"
tdnote = ""
M = 0.0
if len(x) == 6:
for v in x[5].values():
if len(v) > 0:
M = max(M, abs(v[0][1]))
if M > 0.0:
tdattr = '<span style="color:yellow">►</span>'
if M > 10.0:
tdattr = '<span style="color:red">►</span>'
tdnote = " (by %.2f%%)" % M
h.append(("", x[0].replace(' ','%20'), tdattr + x[0] + tdnote))
n = 4
z = n-len(h)%n
while z > 0 and z < n:
h.append(('','',''))
z -= 1
rows = []
t = ()
for i in range(1, len(h)+1):
t += tuple(h[i-1])
if i%n == 0:
rows.append(hrow % t)
t = ()
last_app = None
for i in filenames:
if i[0] != last_app:
rows.append(sep % (i[0], i[0]))
last_app = i[0]
rows.append(row % (i[1], i[1], i[2], i[2], i[3], i[3]))
return full_content % (time.strftime("%Y/%m/%d %H:%M:%S"), ''.join(rows))
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n-1] = None
s = [float('NaN')]*(n-1)
for d in range(n, len(x)+1):
s.append(np.std(x[d-n:d]))
return (a,s)
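# Example of what moving_average() produces (values follow directly from the
# implementation above; shown here only as an illustration):
#
#   ma, mstd = moving_average([1.0, 2.0, 3.0, 4.0], 3)
#   # ma   -> [nan, nan, 2.0, 3.0]          (first n-1 slots padded with NaN)
#   # mstd -> [nan, nan, std([1,2,3]), std([2,3,4])]   (rolling population std)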
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base [ndays]" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
ndays = 2000
if len(sys.argv) >=4:
ndays = int(sys.argv[3])
width = None
height = None
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
# show all the history
(stats, mindate, maxdate) = get_stats(STATS_SERVER, 21212, ndays)
mindate = (mindate).replace(hour=0, minute=0, second=0, microsecond=0)
maxdate = (maxdate + datetime.timedelta(days=1)).replace(minute=0, hour=0, second=0, microsecond=0)
root_path = path
filenames = [] # (appname, latency, throughput)
iorder = 0
for group, data in stats.iteritems():
(app,nodes) = group
app = app.replace('/','')
conn = FastSerializer(STATS_SERVER, 21212)
proc = VoltProcedure(conn, "@AdHoc", [FastSerializer.VOLTTYPE_STRING])
resp = proc.call(["select chart_order, series, chart_heading, x_label, y_label from charts where appname = '%s' order by chart_order" % app])
conn.close()
app = app +" %d %s" % (nodes, ["node","nodes"][nodes>1])
legend = { 1 : dict(series="lat95", heading="95tile latency", xlabel="Time", ylabel="Latency (ms)"),
2 : dict(series="lat99", heading="99tile latency", xlabel="Time", ylabel="Latency (ms)"),
3 : dict(series="tppn", heading="avg throughput per node", xlabel="Time", ylabel="ops/sec per node")
}
for r in resp.tables[0].tuples:
legend[r[0]] = dict(series=r[1], heading=r[2], xlabel=r[3], ylabel=r[4])
fns = [app]
flags = dict()
for r in legend.itervalues():
title = app + " " + r['heading']
fn = "_" + title.replace(" ","_") + ".png"
fns.append(prefix + fn)
f = plot(title, r['xlabel'], r['ylabel'], path + fn, width, height, app, data, r['series'], mindate, maxdate)
flags.update(f)
fns.append(iorder)
fns.append(flags)
filenames.append(tuple(fns))
filenames.append(("KVBenchmark-five9s-latency", "", "", "http://ci/job/performance-nextrelease-5nines/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
filenames.append(("KVBenchmark-five9s-nofail-latency", "", "", "http://ci/job/performance-nextrelease-5nines-nofail/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
filenames.append(("KVBenchmark-five9s-nofail-nocl-latency", "", "", "http://ci/job/performance-nextrelease-5nines-nofail-nocl/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
# generate index file
index_file = open(root_path + '-index.html', 'w')
sorted_filenames = sorted(filenames, key=lambda f: f[0].lower()+str(f[1]))
index_file.write(generate_index_file(sorted_filenames))
index_file.close()
if __name__ == "__main__":
main()
| zheguang/voltdb | tools/vis2.py | Python | agpl-3.0 | 13,907 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import copy
import time
from shinken.basemodule import BaseModule
def get_objs_names(objs):
s = ''
for o in objs:
s += get_obj_name(o)
return s
def get_obj_name(obj):
print "ARG", obj
print "Get name on", obj.get_name()
return obj.get_name()
def list_to_comma(lst):
# For ['d', 'r', 'u'] will return d,r,u
return ','.join(lst)
def last_hard_state_to_int(lst):
return 1
# Class for the Merlindb Broker
# Get broks and puts them in merlin database
class Merlindb_broker(BaseModule):
def __init__(self, modconf, backend, host=None, user=None, password=None, database=None, character_set=None, database_path=None):
# Mapping for name of data, rename attributes and transform function
self.mapping = {
# Program status
'program_status': {'program_start': {'transform': None},
'pid': {'transform': None},
'last_alive': {'transform': None},
'is_running': {'transform': None},
'instance_id': {'transform': None},
},
# Program status update (every 10s)
'update_program_status': {'program_start': {'transform': None},
'pid': {'transform': None},
'last_alive': {'transform': None},
'is_running': {'transform': None},
'instance_id': {'transform': None},
},
# Host
'initial_host_status': {
'id': {'transform': None},
'instance_id': {'transform': None},
'host_name': {'transform': None},
'alias': {'transform': None},
'display_name': {'transform': None},
'address': {'transform': None},
'contact_groups': {'transform': None},
'contacts': {'transform': None},
'initial_state': {'transform': None},
'max_check_attempts': {'transform': None},
'check_interval': {'transform': None},
'retry_interval': {'transform': None},
'active_checks_enabled': {'transform': None},
'passive_checks_enabled': {'transform': None},
'obsess_over_host': {'transform': None},
'check_freshness': {'transform': None},
'freshness_threshold': {'transform': None},
'event_handler_enabled': {'transform': None},
'low_flap_threshold': {'transform': None},
'high_flap_threshold': {'transform': None},
'flap_detection_enabled': {'transform': None},
'process_perf_data': {'transform': None},
'notification_interval': {'transform': None},
'first_notification_delay': {'transform': None},
'notifications_enabled': {'transform': None},
'notes': {'transform': None},
'notes_url': {'transform': None},
'action_url': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'next_chk': {'transform': None, 'name': 'next_check'},
'attempt': {'transform': None, 'name': 'current_attempt'},
'state_id': {'transform': None, 'name': 'current_state'},
'state_type_id': {'transform': None, 'name': 'state_type'},
'current_event_id': {'transform': None},
'last_event_id': {'transform': None},
'last_state_id': {'transform': None, 'name': 'last_state'},
'last_state_change': {'transform': None},
'last_hard_state_change': {'transform': None},
'last_hard_state': {'transform': last_hard_state_to_int},
'is_flapping': {'transform': None},
'flapping_comment_id': {'transform': None},
'percent_state_change': {'transform': None},
'problem_has_been_acknowledged': {'transform': None},
'acknowledgement_type': {'transform': None},
'check_type': {'transform': None},
'has_been_checked': {'transform': None},
'should_be_scheduled': {'transform': None},
'last_problem_id': {'transform': None},
'current_problem_id': {'transform': None},
'execution_time': {'transform': None},
'last_notification': {'transform': None},
'current_notification_number': {'transform': None},
'current_notification_id': {'transform': None},
'check_flapping_recovery_notification': {'transform': None},
'scheduled_downtime_depth': {'transform': None},
'pending_flex_downtime': {'transform': None},
},
'update_host_status': {
'id': {'transform': None},
'instance_id': {'transform': None},
'host_name': {'transform': None},
'alias': {'transform': None},
'display_name': {'transform': None},
'address': {'transform': None},
'initial_state': {'transform': None},
'max_check_attempts': {'transform': None},
'check_interval': {'transform': None},
'retry_interval': {'transform': None},
'active_checks_enabled': {'transform': None},
'passive_checks_enabled': {'transform': None},
'obsess_over_host': {'transform': None},
'check_freshness': {'transform': None},
'freshness_threshold': {'transform': None},
'event_handler_enabled': {'transform': None},
'low_flap_threshold': {'transform': None},
'high_flap_threshold': {'transform': None},
'flap_detection_enabled': {'transform': None},
'process_perf_data': {'transform': None},
'notification_interval': {'transform': None},
'first_notification_delay': {'transform': None},
'notifications_enabled': {'transform': None},
'notes': {'transform': None},
'notes_url': {'transform': None},
'action_url': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'next_chk': {'transform': None, 'name': 'next_check'},
'attempt': {'transform': None, 'name': 'current_attempt'},
'state_id': {'transform': None, 'name': 'current_state'},
'state_type_id': {'transform': None, 'name': 'state_type'},
'current_event_id': {'transform': None},
'last_event_id': {'transform': None},
'last_state_id': {'transform': None, 'name': 'last_state'},
'last_state_change': {'transform': None},
'last_hard_state_change': {'transform': None},
'last_hard_state': {'transform': last_hard_state_to_int},
'is_flapping': {'transform': None},
'flapping_comment_id': {'transform': None},
'percent_state_change': {'transform': None},
'problem_has_been_acknowledged': {'transform': None},
'acknowledgement_type': {'transform': None},
'check_type': {'transform': None},
'has_been_checked': {'transform': None},
'should_be_scheduled': {'transform': None},
'last_problem_id': {'transform': None},
'current_problem_id': {'transform': None},
'execution_time': {'transform': None},
'last_notification': {'transform': None},
'current_notification_number': {'transform': None},
'current_notification_id': {'transform': None},
'check_flapping_recovery_notification': {'transform': None},
'scheduled_downtime_depth': {'transform': None},
'pending_flex_downtime': {'transform': None},
},
'host_check_result': {
'latency': {'transform': None},
'last_time_unreachable': {'transform': None},
'attempt': {'transform': None, 'name': 'current_attempt'},
'check_type': {'transform': None},
'state_type_id': {'transform': None, 'name': 'state_type'},
'execution_time': {'transform': None},
'start_time': {'transform': None},
'acknowledgement_type': {'transform': None},
'return_code': {'transform': None},
'last_time_down': {'transform': None},
'instance_id': {'transform': None},
'long_output': {'transform': None},
'end_time': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'timeout': {'transform': None},
'output': {'transform': None},
'state_id': {'transform': None, 'name': 'current_state'},
'last_time_up': {'transform': None},
'early_timeout': {'transform': None},
'perf_data': {'transform': None},
'host_name': {'transform': None},
},
'host_next_schedule': {
'instance_id': {'transform': None},
'next_chk': {'transform': None, 'name': 'next_check'},
'host_name': {'transform': None},
},
# Service
'initial_service_status': {
'id': {'transform': None},
'instance_id': {'transform': None},
'host_name': {'transform': None},
'service_description': {'transform': None},
'display_name': {'transform': None},
'is_volatile': {'transform': None},
'initial_state': {'transform': None},
'max_check_attempts': {'transform': None},
'check_interval': {'transform': None},
'retry_interval': {'transform': None},
'active_checks_enabled': {'transform': None},
'passive_checks_enabled': {'transform': None},
'obsess_over_service': {'transform': None},
'check_freshness': {'transform': None},
'freshness_threshold': {'transform': None},
'event_handler_enabled': {'transform': None},
'low_flap_threshold': {'transform': None},
'high_flap_threshold': {'transform': None},
'flap_detection_enabled': {'transform': None},
'process_perf_data': {'transform': None},
'notification_interval': {'transform': None},
'first_notification_delay': {'transform': None},
'notifications_enabled': {'transform': None},
'notes': {'transform': None},
'notes_url': {'transform': None},
'action_url': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'next_chk': {'transform': None, 'name': 'next_check'},
'attempt': {'transform': None, 'name': 'current_attempt'},
'state_id': {'transform': None, 'name': 'current_state'},
'current_event_id': {'transform': None},
'last_event_id': {'transform': None},
'last_state_id': {'transform': None, 'name': 'last_state'},
'last_state_change': {'transform': None},
'last_hard_state_change': {'transform': None},
'last_hard_state': {'transform': last_hard_state_to_int},
'state_type_id': {'transform': None, 'name': 'state_type'},
'is_flapping': {'transform': None},
'flapping_comment_id': {'transform': None},
'percent_state_change': {'transform': None},
'problem_has_been_acknowledged': {'transform': None},
'acknowledgement_type': {'transform': None},
'check_type': {'transform': None},
'has_been_checked': {'transform': None},
'should_be_scheduled': {'transform': None},
'last_problem_id': {'transform': None},
'current_problem_id': {'transform': None},
'execution_time': {'transform': None},
'last_notification': {'transform': None},
'current_notification_number': {'transform': None},
'current_notification_id': {'transform': None},
'check_flapping_recovery_notification': {'transform': None},
'scheduled_downtime_depth': {'transform': None},
'pending_flex_downtime': {'transform': None},
},
'update_service_status': {
'id': {'transform': None},
'instance_id': {'transform': None},
'host_name': {'transform': None},
'service_description': {'transform': None},
'display_name': {'transform': None},
'is_volatile': {'transform': None},
'initial_state': {'transform': None},
'max_check_attempts': {'transform': None},
'check_interval': {'transform': None},
'retry_interval': {'transform': None},
'active_checks_enabled': {'transform': None},
'passive_checks_enabled': {'transform': None},
'obsess_over_service': {'transform': None},
'check_freshness': {'transform': None},
'freshness_threshold': {'transform': None},
'event_handler_enabled': {'transform': None},
'low_flap_threshold': {'transform': None},
'high_flap_threshold': {'transform': None},
'flap_detection_enabled': {'transform': None},
'process_perf_data': {'transform': None},
'notification_interval': {'transform': None},
'first_notification_delay': {'transform': None},
'notifications_enabled': {'transform': None},
'notes': {'transform': None},
'notes_url': {'transform': None},
'action_url': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'next_chk': {'transform': None, 'name': 'next_check'},
'attempt': {'transform': None, 'name': 'current_attempt'},
'state_id': {'transform': None, 'name': 'current_state'},
'current_event_id': {'transform': None},
'last_event_id': {'transform': None},
'last_state_id': {'transform': None, 'name': 'last_state'},
'last_state_change': {'transform': None},
'last_hard_state_change': {'transform': None},
'last_hard_state': {'transform': last_hard_state_to_int},
            'state_type_id': {'transform': None, 'name': 'state_type'},
'is_flapping': {'transform': None},
'flapping_comment_id': {'transform': None},
'percent_state_change': {'transform': None},
'problem_has_been_acknowledged': {'transform': None},
'acknowledgement_type': {'transform': None},
'check_type': {'transform': None},
'has_been_checked': {'transform': None},
'should_be_scheduled': {'transform': None},
'last_problem_id': {'transform': None},
'current_problem_id': {'transform': None},
'execution_time': {'transform': None},
'last_notification': {'transform': None},
'current_notification_number': {'transform': None},
'current_notification_id': {'transform': None},
'check_flapping_recovery_notification': {'transform': None},
'scheduled_downtime_depth': {'transform': None},
'pending_flex_downtime': {'transform': None},
},
'service_check_result': {
'check_type': {'transform': None},
'last_time_critical': {'transform': None},
'last_time_warning': {'transform': None},
'latency': {'transform': None},
'last_chk': {'transform': None, 'name': 'last_check'},
'last_time_ok': {'transform': None},
'end_time': {'transform': None},
'last_time_unknown': {'transform': None},
'execution_time': {'transform': None},
'start_time': {'transform': None},
'return_code': {'transform': None},
'output': {'transform': None},
'service_description': {'transform': None},
'early_timeout': {'transform': None},
'attempt': {'transform': None, 'name': 'current_attempt'},
'state_type_id': {'transform': None, 'name': 'state_type'},
'acknowledgement_type': {'transform': None},
'instance_id': {'transform': None},
'long_output': {'transform': None},
'host_name': {'transform': None},
'timeout': {'transform': None},
'state_id': {'transform': None, 'name': 'current_state'},
'perf_data': {'transform': None},
},
'service_next_schedule': {
'next_chk': {'transform': None, 'name': 'next_check'},
'service_description': {'transform': None},
'instance_id': {'transform': None},
'host_name': {'transform': None},
},
# Contact
'initial_contact_status': {
'service_notifications_enabled': {'transform': None},
'can_submit_commands': {'transform': None},
'contact_name': {'transform': None},
'id': {'transform': None},
'retain_status_information': {'transform': None},
'address1': {'transform': None},
'address2': {'transform': None},
'address3': {'transform': None},
'address4': {'transform': None},
'address5': {'transform': None},
'address6': {'transform': None},
#'service_notification_commands': {'transform': get_objs_names},
'pager': {'transform': None},
#'host_notification_period': {'transform': get_obj_name},
'host_notifications_enabled': {'transform': None},
#'host_notification_commands': {'transform': get_objs_names},
#'service_notification_period': {'transform': get_obj_name},
'email': {'transform': None},
'alias': {'transform': None},
'host_notification_options': {'transform': list_to_comma},
'service_notification_options': {'transform': list_to_comma},
},
# Contact group
'initial_contactgroup_status': {
'contactgroup_name': {'transform': None},
'alias': {'transform': None},
'instance_id': {'transform': None},
'id': {'transform': None},
'members': {'transform': None},
},
# Host group
'initial_hostgroup_status': {
'hostgroup_name': {'transform': None},
'notes': {'transform': None},
'instance_id': {'transform': None},
'action_url': {'transform': None},
'notes_url': {'transform': None},
'members': {'transform': None},
'id': {'transform': None},
}
}
BaseModule.__init__(self, modconf)
self.backend = backend
self.host = host
self.user = user
self.password = password
self.database = database
self.character_set = character_set
self.database_path = database_path
# Now get a backend_db of our backend type
if backend == 'mysql':
#from mysql_backend import Mysql_backend
from shinken.db_mysql import DBMysql
print "Creating a mysql backend"
self.db_backend = DBMysql(host, user, password, database, character_set)
if backend == 'sqlite':
#from sqlite_backend import Sqlite_backend
from shinken.db_sqlite import DBSqlite
print "Creating a sqlite backend"
self.db_backend = DBSqlite(self.database_path)
def preprocess(self, type, brok):
new_brok = copy.deepcopy(brok)
# Only preprocess if we can apply a mapping
if type in self.mapping:
to_del = []
to_add = []
mapping = self.mapping[brok.type]
for prop in new_brok.data:
# ex: 'name': 'program_start_time', 'transform'
if prop in mapping:
#print "Got a prop to change", prop
val = brok.data[prop]
if mapping[prop]['transform'] is not None:
#print "Call function for", type, prop
f = mapping[prop]['transform']
val = f(val)
name = prop
if 'name' in mapping[prop]:
name = mapping[prop]['name']
to_add.append((name, val))
to_del.append(prop)
else:
to_del.append(prop)
for prop in to_del:
del new_brok.data[prop]
for (name, val) in to_add:
new_brok.data[name] = val
else:
print "No preprocess type", brok.type
print brok.data
return new_brok
# Called by Broker so we can do init stuff
# TODO: add conf param to get pass with init
# Conf from arbiter!
def init(self):
print "I connect to Merlin database"
self.db_backend.connect_database()
    # Get a brok, parse it, and put it in the database
# We call functions like manage_ TYPEOFBROK _brok that return us queries
def manage_brok(self, b):
type = b.type
manager = 'manage_' + type + '_brok'
#print "(Merlin) I search manager:", manager
if hasattr(self, manager):
new_b = self.preprocess(type, b)
f = getattr(self, manager)
queries = f(new_b)
# Ok, we've got queries, now: run them!
for q in queries:
self.db_backend.execute_query(q)
return
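    # Illustrative walk-through (the brok contents below are made up): a brok of
    # type 'host_check_result' carrying {'state_id': 1, 'latency': 0.2, ...} is
    # first passed through preprocess(), which keeps only the keys listed in
    # self.mapping['host_check_result'] and renames 'state_id' to 'current_state';
    # manage_host_check_result_brok() then stamps 'last_update' and builds an
    # UPDATE query against the 'host' table, keyed on host_name.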
    # Ok, we are at launch and a scheduler wants only its own data cleaned, OK...
    # So we create several queries over all the tables we need to purge of
    # our instance_id.
    # This brok must be sent at the beginning of a scheduler session,
    # if not, BAD THINGS MAY HAPPEN :)
def manage_clean_all_my_instance_id_brok(self, b):
instance_id = b.data['instance_id']
tables = ['command', 'comment', 'contact', 'contactgroup', 'downtime', 'host',
'hostdependency', 'hostescalation', 'hostgroup', 'notification', 'program_status',
'scheduled_downtime', 'service', 'serviceescalation',
'servicegroup', 'timeperiod']
res = []
for table in tables:
q = "DELETE FROM %s WHERE instance_id = '%s' " % (table, instance_id)
res.append(q)
return res
# Program status is .. status of program? :)
# Like pid, daemon mode, last activity, etc
# We already clean database, so insert
def manage_program_status_brok(self, b):
instance_id = b.data['instance_id']
del_query = "DELETE FROM program_status WHERE instance_id = '%s' " % instance_id
query = self.db_backend.create_insert_query('program_status', b.data)
return [del_query, query]
# Program status is .. status of program? :)
# Like pid, daemon mode, last activity, etc
# We already clean database, so insert
def manage_update_program_status_brok(self, b):
instance_id = b.data['instance_id']
del_query = "DELETE FROM program_status WHERE instance_id = '%s' " % instance_id
query = self.db_backend.create_insert_query('program_status', b.data)
return [del_query, query]
# Initial service status is at start. We need an insert because we
# clean the base
def manage_initial_service_status_brok(self, b):
b.data['last_update'] = time.time()
# It's a initial entry, so we need insert
query = self.db_backend.create_insert_query('service', b.data)
return [query]
    # A service check result has just arrived; we UPDATE the data with it
def manage_service_check_result_brok(self, b):
data = b.data
b.data['last_update'] = time.time()
# We just impact the service :)
where_clause = {'host_name': data['host_name'], 'service_description': data['service_description']}
query = self.db_backend.create_update_query('service', data, where_clause)
return [query]
    # A new service schedule has just arrived; we UPDATE the data with it
def manage_service_next_schedule_brok(self, b):
data = b.data
# We just impact the service :)
where_clause = {'host_name': data['host_name'], 'service_description': data['service_description']}
query = self.db_backend.create_update_query('service', data, where_clause)
return [query]
# A full service status? Ok, update data
def manage_update_service_status_brok(self, b):
data = b.data
b.data['last_update'] = time.time()
where_clause = {'host_name': data['host_name'], 'service_description': data['service_description']}
query = self.db_backend.create_update_query('service', data, where_clause)
return [query]
    # A host has just been created and the database is clean, so we INSERT it
def manage_initial_host_status_brok(self, b):
b.data['last_update'] = time.time()
tmp_data = copy.copy(b.data)
del tmp_data['contacts']
del tmp_data['contact_groups']
query = self.db_backend.create_insert_query('host', tmp_data)
res = [query]
for cg_name in b.data['contact_groups'].split(','):
q_del = "DELETE FROM host_contactgroup WHERE host = '%s' and contactgroup = (SELECT id FROM contactgroup WHERE contactgroup_name = '%s')" % (b.data['id'], cg_name)
res.append(q_del)
q = "INSERT INTO host_contactgroup (host, contactgroup) VALUES ('%s', (SELECT id FROM contactgroup WHERE contactgroup_name = '%s'))" % (b.data['id'], cg_name)
res.append(q)
return res
# A new host group? Insert it
# We need to do something for the members prop (host.id, host_name)
# They are for host_hostgroup table, with just host.id hostgroup.id
def manage_initial_hostgroup_status_brok(self, b):
data = b.data
# Here we've got a special case: in data, there is members
# and we do not want it in the INSERT query, so we create a
# tmp_data without it
tmp_data = copy.copy(data)
del tmp_data['members']
query = self.db_backend.create_insert_query('hostgroup', tmp_data)
res = [query]
        # Ok, the hostgroup table is up to date, now we add relations
# between hosts and hostgroups
for (h_id, h_name) in b.data['members']:
# First clean
q_del = "DELETE FROM host_hostgroup WHERE host = '%s' and hostgroup='%s'" % (h_id, b.data['id'])
res.append(q_del)
# Then add
q = "INSERT INTO host_hostgroup (host, hostgroup) VALUES ('%s', '%s')" % (h_id, b.data['id'])
res.append(q)
return res
    # Same as for hostgroup, but with servicegroup
def manage_initial_servicegroup_status_brok(self, b):
data = b.data
# Here we've got a special case: in data, there is members
# and we do not want it in the INSERT query, so we create a
# tmp_data without it
tmp_data = copy.copy(data)
del tmp_data['members']
query = self.db_backend.create_insert_query('servicegroup', tmp_data)
res = [query]
# Now the members part
for (s_id, s_name) in b.data['members']:
# first clean
q_del = "DELETE FROM service_servicegroup WHERE service='%s' and servicegroup='%s'" % (s_id, b.data['id'])
res.append(q_del)
# Then add
q = "INSERT INTO service_servicegroup (service, servicegroup) VALUES ('%s', '%s')" % (s_id, b.data['id'])
res.append(q)
return res
    # Same as the service result, but for the host result
def manage_host_check_result_brok(self, b):
b.data['last_update'] = time.time()
data = b.data
# Only the host is impacted
where_clause = {'host_name': data['host_name']}
query = self.db_backend.create_update_query('host', data, where_clause)
return [query]
    # Same as the service result, but for host rescheduling
def manage_host_next_schedule_brok(self, b):
data = b.data
# Only the host is impacted
where_clause = {'host_name': data['host_name']}
query = self.db_backend.create_update_query('host', data, where_clause)
return [query]
# Ok the host is updated
def manage_update_host_status_brok(self, b):
b.data['last_update'] = time.time()
data = b.data
# Only this host
where_clause = {'host_name': data['host_name']}
query = self.db_backend.create_update_query('host', data, where_clause)
return [query]
    # A contact has just been created and the database is clean, so we INSERT it
def manage_initial_contact_status_brok(self, b):
query = self.db_backend.create_insert_query('contact', b.data)
return [query]
    # Same as for hostgroup, but with contactgroup
def manage_initial_contactgroup_status_brok(self, b):
data = b.data
# Here we've got a special case: in data, there is members
# and we do not want it in the INSERT query, so we create a
# tmp_data without it
tmp_data = copy.copy(data)
del tmp_data['members']
query = self.db_backend.create_insert_query('contactgroup', tmp_data)
res = [query]
# Now the members part
for (c_id, c_name) in b.data['members']:
# first clean
q_del = "DELETE FROM contact_contactgroup WHERE contact='%s' and contactgroup='%s'" % (c_id, b.data['id'])
res.append(q_del)
# Then add
q = "INSERT INTO contact_contactgroup (contact, contactgroup) VALUES ('%s', '%s')" % (c_id, b.data['id'])
res.append(q)
return res
    # A notification has just been created; we INSERT it
def manage_notification_raise_brok(self, b):
n_data = {}
t = ['reason_type', 'service_description', 'ack_data', 'contacts_notified', 'start_time', 'escalated', 'instance_id',
'state', 'end_time', 'ack_author', 'notification_type', 'output', 'id', 'host_name']
for prop in t:
n_data[prop] = b.data[prop]
query = self.db_backend.create_insert_query('notification', n_data)
return [query]
| wbsavage/shinken | shinken/modules/merlindb_broker/merlindb_broker.py | Python | agpl-3.0 | 32,777 |
import arrow
import datetime
import calendar
from json import JSONEncoder, dumps
from flask import Response
from inbox.models import (Message, Contact, Calendar, Event, When,
Thread, Namespace, Block, Category, Account,
Metadata)
from inbox.models.event import (RecurringEvent, RecurringEventOverride,
InflatedEvent)
from nylas.logging import get_logger
log = get_logger()
def format_address_list(addresses):
if addresses is None:
return []
return [{'name': name, 'email': email} for name, email in addresses]
def format_categories(categories):
if categories is None:
return []
formatted_categories = []
for category in categories:
if not category:
continue
formatted_categories.append({'id': category.public_id,
'name': category.name or None,
'display_name': category.api_display_name})
return formatted_categories
def format_phone_numbers(phone_numbers):
formatted_phone_numbers = []
for number in phone_numbers:
formatted_phone_numbers.append({
'type': number.type,
'number': number.number,
})
return formatted_phone_numbers
def encode(obj, namespace_public_id=None, expand=False, is_n1=False):
try:
return _encode(obj, namespace_public_id, expand, is_n1=is_n1)
except Exception as e:
error_context = {
"id": getattr(obj, "id", None),
"cls": str(getattr(obj, "__class__", None)),
"exception": e,
"exc_info": True
}
log.error("object encoding failure", **error_context)
raise
def _encode(obj, namespace_public_id=None, expand=False, is_n1=False):
"""
Returns a dictionary representation of a Nylas model object obj, or
None if there is no such representation defined. If the optional
    namespace_public_id parameter is passed, it will be used instead of fetching
the namespace public id for each object. This improves performance when
serializing large numbers of objects, but also means that you must take
care to ONLY serialize objects that belong to the given namespace!
Parameters
----------
namespace_public_id: string, optional
public id of the namespace to which the object to serialize belongs.
Returns
-------
dictionary or None
"""
def _get_namespace_public_id(obj):
return namespace_public_id or obj.namespace.public_id
def _format_participant_data(participant):
"""Event.participants is a JSON blob which may contain internal data.
This function returns a dict with only the data we want to make
public."""
dct = {}
for attribute in ['name', 'status', 'email', 'comment']:
dct[attribute] = participant.get(attribute)
return dct
def _get_lowercase_class_name(obj):
return type(obj).__name__.lower()
# Flask's jsonify() doesn't handle datetimes or json arrays as primary
# objects.
if isinstance(obj, datetime.datetime):
return calendar.timegm(obj.utctimetuple())
if isinstance(obj, datetime.date):
return obj.isoformat()
if isinstance(obj, arrow.arrow.Arrow):
return encode(obj.datetime)
if isinstance(obj, Namespace): # These are now "accounts"
acc_state = obj.account.sync_state
if acc_state is None:
acc_state = 'running'
if is_n1 and acc_state not in ['running', 'invalid']:
acc_state = 'running'
resp = {
'id': obj.public_id,
'object': 'account',
'account_id': obj.public_id,
'email_address': obj.account.email_address if obj.account else '',
'name': obj.account.name,
'provider': obj.account.provider,
'organization_unit': obj.account.category_type,
'sync_state': acc_state
}
# Gmail accounts do not set the `server_settings`
if expand and obj.account.server_settings:
resp['server_settings'] = obj.account.server_settings
return resp
elif isinstance(obj, Account):
raise Exception("Should never be serializing accounts")
elif isinstance(obj, Message):
thread_public_id = None
if obj.thread:
thread_public_id = obj.thread.public_id
resp = {
'id': obj.public_id,
'object': 'message',
'account_id': _get_namespace_public_id(obj),
'subject': obj.subject,
'from': format_address_list(obj.from_addr),
'reply_to': format_address_list(obj.reply_to),
'to': format_address_list(obj.to_addr),
'cc': format_address_list(obj.cc_addr),
'bcc': format_address_list(obj.bcc_addr),
'date': obj.received_date,
'thread_id': thread_public_id,
'snippet': obj.snippet,
'body': obj.body,
'unread': not obj.is_read,
'starred': obj.is_starred,
'files': obj.api_attachment_metadata,
'events': [encode(e) for e in obj.events]
}
categories = format_categories(obj.categories)
if obj.namespace.account.category_type == 'folder':
resp['folder'] = categories[0] if categories else None
else:
resp['labels'] = categories
# If the message is a draft (Nylas-created or otherwise):
if obj.is_draft:
resp['object'] = 'draft'
resp['version'] = obj.version
if obj.reply_to_message is not None:
resp['reply_to_message_id'] = obj.reply_to_message.public_id
else:
resp['reply_to_message_id'] = None
if expand:
resp['headers'] = {
'Message-Id': obj.message_id_header,
'In-Reply-To': obj.in_reply_to,
'References': obj.references
}
return resp
elif isinstance(obj, Thread):
base = {
'id': obj.public_id,
'object': 'thread',
'account_id': _get_namespace_public_id(obj),
'subject': obj.subject,
'participants': format_address_list(obj.participants),
'last_message_timestamp': obj.recentdate,
'last_message_received_timestamp': obj.most_recent_received_date,
'last_message_sent_timestamp': obj.most_recent_sent_date,
'first_message_timestamp': obj.subjectdate,
'snippet': obj.snippet,
'unread': obj.unread,
'starred': obj.starred,
'has_attachments': obj.has_attachments,
'version': obj.version,
}
categories = format_categories(obj.categories)
if obj.namespace.account.category_type == 'folder':
base['folders'] = categories
else:
base['labels'] = categories
if not expand:
base['message_ids'] = \
[m.public_id for m in obj.messages if not m.is_draft]
base['draft_ids'] = [m.public_id for m in obj.drafts]
return base
# Expand messages within threads
all_expanded_messages = []
all_expanded_drafts = []
for msg in obj.messages:
resp = {
'id': msg.public_id,
'object': 'message',
'account_id': _get_namespace_public_id(msg),
'subject': msg.subject,
'from': format_address_list(msg.from_addr),
'reply_to': format_address_list(msg.reply_to),
'to': format_address_list(msg.to_addr),
'cc': format_address_list(msg.cc_addr),
'bcc': format_address_list(msg.bcc_addr),
'date': msg.received_date,
'thread_id': obj.public_id,
'snippet': msg.snippet,
'unread': not msg.is_read,
'starred': msg.is_starred,
'files': msg.api_attachment_metadata
}
categories = format_categories(msg.categories)
if obj.namespace.account.category_type == 'folder':
resp['folder'] = categories[0] if categories else None
else:
resp['labels'] = categories
if msg.is_draft:
resp['object'] = 'draft'
resp['version'] = msg.version
if msg.reply_to_message is not None:
resp['reply_to_message_id'] = \
msg.reply_to_message.public_id
else:
resp['reply_to_message_id'] = None
all_expanded_drafts.append(resp)
else:
all_expanded_messages.append(resp)
base['messages'] = all_expanded_messages
base['drafts'] = all_expanded_drafts
return base
elif isinstance(obj, Contact):
return {
'id': obj.public_id,
'object': 'contact',
'account_id': _get_namespace_public_id(obj),
'name': obj.name,
'email': obj.email_address,
'phone_numbers': format_phone_numbers(obj.phone_numbers)
}
elif isinstance(obj, Event):
resp = {
'id': obj.public_id,
'object': 'event',
'account_id': _get_namespace_public_id(obj),
'calendar_id': obj.calendar.public_id if obj.calendar else None,
'message_id': obj.message.public_id if obj.message else None,
'title': obj.title,
'description': obj.description,
'owner': obj.owner,
'participants': [_format_participant_data(participant)
for participant in obj.participants],
'read_only': obj.read_only,
'location': obj.location,
'when': encode(obj.when),
'busy': obj.busy,
'status': obj.status,
}
if isinstance(obj, RecurringEvent):
resp['recurrence'] = {
'rrule': obj.recurring,
'timezone': obj.start_timezone
}
if isinstance(obj, RecurringEventOverride):
resp['original_start_time'] = encode(obj.original_start_time)
if obj.master:
resp['master_event_id'] = obj.master.public_id
if isinstance(obj, InflatedEvent):
del resp['message_id']
if obj.master:
resp['master_event_id'] = obj.master.public_id
if obj.master.calendar:
resp['calendar_id'] = obj.master.calendar.public_id
return resp
elif isinstance(obj, Calendar):
return {
'id': obj.public_id,
'object': 'calendar',
'account_id': _get_namespace_public_id(obj),
'name': obj.name,
'description': obj.description,
'read_only': obj.read_only,
}
elif isinstance(obj, When):
# Get time dictionary e.g. 'start_time': x, 'end_time': y or 'date': z
times = obj.get_time_dict()
resp = {k: encode(v) for k, v in times.iteritems()}
resp['object'] = _get_lowercase_class_name(obj)
return resp
elif isinstance(obj, Block): # ie: Attachments/Files
resp = {
'id': obj.public_id,
'object': 'file',
'account_id': _get_namespace_public_id(obj),
'content_type': obj.content_type,
'size': obj.size,
'filename': obj.filename,
}
if len(obj.parts):
# if obj is actually a message attachment (and not merely an
# uploaded file), set additional properties
resp.update({
'message_ids': [p.message.public_id for p in obj.parts]})
content_ids = list({p.content_id for p in obj.parts
if p.content_id is not None})
content_id = None
if len(content_ids) > 0:
content_id = content_ids[0]
resp.update({'content_id': content_id})
return resp
elif isinstance(obj, Category):
# 'object' is set to 'folder' or 'label'
resp = {
'id': obj.public_id,
'object': obj.type,
'account_id': _get_namespace_public_id(obj),
'name': obj.name or None,
'display_name': obj.api_display_name
}
return resp
elif isinstance(obj, Metadata):
resp = {
'id': obj.public_id,
'account_id': _get_namespace_public_id(obj),
'application_id': obj.app_client_id,
'object_type': obj.object_type,
'object_id': obj.object_public_id,
'version': obj.version,
'value': obj.value
}
return resp
class APIEncoder(object):
"""
Provides methods for serializing Nylas objects. If the optional
namespace_public_id parameter is passed, it will be bound and used instead
of fetching the namespace public id for each object. This improves
performance when serializing large numbers of objects, but also means that
you must take care to ONLY serialize objects that belong to the given
namespace!
Parameters
----------
namespace_public_id: string, optional
public id of the namespace to which the object to serialize belongs.
"""
def __init__(self, namespace_public_id=None, expand=False, is_n1=False):
self.encoder_class = self._encoder_factory(namespace_public_id, expand, is_n1=is_n1)
def _encoder_factory(self, namespace_public_id, expand, is_n1=False):
class InternalEncoder(JSONEncoder):
def default(self, obj):
custom_representation = encode(obj,
namespace_public_id,
expand=expand, is_n1=is_n1)
if custom_representation is not None:
return custom_representation
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, obj)
return InternalEncoder
def cereal(self, obj, pretty=False):
"""
Returns the JSON string representation of obj.
Parameters
----------
obj: serializable object
pretty: bool, optional
Whether to pretty-print the string (with 4-space indentation).
Raises
------
TypeError
If obj is not serializable.
"""
if pretty:
return dumps(obj,
sort_keys=True,
indent=4,
separators=(',', ': '),
cls=self.encoder_class)
return dumps(obj, cls=self.encoder_class)
def jsonify(self, obj):
"""
Returns a Flask Response object encapsulating the JSON
representation of obj.
Parameters
----------
obj: serializable object
Raises
------
TypeError
If obj is not serializable.
"""
return Response(self.cereal(obj, pretty=True),
mimetype='application/json')
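# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch, assuming Flask is installed (jsonify() returns a flask
# Response); the namespace id and objects below are made up.
#
#     encoder = APIEncoder(namespace_public_id='example-namespace-id')
#     json_str = encoder.cereal({'status': 'ok'}, pretty=True)   # JSON string
#     response = encoder.jsonify(some_serializable_object)       # flask.Response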
| jobscore/sync-engine | inbox/api/kellogs.py | Python | agpl-3.0 | 15,458 |
"""
Discussion API internal interface
"""
import itertools
from collections import defaultdict
from enum import Enum
import six
from django.core.exceptions import ValidationError
from django.http import Http404
from django.urls import reverse
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import CourseKey
from rest_framework.exceptions import PermissionDenied
from six.moves.urllib.parse import urlencode, urlunparse
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.discussion.django_comment_client.base.views import (
track_comment_created_event,
track_thread_created_event,
track_voted_event
)
from lms.djangoapps.discussion.django_comment_client.utils import (
get_accessible_discussion_xblocks,
get_group_id_for_user,
is_commentable_divided
)
from lms.djangoapps.discussion.rest_api.exceptions import (
CommentNotFoundError,
DiscussionDisabledError,
ThreadNotFoundError
)
from lms.djangoapps.discussion.rest_api.forms import CommentActionsForm, ThreadActionsForm
from lms.djangoapps.discussion.rest_api.pagination import DiscussionAPIPagination
from lms.djangoapps.discussion.rest_api.permissions import (
can_delete,
get_editable_fields,
get_initializable_comment_fields,
get_initializable_thread_fields
)
from lms.djangoapps.discussion.rest_api.serializers import (
CommentSerializer,
DiscussionTopicSerializer,
ThreadSerializer,
get_context
)
from openedx.core.djangoapps.django_comment_common.comment_client.comment import Comment
from openedx.core.djangoapps.django_comment_common.comment_client.thread import Thread
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientRequestError
from openedx.core.djangoapps.django_comment_common.signals import (
comment_created,
comment_deleted,
comment_edited,
comment_voted,
thread_created,
thread_deleted,
thread_edited,
thread_voted,
thread_followed,
thread_unfollowed
)
from openedx.core.djangoapps.django_comment_common.utils import get_course_discussion_settings
from openedx.core.djangoapps.user_api.accounts.api import get_account_settings
from openedx.core.djangoapps.user_api.accounts.views import AccountViewSet
from openedx.core.lib.exceptions import CourseNotFoundError, DiscussionNotFoundError, PageNotFoundError
class DiscussionTopic(object):
"""
Class for discussion topic structure
"""
def __init__(self, topic_id, name, thread_list_url, children=None):
self.id = topic_id # pylint: disable=invalid-name
self.name = name
self.thread_list_url = thread_list_url
self.children = children or [] # children are of same type i.e. DiscussionTopic
class DiscussionEntity(Enum):
"""
Enum for different types of discussion related entities
"""
thread = 'thread'
comment = 'comment'
def _get_course(course_key, user):
"""
Get the course descriptor, raising CourseNotFoundError if the course is not found or
the user cannot access forums for the course, and DiscussionDisabledError if the
discussion tab is disabled for the course.
"""
try:
course = get_course_with_access(user, 'load', course_key, check_if_enrolled=True)
except Http404:
# Convert 404s into CourseNotFoundErrors.
raise CourseNotFoundError("Course not found.")
except CourseAccessRedirect:
# Raise course not found if the user cannot access the course
# since it doesn't make sense to redirect an API.
raise CourseNotFoundError("Course not found.")
if not any([tab.type == 'discussion' and tab.is_enabled(course, user) for tab in course.tabs]):
raise DiscussionDisabledError("Discussion is disabled for the course.")
return course
def _get_thread_and_context(request, thread_id, retrieve_kwargs=None):
"""
Retrieve the given thread and build a serializer context for it, returning
both. This function also enforces access control for the thread (checking
both the user's access to the course and to the thread's cohort if
applicable). Raises ThreadNotFoundError if the thread does not exist or the
user cannot access it.
"""
retrieve_kwargs = retrieve_kwargs or {}
try:
if "with_responses" not in retrieve_kwargs:
retrieve_kwargs["with_responses"] = False
if "mark_as_read" not in retrieve_kwargs:
retrieve_kwargs["mark_as_read"] = False
cc_thread = Thread(id=thread_id).retrieve(**retrieve_kwargs)
course_key = CourseKey.from_string(cc_thread["course_id"])
course = _get_course(course_key, request.user)
context = get_context(course, request, cc_thread)
course_discussion_settings = get_course_discussion_settings(course_key)
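        # Enforce cohort/group visibility: non-privileged users may only view threads in their own group.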
if (
not context["is_requester_privileged"] and
cc_thread["group_id"] and
is_commentable_divided(course.id, cc_thread["commentable_id"], course_discussion_settings)
):
requester_group_id = get_group_id_for_user(request.user, course_discussion_settings)
if requester_group_id is not None and cc_thread["group_id"] != requester_group_id:
raise ThreadNotFoundError("Thread not found.")
return cc_thread, context
except CommentClientRequestError:
# params are validated at a higher level, so the only possible request
# error is if the thread doesn't exist
raise ThreadNotFoundError("Thread not found.")
def _get_comment_and_context(request, comment_id):
"""
Retrieve the given comment and build a serializer context for it, returning
both. This function also enforces access control for the comment (checking
both the user's access to the course and to the comment's thread's cohort if
applicable). Raises CommentNotFoundError if the comment does not exist or the
user cannot access it.
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
_, context = _get_thread_and_context(request, cc_comment["thread_id"])
return cc_comment, context
except CommentClientRequestError:
raise CommentNotFoundError("Comment not found.")
def _is_user_author_or_privileged(cc_content, context):
"""
Check if the user is the author of a content object or a privileged user.
Returns:
Boolean
"""
return (
context["is_requester_privileged"] or
context["cc_requester"]["id"] == cc_content["user_id"]
)
def get_thread_list_url(request, course_key, topic_id_list=None, following=False):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = reverse("thread-list")
query_list = (
[("course_id", six.text_type(course_key))] +
[("topic_id", topic_id) for topic_id in topic_id_list or []] +
([("following", following)] if following else [])
)
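    # Illustrative result: the reversed thread-list path with a query string like
    # course_id=<id>&topic_id=t1&topic_id=t2[&following=True].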
return request.build_absolute_uri(urlunparse(("", "", path, "", urlencode(query_list), "")))
def get_course(request, course_key):
"""
Return general discussion information for the course.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
course_key: The key of the course to get information for
Returns:
The course information; see discussion.rest_api.views.CourseView for more
detail.
Raises:
CourseNotFoundError: if the course does not exist or is not accessible
to the requesting user
"""
course = _get_course(course_key, request.user)
return {
"id": six.text_type(course_key),
"blackouts": [
{"start": blackout["start"].isoformat(), "end": blackout["end"].isoformat()}
for blackout in course.get_discussion_blackout_datetimes()
],
"thread_list_url": get_thread_list_url(request, course_key),
"following_thread_list_url": get_thread_list_url(request, course_key, following=True),
"topics_url": request.build_absolute_uri(
reverse("course_topics", kwargs={"course_id": course_key})
)
}
def get_courseware_topics(request, course_key, course, topic_ids):
"""
Returns a list of topic trees for courseware-linked topics.
Parameters:
request: The django request objects used for build_absolute_uri.
course_key: The key of the course to get discussion threads for.
course: The course for which topics are requested.
topic_ids: A list of topic IDs for which details are requested.
This is optional. If None then all course topics are returned.
Returns:
A list of courseware topics and a set of existing topics among
topic_ids.
"""
courseware_topics = []
existing_topic_ids = set()
def get_xblock_sort_key(xblock):
"""
Get the sort key for the xblock (falling back to the discussion_target
setting if absent)
"""
return xblock.sort_key or xblock.discussion_target
def get_sorted_xblocks(category):
"""Returns key sorted xblocks by category"""
return sorted(xblocks_by_category[category], key=get_xblock_sort_key)
discussion_xblocks = get_accessible_discussion_xblocks(course, request.user)
xblocks_by_category = defaultdict(list)
for xblock in discussion_xblocks:
xblocks_by_category[xblock.discussion_category].append(xblock)
for category in sorted(xblocks_by_category.keys()):
children = []
for xblock in get_sorted_xblocks(category):
if not topic_ids or xblock.discussion_id in topic_ids:
discussion_topic = DiscussionTopic(
xblock.discussion_id,
xblock.discussion_target,
get_thread_list_url(request, course_key, [xblock.discussion_id]),
)
children.append(discussion_topic)
if topic_ids and xblock.discussion_id in topic_ids:
existing_topic_ids.add(xblock.discussion_id)
if not topic_ids or children:
discussion_topic = DiscussionTopic(
None,
category,
get_thread_list_url(request, course_key, [item.discussion_id for item in get_sorted_xblocks(category)]),
children,
)
courseware_topics.append(DiscussionTopicSerializer(discussion_topic).data)
return courseware_topics, existing_topic_ids
def get_non_courseware_topics(request, course_key, course, topic_ids):
"""
Returns a list of topic trees that are not linked to courseware.
Parameters:
request: The django request objects used for build_absolute_uri.
course_key: The key of the course to get discussion threads for.
course: The course for which topics are requested.
topic_ids: A list of topic IDs for which details are requested.
This is optional. If None then all course topics are returned.
Returns:
A list of non-courseware topics and a set of existing topics among
topic_ids.
"""
non_courseware_topics = []
existing_topic_ids = set()
sorted_topics = sorted(list(course.discussion_topics.items()), key=lambda item: item[1].get("sort_key", item[0]))
for name, entry in sorted_topics:
if not topic_ids or entry['id'] in topic_ids:
discussion_topic = DiscussionTopic(
entry["id"], name, get_thread_list_url(request, course_key, [entry["id"]])
)
non_courseware_topics.append(DiscussionTopicSerializer(discussion_topic).data)
if topic_ids and entry["id"] in topic_ids:
existing_topic_ids.add(entry["id"])
return non_courseware_topics, existing_topic_ids
def get_course_topics(request, course_key, topic_ids=None):
"""
Returns the course topic listing for the given course and user; filtered
by 'topic_ids' list if given.
Parameters:
course_key: The key of the course to get topics for
user: The requesting user, for access control
topic_ids: A list of topic IDs for which topic details are requested
Returns:
A course topic listing dictionary; see discussion.rest_api.views.CourseTopicViews
for more detail.
Raises:
        DiscussionNotFoundError: If any of the given topic_ids are not found.
"""
course = _get_course(course_key, request.user)
courseware_topics, existing_courseware_topic_ids = get_courseware_topics(request, course_key, course, topic_ids)
non_courseware_topics, existing_non_courseware_topic_ids = get_non_courseware_topics(
request, course_key, course, topic_ids
)
if topic_ids:
not_found_topic_ids = topic_ids - (existing_courseware_topic_ids | existing_non_courseware_topic_ids)
if not_found_topic_ids:
raise DiscussionNotFoundError(
u"Discussion not found for '{}'.".format(", ".join(str(id) for id in not_found_topic_ids))
)
return {
"courseware_topics": courseware_topics,
"non_courseware_topics": non_courseware_topics,
}
def _get_user_profile_dict(request, usernames):
"""
Gets user profile details for a list of usernames and creates a dictionary with
profile details against username.
Parameters:
request: The django request object.
usernames: A string of comma separated usernames.
Returns:
A dict with username as key and user profile details as value.
"""
if usernames:
username_list = usernames.split(",")
else:
username_list = []
user_profile_details = get_account_settings(request, username_list)
return {user['username']: user for user in user_profile_details}
def _user_profile(user_profile):
"""
Returns the user profile object. For now, this just comprises the
profile_image details.
"""
return {
'profile': {
'image': user_profile['profile_image']
}
}
def _get_users(discussion_entity_type, discussion_entity, username_profile_dict):
"""
Returns users with profile details for given discussion thread/comment.
Parameters:
discussion_entity_type: DiscussionEntity Enum value for Thread or Comment.
discussion_entity: Serialized thread/comment.
username_profile_dict: A dict with user profile details against username.
Returns:
A dict of users with username as key and user profile details as value.
"""
users = {}
if discussion_entity['author']:
users[discussion_entity['author']] = _user_profile(username_profile_dict[discussion_entity['author']])
if (
discussion_entity_type == DiscussionEntity.comment
and discussion_entity['endorsed']
and discussion_entity['endorsed_by']
):
users[discussion_entity['endorsed_by']] = _user_profile(username_profile_dict[discussion_entity['endorsed_by']])
return users
def _add_additional_response_fields(
request, serialized_discussion_entities, usernames, discussion_entity_type, include_profile_image
):
"""
Adds additional data to serialized discussion thread/comment.
Parameters:
request: The django request object.
serialized_discussion_entities: A list of serialized Thread/Comment.
usernames: A list of usernames involved in threads/comments (e.g. as author or as comment endorser).
discussion_entity_type: DiscussionEntity Enum value for Thread or Comment.
include_profile_image: (boolean) True if requested_fields has 'profile_image' else False.
Returns:
A list of serialized discussion thread/comment with additional data if requested.
"""
if include_profile_image:
username_profile_dict = _get_user_profile_dict(request, usernames=','.join(usernames))
for discussion_entity in serialized_discussion_entities:
discussion_entity['users'] = _get_users(discussion_entity_type, discussion_entity, username_profile_dict)
return serialized_discussion_entities
def _include_profile_image(requested_fields):
"""
    Returns True if the requested_fields list contains 'profile_image', else False.
"""
return requested_fields and 'profile_image' in requested_fields
def _serialize_discussion_entities(request, context, discussion_entities, requested_fields, discussion_entity_type):
"""
    Serializes a discussion entity (Thread or Comment) and adds additional data if requested.
    For a given list of threads/comments, each one is serialized and additional information is
    added to the object as per the requested_fields list (i.e. profile_image).
Parameters:
request: The django request object
context: The context appropriate for use with the thread or comment
discussion_entities: List of Thread or Comment objects
requested_fields: Indicates which additional fields to return
for each thread.
discussion_entity_type: DiscussionEntity Enum value for Thread or Comment
Returns:
A list of serialized discussion entities
"""
results = []
usernames = []
include_profile_image = _include_profile_image(requested_fields)
for entity in discussion_entities:
if discussion_entity_type == DiscussionEntity.thread:
serialized_entity = ThreadSerializer(entity, context=context).data
elif discussion_entity_type == DiscussionEntity.comment:
serialized_entity = CommentSerializer(entity, context=context).data
results.append(serialized_entity)
if include_profile_image:
if serialized_entity['author'] and serialized_entity['author'] not in usernames:
usernames.append(serialized_entity['author'])
if (
'endorsed' in serialized_entity and serialized_entity['endorsed'] and
'endorsed_by' in serialized_entity and
serialized_entity['endorsed_by'] and serialized_entity['endorsed_by'] not in usernames
):
usernames.append(serialized_entity['endorsed_by'])
results = _add_additional_response_fields(
request, results, usernames, discussion_entity_type, include_profile_image
)
return results
def get_thread_list(
request,
course_key,
page,
page_size,
topic_id_list=None,
text_search=None,
following=False,
view=None,
order_by="last_activity_at",
order_direction="desc",
requested_fields=None,
):
"""
Return the list of all discussion threads pertaining to the given course
Parameters:
request: The django request objects used for build_absolute_uri
course_key: The key of the course to get discussion threads for
page: The page number (1-indexed) to retrieve
page_size: The number of threads to retrieve per page
topic_id_list: The list of topic_ids to get the discussion threads for
        text_search: A text search query string to match
following: If true, retrieve only threads the requester is following
view: filters for either "unread" or "unanswered" threads
        order_by: The key to sort the threads by. The only values are
            "last_activity_at", "comment_count", and "vote_count". The default is
            "last_activity_at".
        order_direction: The direction in which to sort the threads. The default
            and only value is "desc". This will be removed in a future major
            version.
requested_fields: Indicates which additional fields to return
for each thread. (i.e. ['profile_image'])
Note that topic_id_list, text_search, and following are mutually exclusive.
Returns:
A paginated result containing a list of threads; see
discussion.rest_api.views.ThreadViewSet for more detail.
Raises:
ValidationError: if an invalid value is passed for a field.
ValueError: if more than one of the mutually exclusive parameters is
provided
CourseNotFoundError: if the requesting user does not have access to the requested course
PageNotFoundError: if page requested is beyond the last
"""
exclusive_param_count = sum(1 for param in [topic_id_list, text_search, following] if param)
if exclusive_param_count > 1: # pragma: no cover
raise ValueError("More than one mutually exclusive param passed to get_thread_list")
cc_map = {"last_activity_at": "activity", "comment_count": "comments", "vote_count": "votes"}
if order_by not in cc_map:
raise ValidationError({
"order_by":
[u"Invalid value. '{}' must be 'last_activity_at', 'comment_count', or 'vote_count'".format(order_by)]
})
if order_direction != "desc":
raise ValidationError({
"order_direction": [u"Invalid value. '{}' must be 'desc'".format(order_direction)]
})
course = _get_course(course_key, request.user)
context = get_context(course, request)
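    # Build the comments service query; privileged users see all groups (group_id is None for them).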
query_params = {
"user_id": six.text_type(request.user.id),
"group_id": (
None if context["is_requester_privileged"] else
get_group_id_for_user(request.user, get_course_discussion_settings(course.id))
),
"page": page,
"per_page": page_size,
"text": text_search,
"sort_key": cc_map.get(order_by),
}
if view:
if view in ["unread", "unanswered"]:
query_params[view] = "true"
else:
            raise ValidationError({
"view": [u"Invalid value. '{}' must be 'unread' or 'unanswered'".format(view)]
})
if following:
paginated_results = context["cc_requester"].subscribed_threads(query_params)
else:
query_params["course_id"] = six.text_type(course.id)
query_params["commentable_ids"] = ",".join(topic_id_list) if topic_id_list else None
query_params["text"] = text_search
paginated_results = Thread.search(query_params)
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a PageNotFoundError in that case
if paginated_results.page != page:
raise PageNotFoundError("Page not found (No results on this page).")
results = _serialize_discussion_entities(
request, context, paginated_results.collection, requested_fields, DiscussionEntity.thread
)
paginator = DiscussionAPIPagination(
request,
paginated_results.page,
paginated_results.num_pages,
paginated_results.thread_count
)
return paginator.get_paginated_response({
"results": results,
"text_search_rewrite": paginated_results.corrected_text,
})
def get_comment_list(request, thread_id, endorsed, page, page_size, requested_fields=None):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
requested_fields: Indicates which additional fields to return for
each comment. (i.e. ['profile_image'])
Returns:
A paginated result containing a list of comments; see
discussion.rest_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"with_responses": True,
"recursive": False,
"user_id": request.user.id,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None:
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a PageNotFoundError in that case
if not responses and page != 1:
raise PageNotFoundError("Page not found (No results on this page).")
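    # Ceiling division: number of pages needed to show resp_total responses at page_size per page.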
num_pages = (resp_total + page_size - 1) // page_size if resp_total else 1
results = _serialize_discussion_entities(request, context, responses, requested_fields, DiscussionEntity.comment)
paginator = DiscussionAPIPagination(request, page, num_pages, resp_total)
return paginator.get_paginated_response(results)
def _check_fields(allowed_fields, data, message):
"""
    Checks that the keys given in data are in allowed_fields
Arguments:
allowed_fields (set): A set of allowed fields
data (dict): The data to compare the allowed_fields against
message (str): The message to return if there are any invalid fields
Raises:
ValidationError if the given data contains a key that is not in
allowed_fields
"""
non_allowed_fields = {field: [message] for field in data.keys() if field not in allowed_fields}
if non_allowed_fields:
raise ValidationError(non_allowed_fields)
def _check_initializable_thread_fields(data, context):
"""
Checks if the given data contains a thread field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the thread which
includes the requesting user
Raises:
ValidationError if the given data contains a thread field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_thread_fields(context),
data,
"This field is not initializable."
)
def _check_initializable_comment_fields(data, context):
"""
Checks if the given data contains a comment field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the comment which
includes the requesting user
Raises:
ValidationError if the given data contains a comment field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_comment_fields(context),
data,
"This field is not initializable."
)
def _check_editable_fields(cc_content, data, context):
"""
Raise ValidationError if the given update data contains a field that is not
editable by the requesting user
"""
_check_fields(
get_editable_fields(cc_content, context),
data,
"This field is not editable."
)
def _do_extra_actions(api_content, cc_content, request_fields, actions_form, context, request):
"""
Perform any necessary additional actions related to content creation or
update that require a separate comments service request.
"""
for field, form_value in actions_form.cleaned_data.items():
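        # Only act on fields that the client actually supplied and whose value differs from the serialized content.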
if field in request_fields and form_value != api_content[field]:
api_content[field] = form_value
if field == "following":
_handle_following_field(form_value, context["cc_requester"], cc_content)
elif field == "abuse_flagged":
_handle_abuse_flagged_field(form_value, context["cc_requester"], cc_content)
elif field == "voted":
_handle_voted_field(form_value, cc_content, api_content, request, context)
elif field == "read":
_handle_read_field(api_content, form_value, context["cc_requester"], cc_content)
else:
raise ValidationError({field: ["Invalid Key"]})
def _handle_following_field(form_value, user, cc_content):
"""follow/unfollow thread for the user"""
if form_value:
user.follow(cc_content)
if cc_content.type == 'thread':
thread_followed.send(sender=None, user=user, post=cc_content)
else:
user.unfollow(cc_content)
if cc_content.type == 'thread':
thread_unfollowed.send(sender=None, user=user, post=cc_content)
def _handle_abuse_flagged_field(form_value, user, cc_content):
"""mark or unmark thread/comment as abused"""
if form_value:
cc_content.flagAbuse(user, cc_content)
else:
cc_content.unFlagAbuse(user, cc_content, removeAll=False)
def _handle_voted_field(form_value, cc_content, api_content, request, context):
"""vote or undo vote on thread/comment"""
signal = thread_voted if cc_content.type == 'thread' else comment_voted
signal.send(sender=None, user=context["request"].user, post=cc_content)
if form_value:
context["cc_requester"].vote(cc_content, "up")
api_content["vote_count"] += 1
else:
context["cc_requester"].unvote(cc_content)
api_content["vote_count"] -= 1
track_voted_event(
request, context["course"], cc_content, vote_value="up", undo_vote=False if form_value else True
)
def _handle_read_field(api_content, form_value, user, cc_content):
"""
Marks thread as read for the user
"""
if form_value and not cc_content['read']:
user.read(cc_content)
# When a thread is marked as read, all of its responses and comments
# are also marked as read.
api_content["unread_comment_count"] = 0
def create_thread(request, thread_data):
"""
Create a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_data: The data for the created thread.
Returns:
The created thread; see discussion.rest_api.views.ThreadViewSet for more
detail.
"""
course_id = thread_data.get("course_id")
user = request.user
if not course_id:
raise ValidationError({"course_id": ["This field is required."]})
try:
course_key = CourseKey.from_string(course_id)
course = _get_course(course_key, user)
except InvalidKeyError:
raise ValidationError({"course_id": ["Invalid value."]})
context = get_context(course, request)
_check_initializable_thread_fields(thread_data, context)
discussion_settings = get_course_discussion_settings(course_key)
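    # For divided (cohorted) topics, default the new thread's group_id to the requester's group.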
if (
"group_id" not in thread_data and
is_commentable_divided(course_key, thread_data.get("topic_id"), discussion_settings)
):
thread_data = thread_data.copy()
thread_data["group_id"] = get_group_id_for_user(user, discussion_settings)
serializer = ThreadSerializer(data=thread_data, context=context)
actions_form = ThreadActionsForm(thread_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(list(serializer.errors.items()) + list(actions_form.errors.items())))
serializer.save()
cc_thread = serializer.instance
thread_created.send(sender=None, user=user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, list(thread_data.keys()), actions_form, context, request)
track_thread_created_event(request, course, cc_thread, actions_form.cleaned_data["following"])
return api_thread
def create_comment(request, comment_data):
"""
Create a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_data: The data for the created comment.
Returns:
The created comment; see discussion.rest_api.views.CommentViewSet for more
detail.
"""
thread_id = comment_data.get("thread_id")
if not thread_id:
raise ValidationError({"thread_id": ["This field is required."]})
cc_thread, context = _get_thread_and_context(request, thread_id)
    # If a thread is closed, no new comments can be made on it.
if cc_thread['closed']:
raise PermissionDenied
_check_initializable_comment_fields(comment_data, context)
serializer = CommentSerializer(data=comment_data, context=context)
actions_form = CommentActionsForm(comment_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(list(serializer.errors.items()) + list(actions_form.errors.items())))
serializer.save()
cc_comment = serializer.instance
comment_created.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, list(comment_data.keys()), actions_form, context, request)
track_comment_created_event(request, context["course"], cc_comment, cc_thread["commentable_id"], followed=False)
return api_comment
def update_thread(request, thread_id, update_data):
"""
Update a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to update.
update_data: The data to update in the thread.
Returns:
The updated thread; see discussion.rest_api.views.ThreadViewSet for more
detail.
"""
cc_thread, context = _get_thread_and_context(request, thread_id, retrieve_kwargs={"with_responses": True})
_check_editable_fields(cc_thread, update_data, context)
serializer = ThreadSerializer(cc_thread, data=update_data, partial=True, context=context)
actions_form = ThreadActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(list(serializer.errors.items()) + list(actions_form.errors.items())))
# Only save thread object if some of the edited fields are in the thread data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
# signal to update Teams when a user edits a thread
thread_edited.send(sender=None, user=request.user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, list(update_data.keys()), actions_form, context, request)
    # Always return read as True (and therefore unread_comment_count=0) as a reasonably
    # accurate shortcut, rather than adding additional processing.
api_thread['read'] = True
api_thread['unread_comment_count'] = 0
return api_thread
def update_comment(request, comment_id, update_data):
"""
Update a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id for the comment to update.
update_data: The data to update in the comment.
Returns:
The updated comment; see discussion.rest_api.views.CommentViewSet for more
detail.
Raises:
CommentNotFoundError: if the comment does not exist or is not accessible
to the requesting user
PermissionDenied: if the comment is accessible to but not editable by
the requesting user
ValidationError: if there is an error applying the update (e.g. raw_body
is empty or thread_id is included)
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
_check_editable_fields(cc_comment, update_data, context)
serializer = CommentSerializer(cc_comment, data=update_data, partial=True, context=context)
actions_form = CommentActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(list(serializer.errors.items()) + list(actions_form.errors.items())))
# Only save comment object if some of the edited fields are in the comment data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
comment_edited.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, list(update_data.keys()), actions_form, context, request)
return api_comment
def get_thread(request, thread_id, requested_fields=None):
"""
Retrieve a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to retrieve
requested_fields: Indicates which additional fields to return for
thread. (i.e. ['profile_image'])
"""
# Possible candidate for optimization with caching:
# Param with_responses=True required only to add "response_count" to response.
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"with_responses": True,
"user_id": six.text_type(request.user.id),
}
)
return _serialize_discussion_entities(request, context, [cc_thread], requested_fields, DiscussionEntity.thread)[0]
def get_response_comments(request, comment_id, page, page_size, requested_fields=None):
"""
Return the list of comments for the given thread response.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment/response to get child comments for.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
requested_fields: Indicates which additional fields to return for
each child comment. (i.e. ['profile_image'])
Returns:
A paginated result containing a list of comments
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
cc_thread, context = _get_thread_and_context(
request,
cc_comment["thread_id"],
retrieve_kwargs={
"with_responses": True,
"recursive": True,
}
)
if cc_thread["thread_type"] == "question":
thread_responses = itertools.chain(cc_thread["endorsed_responses"], cc_thread["non_endorsed_responses"])
else:
thread_responses = cc_thread["children"]
response_comments = []
for response in thread_responses:
if response["id"] == comment_id:
response_comments = response["children"]
break
response_skip = page_size * (page - 1)
paged_response_comments = response_comments[response_skip:(response_skip + page_size)]
if not paged_response_comments and page != 1:
raise PageNotFoundError("Page not found (No results on this page).")
results = _serialize_discussion_entities(
request, context, paged_response_comments, requested_fields, DiscussionEntity.comment
)
comments_count = len(response_comments)
num_pages = (comments_count + page_size - 1) // page_size if comments_count else 1
paginator = DiscussionAPIPagination(request, page, num_pages, comments_count)
return paginator.get_paginated_response(results)
except CommentClientRequestError:
raise CommentNotFoundError("Comment not found")
def delete_thread(request, thread_id):
"""
Delete a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
if can_delete(cc_thread, context):
cc_thread.delete()
thread_deleted.send(sender=None, user=request.user, post=cc_thread)
else:
raise PermissionDenied
def delete_comment(request, comment_id):
"""
Delete a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
if can_delete(cc_comment, context):
cc_comment.delete()
comment_deleted.send(sender=None, user=request.user, post=cc_comment)
else:
raise PermissionDenied
| edx-solutions/edx-platform | lms/djangoapps/discussion/rest_api/api.py | Python | agpl-3.0 | 42,211 |
# -*- coding: utf-8 -*-
import os
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
CURRENCY = u"€"
REVENUES_CATEGORIES = {
'brut': ['salaire_brut', 'chobrut', 'rstbrut', 'pensions_alimentaires_percues', 'pensions_alimentaires_versees', 'rev_cap_brut', 'fon'],
'imposable': ['salaire_imposable', 'cho', 'rst', 'pensions_alimentaires_percues', 'pensions_alimentaires_versees', 'rev_cap_brut', 'fon', 'cotsoc_cap'],
'net': ['salaire_net', 'chonet', 'rstnet', 'pensions_alimentaires_percues', 'pensions_alimentaires_versees', 'rev_cap_net', 'fon'],
'superbrut': ['salsuperbrut', 'chobrut', 'rstbrut', 'pensions_alimentaires_percues', 'pensions_alimentaires_versees', 'rev_cap_brut', 'fon'],
}
def init_country(qt = False): # drop_survey_only_variables = False, simulate_f6de = False, start_from = 'imposable'
"""Create a country-specific TaxBenefitSystem."""
# from openfisca_core.columns import FloatCol
from openfisca_core.taxbenefitsystems import MultipleXmlBasedTaxBenefitSystem
if qt:
from openfisca_qt import widgets as qt_widgets
from . import decompositions, entities, scenarios
from .model import datatrees
from .model import model # Load output variables into entities. # noqa analysis:ignore
from .model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales import preprocessing
if qt:
from .widgets.Composition import CompositionWidget
# if simulate_f6de:
# del column_by_name['f6de']
# csg_deduc_patrimoine_simulated = prestation_by_name.pop('csg_deduc_patrimoine_simulated')
# prestation_by_name['csg_deduc_patrimoine'] = FloatCol(
# csg_deduc_patrimoine_simulated._func,
# entity = csg_deduc_patrimoine_simulated.entity,
# label = csg_deduc_patrimoine_simulated.label,
# start = csg_deduc_patrimoine_simulated.start,
# end = csg_deduc_patrimoine_simulated.end,
# val_type = csg_deduc_patrimoine_simulated.val_type,
# freq = csg_deduc_patrimoine_simulated.freq,
# survey_only = False,
# )
# else:
# prestation_by_name.pop('csg_deduc_patrimoine_simulated', None)
if qt:
qt_widgets.CompositionWidget = CompositionWidget
class TaxBenefitSystem(MultipleXmlBasedTaxBenefitSystem):
"""French tax benefit system"""
check_consistency = None # staticmethod(utils.check_consistency)
CURRENCY = CURRENCY
DATA_SOURCES_DIR = os.path.join(COUNTRY_DIR, 'data', 'sources')
DECOMP_DIR = os.path.dirname(os.path.abspath(decompositions.__file__))
DEFAULT_DECOMP_FILE = decompositions.DEFAULT_DECOMP_FILE
entity_class_by_key_plural = dict(
(entity_class.key_plural, entity_class)
for entity_class in entities.entity_class_by_symbol.itervalues()
)
# Declared below to avoid "name is not defined" exception
# column_by_name = None
# prestation_by_name = None
columns_name_tree_by_entity = datatrees.columns_name_tree_by_entity
legislation_xml_info_list = [
(
os.path.join(COUNTRY_DIR, 'param', 'param.xml'),
None,
),
# (
# os.path.join(COUNTRY_DIR, 'assets', 'xxx', 'yyy.xml'),
# ('insert', 'into', 'existing', 'element'),
# ),
]
preprocess_legislation = staticmethod(preprocessing.preprocess_legislation)
REFORMS_DIR = os.path.join(COUNTRY_DIR, 'reformes')
REV_TYP = None # utils.REV_TYP # Not defined for France
REVENUES_CATEGORIES = REVENUES_CATEGORIES
Scenario = scenarios.Scenario
def prefill_cache(self):
# Compute one "zone APL" variable, to pre-load CSV of "code INSEE commune" to "Zone APL".
from .model.prestations import aides_logement
aides_logement.preload_zone_apl()
from .model.prelevements_obligatoires.prelevements_sociaux import taxes_salaires_main_oeuvre
taxes_salaires_main_oeuvre.preload_taux_versement_transport()
return TaxBenefitSystem
def init_tax_benefit_system():
"""
    Helper function which suits most use cases.
Use `init_country` if you need to get the `TaxBenefitSystem` class.
"""
TaxBenefitSystem = init_country()
tax_benefit_system = TaxBenefitSystem()
return tax_benefit_system
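# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch, assuming a compatible OpenFisca-Core install; nothing below
# is executed at import time, and the scenario call is illustrative only.
#
#     tax_benefit_system = init_tax_benefit_system()
#     scenario = tax_benefit_system.new_scenario()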
| adrienpacifico/openfisca-france | openfisca_france/__init__.py | Python | agpl-3.0 | 4,489 |
from __future__ import absolute_import, print_function, division
import pkg_resources
import theano
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
try:
from theano.sandbox.cuda import cuda_ndarray
dimshuffle = cuda_ndarray.cuda_ndarray.dimshuffle
except ImportError:
pass
cula_available = False
try:
from scikits.cuda import cula
cula_available = True
except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
pass
cula_initialized = False
class GpuSolve(GpuOp):
"""
CULA GPU solver OP.
Parameters
----------
trans
Whether to take the transpose of the input matrix or not.
"""
__props__ = ('trans',)
def __init__(self, trans='N'):
self.trans = trans
super(GpuSolve, self).__init__()
def output_type(self, inp):
return CudaNdarrayType(broadcastable=[False] * inp.type.ndim)
def make_node(self, inp1, inp2):
inp1 = as_cuda_ndarray_variable(inp1)
inp2 = as_cuda_ndarray_variable(inp2)
assert inp1.ndim == 2
assert inp2.ndim == 2
return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
def make_thunk(self, node, storage_map, _, no_recycling, impl=None):
# Initialize CULA the first time it is needed
global cula_initialized
if not cula_available:
raise RuntimeError('Cula is not available and '
'GpuSolve Op can not be constructed.')
if not cula_initialized:
cula.culaInitialize()
cula_initialized = True
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
def thunk():
            # output storage for the result
z = outputs[0]
# Matrix
A = inputs[0][0]
# Solution vectors
b = inputs[1][0]
# A is not explicitly converted between C and F order, instead we
# switch the "transpose" flag
if self.trans in ('T', 'C'):
trans = 'N'
else:
trans = 'T'
# Convert b to F-order from c-order.
b_cpy = dimshuffle(b, (1, 0)).reshape((b.shape[0], b.shape[1]))
# This copy forces allocation of a new C-contiguous buffer
# and returns it.
A_cpy = A.copy()
b_cpy = b_cpy.copy()
def cula_gpu_solve(A_, b_, trans='T'):
A_shape = A_.shape
b_shape = b_.shape
assert(len(A_shape) == 2)
assert(len(b_shape) == 2)
if trans in ['T', 'C']:
l, n = A_shape
k, m = b_shape
if n != k:
raise ValueError('A and b must be aligned.')
elif trans in ['N']:
n, l = A_shape
k, m = b_shape
if l != m:
raise ValueError('A and b must be aligned.')
else:
raise ValueError('Invalid value for trans')
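                # Leading dimensions for the column-major (Fortran-order) CULA call.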
lda = max(1, n)
ldb = max(1, n, l)
# construct pointer arrays needed for culaDeviceSgels
# Cula requires you to pass a pointer for A and b.
A_ptr = A_.gpudata
b_ptr = b_.gpudata
cula.culaDeviceSgels(trans, n, l, m, A_ptr, lda, b_ptr, ldb)
return A_, b_
A_pycuda, b_pycuda = cula_gpu_solve(A_cpy, b_cpy, trans)
            # Convert the result back to C-order from F-order and assign it to the output:
b_cpy = b_cpy.reshape(b.shape[::-1])
b_cpy = dimshuffle(b_cpy, (1, 0))
z[0] = b_cpy
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
gpu_solve = GpuSolve()
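# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch, assuming a CUDA-enabled Theano installation with CULA
# available; variable names are made up and the graph is not evaluated here.
#
#     import theano.tensor as T
#     A = T.fmatrix('A')
#     b = T.fmatrix('b')
#     x = gpu_solve(A, b)                  # least-squares solve of A x = b on the GPU
#     solve_fn = theano.function([A, b], x)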
| Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/sandbox/cuda/cula.py | Python | agpl-3.0 | 4,046 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2012 Daniel (AvanzOSC). All Rights Reserved
# 20/08/2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import asset_amortize | avanzosc/avanzosc6.1 | avanzosc_asset_amortize/__init__.py | Python | agpl-3.0 | 1,016 |