repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
yola/yolapy | docs/conf.py | 1 | 9271 | # -*- coding: utf-8 -*-
#
# Yolapy documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 27 12:47:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
sys.path.insert(0, os.path.abspath('..'))
from yolapy import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Yolapy'
copyright = u'2015, Yola'
author = u'Yola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yolapydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Yolapy.tex', u'Yolapy Documentation',
u'Yola', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'yolapy', u'Yolapy Documentation',
# [author], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'Yolapy', u'Yolapy Documentation',
# author, 'Yolapy', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 7,168,053,449,254,915,000 | 30.968966 | 79 | 0.70672 | false | 3.647128 | true | false | false |
shree-shubham/Unitype | Coupling Passions.py | 1 | 1941 | import math
# Enter your code here. Read input from STDIN. Print output to STDOUT
def distance_between(point1, point2):
EARTH_RADIUS = 6371
point1_lat_in_radians = math.radians( point1['latitude'] )
point2_lat_in_radians = math.radians( point2['latitude'] )
point1_long_in_radians = math.radians( point1['longitude'] )
point2_long_in_radians = math.radians( point2['longitude'] )
return math.acos( math.sin( point1_lat_in_radians ) * math.sin( point2_lat_in_radians ) +
math.cos( point1_lat_in_radians ) * math.cos( point2_lat_in_radians ) *
math.cos( point2_long_in_radians - point1_long_in_radians) ) * EARTH_RADIUS
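# Example call (made-up coordinates); the spherical law of cosines above
# returns kilometres because EARTH_RADIUS is given in km:
#   distance_between({'latitude': 48.86, 'longitude': 2.35},
#                    {'latitude': 51.51, 'longitude': -0.13})  # ~344 km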
m = int(raw_input())
people = {}
locations = {}
interests = {}
for p in xrange(m):
s = raw_input().split(' ')
people[p] = s[1:]
for i in s[1:]:
interests[i] = 1
z = int(raw_input())
for l in xrange(z):
s = raw_input().split(' ')
locations[s[0]] = {'latitude': float(s[1]), 'longitude': float(s[2])}
locations[s[0]]['passions'] = set()
for ll in xrange(4, 4 + int(s[3])):
locations[s[0]]['passions'].add(s[ll])
res = []
for l in locations:
interest_set = set()
for i in interests:
if i in locations[l]['passions']:
interest_set.add(i)
res += [[l, interest_set]]
commons = 0
commons_list = []
for i in xrange(len(res)):
for j in xrange(i+1, len(res)):
temp = len(res[i][1] | res[j][1])
if temp >= commons:
commons = temp
if res[i][0] < res[j][0]:
commons_list += [[res[i][0], res[j][0], commons, distance_between(locations[res[i][0]], locations[res[j][0]])]]
else:
commons_list += [[res[j][0], res[i][0], commons, distance_between(locations[res[i][0]], locations[res[j][0]])]]
commons_list = sorted(commons_list, key = lambda x : (-x[2], x[3]))
print commons_list[0][0] + ' ' + commons_list[0][1]
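# Hypothetical STDIN for the reader above (people lines: name then interests;
# location lines: name, lat, lon, passion count, then that many passions):
#   2
#   Alice hiking jazz
#   Bob jazz
#   2
#   Paris 48.85 2.35 1 jazz
#   Oslo 59.91 10.75 2 hiking jazz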
| gpl-3.0 | -4,238,094,887,164,742,700 | 38.612245 | 127 | 0.578568 | false | 2.918797 | false | false | false |
lfairchild/PmagPy | dialogs/pmag_er_magic_dialogs.py | 1 | 52576 | """
dialogs for ErMagicBuilder
"""
# pylint: disable=W0612,C0111,C0103,W0201,C0301
import os
import wx
import wx.grid
import numpy as np
from . import drop_down_menus2 as drop_down_menus
from . import pmag_widgets as pw
from . import magic_grid2 as magic_grid
from . import grid_frame2
from . import grid_frame3
from pmagpy import find_pmag_dir
from pmagpy import contribution_builder as cb
class ErMagicCheckFrame3(wx.Frame):
def __init__(self, parent, title, WD, contribution):
wx.Frame.__init__(self, parent, -1, title)
self.WD = WD
self.main_frame = self.Parent
self.contribution = contribution
self.temp_data = {}
self.grid = None
self.deleteRowButton = None
self.selected_rows = set()
self.min_size = (1160, 350)
self.contribution.propagate_ages()
# re-do the 'quit' binding so that it only closes the current window
self.main_frame.Bind(wx.EVT_MENU, lambda event: self.main_frame.menubar.on_quit(event, self), self.main_frame.menubar.file_quit)
self.InitSpecCheck()
def InitSpecCheck(self):
"""
make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to
"""
#wait = wx.BusyInfo("Please wait, working...")
#wx.SafeYield()
self.contribution.propagate_lithology_cols()
spec_df = self.contribution.tables['specimens'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'specimens', 'specimens', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitSampCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.backButton.Disable()
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitSampCheck(self):
"""
make an interactive grid in which users can edit sample names
as well as which site a sample belongs to
"""
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank
self.contribution.propagate_lithology_cols()
samp_df = self.contribution.tables['samples'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'samples', 'samples', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
next_dia = self.InitSiteCheck
prev_dia = self.InitSpecCheck
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, next_dia),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, prev_dia),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitSiteCheck(self):
"""
make an interactive grid in which users can edit site names
as well as which location a site belongs to
"""
# propagate average lat/lon info from samples table if
# available in samples and missing in sites
self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
target_df_name='sites',
source_df_name='samples')
# propagate lithology columns
self.contribution.propagate_lithology_cols()
site_df = self.contribution.tables['sites'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'sites', 'sites', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitLocCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSampCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def InitLocCheck(self):
"""
make an interactive grid in which users can edit locations
"""
# if there is a location without a name, name it 'unknown'
self.contribution.rename_item('locations', 'nan', 'unknown')
# propagate lat/lon values from sites table
self.contribution.get_min_max_lat_lon()
# propagate lithologies & geologic classes from sites table
self.contribution.propagate_cols_up(['lithologies',
'geologic_classes'], 'locations', 'sites')
res = self.contribution.propagate_min_max_up()
if cb.not_null(res):
self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
# set up frame
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'locations', 'locations', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitAgeCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSiteCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, min_size=self.min_size)
# center
self.grid_frame.Centre()
return
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
age_df = self.contribution.tables['ages'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'ages', 'ages', self.panel,
main_frame=self.main_frame)
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, None),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitLocCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def on_close_grid_frame(self, event=None):
# required placeholder
pass
def onContinue(self, event, grid, next_dia=None):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# save all changes to data object and write to file
self.grid_frame.grid_builder.save_grid_data()
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
warn_string = ""
for error_name, error_cols in list(validation_errors.items()):
if error_cols:
warn_string += "You have {}: {}.\n\n".format(error_name, ", ".join(error_cols))
warn_string += "Are you sure you want to continue?"
result = pw.warning_with_override(warn_string)
if result == wx.ID_YES:
pass
else:
return False
else:
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
self.panel.Destroy()
if next_dia:
next_dia()
else:
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank or "Not Specified"
self.contribution.propagate_lithology_cols()
wx.MessageBox('Done!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
def onbackButton(self, event=None, prev_dia=None):
if prev_dia:
            alert = bool(self.grid_frame.grid.changes)
self.grid_frame.onSave(event=None, alert=alert, destroy=True)
#if self.grid_frame.grid.name == 'samples':
# self.sample_window -= 2
self.panel.Destroy()
prev_dia()
def validate(self, grid):
"""
Using the MagIC data model, generate validation errors on a MagicGrid.
Parameters
----------
grid : dialogs.magic_grid3.MagicGrid
The MagicGrid to be validated
Returns
        -------
warnings: dict
Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}
"""
grid_name = str(grid.GetName())
dmodel = self.contribution.dmodel
reqd_headers = dmodel.get_reqd_headers(grid_name)
df = self.contribution.tables[grid_name].df
df = df.replace('', np.nan) # python does not view empty strings as null
if df.empty:
return {}
col_names = set(df.columns)
missing_headers = set(reqd_headers) - col_names
present_headers = set(reqd_headers) - set(missing_headers)
non_null_headers = df.dropna(how='all', axis='columns').columns
null_reqd_headers = present_headers - set(non_null_headers)
        if any(missing_headers) or any(null_reqd_headers):
warnings = {'missing required column(s)': sorted(missing_headers),
'no data in required column(s)': sorted(null_reqd_headers)}
else:
warnings = {}
return warnings
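    # Shape of the dict returned above when problems exist (the column names
    # here are illustrative only, not taken from the MagIC data model):
    #   {'missing required column(s)': ['lat', 'lon'],
    #    'no data in required column(s)': ['age']}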
def on_saveButton(self, event, grid):
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.grid_frame.drop_down_menu: # unhighlight selected columns, etc.
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col labels
starred_cols, hatted_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
for col in hatted_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '^^')
del wait
def on_backButton(self, event, previous_dia, current_dia=None):
# save first?
if self.grid.changes:
result = pw.warning_with_override("You have unsaved data which will be lost. Are you sure you want to go back?")
if result == wx.ID_NO:
return
# go back to previous grid
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if current_dia == self.InitLocCheck:
pass
#elif previous_dia == self.InitSpecCheck or previous_dia == self.InitSampCheck:
# self.sample_window = 0
self.panel.Destroy()
previous_dia()
del wait
### Manage data methods ###
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# save all changes to data object and write to file
self.grid_builder.save_grid_data()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
class ErMagicCheckFrame(wx.Frame):
def __init__(self, parent, title, WD, magic_data): # magic_data was ErMagic
wx.Frame.__init__(self, parent, -1, title)
self.WD = WD
self.main_frame = self.Parent
self.er_magic_data = magic_data
self.er_magic_data.no_pmag_data = set(['specimen', 'sample', 'site', 'location'])
self.temp_data = {}
self.drop_down_menu = None
# sample window must be displayed (differently) twice, so it is useful to keep track
self.sample_window = 0
self.grid = None
self.deleteRowButton = None
self.selected_rows = set()
self.InitSpecCheck()
def InitSpecCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
#import wx.lib.scrolledpanel as libpanel # does not work well
#self.panel = libpanel.ScrolledPanel(self, style=wx.SIMPLE_BORDER)
text = """Step 1:
Check that all specimens belong to the correct sample
(if sample name is simply wrong, that will be fixed in step 2)"""
label = wx.StaticText(self.panel, label=text)
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'specimen',
self.er_magic_data.headers, self.panel,
'sample')
self.spec_grid = self.grid_builder.make_grid(incl_pmag=False)
self.grid = self.spec_grid
self.spec_grid.InitUI()
self.grid_builder.add_data_to_grid(self.spec_grid, 'specimen', incl_pmag=False)
samples = self.er_magic_data.make_name_list(self.er_magic_data.samples)
self.drop_down_menu = drop_down_menus.Menus("specimen", self, self.spec_grid, samples)
#### Create Buttons ####
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSampleButton = wx.Button(self.panel, label="Add a new sample")
self.samples = [name for name in self.er_magic_data.samples]
self.Bind(wx.EVT_BUTTON, self.on_addSampleButton, self.addSampleButton)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSpecimenHelp.html"), self.helpButton)
hbox_one.Add(self.addSampleButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hbox_one.Add(self.helpButton)
#
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.spec_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.spec_grid, next_dia=self.InitSampCheck), self.continueButton)
hboxok.Add(self.saveButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.ALIGN_LEFT)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'specimen', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Create Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
vbox.Add(hbox_one, flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.spec_grid, flag=wx.ALL, border=10)#|wx.EXPAND, border=30)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
        self.Show()
        # hiding and re-showing works around a display error that otherwise
        # only resolves on manually resizing the window
        self.Hide()
        self.Show()
def InitSampCheck(self):
"""make an interactive grid in which users can edit sample names
as well as which site a sample belongs to"""
self.sample_window += 1
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
if self.sample_window == 1:
text = """Step 2:
Check that all samples are correctly named,
and that they belong to the correct site
(if site name is simply wrong, that will be fixed in step 3)"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
else:
text = """Step 4:
Some of the data from the er_sites table has propagated into er_samples.
Check that these data are correct, and fill in missing cells using controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see Help button for more details)\n\n** Denotes controlled vocabulary"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
if self.sample_window == 1:
# provide no extra headers
headers = {'sample': {'er': [[], [], []],
'pmag': [[], [], []]}}
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
headers, self.panel,
'site')
if self.sample_window > 1:
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
self.er_magic_data.headers, self.panel,
'site')
self.samp_grid = self.grid_builder.make_grid(incl_pmag=False)
self.samp_grid.InitUI()
self.grid_builder.add_data_to_grid(self.samp_grid, 'sample', incl_pmag=False)
self.grid = self.samp_grid
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu = drop_down_menus.Menus("sample", self, self.samp_grid, sites) # initialize all needed drop-down menus
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSiteButton = wx.Button(self.panel, label="Add a new site")
self.Bind(wx.EVT_BUTTON, self.on_addSiteButton, self.addSiteButton)
hbox_one.Add(self.addSiteButton, flag=wx.RIGHT, border=10)
if self.sample_window == 1:
html_help = "ErMagicSampleHelp1.html"
if self.sample_window > 1:
html_help = "ErMagicSampleHelp.html"
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, html_help), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.samp_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
next_dia = self.InitSiteCheck if self.sample_window < 2 else self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.samp_grid, next_dia=next_dia), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSpecCheck if self.sample_window < 2 else self.InitSiteCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'sample', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(step_label, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.samp_grid, flag=wx.ALL, border=10) # using wx.EXPAND or not does not affect re-size problem
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
## this combination may prevent a display error that (without the fix) only resolves on manually resizing the window
self.panel.Refresh()
self.samp_grid.ForceRefresh()
self.panel.Refresh()
self.Refresh()
# this prevents display errors
self.Hide()
self.Show()
#self.Fit() # this make it worse!
#self.Layout() # doesn't fix display resize error
#self.panel.Layout() # doesn't fix display resize error
#self.main_frame.Layout()# doesn't fix display resize error
def InitSiteCheck(self):
"""make an interactive grid in which users can edit site names
as well as which location a site belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 3:
Check that all sites are correctly named, and that they belong to the correct location.
Fill in the additional columns with controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see the help button for more details)
note: Changes to site_class, site_lithology, or site_type will overwrite er_samples.txt
However, you will be able to edit sample_class, sample_lithology, and sample_type in step 4
**Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
#for val in ['er_citation_names', 'er_location_name', 'er_site_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lat', 'site_lon']: #
# try:
# self.er_magic_data.headers['site']['er'][0].remove(val)
# except ValueError:
# pass
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'site',
self.er_magic_data.headers, self.panel,
'location')
self.site_grid = self.grid_builder.make_grid(incl_pmag=False)
self.site_grid.InitUI()
self.grid_builder.add_data_to_grid(self.site_grid, 'site', incl_pmag=False)
self.grid = self.site_grid
# populate site_definition as 's' by default if no value is provided (indicates that site is single, not composite)
rows = self.site_grid.GetNumberRows()
col = 6
for row in range(rows):
cell = self.site_grid.GetCellValue(row, col)
if not cell:
self.site_grid.SetCellValue(row, col, 's')
# initialize all needed drop-down menus
locations = sorted(self.er_magic_data.make_name_list(self.er_magic_data.locations))
self.drop_down_menu = drop_down_menus.Menus("site", self, self.site_grid, locations)
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addLocButton = wx.Button(self.panel, label="Add a new location")
self.Bind(wx.EVT_BUTTON, self.on_addLocButton, self.addLocButton)
hbox_one.Add(self.addLocButton, flag=wx.RIGHT, border=10)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSiteHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.site_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.site_grid, next_dia=self.InitSampCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'site', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.BOTTOM|wx.TOP, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.site_grid, flag=wx.ALL|wx.EXPAND, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
# this combination prevents a display error that (without the fix) only resolves on manually resizing the window
self.site_grid.ForceRefresh()
self.panel.Refresh()
self.Hide()
self.Show()
def InitLocCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 5:
Check that locations are correctly named.
Fill in any blank cells using controlled vocabularies.
(See Help button for details)
** Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.locations = self.er_magic_data.locations
#
if not self.er_magic_data.locations:
            msg = "You have no data in er_locations, so we are skipping step 5.\n Note that location names must be entered at the measurements level, so you may need to re-import your data, or you can add a location in step 3"
dlg = wx.MessageDialog(None, caption="Message:", message=msg, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.panel.Destroy()
self.InitAgeCheck()
return
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'location',
self.er_magic_data.headers, self.panel)
self.loc_grid = self.grid_builder.make_grid(incl_pmag=False)
self.loc_grid.InitUI()
self.grid_builder.add_data_to_grid(self.loc_grid, 'location', incl_pmag=False)
self.grid = self.loc_grid
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("location", self,
self.loc_grid, None)
# need to find max/min lat/lon here IF they were added in the previous grid
sites = self.er_magic_data.sites
location_lat_lon = self.er_magic_data.get_min_max_lat_lon(self.er_magic_data.locations)
col_names = ('location_begin_lat', 'location_end_lat', 'location_begin_lon', 'location_end_lon')
col_inds = [self.grid.col_labels.index(name) for name in col_names]
col_info = list(zip(col_names, col_inds))
for loc in self.er_magic_data.locations:
row_ind = self.grid.row_labels.index(loc.name)
for col_name, col_ind in col_info:
info = location_lat_lon[loc.name][col_name]
self.grid.SetCellValue(row_ind, col_ind, str(info))
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicLocationHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.loc_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.loc_grid, next_dia=self.InitAgeCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia, current_dia=self.InitLocCheck), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'location', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(self.loc_grid, flag=wx.TOP|wx.BOTTOM, border=10)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
### Grid methods ###
def make_simple_table(self, column_labels, data_dict, grid_name):
row_labels = sorted(data_dict.keys())
        if len(row_labels) in range(1, 5):
num_rows = len(row_labels)
height = {1: 70, 2: 90, 3: 110, 4: 130}
grid = magic_grid.MagicGrid(self.panel, grid_name, row_labels, column_labels, (-1, height[num_rows])) # autosizes width, but enforces fixed pxl height to prevent display problems
else:
grid = magic_grid.MagicGrid(self.panel, grid_name, row_labels, column_labels)
data = grid.InitUI()
if grid_name == 'ages':
temp_data_key = 'ages'
else:
temp_data_key = column_labels[0]
self.temp_data[temp_data_key] = data
grid.add_data(data_dict)
grid.size_grid()
grid.do_event_bindings()
return grid
def onMouseOver(self, event, grid):
"""
Displays a tooltip over any cell in a certain column
"""
x, y = grid.CalcUnscrolledPosition(event.GetX(), event.GetY())
coords = grid.XYToCell(x, y)
col = coords[1]
row = coords[0]
# creates tooltip message for cells with long values
# note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
msg = grid.GetCellValue(row, col)
if len(msg) > 15:
event.GetEventObject().SetToolTipString(msg)
else:
event.GetEventObject().SetToolTipString('')
def validate(self, grid):
validations = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lon', 'site_lat', 'sample_class', 'sample_lithology', 'sample_type', 'sample_lat', 'sample_lon', 'location_type', 'age_unit', 'age']#, 'magic_method_codes']
cols = list(range(grid.GetNumberCols()))
rows = list(range(grid.GetNumberRows()))
data_missing = []
for col in cols:
col_label = str(grid.GetColLabelValue(col))
if col_label in validations:
for row in rows:
value = grid.GetCellValue(row, col)
if not value:
data_missing.append(col_label)
break
return data_missing
### Button methods ###
def on_addSampleButton(self, event):
def add_sample(sample, site):
add_sample_data(sample, site)
sites = self.er_magic_data.make_name_list(self.er_magic_data.sites)
pw.AddItem(self, 'Sample', add_sample, owner_items=sites, belongs_to='site') # makes window for adding new data
def add_sample_data(sample, site):
# add sample
self.er_magic_data.add_sample(sample, site)
# re-Bind so that the updated samples list shows up on a left click
samples = sorted(self.er_magic_data.make_name_list(self.er_magic_data.samples))
choices = self.drop_down_menu.choices
choices[1] = (samples, False)
self.drop_down_menu.update_drop_down_menu(self.spec_grid, choices)
def on_addSiteButton(self, event):
def add_site(site, location):
add_site_data(site, location)
locations = self.er_magic_data.make_name_list(self.er_magic_data.locations)
pw.AddItem(self, 'Site', add_site, locations, 'location')
def add_site_data(site, location):
# add site
self.er_magic_data.add_site(site, location)
# re-Bind so that the updated sites list shows up on a left click
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu.update_drop_down_menu(self.samp_grid, {1: (sites, False)})
def on_addLocButton(self, event):
def add_loc(loc, parent=None):
add_loc_data(loc)
#def __init__(self, parent, title, data_items, data_method):
if not self.er_magic_data.locations:
pass
pw.AddItem(self, 'Location', add_loc, owner_items=None, belongs_to=None) # makes window for adding new data
def add_loc_data(loc):
# add location
self.er_magic_data.add_location(loc)
# re-Bind so that the updated locations list shows up on a left click
locations = self.er_magic_data.make_name_list(self.er_magic_data.locations)
choices = self.drop_down_menu.choices
choices[1] = (locations, False)
self.drop_down_menu.update_drop_down_menu(self.site_grid, choices)
def on_helpButton(self, event, page=None):
"""shows html help page"""
# for use on the command line:
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller
#path = self.main_frame.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', page)
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', page)
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Show()
def on_continueButton(self, event, grid, next_dia=None):
"""
pulls up next dialog, if there is one.
gets any updated information from the current grid and runs ErMagicBuilder
"""
#wait = wx.BusyInfo("Please wait, working...")
# unhighlight selected columns, etc.
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# remove '**' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
result = pw.warning_with_override("You are missing required data in these columns: {}\nAre you sure you want to continue without these data?".format(', '.join(validation_errors)))
if result == wx.ID_YES:
pass
else:
return False
if grid.changes:
self.onSave(grid)
self.deleteRowButton = None
#self.panel.Destroy() # calling Destroy here breaks with Anaconda Python (segfault)
# make sure that specimens get propagated with
# any default sample info
if next_dia == self.InitLocCheck:
if self.er_magic_data.specimens:
for spec in self.er_magic_data.specimens:
spec.propagate_data()
if next_dia:
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
wx.CallAfter(self.panel.Destroy) # no segfault here!
next_dia()
# need to wait to process the resize:
event = wx.PyCommandEvent(wx.EVT_SIZE.typeId, self.GetId())
wx.CallAfter(self.GetEventHandler().ProcessEvent, event)
del wait
else:
wait = wx.BusyInfo("Please wait, writing data to files...")
wx.SafeYield()
# actually write data:
self.er_magic_data.write_files()
self.Destroy()
del wait
def on_saveButton(self, event, grid):
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.drop_down_menu: # unhighlight selected columns, etc.
self.drop_down_menu.clean_up()
# remove '**' from col labels
starred_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
del wait
def on_cancelButton(self, event):
dlg = pw.YesNoCancelDialog(self, "Your changes so far have not been written to file.\nSave changes?", "Not so fast")
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_YES:
self.onSave(self.grid)
self.er_magic_data.write_files()
self.Destroy()
if res == wx.ID_NO:
self.Destroy()
if res == wx.ID_CANCEL:
pass
def on_backButton(self, event, previous_dia, current_dia=None):
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if current_dia == self.InitLocCheck:
pass
elif previous_dia == self.InitSpecCheck or previous_dia == self.InitSampCheck:
self.sample_window = 0
self.panel.Destroy()
previous_dia()
del wait
def onDeleteRow(self, event, data_type):
"""
On button click, remove relevant object from both the data model and the grid.
"""
ancestry = self.er_magic_data.ancestry
child_type = ancestry[ancestry.index(data_type) - 1]
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
if data_type == 'site':
how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
else:
how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
orphans = []
for name in names:
row = self.grid.row_labels.index(name)
orphan = self.er_magic_data.delete_methods[data_type](name)
if orphan:
orphans.extend(orphan)
self.grid.remove_row(row)
if orphans:
orphan_names = self.er_magic_data.make_name_list(orphans)
pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
self.selected_rows = set()
# update grid and data model
self.update_grid(self.grid)#, grids[grid_name])
self.grid.Refresh()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
elif event.Col < 0:
self.onSelectRow(event)
elif event.Row < 0:
self.drop_down_menu.on_label_click(event)
def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh()
### Manage data methods ###
def update_grid(self, grid):
"""
takes in wxPython grid and ErMagic data object to be updated
"""
data_methods = {'specimen': self.er_magic_data.change_specimen,
'sample': self.er_magic_data.change_sample,
'site': self.er_magic_data.change_site,
'location': self.er_magic_data.change_location,
'age': self.er_magic_data.change_age}
grid_name = str(grid.GetName())
cols = list(range(grid.GetNumberCols()))
col_labels = []
for col in cols:
col_labels.append(grid.GetColLabelValue(col))
for row in grid.changes: # go through changes and update data structures
if row == -1:
continue
else:
data_dict = {}
for num, label in enumerate(col_labels):
if label:
data_dict[str(label)] = str(grid.GetCellValue(row, num))
new_name = str(grid.GetCellValue(row, 0))
old_name = self.temp_data[grid_name][row]
data_methods[grid_name](new_name, old_name, data_dict)
grid.changes = False
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all changes to er_magic data object
self.grid_builder.save_grid_data()
# don't actually write data in this step (time-consuming)
# instead, write to files when user is done editing
#self.er_magic_data.write_files()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
| bsd-3-clause | -1,870,059,521,684,590,800 | 42.059787 | 323 | 0.604953 | false | 3.690062 | false | false | false |
kevinarpe/kevinarpe-rambutan3 | tests/check_args/other/test_RRangeSizeStr.py | 1 | 1887 | import pytest
from rambutan3.check_args.RCheckArgsError import RCheckArgsError
from rambutan3.check_args.other.RRangeSizeStr import RRangeSizeStr
from tests.check_args.collection import test_RRangeSizeMatcher
def test_ctor():
test_RRangeSizeMatcher.core_test_ctor(RRangeSizeStr)
def test_check_arg():
with pytest.raises(RCheckArgsError):
__check_arg([123], min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg([123], max_size=1)
with pytest.raises(RCheckArgsError):
__check_arg([123], min_size=1, max_size=2)
with pytest.raises(RCheckArgsError):
__check_arg(None, min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg(123, min_size=1)
__check_arg('abc', min_size=1)
with pytest.raises(RCheckArgsError):
__check_arg('abc', min_size=4)
__check_arg('abc', max_size=3)
with pytest.raises(RCheckArgsError):
__check_arg('abc', max_size=2)
with pytest.raises(RCheckArgsError):
__check_arg('', min_size=1, max_size=3)
__check_arg('a', min_size=1, max_size=3)
__check_arg('ab', min_size=1, max_size=3)
__check_arg('abc', min_size=1, max_size=3)
with pytest.raises(RCheckArgsError):
__check_arg('abcd', min_size=1, max_size=3)
def __check_arg(value, *, min_size: int=-1, max_size: int=-1):
m = RRangeSizeStr(min_size=min_size, max_size=max_size)
assert value is m.check_arg(value, 'dummy_arg_name')
def test__eq__and__ne__():
test_RRangeSizeMatcher.core_test__eq__and__ne__(RRangeSizeStr)
def test__hash__():
test_RRangeSizeMatcher.core_test__hash__(RRangeSizeStr)
def test__str__():
assert str(RRangeSizeStr(min_size=1)) == 'str where size >= 1'
assert str(RRangeSizeStr(max_size=1)) == 'str where size <= 1'
assert str(RRangeSizeStr(min_size=1, max_size=2)) == 'str where size >= 1 and size <= 2'
| gpl-3.0 | -8,604,642,178,105,160,000 | 28.484375 | 92 | 0.656598 | false | 2.921053 | true | false | false |
nohona/cron-crm | usr/local/certbot/certbot/tests/errors_test.py | 4 | 1328 | """Tests for certbot.errors."""
import unittest
import mock
from acme import messages
from certbot import achallenges
from certbot.tests import acme_util
class FailedChallengesTest(unittest.TestCase):
"""Tests for certbot.errors.FailedChallenges."""
def setUp(self):
from certbot.errors import FailedChallenges
self.error = FailedChallenges(set([achallenges.DNS(
domain="example.com", challb=messages.ChallengeBody(
chall=acme_util.DNS01, uri=None,
error=messages.Error(typ="tls", detail="detail")))]))
def test_str(self):
self.assertTrue(str(self.error).startswith(
"Failed authorization procedure. example.com (dns-01): tls"))
class StandaloneBindErrorTest(unittest.TestCase):
"""Tests for certbot.errors.StandaloneBindError."""
def setUp(self):
from certbot.errors import StandaloneBindError
self.error = StandaloneBindError(mock.sentinel.error, 1234)
def test_instance_args(self):
self.assertEqual(mock.sentinel.error, self.error.socket_error)
self.assertEqual(1234, self.error.port)
def test_str(self):
self.assertTrue(str(self.error).startswith(
"Problem binding to port 1234: "))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| gpl-3.0 | -5,918,238,343,923,517,000 | 29.181818 | 73 | 0.676958 | false | 3.905882 | true | false | false |
project-hypr/hypr2 | tests/providers/crud/test_crud_crud.py | 1 | 12472 | # Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved.
#
# Licensed under the Simplified BSD License (the "License");
# you may not use this file except in compliance with the License.
"""Test basic CRUD operations of the CRUDProvider."""
import json
import pytest
from hypr.providers import CRUDProvider
def deserialize(data, model):
"""Deserialize JSON data."""
data = json.loads(data)
if 'content' in data and 'count' in data:
return data['count'], [model.load(r) for r in data['content']]
return model.load(data)
@pytest.fixture
def app(app, model):
"""All the tests are conducted with application/json as default mime."""
provider = type('IntestProvider', (CRUDProvider,), {'__model__': model})
app.add_provider(provider, '/test', '/test/<int:id>')
return app
class TestModelCreate:
"""Test create."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
payload = json.dumps({'value': 'foo'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 201
data = deserialize(rv.text, model)
assert data == model.one(data.id)
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 201
count, resources = deserialize(rv.text, model)
for resource in resources:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestProviderRead:
"""Test read."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == model.count() == 5
assert sorted(resources) == sorted(model.get())
def test_get_one(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource == model.one(1)
@pytest.mark.populate(5)
class TestModelUpdate:
"""Test update."""
models = 'SQLiteModel',
def test_update(self, app, model):
"""Update an instance with PATCH."""
ref = model.one(1)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(1)
def test_update_alt(self, app, model):
"""Update an instance with PUT."""
ref = model.one(2)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.put('/test/2', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(2)
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 200
count, data = deserialize(rv.text, model)
for instance in ref:
assert instance != model.one(instance.id)
for resource in data:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestModelDelete:
"""Test delete."""
models = 'SQLiteModel',
def test_delete(self, app, model):
"""Delete a resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 204
assert model.one(1) is None
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 204
for instance in ref:
assert model.one(instance.id) is None
@pytest.mark.populate(5)
class TestMissingPayloadException:
"""Test requests with missing payload."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
with app.test_client() as client:
rv = client.post('/test')
assert rv.status == 400
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
with app.test_client() as client:
rv = client.post('/test?_bulk=1')
assert rv.status == 400
def test_update(self, app, model):
"""Update an instance."""
with app.test_client() as client:
rv = client.patch('/test/1')
assert rv.status == 400
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
with app.test_client() as client:
rv = client.put('/test?_bulk=1')
assert rv.status == 400
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
with app.test_client() as client:
rv = client.delete('/test?_bulk=1')
assert rv.status == 400
@pytest.mark.populate(5)
class TestInvalidPayloadException:
"""Test requests with invalid payload."""
models = 'SQLiteModel',
def test_create(self, app):
"""Create one resource."""
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
def test_update(self, app, model):
"""Update one resource."""
ref = model.one(1)
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 400
assert ref == model.one(1)
@pytest.mark.populate(5)
class TestInvalidBulkRequest:
"""Test invalid bulk requests."""
models = 'SQLiteModel',
def test_bulk_create_missing_flag(self, app, model):
"""A missing bulk flag returns an error 400."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_missing_flag(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_flag(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_on_single_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_on_single_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_unknown_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 100, 'value': 'test_ok1'} # unkwnown resource
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_unknown_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 100} # unknwon resource
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_create_invalid_property(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'invalid': 'property'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_invalid_property(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'invalid': 'property'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_missing_id(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'value': 'test_ok1'} # missing id
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_id(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{} # missing id
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
class TestEmptySet:
"""Crud operations (except create) on an empty database."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Get an empty set."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == 0
assert resources == []
def test_get_one(self, app, model):
"""Get an unknown resource."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 404
def test_update(self, app, model):
"""Update an unknown resource."""
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 404
def test_delete(self, app, model):
"""Delete an unknown resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 404
| bsd-2-clause | 4,588,177,312,490,519,600 | 28.980769 | 76 | 0.552357 | false | 3.919547 | true | false | false |
sergei-maertens/django-systemjs | docs/_ext/djangodocs.py | 1 | 2159 | """
Taken from djangoproject/django docs.
Sphinx plugins for Django documentation.
"""
import re
from sphinx import addnodes
from sphinx.util.compat import Directive
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.set_translator('djangohtml', DjangoHTMLTranslator)
return {'parallel_read_safe': True}
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
version_text = {
'versionchanged': 'Changed in %s',
'versionadded': 'Added in %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
| mit | 5,868,693,548,885,009,000 | 29.842857 | 83 | 0.6151 | false | 3.883094 | false | false | false |
matthew-brett/dmg-wheel-installer | make_installer.py | 1 | 6064 | #!/usr/bin/env python
""" Make dmg installer for Python.org Python from Python wheels """
from __future__ import division, print_function
DESCRIP = "Make dmg installer for Python.org Python from Python wheels"
EPILOG = \
"""Make DMG installer from wheels
* Collect source packages for pip, setuptools
* Collect needed wheels using "pip wheel" command
* Write directory to DMG containing source and wheel packages
* Write "postinstall" script to install setuptools, pip, then install wheels
* Write "postinstall" script in ".pkg" double click installer
* Package result into DMG file.
"""
import os
from os.path import exists, join as pjoin
import shutil
from subprocess import check_call
from argparse import ArgumentParser, RawDescriptionHelpFormatter
try:
from urllib2 import urlopen, URLError # Python 2
except ImportError:
from urllib.request import urlopen, URLError # Python 3
# Defaults
PYTHON_VERSION='2.7'
# Constants
# Installed location of Python.org Python
PY_ORG_BASE='/Library/Frameworks/Python.framework/Versions/'
# Path for directory that will become the dmg contents
DMG_DIR='dmg_root'
# Subdirectory containing wheels and source packages
PKG_DIR = 'packages'
# Package directory within dmg_directory
DMG_PKG_DIR = DMG_DIR + '/' + PKG_DIR
# get-pip.py URL
GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py'
def rm_mk_dir(dirname):
if exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def mkdirs():
[rm_mk_dir(pth) for pth in (
DMG_PKG_DIR,
'scripts',
'pkg_template')]
def get_pip_params(args):
params = '--no-index' if args.no_index else []
for link in args.find_links:
params.append('--find-links=' + link)
return params
def get_pippers(pip_params, get_pip_path=None):
pip_cmd = ['pip', 'install',
'--download', DMG_PKG_DIR,
'pip', 'setuptools'] + pip_params
check_call(pip_cmd)
if not get_pip_path is None:
shutil.copy2(get_pip_path, DMG_PKG_DIR)
return
url_obj = urlopen(GET_PIP_URL)
with open(DMG_PKG_DIR + '/get-pip.py', 'wt') as fobj:
fobj.write(url_obj.read())
def get_wheels(version, requirements, pip_params):
pip_exe = '{0}/{1}/bin/pip{1}'.format(PY_ORG_BASE, version, version)
if not exists(pip_exe):
raise RuntimeError('Need to install pip for python at ' +
'{0}/bin/python{1}'.format(PY_ORG_BASE, version))
# Install wheel locally just in case
check_call([pip_exe, 'install'] + pip_params + ['wheel'])
check_call([pip_exe, 'wheel', '-w', DMG_PKG_DIR] + pip_params +
list(requirements))
def write_post(py_version, requirements):
to_install = ', '.join(['"{0}"'.format(r) for r in requirements])
with open('scripts/postinstall', 'wt') as fobj:
fobj.write(
r"""#!/usr/bin/env python
# Install into Python.org python
import sys
import os
from os.path import exists, dirname
from subprocess import check_call
# Find disk image files
package_path = os.environ.get('PACKAGE_PATH')
if package_path is None:
sys.exit(10)
package_dir = dirname(package_path)
wheelhouse = package_dir + '/{pkg_dir}'
# Find Python.org Python
python_bin = '{py_org_base}/{py_version}/bin'
python_path = python_bin + '/python{py_version}'
if not exists(python_path):
sys.exit(20)
# Install pip
check_call([python_path, wheelhouse + '/get-pip.py', '-f', wheelhouse,
'--no-setuptools'])
# Find pip
expected_pip = python_bin + '/pip{py_version}'
if not exists(expected_pip):
sys.exit(30)
pip_cmd = [expected_pip, 'install', '--no-index', '--upgrade',
'--find-links', wheelhouse]
check_call(pip_cmd + ['setuptools'])
check_call(pip_cmd + [{to_install}])
""".format(py_org_base = PY_ORG_BASE,
py_version = py_version,
to_install = to_install,
pkg_dir = PKG_DIR,
))
check_call(['chmod', 'a+x', 'scripts/postinstall'])
def write_pkg(identifier, version):
pkg_fname = pjoin(DMG_DIR, '{0}-{1}.pkg'.format(identifier, version))
check_call(['pkgbuild', '--root', 'pkg_template', '--nopayload', '--scripts',
'scripts', '--identifier', identifier, '--version', version,
pkg_fname])
def write_dmg(identifier, py_version, pkg_version):
dmg_name = '{0}-py{1}-{2}'.format(
identifier,
py_version.replace('.', ''),
pkg_version)
check_call(['hdiutil', 'create', '-srcfolder', DMG_DIR,
'-volname', dmg_name,
dmg_name + '.dmg'])
def main():
parser = ArgumentParser(description=DESCRIP,
epilog=EPILOG,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('pkg_name', type=str, help='root name of installer')
parser.add_argument('pkg_version', type=str, help='version of installer')
parser.add_argument('requirements', type=str, nargs='+',
help='pip requirement strings')
parser.add_argument('--python-version', type=str, default=PYTHON_VERSION,
help='Python version in major.minor format, e.g "3.4"')
parser.add_argument('--no-index', action='store_true',
help='disable search of pip indices when fetching '
'packages to make installer')
parser.add_argument('--find-links', '-f', type=str, nargs='*', default=[],
help='locations to find packages to make installer')
parser.add_argument('--get-pip-path', type=str,
help='local path to "get-pip.py"')
# parse the command line
args = parser.parse_args()
pip_params = get_pip_params(args)
mkdirs()
get_pippers(pip_params, args.get_pip_path)
get_wheels(args.python_version, args.requirements, pip_params)
write_post(args.python_version, args.requirements)
write_pkg(args.pkg_name, args.pkg_version)
write_dmg(args.pkg_name, args.python_version, args.pkg_version)
if __name__ == '__main__':
main()
| bsd-2-clause | 407,317,834,380,431,300 | 33.850575 | 81 | 0.636873 | false | 3.5338 | false | false | false |
Arcbot-Org/Arcbot | bolt/discord/models/channel.py | 1 | 1176 | from bolt.discord.models.base import Snowflake, Model, Field, ListField, Enum, Timestamp
from bolt.discord.models.user import User
from bolt.discord.permissions import Permission
class ChannelType(Enum):
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
class PermissionOverwrite(Model):
__repr_keys__ = ['id', 'type']
id = Field(Snowflake)
type = Field(str)
deny = Field(Permission)
allow = Field(Permission)
class Channel(Model):
__repr_keys__ = ['id', 'name', 'type']
id = Field(Snowflake, required=True)
type = Field(ChannelType, required=True)
guild_id = Field(Snowflake)
position = Field(int)
permission_overwrites = ListField(PermissionOverwrite)
name = Field(str, max_length=100)
topic = Field(str, max_length=1024)
nsfw = Field(bool)
last_message_id = Field(Snowflake)
bitrate = Field(int)
user_limit = Field(int)
rate_limit_per_user = Field(int)
recipients = ListField(User)
icon = Field(str)
owner_id = Field(Snowflake)
application_id = Field(Snowflake)
parent_id = Field(Snowflake)
last_pin_timestamp = Field(Timestamp)
| gpl-3.0 | 4,696,457,051,982,362,000 | 26.348837 | 88 | 0.668367 | false | 3.169811 | false | false | false |
gnowledge/OTM2 | opentreemap/treemap/util.py | 1 | 7219 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import datetime
from collections import OrderedDict
from urlparse import urlparse
from django.shortcuts import get_object_or_404, resolve_url
from django.http import HttpResponse
from django.utils.encoding import force_str, force_text
from django.utils.functional import Promise
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.conf import settings
from django.core.exceptions import ValidationError, MultipleObjectsReturned
from django.utils.translation import ugettext_lazy as trans
from django.db.models.fields.files import ImageFieldFile
from django.contrib.gis.geos import Point
from opentreemap.util import dict_pop
from treemap.instance import Instance
def safe_get_model_class(model_string):
"""
In a couple of cases we want to be able to convert a string
into a valid django model class. For instance, if we have
'Plot' we want to get the actual class for 'treemap.models.Plot'
in a safe way.
This function returns the class represented by the given model
if it exists in 'treemap.models'
"""
from treemap.models import MapFeature
# All of our models live in 'treemap.models', so
# we can start with that namespace
models_module = __import__('treemap.models')
if hasattr(models_module.models, model_string):
return getattr(models_module.models, model_string)
elif MapFeature.has_subclass(model_string):
return MapFeature.get_subclass(model_string)
else:
raise ValidationError(
trans('invalid model type: "%s"') % model_string)
def add_visited_instance(request, instance):
if not (hasattr(request, 'session') and request.session):
return
# get the visited instances as a list of tuples, read into
# OrderedDict. OrderedDict has nice convenience methods for this
# purpose, but doesn't serialize well, so we pass it through.
visited_instances = request.session.get('visited_instances', [])
visited_instances = OrderedDict(visited_instances)
# delete the existing entry for this instance so it can be
# reinserted as the most recent entry.
if instance.pk in visited_instances:
del visited_instances[instance.pk]
stamp = datetime.datetime.now().isoformat()
visited_instances[instance.pk] = stamp
# turn back into a list of tuples
request.session['visited_instances'] = visited_instances.items()
request.session.modified = True
def get_last_visited_instance(request):
if not hasattr(request, 'session'):
instance = None
else:
visited_instances = request.session.get('visited_instances', [])
if not visited_instances:
instance = None
else:
# get the first tuple member of the last entry
# visited_instances have entries '(<pk>, <timestamp>)'
instance_id = visited_instances[-1][0]
try:
instance = Instance.objects.get(pk=instance_id)
except (Instance.DoesNotExist, MultipleObjectsReturned):
instance = None
return instance
def login_redirect(request):
# Reference: django/contrib/auth/decorators.py
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if (not login_scheme or login_scheme == current_scheme)\
and (not login_netloc or login_netloc == current_netloc): # NOQA
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, REDIRECT_FIELD_NAME)
def get_instance_or_404(**kwargs):
url_name, found = dict_pop(kwargs, 'url_name')
if found:
kwargs['url_name__iexact'] = url_name
return get_object_or_404(Instance, **kwargs)
def package_field_errors(model_name, validation_error):
"""
validation_error contains a dictionary of error messages of the form
{fieldname1: [messages], fieldname2: [messages]}.
Return a version keyed by "objectname.fieldname" instead of "fieldname".
"""
dict = {'%s.%s' % (to_object_name(model_name), field): msgs
for (field, msgs) in validation_error.message_dict.iteritems()}
return dict
# https://docs.djangoproject.com/en/dev/topics/serialization/#id2
class LazyEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, Promise):
return force_text(obj)
elif hasattr(obj, 'dict'):
return obj.dict()
elif isinstance(obj, set):
return list(obj)
elif hasattr(obj, 'as_dict'):
return obj.as_dict()
elif isinstance(obj, Point):
srid = 4326
obj.transform(srid)
return {'x': obj.x, 'y': obj.y, 'srid': srid}
# TODO: Handle S3
elif isinstance(obj, ImageFieldFile):
if obj:
return obj.url
else:
return None
else:
return super(LazyEncoder, self).default(obj)
def all_subclasses(cls):
"""Return all subclasses of given class"""
subclasses = set(cls.__subclasses__())
return subclasses | {clz for s in subclasses for clz in all_subclasses(s)}
def leaf_subclasses(cls):
"""Return all leaf subclasses of given class"""
all = all_subclasses(cls)
leaves = {s for s in all if not s.__subclasses__()}
return leaves
def to_object_name(model_name):
"""BenefitCurrencyConversion -> benefitCurrencyConversion"""
return model_name[0].lower() + model_name[1:]
def to_model_name(object_name):
"""benefitCurrencyConversion -> BenefitCurrencyConversion"""
return object_name[0].upper() + object_name[1:]
def get_filterable_audit_models():
from treemap.models import MapFeature
map_features = [c.__name__ for c in leaf_subclasses(MapFeature)]
models = map_features + ['Tree']
return {model.lower(): model for model in models}
def get_csv_response(filename):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s;' % filename
response['Cache-Control'] = 'no-cache'
# add BOM to support CSVs in MS Excel
# http://en.wikipedia.org/wiki/Byte_order_mark
response.write(u'\ufeff'.encode('utf8'))
return response
def get_json_response(filename):
response = HttpResponse(content_type='application/json')
response['Content-Disposition'] = 'attachment; filename=%s;' % filename
response['Cache-Control'] = 'no-cache'
return response
def can_read_as_super_admin(request):
if not hasattr(request.user, 'is_super_admin'):
return False
else:
return request.user.is_super_admin() and request.method == 'GET'
| gpl-3.0 | -7,807,075,686,139,737,000 | 33.706731 | 78 | 0.677102 | false | 4.021727 | false | false | false |
Ra93POL/VKAPI | __init__.py | 1 | 4277 | # -* coding: utf-8 -*-
import VKAPI, dataMngt, time
vk = None
one_account = {'vk.com': True, 'ok.ru': True, 'disk.yandex.ru': True}
number_account = dataMngt.get_number_account()
def check_app_data(one_account, res_auth, site):
if res_auth == 'frozen':
print 'Account of "'+vk.user_data[site][1]+'" is frozen'
if one_account[site] == False: reauthorize(site, account='next')
elif not vk.app_data[site].has_key('access_token'):
print 'Access token for "'+vk.user_data[site][1]+'" wasn\'t given!'
if one_account[site] == False: reauthorize(site, account='next')
def reauthorize(site, account='next'):
global vk, number_account
time.sleep(10)
if account == 'same': number_account[site] -= 1
dataMngt.reload_user_data(vk.user_data, number_account, site)
res_auth = vk.do_authorize(site)
check_app_data(one_account, res_auth, site)
def authorize(*sites):
global vk, one_account, number_account
user_data = dataMngt.load_user_data(one_account, number_account)
vk = VKAPI.VK(user_data)
for site in sites:
res_auth = vk.do_authorize(site)
check_app_data(one_account, res_auth, site)
return vk
################# ------ OK.RU ----- ################
def ok_usersSetStatus(status):
return vk.api('ok.ru', 'users.setStatus', {'status': status})[1]
def ok_usersGetInfo(uid, fields, emptyPictures='false'):
params = {
'uid': uid,
'fields': fields,
'emptyPictures': emptyPictures}
return vk.api('ok.ru', 'users.getInfo', params)[1]
def ok_photosEditPhoto(photo_id, description):
params = {
'photo_id': photo_id,
'description': description}
return vk.api('ok.ru', 'photos.editPhoto', params)[1]
def ok_photosGetPhotos(uid, fid='', aid=''):
params = {
'uid': uid,
'fid': fid,
'aid': aid}
return vk.api('ok.ru', 'photos.getPhotos', params)[1]
################# ------ VK.COM ----- ################
def proccessing_error(cond, res):
global one_account
if cond == 'success': return res
elif cond == 'error':
code = res['code']
msg = res['msg']
oa = one_account['vk.com']
print code, msg
if code == 5:
reauthorize('vk.com', 'next')
print '\n Connected to', vk.user_data['vk.com'][1], '\n'
return 'reauthed'
elif code == 15: pass
elif code == 220: # защита от спама
if oa == False:
reauthorize('vk.com', 'next')
print '\n Connected to', vk.user_data['vk.com'][1], '\n'
return 'reauthed'
def vk_usersGet(user_ids, fields, name_case='nom'):
params = {
'user_ids': user_ids,
'fields': fields,
'name_case': name_case}
cond, res = vk.api('vk.com', 'users.get', params)
return proccessing_error(cond, res)
def vk_wallPost(owner_id, message, attachments='', from_group=0):
params = {
'owner_id': owner_id,
'message': message,
'attachments': attachments,
'from_group': from_group}
cond, res = vk.api('vk.com', 'wall.post', params)
return proccessing_error(cond, res)
def vk_newsfeedSearch(q, count, start_from='', end_time='', extended=0):
params = {
'q': q,
'count': count,
'start_from': start_from,
'end_time': end_time,
'extended': extended}
cond, res = vk.api('vk.com', 'newsfeed.search', params)
return proccessing_error(cond, res)
def vk_groupsSearch(q, count, offset=0, city_id=''):
parametrs = {
'q': q, 'offset': offset, 'count': count,
'sort': 2, 'city_id': city_id}
cond, res = vk.api('vk.com', 'groups.search', parametrs)
return proccessing_error(cond, res)
def vk_groupsGetById(group_id, fields=''):
parametrs = {'group_id': group_id, 'fields': fields}
cond, res = vk.api('vk.com', 'groups.getById', parametrs)
return proccessing_error(cond, res)
def vk_groupsGetMembers(group_id, count, offset=0, fields=''):
parametrs = {
'group_id': group_id,
'fields': fields,
'offset': offset,
'count': count}
cond, res = vk.api('vk.com', 'groups.getMembers', parametrs)
return proccessing_error(cond, res)
| gpl-3.0 | -2,256,723,707,737,997,300 | 33.112 | 75 | 0.579737 | false | 3.112409 | false | false | false |
chrislit/abydos | abydos/distance/_lcprefix.py | 1 | 4129 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._lcprefix.
Longest common prefix
"""
from os.path import commonprefix
from typing import List, cast
from ._distance import _Distance
__all__ = ['LCPrefix']
class LCPrefix(_Distance):
"""Longest common prefix.
.. versionadded:: 0.4.0
"""
def lcprefix(self, strings: List[str]) -> str:
"""Return the longest common prefix of a list of strings.
Longest common prefix (LCPrefix).
Parameters
----------
strings : list of strings
Strings for comparison
Returns
-------
str
The longest common prefix
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.lcprefix(['cat', 'hat'])
''
>>> pfx.lcprefix(['Niall', 'Neil'])
'N'
>>> pfx.lcprefix(['aluminum', 'Catalan'])
''
>>> pfx.lcprefix(['ATCG', 'TAGC'])
''
.. versionadded:: 0.4.0
"""
return cast(str, commonprefix(strings))
def dist_abs(self, src: str, tar: str, *args: str) -> int:
"""Return the length of the longest common prefix of the strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
*args : strs
Additional strings for comparison
Raises
------
ValueError
All arguments must be of type str
Returns
-------
int
The length of the longest common prefix
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.dist_abs('cat', 'hat')
0
>>> pfx.dist_abs('Niall', 'Neil')
1
>>> pfx.dist_abs('aluminum', 'Catalan')
0
>>> pfx.dist_abs('ATCG', 'TAGC')
0
.. versionadded:: 0.4.0
"""
strings = [src, tar]
for arg in args:
if isinstance(arg, str):
strings.append(arg)
else:
raise TypeError('All arguments must be of type str')
return len(self.lcprefix(strings))
def sim(self, src: str, tar: str, *args: str) -> float:
r"""Return the longest common prefix similarity of two or more strings.
Longest common prefix similarity (:math:`sim_{LCPrefix}`).
This employs the LCPrefix function to derive a similarity metric:
:math:`sim_{LCPrefix}(s,t) = \frac{|LCPrefix(s,t)|}{max(|s|, |t|)}`
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
*args : strs
Additional strings for comparison
Returns
-------
float
LCPrefix similarity
Examples
--------
>>> pfx = LCPrefix()
>>> pfx.sim('cat', 'hat')
0.0
>>> pfx.sim('Niall', 'Neil')
0.2
>>> pfx.sim('aluminum', 'Catalan')
0.0
>>> pfx.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
dist = self.dist_abs(src, tar, *args)
maxlen = max(len(src), len(tar), *[len(arg) for arg in args])
return dist / maxlen
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | 6,304,868,237,515,560,000 | 23.873494 | 79 | 0.534996 | false | 4.108458 | false | false | false |
renalreg/radar | tests/api/serializers/test_salt_wasting_clinical_features_serializer.py | 1 | 7742 | from datetime import date
from cornflake.exceptions import ValidationError
import pytest
from radar.api.serializers.salt_wasting import SaltWastingClinicalFeaturesSerializer
from radar.models.patient_demographics import PatientDemographics
from radar.models.patients import Patient
from radar.models.users import User
@pytest.fixture
def patient():
patient = Patient()
patient_demographics = PatientDemographics()
patient_demographics.date_of_birth = date(2000, 1, 1)
patient.patient_demographics.append(patient_demographics)
return patient
@pytest.fixture
def clinical_features(patient):
return {
'patient': patient,
'normal_pregnancy': False,
'abnormal_pregnancy_text': 'Foo',
'neurological_problems': True,
'seizures': True,
'abnormal_gait': True,
'deafness': True,
'other_neurological_problem': True,
'other_neurological_problem_text': 'Bar',
'joint_problems': True,
'joint_problems_age': 21,
'x_ray_abnormalities': True,
'chondrocalcinosis': True,
'other_x_ray_abnormality': True,
'other_x_ray_abnormality_text': 'Baz'
}
def test_valid(clinical_features):
obj = valid(clinical_features)
assert obj.normal_pregnancy is False
assert obj.abnormal_pregnancy_text == 'Foo'
assert obj.neurological_problems is True
assert obj.seizures is True
assert obj.abnormal_gait is True
assert obj.deafness is True
assert obj.other_neurological_problem is True
assert obj.other_neurological_problem_text == 'Bar'
assert obj.joint_problems is True
assert obj.joint_problems_age == 21
assert obj.x_ray_abnormalities is True
assert obj.chondrocalcinosis is True
assert obj.other_x_ray_abnormality is True
assert obj.other_x_ray_abnormality_text == 'Baz'
def test_normal_pregnancy_true(clinical_features):
clinical_features['normal_pregnancy'] = True
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_true_none(clinical_features):
clinical_features['normal_pregnancy'] = None
valid(clinical_features)
def test_normal_pregnancy_true_text_none(clinical_features):
clinical_features['normal_pregnancy'] = True
clinical_features['abnormal_pregnancy_text'] = None
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_true_text_blank(clinical_features):
clinical_features['normal_pregnancy'] = True
clinical_features['abnormal_pregnancy_text'] = ''
obj = valid(clinical_features)
assert obj.abnormal_pregnancy_text is None
def test_normal_pregnancy_false_text_none(clinical_features):
clinical_features['abnormal_pregnancy_text'] = None
invalid(clinical_features)
def test_normal_pregnancy_false_text_blank(clinical_features):
clinical_features['abnormal_pregnancy_text'] = ''
invalid(clinical_features)
def test_neurological_problems_false(clinical_features):
obj = valid(clinical_features)
obj.seizures = None
obj.abnormal_gait = None
obj.deafness = None
obj.other_neurological_problem = None
obj.other_neurological_problem_text = None
def test_neurological_problems_none(clinical_features):
clinical_features['neurological_problems'] = None
valid(clinical_features)
def test_neurological_problems_true_seizures_none(clinical_features):
clinical_features['seizures'] = None
invalid(clinical_features)
def test_neurological_problems_false_seizures_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['seizures'] = None
valid(clinical_features)
def test_neurological_problems_true_abnormal_gait_none(clinical_features):
clinical_features['abnormal_gait'] = None
invalid(clinical_features)
def test_neurological_problems_false_abnormal_gait_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['abnormal_gait'] = None
valid(clinical_features)
def test_neurological_problems_true_deafness_none(clinical_features):
clinical_features['deafness'] = None
invalid(clinical_features)
def test_neurological_problems_false_deafness_none(clinical_features):
clinical_features['neurological_problems'] = False
clinical_features['deafness'] = None
valid(clinical_features)
def test_neurological_problems_true_other_neurological_problem_none(clinical_features):
clinical_features['other_neurological_problem'] = None
invalid(clinical_features)
def test_other_neurological_problem_false_text_none(clinical_features):
clinical_features['other_neurological_problem'] = False
clinical_features['other_neurological_problem_text'] = None
valid(clinical_features)
def test_other_neurological_problem_true_text_blank(clinical_features):
clinical_features['other_neurological_problem_text'] = ''
invalid(clinical_features)
def test_other_neurological_problem_true_text_none(clinical_features):
clinical_features['other_neurological_problem_text'] = None
invalid(clinical_features)
def test_joint_problems_false(clinical_features):
clinical_features['joint_problems'] = False
obj = valid(clinical_features)
assert obj.joint_problems_age is None
assert obj.x_ray_abnormalities is None
assert obj.chondrocalcinosis is None
assert obj.other_x_ray_abnormality is None
assert obj.other_x_ray_abnormality_text is None
def test_joint_problems_none(clinical_features):
clinical_features['neurological_problems'] = None
valid(clinical_features)
def test_joint_problems_true_joint_problems_age_none(clinical_features):
clinical_features['joint_problems_age'] = None
invalid(clinical_features)
def test_joint_problems_false_joint_problems_age_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['joint_problems_age'] = None
valid(clinical_features)
def test_joint_problems_true_joint_problems_age_too_young(clinical_features):
clinical_features['joint_problems_age'] = -1
invalid(clinical_features)
def test_joint_problems_true_joint_problems_age_too_old(clinical_features):
clinical_features['x_ray_abnormalities'] = 121
invalid(clinical_features)
def test_joint_problems_true_x_ray_abnormalities_none(clinical_features):
clinical_features['x_ray_abnormalities'] = None
invalid(clinical_features)
def test_joint_problems_false_x_ray_abnormalities_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['x_ray_abnormalities'] = None
valid(clinical_features)
def test_joint_problems_true_chondrocalcinosis_none(clinical_features):
clinical_features['chondrocalcinosis'] = None
invalid(clinical_features)
def test_joint_problems_false_chondrocalcinosis_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['chondrocalcinosis'] = None
valid(clinical_features)
def test_joint_problems_true_other_x_ray_abnormality_none(clinical_features):
clinical_features['other_x_ray_abnormality'] = None
invalid(clinical_features)
def test_joint_problems_false_other_x_ray_abnormality_none(clinical_features):
clinical_features['joint_problems'] = False
clinical_features['other_x_ray_abnormality'] = None
valid(clinical_features)
def invalid(data):
with pytest.raises(ValidationError) as e:
valid(data)
return e
def valid(data):
serializer = SaltWastingClinicalFeaturesSerializer(data=data, context={'user': User(is_admin=True)})
serializer.is_valid(raise_exception=True)
return serializer.save()
| agpl-3.0 | -9,060,480,136,143,703,000 | 30.991736 | 104 | 0.740765 | false | 3.22449 | true | false | false |
AlexeyKruglov/Skeinforge-fabmetheus | skeinforge_application/skeinforge_plugins/craft_plugins/inset.py | 1 | 21880 | #! /usr/bin/env python
"""
This page is in the table of contents.
Inset will inset the outside outlines by half the edge width, and outset the inside outlines by the same amount.
The inset manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Inset
==Settings==
===Add Custom Code for Temperature Reading===
Default is on.
When selected, the M105 custom code for temperature reading will be added at the beginning of the file.
===Infill in Direction of Bridge===
Default is on.
When selected, the infill will be in the direction of any bridge across a gap, so that the fill will be able to span the bridge more easily.
===Loop Order Choice===
Default loop order choice is 'Ascending Area'.
When overlap is to be removed, for each loop, the overlap is checked against the list of loops already extruded. If the latest loop overlaps an already extruded loop, the overlap is removed from the latest loop. The loops are ordered according to their areas.
====Ascending Area====
When selected, the loops will be ordered in ascending area. With thin-walled parts, if overlap is being removed, the outside of the container will not be extruded. Holes will be the correct size.
====Descending Area====
When selected, the loops will be ordered in descending area. With thin-walled parts, if overlap is being removed, the inside of the container will not be extruded. Holes will be missing the interior wall, so they will be slightly wider than the model size.
===Overlap Removal Width over Perimeter Width===
Default is 0.6.
Defines the ratio of the overlap removal width over the edge width. Any part of the extrusion that comes within the overlap removal width of another extrusion is removed. This is to prevent the extruder from depositing two extrusions right beside each other. If the 'Overlap Removal Width over Perimeter Width' is less than 0.2, the overlap will not be removed.
===Turn Extruder Heater Off at Shut Down===
Default is on.
When selected, the M104 S0 gcode line will be added to the end of the file to turn the extruder heater off by setting the extruder heater temperature to 0.
===Volume Fraction===
Default: 0.93
The 'Volume Fraction' is the estimated volume of the thread compared to the box defined by the layer height and infill width. This is used in dwindle, splodge, and statistic. It is in inset because inset is a required extrusion tool, earlier in the chain than dwindle and splodge. In dwindle and splodge it is used to determine the filament volume, in statistic it is used to determine the extrusion diameter.
==Examples==
The following examples inset the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and inset.py.
> python inset.py
This brings up the inset dialog.
> python inset.py Screw Holder Bottom.stl
The inset tool is parsing the file:
Screw Holder Bottom.stl
..
The inset tool has created the file:
.. Screw Holder Bottom_inset.gcode
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to work around the Python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cmath
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addAlreadyFilledArounds( alreadyFilledArounds, loop, radius ):
"Add already filled loops around loop to alreadyFilledArounds."
radius = abs(radius)
alreadyFilledLoop = []
slightlyGreaterThanRadius = intercircle.globalIntercircleMultiplier * radius
muchGreaterThanRadius = 2.5 * radius
centers = intercircle.getCentersFromLoop( loop, slightlyGreaterThanRadius )
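	# Inset each center loop by the radius; insets that collapse or reverse
	# winding direction are discarded as degenerate.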
for center in centers:
alreadyFilledInset = intercircle.getSimplifiedInsetFromClockwiseLoop( center, radius )
if intercircle.isLargeSameDirection( alreadyFilledInset, center, radius ):
alreadyFilledLoop.append( alreadyFilledInset )
if len( alreadyFilledLoop ) > 0:
alreadyFilledArounds.append( alreadyFilledLoop )
def addSegmentOutline( isThick, outlines, pointBegin, pointEnd, width ):
"Add a diamond or hexagonal outline for a line segment."
width = abs( width )
exclusionWidth = 0.6 * width
slope = 0.2
if isThick:
slope = 3.0
exclusionWidth = 0.8 * width
segment = pointEnd - pointBegin
segmentLength = abs(segment)
if segmentLength == 0.0:
return
normalizedSegment = segment / segmentLength
outline = []
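	# segmentYMirror is the complex conjugate of the normalized segment;
	# multiplying by it rotates the segment onto the x axis, so the outline can
	# be built in axis-aligned coordinates and rotated back at the end.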
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
along = 0.05
alongLength = along * segmentLength
if alongLength > 0.1 * exclusionWidth:
along *= 0.1 * exclusionWidth / alongLength
alongEnd = 1.0 - along
remainingToHalf = 0.5 - along
alongToWidth = exclusionWidth / slope / segmentLength
pointBeginIntermediate = euclidean.getIntermediateLocation( along, pointBeginRotated, pointEndRotated )
pointEndIntermediate = euclidean.getIntermediateLocation( alongEnd, pointBeginRotated, pointEndRotated )
outline.append( pointBeginIntermediate )
verticalWidth = complex( 0.0, exclusionWidth )
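	# If the sloped ends would need more than about half the segment to reach
	# full width, build a diamond with a single peak; otherwise build a hexagon
	# with a flat top and bottom.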
if alongToWidth > 0.9 * remainingToHalf:
verticalWidth = complex( 0.0, slope * remainingToHalf * segmentLength )
middle = ( pointBeginIntermediate + pointEndIntermediate ) * 0.5
middleDown = middle - verticalWidth
middleUp = middle + verticalWidth
outline.append( middleUp )
outline.append( pointEndIntermediate )
outline.append( middleDown )
else:
alongOutsideBegin = along + alongToWidth
alongOutsideEnd = alongEnd - alongToWidth
outsideBeginCenter = euclidean.getIntermediateLocation( alongOutsideBegin, pointBeginRotated, pointEndRotated )
outsideBeginCenterDown = outsideBeginCenter - verticalWidth
outsideBeginCenterUp = outsideBeginCenter + verticalWidth
outsideEndCenter = euclidean.getIntermediateLocation( alongOutsideEnd, pointBeginRotated, pointEndRotated )
outsideEndCenterDown = outsideEndCenter - verticalWidth
outsideEndCenterUp = outsideEndCenter + verticalWidth
outline.append( outsideBeginCenterUp )
outline.append( outsideEndCenterUp )
outline.append( pointEndIntermediate )
outline.append( outsideEndCenterDown )
outline.append( outsideBeginCenterDown )
outlines.append( euclidean.getRotatedComplexes( normalizedSegment, outline ) )
def getBridgeDirection(belowLoops, layerLoops, radius):
'Get span direction for the majority of the overhanging extrusion edge, if any.'
if len(belowLoops) < 1:
return None
belowOutsetLoops = intercircle.getInsetLoopsFromLoops(belowLoops, -radius)
bridgeRotation = complex()
for loop in layerLoops:
for pointIndex, point in enumerate(loop):
previousIndex = (pointIndex + len(loop) - 1) % len(loop)
bridgeRotation += getOverhangDirection(belowOutsetLoops, loop[previousIndex], point)
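	# Each overhanging segment contributes a doubled-angle vector weighted by
	# its length, so opposite directions reinforce instead of cancelling.  If
	# the sum is too small there is no dominant span direction; otherwise
	# cmath.sqrt halves the doubled angle back to the actual bridge direction.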
if abs(bridgeRotation) < 0.75 * radius:
return None
else:
return cmath.sqrt(bridgeRotation / abs(bridgeRotation))
def getCraftedText( fileName, text='', repository=None):
"Inset the preface file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText(gcodeText, repository=None):
"Inset the preface gcode text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'inset'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( InsetRepository() )
return InsetSkein().getCraftedGcode(gcodeText, repository)
def getDoubledRoundZ( overhangingSegment, segmentRoundZ ):
'Get doubled plane angle around z of the overhanging segment.'
endpoint = overhangingSegment[0]
roundZ = endpoint.point - endpoint.otherEndpoint.point
roundZ *= segmentRoundZ
if abs( roundZ ) == 0.0:
return complex()
if roundZ.real < 0.0:
roundZ *= - 1.0
roundZLength = abs( roundZ )
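	# Squaring a complex number doubles its angle; dividing by the length keeps
	# the magnitude at abs(roundZ), so antiparallel segments reinforce when the
	# directions are summed.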
return roundZ * roundZ / roundZLength
def getInteriorSegments(loops, segments):
'Get segments inside the loops.'
interiorSegments = []
for segment in segments:
center = 0.5 * (segment[0].point + segment[1].point)
if euclidean.getIsInFilledRegion(loops, center):
interiorSegments.append(segment)
return interiorSegments
def getIsIntersectingWithinList(loop, loopList):
"Determine if the loop is intersecting or is within the loop list."
leftPoint = euclidean.getLeftPoint(loop)
for otherLoop in loopList:
if euclidean.getNumberOfIntersectionsToLeft(otherLoop, leftPoint) % 2 == 1:
return True
return euclidean.isLoopIntersectingLoops(loop, loopList)
def getNewRepository():
'Get new repository.'
return InsetRepository()
def getOverhangDirection( belowOutsetLoops, segmentBegin, segmentEnd ):
	'Get the overhang direction contributed by the endpoint segments which overhang the layer below.'
segment = segmentEnd - segmentBegin
normalizedSegment = euclidean.getNormalized( complex( segment.real, segment.imag ) )
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
segmentBegin = segmentYMirror * segmentBegin
segmentEnd = segmentYMirror * segmentEnd
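	# With the segment rotated horizontal, count x intersections against the
	# outset loops of the layer below; the parts of the segment left uncovered
	# by those loops are the overhanging spans.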
solidXIntersectionList = []
y = segmentBegin.imag
solidXIntersectionList.append( euclidean.XIntersectionIndex( - 1.0, segmentBegin.real ) )
solidXIntersectionList.append( euclidean.XIntersectionIndex( - 1.0, segmentEnd.real ) )
for belowLoopIndex in xrange( len( belowOutsetLoops ) ):
belowLoop = belowOutsetLoops[ belowLoopIndex ]
rotatedOutset = euclidean.getRotatedComplexes( segmentYMirror, belowLoop )
euclidean.addXIntersectionIndexesFromLoopY( rotatedOutset, belowLoopIndex, solidXIntersectionList, y )
overhangingSegments = euclidean.getSegmentsFromXIntersectionIndexes( solidXIntersectionList, y )
overhangDirection = complex()
for overhangingSegment in overhangingSegments:
overhangDirection += getDoubledRoundZ( overhangingSegment, normalizedSegment )
return overhangDirection
def getSegmentsFromLoopListsPoints( loopLists, pointBegin, pointEnd ):
"Get endpoint segments from the beginning and end of a line segment."
normalizedSegment = pointEnd - pointBegin
normalizedSegmentLength = abs( normalizedSegment )
if normalizedSegmentLength == 0.0:
return []
normalizedSegment /= normalizedSegmentLength
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
rotatedLoopLists = []
for loopList in loopLists:
rotatedLoopLists.append(euclidean.getRotatedComplexLists(segmentYMirror, loopList))
xIntersectionIndexList = []
xIntersectionIndexList.append( euclidean.XIntersectionIndex( - 1, pointBeginRotated.real ) )
xIntersectionIndexList.append( euclidean.XIntersectionIndex( - 1, pointEndRotated.real ) )
euclidean.addXIntersectionIndexesFromLoopListsY( rotatedLoopLists, xIntersectionIndexList, pointBeginRotated.imag )
segments = euclidean.getSegmentsFromXIntersectionIndexes( xIntersectionIndexList, pointBeginRotated.imag )
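	# Rotate the segment endpoints back from the axis-aligned frame into the
	# original coordinate frame.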
for segment in segments:
for endpoint in segment:
endpoint.point *= normalizedSegment
return segments
def isCloseToLast( paths, point, radius ):
"Determine if the point is close to the last point of the last path."
if len(paths) < 1:
return False
lastPath = paths[-1]
return abs( lastPath[-1] - point ) < radius
def isIntersectingItself( loop, width ):
"Determine if the loop is intersecting itself."
outlines = []
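	# Walk the loop one segment at a time, testing each new segment against the
	# outlines built around the earlier segments; an intersection means the
	# loop doubles back on itself within the extrusion width.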
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if euclidean.isLineIntersectingLoops( outlines, pointBegin, pointEnd ):
return True
addSegmentOutline( False, outlines, pointBegin, pointEnd, width )
return False
def isIntersectingWithinLists( loop, loopLists ):
"Determine if the loop is intersecting or is within the loop lists."
for loopList in loopLists:
if getIsIntersectingWithinList( loop, loopList ):
return True
return False
def writeOutput(fileName, shouldAnalyze=True):
"Inset the carving of a gcode file."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'inset', shouldAnalyze)
class InsetRepository:
"A class to handle the inset settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.inset.html', self)
self.baseNameSynonymDictionary = {
'Infill in Direction of Bridge' : 'carve.csv',
'Infill Width over Thickness (ratio):' : 'fill.csv'}
self.fileNameInput = settings.FileNameInput().getFromFileName(fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Inset', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Inset')
self.addCustomCodeForTemperatureReading = settings.BooleanSetting().getFromValue('Add Custom Code for Temperature Reading', self, True)
self.infillInDirectionOfBridge = settings.BooleanSetting().getFromValue('Infill in Direction of Bridge', self, True)
self.infillWidthOverThickness = settings.FloatSpin().getFromValue(1.3, 'Infill Width over Thickness (ratio):', self, 1.7, 1.5)
self.loopOrderChoice = settings.MenuButtonDisplay().getFromName('Loop Order Choice:', self )
self.loopOrderAscendingArea = settings.MenuRadio().getFromMenuButtonDisplay(self.loopOrderChoice, 'Ascending Area', self, True)
self.loopOrderDescendingArea = settings.MenuRadio().getFromMenuButtonDisplay(self.loopOrderChoice, 'Descending Area', self, False)
self.overlapRemovalWidthOverEdgeWidth = settings.FloatSpin().getFromValue(0.3, 'Overlap Removal Width over Perimeter Width (ratio):', self, 0.9, 0.6)
self.turnExtruderHeaterOffAtShutDown = settings.BooleanSetting().getFromValue('Turn Extruder Heater Off at Shut Down', self, True)
self.volumeFraction = settings.FloatSpin().getFromValue(0.7, 'Volume Fraction (ratio):', self, 1.0, 0.93)
self.executeTitle = 'Inset'
def execute(self):
"Inset button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class InsetSkein:
"A class to inset a skein of extrusions."
def __init__(self):
'Initialize.'
self.belowLoops = []
self.boundary = None
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.loopLayer = None
def addGcodeFromPerimeterPaths(self, isIntersectingSelf, loop, loopLayer, loopLists, radius):
"Add the edge paths to the output."
segments = []
outlines = []
thickOutlines = []
allLoopLists = loopLists[:] + [thickOutlines]
aroundLists = loopLists
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if isIntersectingSelf:
if euclidean.isLineIntersectingLoops(outlines, pointBegin, pointEnd):
segments += getSegmentsFromLoopListsPoints(allLoopLists, pointBegin, pointEnd)
else:
segments += getSegmentsFromLoopListsPoints(loopLists, pointBegin, pointEnd)
addSegmentOutline(False, outlines, pointBegin, pointEnd, self.overlapRemovalWidth)
addSegmentOutline(True, thickOutlines, pointBegin, pointEnd, self.overlapRemovalWidth)
else:
segments += getSegmentsFromLoopListsPoints(loopLists, pointBegin, pointEnd)
edgePaths = []
path = []
muchSmallerThanRadius = 0.1 * radius
segments = getInteriorSegments(loopLayer.loops, segments)
for segment in segments:
pointBegin = segment[0].point
if not isCloseToLast(edgePaths, pointBegin, muchSmallerThanRadius):
path = [pointBegin]
edgePaths.append(path)
path.append(segment[1].point)
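		# If the seam of the loop splits a continuous thread, the last path ends
		# where the first one begins; splice them back into a single path.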
if len(edgePaths) > 1:
firstPath = edgePaths[0]
lastPath = edgePaths[-1]
if abs(lastPath[-1] - firstPath[0]) < 0.1 * muchSmallerThanRadius:
connectedBeginning = lastPath[: -1] + firstPath
edgePaths[0] = connectedBeginning
edgePaths.remove(lastPath)
muchGreaterThanRadius = 6.0 * radius
for edgePath in edgePaths:
if euclidean.getPathLength(edgePath) > muchGreaterThanRadius:
self.distanceFeedRate.addGcodeFromThreadZ(edgePath, loopLayer.z)
def addGcodeFromRemainingLoop(self, loop, loopLayer, loopLists, radius):
"Add the remainder of the loop which does not overlap the alreadyFilledArounds loops."
centerOutset = intercircle.getLargestCenterOutsetLoopFromLoopRegardless(loop, radius)
euclidean.addNestedRingBeginning(self.distanceFeedRate, centerOutset.outset, loopLayer.z)
self.addGcodePerimeterBlockFromRemainingLoop(centerOutset.center, loopLayer, loopLists, radius)
self.distanceFeedRate.addLine('(</boundaryPerimeter>)')
self.distanceFeedRate.addLine('(</nestedRing>)')
def addGcodePerimeterBlockFromRemainingLoop(self, loop, loopLayer, loopLists, radius):
"Add the perimter block remainder of the loop which does not overlap the alreadyFilledArounds loops."
if self.repository.overlapRemovalWidthOverEdgeWidth.value < 0.2:
self.distanceFeedRate.addPerimeterBlock(loop, loopLayer.z)
return
isIntersectingSelf = isIntersectingItself(loop, self.overlapRemovalWidth)
if isIntersectingWithinLists(loop, loopLists) or isIntersectingSelf:
self.addGcodeFromPerimeterPaths(isIntersectingSelf, loop, loopLayer, loopLists, radius)
else:
self.distanceFeedRate.addPerimeterBlock(loop, loopLayer.z)
addAlreadyFilledArounds(loopLists, loop, self.overlapRemovalWidth)
def addInitializationToOutput(self):
"Add initialization gcode to the output."
if self.repository.addCustomCodeForTemperatureReading.value:
self.distanceFeedRate.addLine('M105') # Custom code for temperature reading.
def addInset(self, loopLayer):
"Add inset to the layer."
alreadyFilledArounds = []
extrudateLoops = intercircle.getInsetLoopsFromLoops(loopLayer.loops, self.halfEdgeWidth)
if self.repository.infillInDirectionOfBridge.value:
bridgeRotation = getBridgeDirection(self.belowLoops, extrudateLoops, self.halfEdgeWidth)
if bridgeRotation is not None:
self.distanceFeedRate.addTagBracketedLine('bridgeRotation', bridgeRotation)
self.belowLoops = loopLayer.loops
triangle_mesh.sortLoopsInOrderOfArea(not self.repository.loopOrderAscendingArea.value, extrudateLoops)
for extrudateLoop in extrudateLoops:
self.addGcodeFromRemainingLoop(extrudateLoop, loopLayer, alreadyFilledArounds, self.halfEdgeWidth)
def getCraftedGcode(self, gcodeText, repository):
"Parse gcode text and store the bevel gcode."
self.repository = repository
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
for line in self.lines[self.lineIndex :]:
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(<decimalPlacesCarried>':
self.addInitializationToOutput()
elif firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('inset')
return
elif firstWord == '(<layerHeight>':
layerHeight = float(splitLine[1])
self.infillWidth = self.repository.infillWidthOverThickness.value * layerHeight
self.distanceFeedRate.addTagRoundedLine('infillWidth', self.infillWidth)
self.distanceFeedRate.addTagRoundedLine('volumeFraction', self.repository.volumeFraction.value)
elif firstWord == '(<edgeWidth>':
self.edgeWidth = float(splitLine[1])
self.halfEdgeWidth = 0.5 * self.edgeWidth
self.overlapRemovalWidth = self.edgeWidth * self.repository.overlapRemovalWidthOverEdgeWidth.value
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the inset skein."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine(None, splitLine)
self.boundary.append(location.dropAxis())
elif firstWord == '(</crafting>)':
self.distanceFeedRate.addLine(line)
if self.repository.turnExtruderHeaterOffAtShutDown.value:
self.distanceFeedRate.addLine('M104 S0') # Turn extruder heater off.
return
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('inset')
self.loopLayer = euclidean.LoopLayer(float(splitLine[1]))
self.distanceFeedRate.addLine(line)
elif firstWord == '(</layer>)':
self.addInset(self.loopLayer)
self.loopLayer = None
elif firstWord == '(<nestedRing>)':
self.boundary = []
self.loopLayer.loops.append(self.boundary)
if self.loopLayer is None:
self.distanceFeedRate.addLine(line)
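# --- Illustrative input sketch (added; not from the original source). The tag
# names below are the ones parseInitialization/parseLine handle above; the
# numeric and coordinate values are hypothetical. A skein arrives as gcode
# annotated with bracketed comment tags, roughly like:
#
#   (<decimalPlacesCarried> 3 )
#   (<layerHeight> 0.4 )
#   (<edgeWidth> 0.72 )
#   (</extruderInitialization>)
#   (<layer> 0.4 )
#   (<nestedRing>)
#   (<boundaryPoint> X10.0 Y10.0 Z0.4 )
#   (</layer>)
#   (</crafting>)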
def main():
"Display the inset dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| agpl-3.0 | 3,616,575,776,640,460,000 | 44.774059 | 409 | 0.782038 | false | 3.41875 | false | false | false |
wking/swc-amy | workshops/migrations/0054_self_organized_host.py | 1 | 1799 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import django
from django.db import models, migrations
from django.db.models import Q
def add_self_organized_host(apps, schema_editor):
"""Make new host: self-organized."""
Host = apps.get_model('workshops', 'Host')
Host.objects.create(domain='self-organized', fullname='self-organized',
country='W3')
def update_administrator_to_self_organized(apps, schema_editor):
"""Find all events that were self-organized and set administrator for them
to be "self-organized"."""
Host = apps.get_model('workshops', 'Host')
self_org = Host.objects.get(fullname='self-organized')
Event = apps.get_model('workshops', 'Event')
Event.objects.filter(administrator__isnull=True) \
.filter(
Q(invoice_status='na-self-org') |
Q(notes__contains='self-organized') |
Q(notes__contains='self organized')
) \
.update(administrator=self_org)
class Migration(migrations.Migration):
dependencies = [
('workshops', '0053_merge'),
]
operations = [
# previously missing migration; harmless, since it only changes the field's validators
migrations.AlterField(
model_name='event',
name='url',
field=models.CharField(validators=[django.core.validators.RegexValidator(re.compile('https?://github\\.com/(?P<name>[^/]+)/(?P<repo>[^/]+)/?', 32), inverse_match=True)], unique=True, max_length=100, help_text='Setting this and startdate "publishes" the event.<br />Use link to the event\'s website.', blank=True, null=True),
),
migrations.RunPython(add_self_organized_host),
migrations.RunPython(update_administrator_to_self_organized),
]
| mit | 1,508,810,104,354,600,000 | 35.714286 | 336 | 0.645359 | false | 3.755741 | false | false | false |
jackuess/listmodel | listmodel/models.py | 1 | 6658 | import re
try:
import ujson as json
except ImportError:
import json
try:
import jsonpath_rw
except ImportError:
jsonpath_rw = None
try:
import lxml.etree
except ImportError:
lxml = None
try:
import yaml
except ImportError:
yaml = None
class QueryAttr(object):
def __init__(self, query, factory=None):
self.query = query
self.factory = factory
def __get__(self, obj, cls):
if obj:
return self.create(obj, obj.__document__.execute_query(self.query))
else:
return self
def __call__(self, func):
self.create = func
return self
def create(self, obj, value):
if self.factory:
return self.factory(value)
else:
return value
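# --- Usage sketch (added illustration; the class and JSONPath fields below are
# hypothetical). QueryAttr is a descriptor: reading it runs its query against
# the owning instance's __document__ proxy (see the Doc classes further down):
#
#     class Book(JSONDoc):
#         title = QueryAttr('$.title')
#         pages = QueryAttr('$.pages', factory=int)
#
#     book = Book.fromstring('{"title": "Dune", "pages": "412"}')
#     book.title   # 'Dune'
#     book.pages   # 412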
class CsvRow(object):
class DocumentProxy(object):
def __init__(self, row, header_map):
self.row = row
self.header_map = header_map
def execute_query(self, column):
if isinstance(column, int):
return self.row[column]
else:
assert self.header_map
return self.row[self.header_map[column]]
def __init__(self, docproxy):
self.__document__ = docproxy
@classmethod
def fromfile(cls, file, separator=",", read_header=False):
if read_header:
row = next(file)
cols = row.strip().split(separator)
header_map = {col: pos for pos, col in enumerate(cols)}
else:
header_map = None
for row in file:
yield cls(cls.DocumentProxy(row.rstrip().split(separator),
header_map))
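# --- Usage sketch (added illustration; the CSV data is made up). For CsvRow
# the QueryAttr query is a column name (with read_header=True) or an index:
#
#     import io
#     class Person(CsvRow):
#         name = QueryAttr('name')
#         age = QueryAttr('age', factory=int)
#
#     people = Person.fromfile(io.StringIO(u'name,age\nada,36\n'),
#                              read_header=True)
#     next(people).age   # 36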
class XMLDoc(object):
class DocumentProxy(object):
@classmethod
def create_parser(cls):
return lxml.etree.XMLParser()
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, file):
cls.assert_lxml()
return cls(lxml.etree.parse(file, cls.create_parser()))
@classmethod
def fromstring(cls, str):
cls.assert_lxml()
return cls(lxml.etree.fromstring(str, cls.create_parser()))
@classmethod
def assert_lxml(cls):
assert lxml, "'lxml' module required"
def execute_query(self, xpath):
# if xpath.startswith("//"):
# xpath = ".{}".format(xpath)
nodes = self.doc.xpath(xpath)
if nodes:
if len(nodes) == 1:
return nodes[0]
else:
return nodes
def set_iterables(self, query):
self.iterables = iter(self.doc.xpath(query))
def get_next_iterable(self):
return next(self.iterables)
def __init__(self, docproxy):
self.__document__ = docproxy
@classmethod
def fromfile(cls, file):
return cls(docproxy=cls.DocumentProxy.fromfile(file))
@classmethod
def fromstring(cls, str):
return cls(docproxy=cls.DocumentProxy.fromstring(str))
def __iter__(self):
self.__document__.set_iterables(self.Iterable.__query__)
return self
def __next__(self):
iterable = self.__document__.get_next_iterable()
return self.Iterable(self.DocumentProxy(iterable))
next = __next__ # Python 2 compatibility
def __repr__(self):
cls = self.__class__
query_attributes = ["{}={!r}".format(attr, getattr(self, attr))
for attr in dir(cls)
if isinstance(getattr(cls, attr), QueryAttr)]
return "<{class_name} ({query_attributes})>".format(
class_name=cls.__name__,
query_attributes=", ".join(query_attributes)
)
class HTMLDoc(XMLDoc):
class DocumentProxy(XMLDoc.DocumentProxy):
@classmethod
def create_parser(cls):
return lxml.etree.HTMLParser()
class JSONDoc(XMLDoc):
class DocumentProxy(object):
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, file):
return cls(json.load(file))
@classmethod
def fromstring(cls, str):
return cls(json.loads(str))
def execute_query(self, json_path):
assert jsonpath_rw, "'jsonpath_rw' module required"
path_expr = jsonpath_rw.parse(json_path)
values = [match.value for match in path_expr.find(self.doc)]
if values:
if len(values) > 1:
return values
else:
return values[0]
def set_iterables(self, query):
self.iterables = iter(self.execute_query(query))
def get_next_iterable(self):
return next(self.iterables)
class YAMLDoc(JSONDoc):
class DocumentProxy(JSONDoc.DocumentProxy):
@classmethod
def fromfile(cls, file):
assert yaml, "'yaml' module required"
return cls(yaml.load(file))
@classmethod
def fromstring(cls, string):
return cls.fromfile(string)
class TextDoc(XMLDoc):
class DocumentProxy(object):
def __init__(self, doc):
self.doc = doc
@classmethod
def fromfile(cls, doc):
return cls(doc.read())
@classmethod
def fromstring(cls, doc):
return cls(doc)
def execute_query(self, regexp):
def groupdict_or_groups(match):
groupdict = match.groupdict()
if groupdict:
return match.groupdict()
return match.groups()
matches = list(re.finditer(regexp, self.doc, re.DOTALL))
if matches:
if len(matches) == 1:
return first_or_all(groupdict_or_groups(matches[0]))
else:
# return a concrete list (not a lazy map object on Python 3) so the
# multiple-match branch behaves like the other Doc classes
return [first_or_all(groupdict_or_groups(match))
        for match in matches]
def set_iterables(self, regexp):
self.iterables = re.finditer(regexp, self.doc, re.DOTALL)
def get_next_iterable(self):
next_match = next(self.iterables)
try:
return next_match.group(1)
except IndexError:
return next_match.group(0)
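# --- Usage sketch (added illustration; the regex and text are made up). For
# TextDoc the query is a regular expression; a single match with one group
# collapses to that group's string:
#
#     class Changelog(TextDoc):
#         version = QueryAttr(r'Version (\S+)')
#
#     Changelog.fromstring('Version 1.2 released').version   # '1.2'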
def first_or_all(subject):
if len(subject) == 1:
return subject[0]
return subject
def set_name(name):
def decorator(decorated):
decorated.__name__ = name
return decorated
return decorator
| lgpl-3.0 | -9,084,589,181,227,222,000 | 26.399177 | 79 | 0.542055 | false | 4.289948 | false | false | false |
johnwilmes/py-data-structures | py_data_structures/trie.py | 1 | 8045 | """A simple trie, or prefix tree, data structure."""
import itertools
import collections.abc
class Trie(collections.abc.MutableSet):
"""A simple prefix tree data structure.
A Trie is a data structure for storing sequences of "names," which can be
arbitrary hashable objects. In the prototypical trie, names are characters
from an alphabet, and the trie is used to store words (see the subclass
StringTrie). The Trie is implemented internally as a tree, each node of
which is a Trie.Node object.
Args:
contents (optional): a collection of sequences of names to initially
populate the Trie
"""
class Node(object):
"""A node of a Trie object.
An instance represents a single node of a trie, corresponding a
specific prefix sequence of names, which may or may not be a complete
sequence. All attributes must be maintained by the user (Trie).
Attributes:
children (dict): mapping from names to child Nodes
terminal (bool): True if a complete sequence ends here,
False otherwise
size (int): the number of complete sequences for which this is a
prefix
"""
def __init__(self):
self.children = dict()
self.terminal = False
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
"""Iterate over complete suffixes from `self`."""
if self.terminal:
yield iter(())
for name, child in self.children.items():
for suffix in child:
yield itertools.chain((name,), suffix)
def __contains__(self, seq):
"""Check if `seq` is a complete suffix from `self`
Returns:
True if `seq` is a valid suffix of `self`, False otherwise.
"""
node = self
for name in seq:
if name not in node.children:
return False
node = node.children[name]
return node.terminal
class View(collections.abc.Set):
"""A view of a sub-trie of a Trie object.
This class allows accessing (but not modifying) the sequences in the
Trie completing a given prefix.
Args:
trie_root: the root node of the original Trie object of which this
is a sub-trie
prefix: the sequence of names prefixing everything in this
sub-trie, corresponding to the path from the root of the
original Trie to this sub-trie
"""
def __init__(self, trie_root, prefix):
self.prefix = prefix
self._trie_root = trie_root
# The root node of this sub-trie, corresponding to prefix. It will
# be found when needed
self._prefix_root = None
def _validate_root(self):
"""Ensure that `self._prefix_root` is valid for `self._trie_root`
and `self.prefix`.
If the entire sub-Trie at `self._prefix_root` is removed, then
`self._prefix_root` will no longer be a descendant of
`self._trie_root`. If a sequence with prefix `self.prefix` is
added back into the Trie, it will use a new Trie.Node in place of
self._prefix_root. We need to find that node and use it in place of
self._prefix_root.
"""
root = self._prefix_root
# check if root is still okay
if root is not None and (root.children or root.terminal):
return # everything is still okay
# self._root is invalid; check for a replacement node
self._prefix_root = None
node = self._trie_root
for name in self.prefix:
if name not in node.children:
return
node = node.children[name]
self._prefix_root = node
def __iter__(self):
self._validate_root()
if self._prefix_root is None:
return
for suffix in self._prefix_root:
yield itertools.chain(self.prefix, suffix)
def __len__(self):
self._validate_root()
if self._prefix_root is not None:
return self._prefix_root.size
return 0
def __contains__(self, seq):
self._validate_root()
if self._prefix_root is None:
return False
seq = iter(seq)
for name in self.prefix:
if name != next(seq):
return False
return seq in self._prefix_root
def __init__(self, contents=None):
self._root = self.Node() # root node corresponding to empty prefix
if contents is not None:
for seq in contents:
self.add(seq)
def __len__(self):
return self._root.size
def __iter__(self):
"""Iterate over complete suffixes from `self`."""
return iter(self._root)
def __contains__(self, seq):
"""Check if `seq` is a complete sequence in the Trie.
Returns:
True if `seq` is a valid suffix of `self`, False otherwise.
"""
return seq in self._root
def add(self, seq):
"""Insert a sequence into the Trie.
After insertion, `seq` will be a valid suffix of `self`.
Args:
seq: an iterable of names to be inserted"""
parent_stack = list()
node = self._root
for name in seq:
parent_stack.append(node)
if name not in node.children:
node.children[name] = self.Node()
node = node.children[name]
if node.terminal:
return
node.terminal = True
node.size += 1
while parent_stack:
parent_stack.pop().size += 1
def discard(self, seq):
"""Remove `seq` from the Trie.
Prunes the trie to remove all prefixes for which `seq` is the only
valid completion
Args:
seq: an iterable of names to be removed
"""
parent_stack = list()
node = self._root
# Traverse to node representing `seq`
for name in seq:
parent_stack.append((node, name))
if name not in node.children:
return
node = node.children[name]
if not node.terminal:
return
node.terminal = False
# Keep the size counts consistent with add(): the removed sequence no
# longer counts toward this node or any of its ancestors.
node.size -= 1
for ancestor, _ in parent_stack:
    ancestor.size -= 1
descendents = node.children
while parent_stack and not descendents:
node, child_name = parent_stack.pop()
del node.children[child_name]
descendents = node.children
def __getitem__(self, prefix):
"""Get a view of the Trie corresponding to `prefix`.
`prefix` does not necessarily need to currently be in Trie. This view
will be dynamically updated as sequences are added or removed from
`self`.
Args:
prefix: a container (not a single-use iterator) with the sequence
of names identifying the sub-Trie to be viewed.
"""
if prefix is iter(prefix):
raise ValueError('prefix must be a container, not an iterator')
return self.View(self._root, prefix)
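# --- Usage sketch (added illustration). Iteration yields iterators of names,
# so they are joined back into strings here for readability:
#
#     t = Trie(['car', 'cat'])
#     t.add('dog')
#     'cat' in t                          # True
#     sorted(''.join(w) for w in t)       # ['car', 'cat', 'dog']
#     view = t['ca']                      # dynamic sub-trie view
#     sorted(''.join(w) for w in view)    # ['car', 'cat']
#     t.discard('car')
#     len(view)                           # 1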
class StringTrie(Trie):
"""A Trie class specialized for storing strings, rather than arbitrary
sequences of objects."""
class View(Trie.View):
"""A view of a sub-trie of a StringTrie object.
This class specializes the Trie.View class to yield strings as
appropriate, rather than generic iterators.
"""
def __iter__(self):
for word in super().__iter__():
yield ''.join(word)
def __iter__(self):
"""Override the default iterator to yield strings instead of
iterators"""
for word in super().__iter__():
yield ''.join(word)
| mit | 6,371,751,390,622,885,000 | 33.978261 | 79 | 0.558484 | false | 4.612959 | false | false | false |
alex/changes | changes/jobs/create_job.py | 1 | 1548 | from flask import current_app
from changes.backends.base import UnrecoverableException
from changes.config import db
from changes.constants import Status, Result
from changes.jobs.sync_job import sync_job
from changes.models import Job, JobPlan
from changes.queue.task import tracked_task
def abort_create(task):
job = Job.query.get(task.kwargs['job_id'])
job.status = Status.finished
job.result = Result.aborted
db.session.add(job)
db.session.commit()
current_app.logger.exception('Unrecoverable exception creating job %s', job.id)
@tracked_task(on_abort=abort_create, max_retries=10)
def create_job(job_id):
job = Job.query.get(job_id)
if not job:
return
# we might already be marked as finished for various reasons
# (such as aborting the task)
if job.status == Status.finished:
return
jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
if implementation is None:
# TODO(dcramer): record a FailureReason?
job.status = Status.finished
job.result = Result.failed
current_app.logger.exception('No build plan set %s', job_id)
return
try:
implementation.execute(job=job)
except UnrecoverableException:
job.status = Status.finished
job.result = Result.aborted
current_app.logger.exception('Unrecoverable exception creating %s', job_id)
return
sync_job.delay(
job_id=job.id.hex,
task_id=job.id.hex,
parent_task_id=job.build_id.hex,
)
| apache-2.0 | -5,937,665,670,571,321,000 | 29.352941 | 83 | 0.684755 | false | 3.642353 | false | false | false |
willprice/python-omxplayer-wrapper | omxplayer/player.py | 1 | 27179 | import subprocess
import time
import os
import signal
import logging
import threading
import atexit
import sys
try: # python 3
from pathlib import Path
except ImportError: # python2
from pathlib2 import Path
from decorator import decorator
from dbus import DBusException, Int64, String, ObjectPath
import dbus.types
from omxplayer.bus_finder import BusFinder
from omxplayer.dbus_connection import DBusConnection, \
DBusConnectionError
from evento import Event
# CONSTANTS
RETRY_DELAY = 0.05
# FILE GLOBAL OBJECTS
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def _check_player_is_active(fn):
# decorator() (from the decorator package) preserves the wrapped
# function's signature, which improves debugging of wrapped methods
def wrapped(fn, self, *args, **kwargs):
logger.debug('Checking if process is still alive')
# poll determines whether the process has terminated,
# if it hasn't it returns None.
if self._process.poll() is None:
logger.debug('OMXPlayer is running, so execute %s' %
fn.__name__)
return fn(self, *args, **kwargs)
else:
raise OMXPlayerDeadError('Process is no longer alive, can\'t run command')
return decorator(wrapped, fn)
def _from_dbus_type(fn):
def from_dbus_type(dbusVal):
def from_dbus_dict(dbusDict):
d = dict()
for dbusKey, dbusVal in dbusDict.items():
d[from_dbus_type(dbusKey)] = from_dbus_type(dbusVal)
return d
typeUnwrapper = {
dbus.types.Dictionary: from_dbus_dict,
dbus.types.Array: lambda x: list(map(from_dbus_type, x)),
dbus.types.Double: float,
dbus.types.Boolean: bool,
dbus.types.Byte: int,
dbus.types.Int16: int,
dbus.types.Int32: int,
dbus.types.Int64: int,
dbus.types.UInt32: int,
dbus.types.UInt64: int,
dbus.types.ByteArray: str,
dbus.types.ObjectPath: str,
dbus.types.Signature: str,
dbus.types.String: str
}
try:
return typeUnwrapper[type(dbusVal)](dbusVal)
except KeyError:
return dbusVal
def wrapped(fn, self, *args, **kwargs):
return from_dbus_type(fn(self, *args, **kwargs))
return decorator(wrapped, fn)
# CLASSES
class FileNotFoundError(Exception):
pass
class OMXPlayerDeadError(Exception):
pass
class OMXPlayer(object):
"""
OMXPlayer controller
This works by speaking to OMXPlayer over DBus, sending it messages.
Args:
source (str): Path to the file (as ~/Videos/my-video.mp4) or URL you wish to play
args (list/str): used to pass option parameters to omxplayer. see: https://github.com/popcornmix/omxplayer#synopsis
Multiple argument example:
>>> OMXPlayer('path.mp4', args=['--no-osd', '--no-keys', '-b'])
>>> OMXPlayer('path.mp4', args='--no-osd --no-keys -b')
>>> OMXPlayer('path.mp4', dbus_name='org.mpris.MediaPlayer2.omxplayer2')
"""
def __init__(self, source,
args=None,
bus_address_finder=None,
Connection=None,
dbus_name=None,
pause=False):
logger.debug('Instantiating OMXPlayer')
if args is None:
self.args = []
elif isinstance(args, str):
import shlex
self.args = shlex.split(args)
else:
self.args = list(map(str, args))
self._is_playing = True
self._source = Path(source)
self._dbus_name = dbus_name
self._Connection = Connection if Connection else DBusConnection
self._bus_address_finder = bus_address_finder if bus_address_finder else BusFinder()
#: Event called on pause ``callback(player)``
self.pauseEvent = Event()
#: Event called on play ``callback(player)``
self.playEvent = Event()
#: Event called on stop ``callback(player)``
self.stopEvent = Event()
#: Event called on exit ``callback(player, exit_status)``
self.exitEvent = Event()
#: Event called on seek ``callback(player, relative_position)``
self.seekEvent = Event()
#: Event called on setting position ``callback(player, absolute_position)``
self.positionEvent = Event()
self._process = None
self._connection = None
self.load(source, pause=pause)
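# --- Illustrative event wiring (added example; the video path and handler
# below are hypothetical). The Event objects above come from the `evento`
# package, which supports subscription with `+=`:
#
#     player = OMXPlayer('/home/pi/video.mp4')
#     def on_pause(p):
#         print('paused at %.1fs' % p.position())
#     player.pauseEvent += on_pause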
def _load_source(self, source):
if self._process:
self.quit()
self._process = self._setup_omxplayer_process(source)
self._rate = 1.0
self._is_muted = False
self._connection = self._setup_dbus_connection(self._Connection, self._bus_address_finder)
def _run_omxplayer(self, source, devnull):
def on_exit(self, exit_status):
logger.info("OMXPlayer process is dead, all DBus calls from here "
"will fail")
self.exitEvent(self, exit_status)
def monitor(self, process, on_exit):
process.wait()
on_exit(self, process.returncode)
try:
source = str(source.resolve())
except AttributeError:
pass
command = ['omxplayer'] + self.args + [source]
if self._dbus_name:
command += ['--dbus_name', self._dbus_name]
logger.debug("Opening omxplayer with the command: %s" % command)
# By running os.setsid in the forked process we create a process group
# which is used to kill the subprocesses the `omxplayer` script
# (it is a bash script itself that calls omxplayer.bin) creates. Without
# doing this we end up in a scenario where we kill the shell script, but not
# the forked children of the shell script.
# See https://pymotw.com/2/subprocess/#process-groups-sessions for examples on this
process = subprocess.Popen(command,
stdin=devnull,
stdout=devnull,
preexec_fn=os.setsid)
try:
self._process_monitor = threading.Thread(target=monitor,
args=(self, process, on_exit))
self._process_monitor.start()
return process
except:
# Make sure to not leave any dangling process on failure
self._terminate_process(process)
raise
def _setup_omxplayer_process(self, source):
logger.debug('Setting up OMXPlayer process')
with open(os.devnull, 'w') as devnull:
process = self._run_omxplayer(source, devnull)
logger.debug('Process opened with PID %s' % process.pid)
atexit.register(self.quit)
return process
def _terminate_process(self, process):
try:
process_group_id = os.getpgid(process.pid)
os.killpg(process_group_id, signal.SIGTERM)
logger.debug('SIGTERM Sent to pid: %s' % process_group_id)
except OSError:
logger.error('Could not find the process to kill')
def _setup_dbus_connection(self, Connection, bus_address_finder):
logger.debug('Trying to connect to OMXPlayer via DBus')
tries = 0
while tries < 50:
logger.debug('DBus connect attempt: {}'.format(tries))
try:
connection = Connection(bus_address_finder.get_address(), self._dbus_name)
logger.debug(
'Connected to OMXPlayer at DBus address: %s' % connection)
return connection
except (DBusConnectionError, IOError):
logger.debug('Failed to connect to OMXPlayer DBus address')
tries += 1
time.sleep(RETRY_DELAY)
raise SystemError('DBus cannot connect to the OMXPlayer process')
""" Utilities """
def load(self, source, pause=False):
"""
Loads a new source (as a file) from ``source`` (a file path or URL)
by killing the current ``omxplayer`` process and forking a new one.
Args:
source (string): Path to the file to play or URL
"""
self._source = source
try:
self._load_source(source)
if pause:
time.sleep(0.5) # Wait for the DBus interface to be initialised
self.pause()
except:
# Make sure we do not leave any dangling process
if self._process:
self._terminate_process(self._process)
self._process = None
raise
""" ROOT INTERFACE PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def can_quit(self):
"""
Returns:
bool: whether the player can quit or not """
return self._root_interface_property('CanQuit')
@_check_player_is_active
@_from_dbus_type
def fullscreen(self):
"""
Returns:
bool: whether the player is fullscreen or not """
return self._root_interface_property('Fullscreen')
@_check_player_is_active
@_from_dbus_type
def can_set_fullscreen(self):
"""
Returns:
bool: whether the player can go fullscreen """
return self._root_interface_property('CanSetFullscreen')
@_check_player_is_active
@_from_dbus_type
def can_raise(self):
"""
Returns:
bool: whether the player can raise the display window atop of all other windows"""
return self._root_interface_property('CanRaise')
@_check_player_is_active
@_from_dbus_type
def has_track_list(self):
"""
Returns:
bool: whether the player has a track list or not"""
return self._root_interface_property('HasTrackList')
@_check_player_is_active
@_from_dbus_type
def identity(self):
"""
Returns:
str: Returns `omxplayer`, the name of the player
"""
return self._root_interface_property('Identity')
@_check_player_is_active
@_from_dbus_type
def supported_uri_schemes(self):
"""
Returns:
str: list of supported URI schemes
Examples:
>>> player.supported_uri_schemes()
["file", "http", "rtsp", "rtmp"]
"""
return self._root_interface_property('SupportedUriSchemes')
""" ROOT INTERFACE METHODS """
""" PLAYER INTERFACE PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def can_go_next(self):
"""
Returns:
bool: whether the player can move to the next item in the playlist
"""
return self._player_interface_property('CanGoNext')
@_check_player_is_active
@_from_dbus_type
def can_go_previous(self):
"""
Returns:
bool: whether the player can move to the previous item in the
playlist
"""
return self._player_interface_property('CanGoPrevious')
@_check_player_is_active
@_from_dbus_type
def can_seek(self):
"""
Returns:
bool: whether the player can seek """
return self._player_interface_property('CanSeek')
@_check_player_is_active
@_from_dbus_type
def can_control(self):
"""
Returns:
bool: whether the player can be controlled"""
return self._player_interface_property('CanControl')
@_check_player_is_active
@_from_dbus_type
def can_play(self):
"""
Returns:
bool: whether the player can play"""
return self._player_interface_property('CanPlay')
@_check_player_is_active
@_from_dbus_type
def can_pause(self):
"""
Returns:
bool: whether the player can pause"""
return self._player_interface_property('CanPause')
@_check_player_is_active
@_from_dbus_type
def playback_status(self):
"""
Returns:
str: one of ("Playing" | "Paused" | "Stopped")
"""
return self._player_interface_property('PlaybackStatus')
@_check_player_is_active
@_from_dbus_type
def volume(self):
"""
Returns:
float: current player volume
"""
if self._is_muted:
return 0
return self._player_interface_property('Volume')
@_check_player_is_active
@_from_dbus_type
def set_volume(self, volume):
"""
Args:
float: volume in the interval [0, 10]
"""
# 0 isn't handled correctly so we have to set it to a very small value to achieve the same purpose
if volume == 0:
volume = 1e-10
return self._player_interface_property('Volume', dbus.Double(volume))
@_check_player_is_active
@_from_dbus_type
def _position_us(self):
"""
Returns:
int: position in microseconds
"""
return self._player_interface_property('Position')
def position(self):
"""
Returns:
int: position in seconds
"""
return self._position_us() / (1000.0 * 1000.0)
@_check_player_is_active
@_from_dbus_type
def minimum_rate(self):
"""
Returns:
float: minimum playback rate (as proportion of normal rate)
"""
return self._player_interface_property('MinimumRate')
@_check_player_is_active
@_from_dbus_type
def maximum_rate(self):
"""
Returns:
float: maximum playback rate (as proportion of normal rate)
"""
return self._player_interface_property('MaximumRate')
@_check_player_is_active
@_from_dbus_type
def rate(self):
"""
Returns:
float: playback rate, 1 is the normal rate, 2 would be double speed.
"""
return self._rate
@_check_player_is_active
@_from_dbus_type
def set_rate(self, rate):
"""
Set the playback rate of the video as a multiple of the default playback speed
Examples:
>>> player.set_rate(2)
# Will play twice as fast as normal speed
>>> player.set_rate(0.5)
# Will play half speed
"""
self._rate = self._player_interface_property('Rate', dbus.Double(rate))
return self._rate
@_check_player_is_active
@_from_dbus_type
def metadata(self):
"""
Returns:
dict: containing track information ('URI', 'length')
Examples:
>>> player.metadata()
{
'mpris:length': 19691000,
'xesam:url': 'file:///home/name/path/to/media/file.mp4'
}
"""
return self._player_interface_property('Metadata')
""" PLAYER INTERFACE NON-STANDARD PROPERTIES """
@_check_player_is_active
@_from_dbus_type
def aspect_ratio(self):
"""
Returns:
float: aspect ratio
"""
return self._player_interface_property('Aspect')
@_check_player_is_active
@_from_dbus_type
def video_stream_count(self):
"""
Returns:
int: number of video streams
"""
return self._player_interface_property('VideoStreamCount')
@_check_player_is_active
@_from_dbus_type
def width(self):
"""
Returns:
int: video width in px
"""
return self._player_interface_property('ResWidth')
@_check_player_is_active
@_from_dbus_type
def height(self):
"""
Returns:
int: video height in px
"""
return self._player_interface_property('ResHeight')
@_check_player_is_active
@_from_dbus_type
def _duration_us(self):
"""
Returns:
int: total length in microseconds
"""
return self._player_interface_property('Duration')
@_check_player_is_active
def duration(self):
"""
Returns:
float: duration in seconds
"""
return self._duration_us() / (1000.0 * 1000.0)
""" PLAYER INTERFACE METHODS """
@_check_player_is_active
def pause(self):
"""
Pause playback
"""
self._player_interface.Pause()
self._is_playing = False
self.pauseEvent(self)
@_check_player_is_active
def play_pause(self):
"""
Pause playback if currently playing, otherwise start playing if currently paused.
"""
self._player_interface.PlayPause()
self._is_playing = not self._is_playing
if self._is_playing:
self.playEvent(self)
else:
self.pauseEvent(self)
@_check_player_is_active
@_from_dbus_type
def stop(self):
"""
Stop the player, causing it to quit
"""
self._player_interface.Stop()
self.stopEvent(self)
@_check_player_is_active
@_from_dbus_type
def seek(self, relative_position):
"""
Seek the video by `relative_position` seconds
Args:
relative_position (float): The position in seconds to seek to.
"""
self._player_interface.Seek(Int64(1000.0 * 1000 * relative_position))
self.seekEvent(self, relative_position)
@_check_player_is_active
@_from_dbus_type
def set_position(self, position):
"""
Set the video playback position to `position` seconds from the start of the video
Args:
position (float): The position in seconds.
"""
self._player_interface.SetPosition(ObjectPath("/not/used"), Int64(position * 1000.0 * 1000))
self.positionEvent(self, position)
@_check_player_is_active
@_from_dbus_type
def set_layer(self, layer):
"""
Set the layer of the Video (default 0). Higher layers are above lower layers
Args:
layer (int): The Layer to switch to.
"""
self._player_interface.SetLayer(Int64(layer))
@_check_player_is_active
@_from_dbus_type
def set_alpha(self, alpha):
"""
Set the transparency of the video overlay
Args:
alpha (float): The transparency (0..255)
"""
self._player_interface.SetAlpha(ObjectPath('/not/used'), Int64(alpha))
@_check_player_is_active
def mute(self):
"""
Mute audio. If already muted, then this does not do anything
"""
self._is_muted = True
self._player_interface.Mute()
@_check_player_is_active
def unmute(self):
"""
Unmutes the video. If already unmuted, then this does not do anything
"""
self._is_muted = False
self._player_interface.Unmute()
@_check_player_is_active
@_from_dbus_type
def set_aspect_mode(self, mode):
"""
Set the aspect mode of the video
Args:
mode (str): One of ("letterbox" | "fill" | "stretch")
"""
self._player_interface.SetAspectMode(ObjectPath('/not/used'), String(mode))
@_check_player_is_active
@_from_dbus_type
def set_video_pos(self, x1, y1, x2, y2):
"""
Set the video position on the screen
Args:
x1 (int): Top left x coordinate (px)
y1 (int): Top left y coordinate (px)
x2 (int): Bottom right x coordinate (px)
y2 (int): Bottom right y coordinate (px)
"""
position = "%s %s %s %s" % (str(x1),str(y1),str(x2),str(y2))
self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))
@_check_player_is_active
def video_pos(self):
"""
Returns:
(int, int, int, int): Video spatial position (x1, y1, x2, y2) where (x1, y1) is top left,
and (x2, y2) is bottom right. All values in px.
"""
position_string = self._player_interface.VideoPos(ObjectPath('/not/used'))
return list(map(int, position_string.split(" ")))
@_check_player_is_active
@_from_dbus_type
def set_video_crop(self, x1, y1, x2, y2):
"""
Args:
x1 (int): Top left x coordinate (px)
y1 (int): Top left y coordinate (px)
x2 (int): Bottom right x coordinate (px)
y2 (int): Bottom right y coordinate (px)
"""
crop = "%s %s %s %s" % (str(x1),str(y1),str(x2),str(y2))
self._player_interface.SetVideoCropPos(ObjectPath('/not/used'), String(crop))
@_check_player_is_active
def hide_video(self):
"""
Hides the video overlays
"""
self._player_interface.HideVideo()
@_check_player_is_active
def show_video(self):
"""
Shows the video (to undo a `hide_video`)
"""
self._player_interface.UnHideVideo()
@_check_player_is_active
@_from_dbus_type
def list_audio(self):
"""
Returns:
[str]: A list of all known audio streams, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListAudio()
@_check_player_is_active
@_from_dbus_type
def list_video(self):
"""
Returns:
[str]: A list of all known video streams, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListVideo()
@_check_player_is_active
@_from_dbus_type
def list_subtitles(self):
"""
Returns:
[str]: A list of all known subtitles, each item is in the
format: ``<index>:<language>:<name>:<codec>:<active>``
"""
return self._player_interface.ListSubtitles()
@_check_player_is_active
def select_subtitle(self, index):
"""
Enable a subtitle specified by the index it is listed in :class:`list_subtitles`
Args:
index (int): index of subtitle listing returned by :class:`list_subtitles`
"""
return self._player_interface.SelectSubtitle(dbus.Int32(index))
@_check_player_is_active
def select_audio(self, index):
"""
Select audio stream specified by the index of the stream in :class:`list_audio`
Args:
index (int): index of audio stream returned by :class:`list_audio`
"""
return self._player_interface.SelectAudio(dbus.Int32(index))
@_check_player_is_active
def show_subtitles(self):
"""
Shows subtitles after :class:`hide_subtitles`
"""
return self._player_interface.ShowSubtitles()
@_check_player_is_active
def hide_subtitles(self):
"""
Hide subtitles
"""
return self._player_interface.HideSubtitles()
@_check_player_is_active
@_from_dbus_type
def action(self, code):
"""
Executes a keyboard command via a code
Args:
code (int): The key code you wish to emulate
refer to ``keys.py`` for the possible keys
"""
self._player_interface.Action(code)
@_check_player_is_active
@_from_dbus_type
def is_playing(self):
"""
Returns:
bool: Whether the player is playing
"""
self._is_playing = (self.playback_status() == "Playing")
logger.info("Playing?: %s" % self._is_playing)
return self._is_playing
@_check_player_is_active
@_from_dbus_type
def play_sync(self):
"""
Play the video and block whilst the video is playing
"""
self.play()
logger.info("Playing synchronously")
try:
time.sleep(0.05)
logger.debug("Wait for playing to start")
while self.is_playing():
time.sleep(0.05)
except DBusException:
logger.error(
"Cannot play synchronously any longer as DBus calls timed out."
)
@_check_player_is_active
@_from_dbus_type
def play(self):
"""
Play the video asynchronously returning control immediately to the calling code
"""
if not self.is_playing():
self.play_pause()
self._is_playing = True
self.playEvent(self)
@_check_player_is_active
@_from_dbus_type
def next(self):
"""
Skip to the next chapter
Returns:
bool: Whether the player skipped to the next chapter
"""
return self._player_interface.Next()
@_check_player_is_active
@_from_dbus_type
def previous(self):
"""
Skip to the previous chapter
Returns:
bool: Whether the player skipped to the previous chapter
"""
return self._player_interface.Previous()
@property
def _root_interface(self):
return self._connection.root_interface
@property
def _player_interface(self):
return self._connection.player_interface
@property
def _properties_interface(self):
return self._connection.properties_interface
def _interface_property(self, interface, prop, val):
if val is not None:  # 'if val:' would wrongly Get instead of Set for falsy values such as 0.0
return self._properties_interface.Set(interface, prop, val)
else:
return self._properties_interface.Get(interface, prop)
def _root_interface_property(self, prop, val=None):
return self._interface_property(self._root_interface.dbus_interface, prop, val)
def _player_interface_property(self, prop, val=None):
return self._interface_property(self._player_interface.dbus_interface, prop, val)
def quit(self):
"""
Quit the player, blocking until the process has died
"""
if self._process is None:
logger.debug('Quit was called after self._process had already been released')
return
logger.debug('Quitting OMXPlayer')
self._terminate_process(self._process)
self._process_monitor.join()
self._process = None
@_check_player_is_active
@_from_dbus_type
def get_source(self):
"""
Get the source URI of the currently playing media
Returns:
str: source currently playing
"""
return self._source
# For backward compatibility
@_check_player_is_active
@_from_dbus_type
def get_filename(self):
"""
Returns:
str: source currently playing
.. deprecated:: 0.2.0
Use: :func:`get_source` instead.
"""
return self.get_source()
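# --- Minimal end-to-end sketch (added example; the path and values are
# hypothetical):
#
#     player = OMXPlayer('/home/pi/video.mp4', args=['--no-osd'])
#     player.set_volume(0.5)
#     player.play()
#     player.seek(30)     # jump 30 seconds forward
#     player.quit()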
# MediaPlayer2.Player types:
# Track_Id: DBus ID of track
# Plaback_Rate: Multiplier for playback speed (1 = normal speed)
# Volume: 0--1, 0 is muted and 1 is full volume
# Time_In_Us: Time in microseconds
# Playback_Status: Playing|Paused|Stopped
# Loop_Status: None|Track|Playlist
| lgpl-3.0 | 3,440,031,027,014,566,400 | 28.965821 | 124 | 0.570845 | false | 4.119903 | false | false | false |
kyubifire/softlayer-python | SoftLayer/CLI/image/export.py | 1 | 1270 | """Export an image."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@click.argument('uri')
@click.option('--ibm-api-key',
default=None,
help="The IBM Cloud API Key with access to IBM Cloud Object "
"Storage instance. For help creating this key see "
"https://console.bluemix.net/docs/services/cloud-object-"
"storage/iam/users-serviceids.html#serviceidapikeys")
@environment.pass_env
def cli(env, identifier, uri, ibm_api_key):
"""Export an image to object storage.
The URI for an object storage object (.vhd/.iso file) of the format:
swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
Object Storage
"""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
result = image_mgr.export_image_to_uri(image_id, uri, ibm_api_key)
if not result:
raise exceptions.CLIAbort("Failed to export Image")
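# Example invocation (added for illustration; the identifier, bucket and key
# are placeholders):
#
#   slcli image export 12345678 cos://us-south/my-bucket/exports/image.vhd \
#       --ibm-api-key=<api_key>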
| mit | -1,651,164,738,944,447,200 | 34.277778 | 77 | 0.680315 | false | 3.836858 | false | false | false |
skeletalbassman/pytix | wrappers/trello.py | 1 | 10975 | '''wrapper class for Trello REST API'''
import requests
import yaml
import datetime
BASE = "https://api.trello.com/1/"
class Trello():
def __init__(self, project=None, username=None, password=None):
self._key = None
self._token = None
self._authorize()
if project:
self._board = self.setProject(project)
else:
try:
with open("projects.yaml", "r") as f:
data = f.read()
boards = yaml.load(data)
self._board = boards["trello"]
except IOError:
print "If you have not previously set a Trello board as your current project, you must\nspecify a board name."
board_name = raw_input("Board name: ")
self._board = self.setProject(board_name)
def _authorize(self):
try:
with open("credentials.yaml", "r") as f:
data = f.read()
creds = yaml.load(data)
except IOError:
creds = {}
if not "trello" in creds:
print "Your API key was not found on file."
print "Navigate to the following link to obtain your API key\nand paste it into the terminal below. Make sure you are logged into Trello before following the link."
print "Link: https://trello.com/app-key"
key = raw_input("API key: ")
print "\nNow please follow the link below and click 'Allow'."
print "Copy and paste the resulting token back into the terminal. Pytix will\ncache this key and token for future use. This is a one-time procedure."
print "https://trello.com/1/authorize?expiration=never&scope=read%2Cwrite&name=pytix&key={}&response_type=token".format(key)
token = raw_input("API token: ")
self._key = key
self._token = token
new_creds = {}
new_creds["key"] = key
new_creds["token"] = token
creds["trello"] = new_creds
with open("credentials.yaml", "w") as f:
f.write(yaml.dump(creds))
def _getCreds(self):
with open("credentials.yaml", "r") as f:
data = f.read()
creds = yaml.load(data)
key = creds["trello"]["key"]
token = creds["trello"]["token"]
return key, token
def setProject(self, proj_name):
key, token = self._getCreds()
url = BASE + "members/me?&boards=all&key={0}&token={1}".format(key, token)
response = requests.get(url)
boards = response.json()["boards"]
for board in boards:
if board["name"] == proj_name:
self._board = board["id"]
try:
with open("projects.yaml", "r") as f:
data = f.read()
projs = yaml.load(data)
except IOError:
projs = {}
projs["trello"] = board["id"]
with open("projects.yaml", "w") as f:
f.write(yaml.dump(projs))
return board["id"]
def getProject(self):
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?lists=open&cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
#TODO deal with the response here
#what do we want to show the user about the board?
json = response.json()
lists = json["lists"]
cards = json["cards"]
list_stats = {}
max_length = 0
for item in lists:
cur_length = len(item["name"])
if cur_length > max_length:
max_length = cur_length
list_stats[item["id"]] = {
"name": item["name"],
"no. of cards": 0
}
for card in cards:
list_stats[card["idList"]]["no. of cards"] += 1
left_side = " List Name "
right_side = " No. of Cards ".format("no. of cards")
if len(left_side)-2 > max_length:
max_length = len(left_side)-2
print "\n"+json["name"]
print "\nStatistics:"
print "-"*(19+max_length)
print "|{0:{1}}|{2}|".format(left_side, max_length+2, right_side)
print "-"*(19+max_length)
for key in list_stats:
name = " {} ".format(list_stats[key]["name"])
num = " {} ".format(str(list_stats[key]["no. of cards"]))
print "|{0:{1}}|{2:14}|".format(
name,
max_length+2,
num)
print "-"*(19+max_length)
def getList(self, name):
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
for item in json["lists"]:
if item["name"] == name:
list_id = item["id"]
if list_id:
url = BASE + "lists/{0}?cards=open&key={1}&token={2}".format(list_id, key, token)
response = requests.get(url)
json = response.json()
cards = {}
max_name_len = 0
max_id_len = 0
for card in json["cards"]:
if len(card["name"]) > max_name_len:
max_name_len = len(card["name"])
if len(card["id"]) > max_id_len:
max_id_len = len(card["id"])
cards[card["id"]] = {
"name": card["name"],
"id": card["id"]
}
left_side = " Card Name "
right_side = " Card ID "
if len(left_side)-2 > max_name_len:
max_name_len = len(left_side)-2
if len(right_side)-2 > max_id_len:
max_id_len = len(right_side)-2
print "\n"+json["name"]
print "-"*(7+max_id_len+max_name_len)
print "|{0:{1}}|{2:{3}}|".format(left_side, max_name_len+2, right_side,
max_id_len+2)
print "-"*(7+max_id_len+max_name_len)
for key in cards:
name = " {} ".format(cards[key]["name"])
ID = " {} ".format(cards[key]["id"])
print "|{0:{1}}|{2:{3}}|".format(
name,
max_name_len+2,
ID,
max_id_len+2)
print "-"*(7+max_id_len+max_name_len)
else:
print "List not found. Check your spelling."
def getTask(self, name=None, ID=None):
if not name and not ID:
print "You must specify either a card name or a card ID."
return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
if ID:
card_id = ID
else:
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if card_id:
url = BASE + "cards/{0}?actions=commentCard&key={1}&token={2}".format(card_id, key, token)
response = requests.get(url)
json = response.json()
comments = {}
max_name_len = 0
max_text_len = 0
max_date_len = 0
for comment in json["actions"]:
if len(comment["memberCreator"]["username"])-2 > max_name_len:
max_name_len = len(comment["memberCreator"]["username"])
if len(comment["data"]["text"])-2 > max_text_len:
max_text_len = len(comment["data"]["text"])
date = comment["date"].split("T")[0]
if len(date)-2 > max_date_len:
max_date_len = len(date)
comments[comment["id"]] = {
"username": comment["memberCreator"]["username"],
"text": comment["data"]["text"],
"date": date
}
name = json["name"]
name_label = " Username "
text_label = " Comment Text "
date_label = " Date "
if len(name_label)-2 > max_name_len:
max_name_len = len(name_label)-2
if len(text_label)-2 > max_text_len:
max_text_len = len(text_label)-2
print "\n"+name
print "-"*(10+max_text_len+max_name_len+max_date_len)
print "|{0:{1}}|{2:{3}}|{4:{5}}|".format(name_label, max_name_len+2, text_label,
max_text_len+2, date_label, max_date_len+2)
print "-"*(10+max_text_len+max_name_len+max_date_len)
#TODO need to handle comments where overall table width > 80 chars
for key in comments:
name = " {} ".format(comments[key]["username"])
text = " {} ".format(comments[key]["text"])
date = " {} ".format(comments[key]["date"])
print "|{0:{1}}|{2:{3}}|{4:{5}}|".format(
name,
max_name_len+2,
text,
max_text_len+2,
date,
max_date_len+2)
print "-"*(10+max_text_len+max_name_len+max_date_len)
else:
print "Card not found. Check your spelling."
def moveTask(self, name, from_list, to_list):
key, token = self._getCreds()
board = self._board
board_url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(board_url)
json = response.json()
from_id = to_id = None
for item in json["lists"]:
if item["name"] == from_list:
from_id = item["id"]
elif item["name"] == to_list:
to_id = item["id"]
if not from_id:
print "Source board not found."
return None
if not to_id:
print "Destination board not found."
return None
url1 = BASE + "lists/{0}?cards=open&key={1}&token={2}".format(from_id, key, token)
response = requests.get(url1)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}?idList={1}&pos=bottom&key={2}&token={3}".format(card_id, to_id, key, token)
response = requests.put(url)
json = response.json()
print "'{0}' moved to list '{1}'".format(json["name"], to_list)
def addTask(self, name, to_list):
key, token = self._getCreds()
board = self._board
board_url = BASE + "boards/{0}?lists=open&key={1}&token={2}".format(board, key, token)
response = requests.get(board_url)
json = response.json()
to_id = None
for item in json["lists"]:
if item["name"] == to_list:
to_id = item["id"]
if not to_id:
print "Destination list not found."
return None
url = BASE + "cards?name={0}&idList={1}&due=null&key={2}&token={3}".format(name,
to_id, key, token)
response = requests.post(url, data={})
json = response.json()
print "'{0}' added to list '{1}'".format(json["name"], to_list)
def commentTask(self, name, text):
if not name:  # unlike getTask, this method takes no ID parameter
print "You must specify a card name."
return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}/actions/comments?key={1}&token={2}".format(card_id, key, token)
data = {
"text": text
}
response = requests.post(url, data=data)
json = response.json()
if text == json["display"]["entities"]["comment"]["text"]:
print "Comment added successfully."
else:
print "There was an error in processing your comment."
def deleteTask(self, name):
if not name:  # unlike getTask, this method takes no ID parameter
print "You must specify a card name."
return None
key, token = self._getCreds()
board = self._board
url = BASE + "boards/{0}?cards=open&key={1}&token={2}".format(board, key, token)
response = requests.get(url)
json = response.json()
card_id = None
for card in json["cards"]:
if card["name"] == name:
card_id = card["id"]
if not card_id:
print "Card not found."
return None
url = BASE + "cards/{0}?key={1}&token={2}".format(card_id, key, token)
response = requests.delete(url, data={})
json = response.json()
if "_value" in json:
if json["_value"] == None:
print "Card deleted successfully."
else:
print "Card could not be deleted."
if __name__ == "__main__":
trello = Trello()
#trello.getList("Current Sprint")
trello.deleteTask("Test Card") | mit | -8,722,405,353,362,999,000 | 31.093567 | 167 | 0.613759 | false | 2.801889 | false | false | false |
enki/muXTCP | scapyLink.py | 1 | 1540 | #!/usr/bin/python
from muxlib.scapy import *
import sys
from twisted.internet import base, fdesc, reactor, protocol
import socket
import iptables
class ScapyLink(base.BasePort):
def __init__(self, interface=None, plusIPs=[]):
base.BasePort.__init__(self, reactor)
self.protocols = []
self.interface = interface
if interface:
self.listenIPs = [get_if_addr(interface)]
self.listenIPs += plusIPs
self.listenOnWire()
def getHandle(self):
return self.socket
def listenOnWire(self):
# self.socket = scapy.L3RawSocket(iface=self.interface, promisc=True, filter='')
self.socket = L2Socket(iface=self.interface)
reactor.addReader(self)
def fileno(self):
return self.socket.ins.fileno()
def doRead(self):
packet = self.socket.recv(MTU)
for protocol in self.protocols:
protocol.packetReceived(packet)
def registerProtocol(self, protocol):
if protocol not in self.protocols:
self.protocols.append(protocol)
# protocol.startProtocol()
else:
raise "Registered Protocol", protocol, "twice"
protocol.setTransport(self)
def unRegisterProtocol(self, protocol):
if protocol in self.protocols:
protocol.setTransport(None)
self.protocols.remove(protocol)
else:
raise "Removed Protocol", protocol, "that isn't registered"
def send(self, packet):
self.socket.send(packet)
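# --- Illustrative wiring (added example; MyProtocol is hypothetical and must
# implement the packetReceived/setTransport interface used above):
#
#     link = ScapyLink(interface='eth0')
#     link.registerProtocol(MyProtocol())
#     reactor.run()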
| mit | 3,709,210,830,107,390,500 | 27.518519 | 87 | 0.634416 | false | 4.041995 | false | false | false |
eduNEXT/edunext-platform | common/djangoapps/util/course.py | 1 | 2804 | """
Utility methods related to course
"""
import logging
import six
from django.conf import settings
from django.utils.timezone import now
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
COURSE_SHARING_UTM_PARAMETERS = {
'facebook': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'facebook',
},
'twitter': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'twitter',
},
}
def get_encoded_course_sharing_utm_params():
"""
Returns encoded Course Sharing UTM Parameters.
"""
return {
utm_source: six.moves.urllib.parse.urlencode(utm_params)
for utm_source, utm_params in six.iteritems(COURSE_SHARING_UTM_PARAMETERS)
}
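# For reference (added illustration, derived from COURSE_SHARING_UTM_PARAMETERS
# above; parameter order within each query string may vary):
#
#   {
#       'facebook': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=facebook',
#       'twitter': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=twitter',
#   }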
def get_link_for_about_page(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns the course sharing url, this can be one of course's social sharing url, marketing url, or
lms course about url.
"""
is_social_sharing_enabled = configuration_helpers.get_value(
'SOCIAL_SHARING_SETTINGS',
getattr(settings, 'SOCIAL_SHARING_SETTINGS', {})
).get('CUSTOM_COURSE_URLS')
if is_social_sharing_enabled and course.social_sharing_url:
course_about_url = course.social_sharing_url
elif settings.FEATURES.get('ENABLE_MKTG_SITE') and getattr(course, 'marketing_url', None):
course_about_url = course.marketing_url
else:
about_base = configuration_helpers.get_value_for_org(
course.id.org,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
course_about_url = u'{about_base_url}/courses/{course_key}/about'.format(
about_base_url=about_base,
course_key=six.text_type(course.id),
)
return course_about_url
def has_certificates_enabled(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns a boolean if the course has enabled certificates
"""
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return False
return course.cert_html_view_enabled
def should_display_grade(course_overview):
"""
Returns True if the grade should be displayed: based on the certificate
available date when one is set, otherwise on the course end date.
"""
course_end_date = course_overview.end_date
cert_available_date = course_overview.certificate_available_date
current_date = now().replace(hour=0, minute=0, second=0, microsecond=0)
if cert_available_date:
return cert_available_date < current_date
return course_end_date and course_end_date < current_date
| agpl-3.0 | 3,148,199,023,560,329,000 | 29.150538 | 101 | 0.662981 | false | 3.77389 | false | false | false |
sugartom/tensorflow-alien | tensorflow/contrib/layers/python/layers/layers.py | 1 | 95215 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'batch_norm',
'bias_add',
'conv2d',
'conv2d_in_plane',
'conv2d_transpose',
'convolution',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'dropout',
'flatten',
'fully_connected',
'layer_norm',
'linear',
'pool',
'max_pool2d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu']
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
  It is assumed that the pooling is done per image but not across the batch or
  channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
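  Example (a minimal sketch; `images` is a hypothetical NHWC input):
    images = tf.placeholder(tf.float32, [None, 32, 32, 3])
    pooled = tf.contrib.layers.avg_pool2d(images, kernel_size=2, stride=2)
    # With the default 'VALID' padding, `pooled` has shape [None, 16, 16, 3].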
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.AveragePooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _fused_batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
  Note: When is_training is True the moving_mean and moving_variance need to
  be updated. By default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS`,
  so they need to be added as a dependency to the `train_op`. For example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
One can set updates_collections=None to force the updates in place, but that
  can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If the rank of `inputs` is neither 2 or 4.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
original_shape = inputs.get_shape()
original_rank = original_shape.ndims
if original_rank is None:
raise ValueError('Inputs %s has undefined rank' % inputs.name)
elif original_rank not in [2, 4]:
raise ValueError('Inputs %s has unsupported rank.'
' Expected 2 or 4 but got %d' % (
inputs.name, original_rank))
if original_rank == 2:
channels = inputs.get_shape()[-1].value
if channels is None:
raise ValueError('`C` dimension must be known but is None')
new_shape = [-1, 1, 1, channels]
if data_format == DATA_FORMAT_NCHW:
new_shape = [-1, channels, 1, 1]
inputs = array_ops.reshape(inputs, new_shape)
inputs_shape = inputs.get_shape()
dtype = inputs.dtype.base_dtype
if data_format == DATA_FORMAT_NHWC:
params_shape = inputs_shape[-1:]
else:
params_shape = inputs_shape[1:2]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined `C` dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
trainable_beta = trainable and center
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable_beta)
trainable_gamma = trainable and scale
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable_gamma)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=moving_mean,
variance=moving_variance,
epsilon=epsilon,
is_training=False,
data_format=data_format)
outputs, mean, variance = utils.smart_cond(is_training,
_fused_batch_norm_training,
_fused_batch_norm_inference)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_updates` will be true.
is_training_value = utils.constant_value(is_training)
need_updates = is_training_value is None or is_training_value
if need_updates:
if updates_collections is None:
no_updates = lambda: outputs
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(outputs)
outputs = utils.smart_cond(is_training, _force_updates, no_updates)
else:
moving_vars_fn = lambda: (moving_mean, moving_variance)
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
outputs.set_shape(inputs_shape)
if original_shape.ndims == 2:
outputs = array_ops.reshape(outputs, original_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=False,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
  Note: When is_training is True the moving_mean and moving_variance need to
  be updated. By default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS`,
  so they need to be added as a dependency to the `train_op`. For example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
One can set updates_collections=None to force the updates in place, but that
  can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance. Try zero_debias_moving_mean=True for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `batch_weights` is not None and `fused` is True.
ValueError: If `param_regularizers` is not None and `fused` is True.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
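  Example (a hedged sketch; `net` and `is_training` are assumed to exist):
    net = tf.contrib.layers.conv2d(
        net, 64, [3, 3],
        normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'is_training': is_training})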
"""
if fused:
if batch_weights is not None:
raise ValueError('Weighted mean and variance is not currently '
'supported for fused batch norm.')
if param_regularizers is not None:
raise ValueError('Regularizers are not currently '
'supported for fused batch norm.')
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter()
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
# Determine whether we can use the core layer class.
if (batch_weights is None and
updates_collections is ops.GraphKeys.UPDATE_OPS and
not zero_debias_moving_mean):
# Use the core layer class.
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
name=sc.name,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs, training=is_training)
# Add variables to collections.
_add_variable_to_collections(
layer.moving_mean, variables_collections, 'moving_mean')
_add_variable_to_collections(
layer.moving_variance, variables_collections, 'moving_variance')
      if layer.beta is not None:
_add_variable_to_collections(layer.beta, variables_collections, 'beta')
      if layer.gamma is not None:
_add_variable_to_collections(
layer.gamma, variables_collections, 'gamma')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# Not supported by layer class: batch_weights argument,
# and custom updates_collections. In that case, use the legacy BN
# implementation.
# Custom updates collections are not supported because the update logic
# is different in this case, in particular w.r.t. "forced updates" and
# update op reuse.
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
if data_format == DATA_FORMAT_NCHW:
moments_axes = [0] + list(range(2, inputs_rank))
params_shape = inputs_shape[1:2]
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list(
[1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
else:
moments_axes = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
params_shape_broadcast = None
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables.
partitioner = variable_scope.get_variable_scope().partitioner
try:
variable_scope.get_variable_scope().set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
finally:
variable_scope.get_variable_scope().set_partitioner(partitioner)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
    # `need_moments` will be true.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.moments(inputs, moments_axes)
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
if data_format == DATA_FORMAT_NCHW:
mean = array_ops.reshape(mean, params_shape_broadcast)
variance = array_ops.reshape(variance, params_shape_broadcast)
beta = array_ops.reshape(beta, params_shape_broadcast)
if gamma is not None:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer(),
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
    inputs: A tensor with at least rank 2 and value for the last dimension,
e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
data_format: A string. 'NHWC' and 'NCHW' are supported.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the result of adding biases to the inputs.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or `C` dimension of `inputs` is undefined.
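  Example (illustrative only; `features` is a hypothetical tensor):
    features = tf.placeholder(tf.float32, [None, 128])
    outputs = tf.contrib.layers.bias_add(features, activation_fn=tf.nn.relu)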
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Dims of shape must be known but is None')
elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:
raise ValueError('Data format NCHW only supports 4D Tensor')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
num_features = inputs_shape[axis].value
if num_features is None:
raise ValueError('`C` dimension must be known but is None')
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
  Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
rate: A sequence of N positive integers specifying the dilation rate to use
      for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is set to None for no normalizer function.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
    ValueError: If both 'rate' and `stride` are not uniformly 1.
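  Example (a minimal sketch; `images` is a hypothetical NHWC batch):
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    net = tf.contrib.layers.convolution(images, 32, [3, 3], stride=2)
    # With the default 'SAME' padding, `net` has shape [None, 112, 112, 32].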
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = convolutional_layers.Convolution2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
raise ValueError('Convolution not supported for input with rank',
input_rank)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = layer_class(filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
convolution2d = convolution
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
    image = tf.constant(..., shape=(16, 240, 320, 3))
    vert_gradients = layers.conv2d_in_plane(
        image,
        kernel_size=[2, 1],
        weights_initializer=tf.constant_initializer([1, -1]))
    horz_gradients = layers.conv2d_in_plane(
        image,
        kernel_size=[1, 2],
        weights_initializer=tf.constant_initializer([1, -1]))
Args:
inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].
kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
      the pooling. Can be an int if both values are the same.
stride: A list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding type to use, either 'SAME' or 'VALID'.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is set to None for no normalizer function.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_filters_in,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `batch_norm_params` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: Integer, the number of output filters.
kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is set to None for no normalizer function.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 2.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `C` dimension of `inputs` is None.
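  Example (illustrative only; shapes assume the default 'SAME' padding):
    net = tf.placeholder(tf.float32, [None, 8, 8, 128])
    upsampled = tf.contrib.layers.conv2d_transpose(net, 64, [4, 4], stride=2)
    # `upsampled` has shape [None, 16, 16, 64].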
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv2d_transpose', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = convolutional_layers.Convolution2DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: The tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A tensor representing the output of the operation.
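  Example (a minimal sketch; `net` is a hypothetical activation tensor):
    is_training = tf.placeholder(tf.bool, [])
    net = tf.contrib.layers.dropout(net, keep_prob=0.8, is_training=is_training)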
"""
with variable_scope.variable_scope(
scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dropout(rate=1 - keep_prob,
noise_shape=noise_shape,
name=sc.name,
_scope=sc)
outputs = layer.apply(inputs, training=is_training)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: A tensor of size [batch_size, ...].
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A flattened tensor with shape [batch_size, k].
Raises:
ValueError: If inputs rank is unknown or less than 2.
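  Example (illustrative only):
    images = tf.placeholder(tf.float32, [None, 28, 28, 1])
    flat = tf.contrib.layers.flatten(images)
    # `flat` has shape [None, 784].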
"""
with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_rank = inputs.get_shape().ndims
if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
inputs_shape = array_ops.shape(inputs)
batch_dim = array_ops.slice(inputs_shape, [0], [1])
spatial_dims = array_ops.slice(inputs_shape, [1], [inputs_rank - 1])
flat_spatial_dim = math_ops.reduce_prod(spatial_dims)
flat_spatial_dim = array_ops.expand_dims(flat_spatial_dim, 0)
flat_shape = array_ops.concat([batch_dim, flat_spatial_dim], 0)
outputs = array_ops.reshape(inputs, flat_shape)
# Attempt to propagate shape information, if it is defined.
input_shape = inputs.get_shape().as_list()
batch_dim, spatial_dims = input_shape[0], input_shape[1:]
if all(spatial_dims):
outputs.set_shape([batch_dim,
functools.reduce(lambda x, y: x * y, spatial_dims)])
else:
outputs.set_shape([batch_dim, None])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
outer_dimensions = inputs.dense_shape[:new_rank - 1]
inner_dimensions = inputs.dense_shape[new_rank - 1:]
new_shape = array_ops.concat((outer_dimensions,
[math_ops.reduce_prod(inner_dimensions)]), 0)
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
def _dense_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
rank_assertion = check_ops.assert_rank_at_least(
inputs, new_rank, message='inputs has rank less than new_rank')
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.strided_slice(
array_ops.shape(inputs), [0], [new_rank - 1])
new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
if isinstance(new_rank, six.integer_types):
static_shape = inputs.get_shape()
if static_shape is not None and static_shape.dims is not None:
static_shape = static_shape.as_list()
static_outer_dims = static_shape[:new_rank - 1]
static_inner_dims = static_shape[new_rank - 1:]
flattened_dimension = 1
for inner_dim in static_inner_dims:
if inner_dim is None:
flattened_dimension = None
break
flattened_dimension *= inner_dim
reshaped.set_shape(static_outer_dims + [flattened_dimension])
return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
"""Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.
For example:
'''
x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
y = _inner_flatten(x, 4)
assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
'''
This layer will fail at run time if `new_rank` is greater than the current
rank of `inputs`.
Args:
inputs: A `Tensor` or `SparseTensor`.
new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
flattened = _dense_inner_flatten(inputs, new_rank)
return utils.collect_named_outputs(output_collections, sc, flattened)
def _model_variable_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None, trainable=True,
collections=None, caching_device=None,
partitioner=None, rename=None, use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, collections=collections, trainable=trainable,
caching_device=caching_device, partitioner=partitioner,
custom_getter=getter, use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(
collections_set, collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not
  `None`, it is applied to the hidden units as well.
  Note that if `inputs` has a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
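  Example (a hedged sketch; layer sizes are arbitrary):
    x = tf.placeholder(tf.float32, [None, 784])
    hidden = tf.contrib.layers.fully_connected(x, 256)
    logits = tf.contrib.layers.fully_connected(hidden, 10, activation_fn=None)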
"""
if not isinstance(num_outputs, six.integer_types):
    raise ValueError(
        'num_outputs should be int or long, got %s.' % (num_outputs,))
layer_variable_getter = _build_variable_getter({'bias': 'biases',
'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'fully_connected', [inputs],
reuse=reuse, custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dense(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
@add_arg_scope
def layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
Can be used as a normalizer function for conv2d and fully_connected.
Args:
inputs: A tensor with 2 or more dimensions. The normalization
occurs over all but the first dimension.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If rank or last dimension of `inputs` is undefined.
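  Example (illustrative only; `x` is a hypothetical 2-D tensor):
    h = tf.contrib.layers.fully_connected(
        x, 256, normalizer_fn=tf.contrib.layers.layer_norm)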
"""
with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = list(range(1, inputs_rank))
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer(),
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer(),
collections=gamma_collections,
trainable=trainable)
# Calculate the moments on the last axis (layer activations).
mean, variance = nn.moments(inputs, axis, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1E-12
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
outputs)
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
  It is assumed that the pooling is done per image but not across the batch or
  channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `kernel_size` is not a 2-D list.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.MaxPooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
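# Example (editor's note): hypothetical usage of max_pool2d on NHWC images;
# with the default VALID padding a 224x224 input halves to 112x112.
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   pooled = max_pool2d(images, kernel_size=2, stride=2)  # -> [None, 112, 112, 3]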
@add_arg_scope
def pool(inputs,
kernel_size,
pooling_type,
padding='VALID',
data_format=None,
dilation_rate=1,
stride=1,
outputs_collections=None,
scope=None):
# pylint: disable=line-too-long
"""Adds a pooling op.
Args:
inputs: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
kernel_size: Sequence of N ints >= 1. Can also be a single integer to
specify the same value for all spatial dimensions.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
to [1]*N. Can also be a single integer to specify the same value for all
spatial dimensions. If any value of dilation_rate is > 1, then all values
of stride must be 1.
stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
a single integer to specify the same value for all spatial dimensions. If
any value of stride is > 1, then all values of dilation_rate must be 1.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(scope, '%s_pool' %
(pooling_type.lower()), [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3:
raise ValueError('Rank of inputs must be >= 3')
num_spatial_dims = input_rank - 2
output = nn.pool(
input=inputs,
window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
pooling_type=pooling_type,
padding=padding,
data_format=data_format,
dilation_rate=utils.n_positive_integers(num_spatial_dims,
dilation_rate),
strides=utils.n_positive_integers(num_spatial_dims, stride),
name=sc)
return utils.collect_named_outputs(outputs_collections, sc, output)
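# Example (editor's note): a hedged sketch of the generic pool wrapper; for a
# rank-4 NHWC input this behaves like 2-D average pooling.
#   x = tf.placeholder(tf.float32, [None, 28, 28, 16])
#   y = pool(x, kernel_size=3, pooling_type='AVG', padding='SAME', stride=2)
#   # -> [None, 14, 14, 16]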
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using `tf.one_hot`.
Args:
labels: [batch_size] target labels.
num_classes: Total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
One-hot encoding of the labels.
"""
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
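# Example (editor's note): hypothetical call showing the shape contract;
# [batch_size] integer labels become [batch_size, num_classes] one-hot rows.
#   labels = tf.constant([0, 2, 1])
#   onehot = one_hot_encoding(labels, num_classes=3)
#   # -> [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]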
def _apply_activation(y, activation_fn, output_collections):
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
A tensor result of applying the layer, repetitions times.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i+1)
outputs = layer(outputs, *args, **kwargs)
return outputs
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `batch_norm_params` is None,
it adds bias to the result, creating a variable called 'biases', otherwise
it adds a batch normalization layer. It finally applies an activation function
to produce the end result.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
num_outputs: The number of pointwise convolution output filters. If is
None, then we skip the pointwise convolution stage.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
rate: A list of length 2: [rate_height, rate_width], specifying the dilation
      rates for atrous convolution. Can be an int if both rates are the same.
If any value is larger than one, then both stride values need to be one.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases',
'depthwise_kernel': 'depthwise_weights',
'pointwise_kernel': 'pointwise_weights'})
with variable_scope.variable_scope(
scope, 'SeparableConv2d', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
if num_outputs is not None:
# Apply separable conv using the SeparableConvolution2D layer.
layer = convolutional_layers.SeparableConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format='channels_last',
dilation_rate=utils.two_element_tuple(rate),
activation=None,
depth_multiplier=depth_multiplier,
use_bias=not normalizer_fn and biases_initializer,
depthwise_initializer=weights_initializer,
pointwise_initializer=weights_initializer,
bias_initializer=biases_initializer,
depthwise_regularizer=weights_regularizer,
pointwise_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.depthwise_kernel,
variables_collections, 'weights')
_add_variable_to_collections(layer.pointwise_kernel,
variables_collections, 'weights')
      if layer.bias is not None:
_add_variable_to_collections(layer.bias,
variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
# Actually apply depthwise conv instead of separable conv.
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w,
num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [1, stride_h, stride_w, 1]
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding,
rate=utils.two_element_tuple(rate))
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
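# Example (editor's note): a hedged sketch of the two modes documented above --
# depthwise followed by pointwise when num_outputs is given, depthwise-only
# when num_outputs is None. Shapes are illustrative.
#   x = tf.placeholder(tf.float32, [None, 64, 64, 32])
#   sep = separable_convolution2d(x, num_outputs=64, kernel_size=3,
#                                 depth_multiplier=1)   # -> [..., 64] channels
#   dw = separable_convolution2d(x, num_outputs=None, kernel_size=3,
#                                depth_multiplier=2)    # -> [..., 64] channels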
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
predictions.set_shape(logits.get_shape())
return predictions
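# Example (editor's note): hypothetical use on rank-3 logits; the softmax is
# taken along the last dimension only, so each probs[i, j, :] sums to 1.
#   logits = tf.placeholder(tf.float32, [None, 10, 5])
#   probs = softmax(logits)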
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
A `Tensor` result of applying the stacked layers.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_scope(scope, 'Stack', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If `dim` is not a valid dimension index for `inputs` (it must satisfy 0 <= dim < rank).
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
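# Example (editor's note): hypothetical row normalization with unit_norm; the
# epsilon term makes the result only approximately unit length.
#   x = tf.constant([[3.0, 4.0]])
#   y = unit_norm(x, dim=1)   # -> approximately [[0.6, 0.8]]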
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer(),
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_scope(name, 'fully_connected', [x]):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unstack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.stack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
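# Example (editor's note): a hedged sketch of the legacy API; by default the
# weights land in the WEIGHTS collection and the activated output in
# ACTIVATIONS, as described in the docstring above.
#   x = tf.placeholder(tf.float32, [None, 32])
#   y = legacy_fully_connected(x, 10, activation_fn=nn.relu)
#   # equivalent shorthand (defined below): legacy_relu(x, 10)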
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv2d_transpose = convolution2d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| apache-2.0 | -7,573,405,888,971,894,000 | 42.89811 | 80 | 0.645266 | false | 4.211562 | false | false | false |
dataflow/DataStage | datastage/dataset/longliving/sword_statement_check.py | 1 | 4734 | import logging
import time
import thread
import urllib2
import sys
import datetime
from django_longliving.base import LonglivingThread
from datastage.dataset import SUBMISSION_QUEUE
from datastage.web.dataset.models import DatasetSubmission
from datastage.web.dataset import openers
from sword2 import Connection, UrlLib2Layer
logger = logging.getLogger(__name__)
# list of all the error states that we can see in the statement that we want
# to be able to react to
ERROR_STATES = [
"http://databank.ox.ac.uk/errors/UnzippingIssue"
]
# NOTE: this thread is resistant to being stopped. A KeyboardInterrupt will
# NOT suffice, it will need to be killed with a "kill <pid>" on the command
# line
class SwordStatementCheckThread(LonglivingThread):
    # FIXME: not quite sure how the __init__ function on LonglivingThread
    # works, so setting this as a class variable for the time being
# this is how long the thread will sleep between requests (in seconds)
throttle = 5
# this is how long the thread will sleep between retrys (in seconds)
retry_delay = 3
# This is how long the thread will sleep between entire batches of updates.
# This is particularly useful if the total number of submissions is quite
# small - it will stop the while True loop just spinning aimlessly most of
# the time. (in seconds)
batch_throttle = 120
# this is how many times the thread will re-try contacting the server if
# it suffers a major exception (i.e. not a sword exception, but something
# network related)
retry_count = 10
# this is the gap between attempts to check a specific item. If the item
# has been checked more recently than this amount of time ago, it will not
# be checked again on the current run. Specified in seconds (here it is
# set to once per day).
check_gap = 86400
def run(self):
# just keep going until the thread is killed
while True:
self._check_all_datasets()
time.sleep(SwordStatementCheckThread.batch_throttle)
def _check_all_datasets(self):
dss = DatasetSubmission.objects.all()
for dataset_submission in dss:
if not self._checkable(dataset_submission):
continue
self._check_dataset(dataset_submission)
def _checkable(self, dataset_submission):
last_checked = dataset_submission.last_accessed
if last_checked is None:
return True
now = datetime.datetime.now()
minimum = datetime.timedelta(0, SwordStatementCheckThread.check_gap)
gap = now - last_checked
return gap > minimum
def _check_dataset(self, dataset_submission):
retry_counter = 0
exception = None
while retry_counter < SwordStatementCheckThread.retry_count:
try:
# logger.info("Checking state of dataset at " + dataset_submission.remote_url)
opener = openers.get_opener(dataset_submission.repository,
dataset_submission.submitting_user)
conn = Connection(error_response_raises_exceptions=False, http_impl=UrlLib2Layer(opener))
receipt = conn.get_deposit_receipt(dataset_submission.remote_url)
statement = conn.get_ore_sword_statement(receipt.ore_statement_iri)
for state_uri, state_desc in statement.states:
logger.info("Dataset has state URI: " + state_uri)
if state_uri in ERROR_STATES:
dataset_submission.status = 'error'
logger.info("URI: " + state_uri + " is an error state ... setting 'error' state on submission record")
break
dataset_submission.last_accessed = datetime.datetime.now()
dataset_submission.save()
time.sleep(SwordStatementCheckThread.throttle)
except urllib2.URLError as e:
# if we get an exception, try again up to the limit
logger.info("Got error connecting to the server ... retrying " + str(retry_counter + 1) + " of " + str(SwordStatementCheckThread.retry_count))
retry_counter += 1
exception = e
time.sleep(SwordStatementCheckThread.retry_delay)
continue
else:
# if we don't get an exception, we're done
return
# if we don't return from the else statement above, it means the retries
# all failed, and we have a problem. Raise the last thrown exception.
raise exception
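# Example (editor's note): a hedged sketch of launching the checker by hand.
# django-longliving normally discovers and runs LonglivingThread subclasses
# itself, and the constructor signature of LonglivingThread is not shown in
# this file, so the no-argument construction below is an assumption.
#   checker = SwordStatementCheckThread()  # hypothetical no-arg constructor
#   checker.start()  # threading.Thread API: executes run() in its own thread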
| mit | 5,942,489,631,099,173,000 | 40.165217 | 158 | 0.636671 | false | 4.440901 | false | false | false |
laurent-george/weboob | modules/cmso/web/pages.py | 1 | 3661 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 smurail
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from weboob.browser.pages import HTMLPage, LoggedPage, pagination
from weboob.browser.elements import ListElement, ItemElement, method
from weboob.browser.filters.standard import CleanText, CleanDecimal, Regexp, DateGuesser, Env
from weboob.browser.filters.html import Link
from weboob.capabilities.bank import Account
from ..transaction import Transaction
__all__ = ['LoginPage', 'AccountsPage', 'HistoryPage']
class LoginPage(HTMLPage):
def login(self, username, password):
form = self.get_form('//form[@id="formAuth"]')
form['noPersonne'] = username
form['motDePasse'] = password
form.submit()
class CmsoListElement(ListElement):
item_xpath = '//table[@class="Tb" and tr[1][@class="LnTit"]]/tr[@class="LnA" or @class="LnB"]'
class AccountsPage(LoggedPage, HTMLPage):
@method
class iter_accounts(CmsoListElement):
class item(ItemElement):
klass = Account
obj__history_url = Link('./td[1]/a')
obj_label = CleanText('./td[1]')
obj_id = obj__history_url & Regexp(pattern="indCptSelectionne=(\d+)") | None
obj_balance = CleanDecimal('./td[2]', replace_dots=True)
def validate(self, obj):
if obj.id is None:
obj.id = obj.label.replace(' ', '')
return True
class CmsoTransactionElement(ItemElement):
klass = Transaction
def condition(self):
return len(self.el) >= 5 and not self.el.get('id', '').startswith('libelleLong')
class HistoryPage(LoggedPage, HTMLPage):
def iter_history(self, *args, **kwargs):
if self.doc.xpath('//a[@href="1-situationGlobaleProfessionnel.act"]'):
return self.iter_history_rest_page(*args, **kwargs)
return self.iter_history_first_page(*args, **kwargs)
@method
class iter_history_first_page(CmsoListElement):
class item(CmsoTransactionElement):
def validate(self, obj):
return obj.date >= datetime.date.today().replace(day=1)
def date(selector):
return DateGuesser(CleanText(selector), Env('date_guesser')) | Transaction.Date(selector)
obj_date = date('./td[1]')
obj_vdate = date('./td[2]')
# Each row is followed by a "long labelled" version
obj_raw = Transaction.Raw('./following-sibling::tr[1][starts-with(@id, "libelleLong")]/td[3]')
obj_amount = Transaction.Amount('./td[5]', './td[4]')
@pagination
@method
class iter_history_rest_page(CmsoListElement):
next_page = Link('//span[has-class("Rappel")]/following-sibling::*[1][@href]')
class item(CmsoTransactionElement):
obj_date = Transaction.Date('./td[2]')
obj_vdate = Transaction.Date('./td[1]')
obj_raw = Transaction.Raw('./td[3]')
obj_amount = Transaction.Amount('./td[5]', './td[4]', replace_dots=False)
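# Example (editor's note): a hedged sketch of how these Page classes would be
# bound to URLs in the module's browser class. The real browser.py is not part
# of this file, and the URL patterns below are placeholders, not the bank's
# actual endpoints.
#   from weboob.browser import LoginBrowser, URL
#
#   class CmsoBrowser(LoginBrowser):
#       BASEURL = 'https://www.cmso.com'
#       login_page = URL(r'/connexion', LoginPage)
#       accounts = URL(r'/comptes', AccountsPage)
#       history = URL(r'/releve', HistoryPage)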
| agpl-3.0 | 2,973,286,327,672,497,700 | 34.892157 | 106 | 0.643267 | false | 3.754872 | false | false | false |
r0balo/pelisalacarta | python/main-classic/channels/yaske.py | 1 | 68313 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for yaske
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re, sys, urllib, urlparse
from core import config
from core import logger
from core import httptools
from core import scrapertools
from core import servertools
from core import channeltools
from core import tmdb
from core.item import Item
HOST = 'http://www.yaske.ro'
parameters= channeltools.get_channel_parameters('yaske')
fanart_host= parameters['fanart']
thumbnail_host= parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF','0xFF5FDA6D','0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_blod= True, viewcontent='movies',
thumbnail= thumbnail % 'novedades', viewmode = "movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_blod=True,
url= HOST + "/genero/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_blod= True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="gender", thumbnail=thumbnail % 'generos', viewmode = "thumbnails" ))
itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="language", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar') )
return itemlist
def search(item,texto):
logger.info()
itemlist = []
try:
item.url = HOST + "/search/%s" % texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST+"/"
elif categoria == 'infantiles':
item.url = HOST+"/custom/?gender=animation"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
    # Catch the exception so the 'newest' listing is not interrupted if a single channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ","",data)
patron = '<li class="item-movies.*?'
patron += '<a class="image-block" href="([^"]+)" title="([^"]+)">'
patron += '<img src="([^"]+).*?'
patron += '<div class="moSinopsis">.*?</b>([^<]+).*?'
patron += '<div class="moYear">.*?</b>([^<]+).*?'
patron += '<ul class="bottombox">.*?<li>(<img.*?)</li>.*?</ul>'
patron += '<div class="quality">([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
    # Pagination
if item.next_page != 'b':
if len(matches) > 20:
url_next_page = item.url
            matches = matches[:20]
next_page = 'b'
else:
matches = matches[20:]
next_page = 'a'
patron_next_page = "<a href='([^']+)'>\»\;</a>"
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, year, idiomas, calidad in matches:
patronidiomas = "<img src='[^']+' title='([^']+)'"
matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(idiomas)
idiomas_disponibles = ""
if matchesidiomas:
idiomas_disponibles = "[" + "/".join(matchesidiomas).strip() + "]"
contentTitle = decodeHtmlentities(scrapedtitle.strip())
title = "%s %s [%s]" %(contentTitle, idiomas_disponibles, calidad)
plot = decodeHtmlentities(scrapedplot)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, contentQuality=calidad,
thumbnail=scrapedthumbnail, plot=plot, contentTitle=contentTitle,
infoLabels={"year":year}, text_color = color1))
    # Fetch the basic metadata for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
    # Add the pagination item if needed
if url_next_page:
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color = color3, text_blod=True))
return itemlist
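# Example (editor's note): the pagination above serves each ~40-item scraped
# page to the UI in two halves. A hypothetical walk-through:
#   page1 = peliculas(Item(url=HOST))   # first 20 items, next_page='b'
#   page2 = peliculas(page1[-1])        # remaining items, plus the site's real
#                                       # ">> Página siguiente" URL if present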
def menu_buscar_contenido(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<select name="'+item.extra+'"(.*?)</select>')
    # Extract the entries
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
adult_mode = config.get_setting("adult_mode")
for scrapedurl,scrapedtitle in matches:
thumbnail = ""
if item.extra == 'gender':
if scrapedtitle in ['Proximos', 'Series', 'Noticia'] or (scrapedtitle == 'Adultos' and adult_mode == "false"):
continue
url = HOST + "/genero/" + scrapedurl
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% scrapedtitle.lower().replace(' ','%20')
else:
url = HOST+"/custom/?"+item.extra+"="+scrapedurl
thumbnail = item.thumbnail
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color = color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot") )
if item.extra in ['gender', 'language']:
return sorted(itemlist, key=lambda i: i.title.lower())
else:
return itemlist
def findvideos(item):
logger.info()
langdict = {}
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url).data
if not item.plot:
item.plot = scrapertools.find_single_match(data,'<meta name="sinopsis" content="([^"]+)"')
item.plot = decodeHtmlentities(item.plot)
patron = '<tr bgcolor=(.*?)</tr>'
matches = re.compile(patron,re.DOTALL).findall(data)
for tr in matches:
try:
url = scrapertools.find_single_match(tr, '<a.*?href="([^"]+)"')
if not url.startswith("http") or "olimpo.link" in url:
continue
title = scrapertools.find_single_match(tr,'<i class="icon-([^"]+)')
server = scrapertools.find_single_match(tr,'"http\://www.google.com[^>]+>([^<]+)')
idioma = scrapertools.find_single_match(tr,
'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/([a-z_]+).png"[^>]+>[^<]*<')
subtitulos = scrapertools.find_single_match(tr,
'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/[^"]+"[^>]+>([^<]*)<')
            thumbnail = servertools.guess_server_thumbnail(server) # TODO: this takes very long, see if it can be changed
if not thumbnail:
thumbnail = thumbnail_host
if title == 'play':
title = " Ver en %s" % server
elif title == 'download':
title = " Descargar de %s" % server
else:
title = " %s en %s" % (title, server)
sublist = langdict.get(idioma, list())
sublist.append(item.clone(action="play", title=title, url=url, server=server,
thumbnail=thumbnail, folder=False, text_color=color1))
langdict[idioma] = sublist
except:
import traceback
logger.info("Excepcion: "+traceback.format_exc())
    # Add the servers found, grouping them by language
lang_trans = {"es_es": "Español:", "la_la": "Latino:", "en_es": "Subtitulado:", "en_en": "Ingles:"}
for k in ["es_es", "la_la", "en_es", "en_en"]:
if k in langdict:
itemlist.append(Item(channel=item.channel, title=lang_trans[k], fanart=item.fanart, folder=False,
text_color=color2, text_blod=True, thumbnail=thumbnail_host))
itemlist.extend(langdict.pop(k))
    # Other languages
    for k, v in langdict.items():
        # note: 'subtitulos' holds the value parsed for the last server row above
        if subtitulos:
            title = "%s/%s:" % (k, subtitulos)
else:
title = "%s:" % k
itemlist.append(Item(channel=item.channel, title=title, fanart=fanart_host, folder=False,
text_color=color2, text_blod=True, thumbnail=thumbnail_host))
itemlist.extend(langdict.pop(k))
    # Insert the "Buscar trailer" and "Añadir a la biblioteca" items
if itemlist and item.extra != "library":
title = "%s [%s]" % (item.contentTitle, item.contentQuality)
itemlist.insert(0, item.clone(channel = "trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_library_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
def play(item):
logger.info("item.url="+item.url)
itemlist=[]
data = urllib.unquote(item.url)
newdata = scrapertools.find_single_match(data,'olo.gg/s/[a-zA-Z0-9]+.s.(.*?)$')
if newdata:
data = urllib.unquote(newdata)
logger.info("item.url=" + data)
    # Look for the video using the item's server ...
devuelve = servertools.findvideosbyserver(data, item.server)
if not devuelve:
        # ...if it is not found there, search across all the available servers
devuelve = servertools.findvideos(data)
if devuelve:
#logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist
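# Note (editor's): servertools.findvideos()/findvideosbyserver() are assumed
# here to return a list of tuples laid out as (label, url, server), which is
# why play() reads devuelve[0][1] for the URL and devuelve[0][2] for the
# server. A hedged sketch with a made-up link:
#   devuelve = servertools.findvideos("http://powvideo.net/abc123")
#   # devuelve[0] -> ("[powvideo]", "http://powvideo.net/abc123", "powvideo")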
# TODO: this is temporary until scrapertools is updated
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {'CupCap;': '\u224d', 'minusdu;': '\u2a2a', 'aring': '\xe5', 'Ubreve;': '\u016c', 'lcedil;': '\u013c',
'Zacute;': '\u0179', 'NotVerticalBar;': '\u2224', 'bbrk;': '\u23b5', 'ThinSpace;': '\u2009',
'nwarhk;': '\u2923', 'rlm;': '\u200f', 'DoubleDownArrow;': '\u21d3', 'RightDownVectorBar;': '\u2955',
'jukcy;': '\u0454', 'frac12;': '\xbd', 'subrarr;': '\u2979', 'rsquo;': '\u2019', 'aacute;': '\xe1',
'Integral;': '\u222b', 'oS;': '\u24c8', 'eqslantgtr;': '\u2a96', 'Uuml': '\xdc', 'piv;': '\u03d6',
'iinfin;': '\u29dc', 'Ubrcy;': '\u040e', 'lhblk;': '\u2584', 'uml': '\xa8', 'backcong;': '\u224c',
'capdot;': '\u2a40', 'harr;': '\u2194', 'lsquor;': '\u201a', 'iscr;': '\U0001d4be', 'Lsh;': '\u21b0',
'Implies;': '\u21d2', 'Oacute': '\xd3', 'reg': '\xae', 'vsupnE;': '\u2acc\ufe00', 'Pcy;': '\u041f',
'nang;': '\u2220\u20d2', 'Kcy;': '\u041a', 'GT': '>', 'eacute;': '\xe9', 'breve;': '\u02d8',
'mfr;': '\U0001d52a', 'bnot;': '\u2310', 'racute;': '\u0155', 'dtrif;': '\u25be', 'cedil': '\xb8',
'gesdotol;': '\u2a84', 'sc;': '\u227b', 'npreceq;': '\u2aaf\u0338', 'NotTildeTilde;': '\u2249',
'nlE;': '\u2266\u0338', 'trianglerighteq;': '\u22b5', 'gfr;': '\U0001d524', 'odblac;': '\u0151',
'wedge;': '\u2227', 'solb;': '\u29c4', 'isinE;': '\u22f9', 'middot;': '\xb7', 'nshortparallel;': '\u2226',
'cudarrr;': '\u2935', 'loarr;': '\u21fd', 'UnderBar;': '_', 'mstpos;': '\u223e', 'Oacute;': '\xd3',
'ltdot;': '\u22d6', 'gacute;': '\u01f5', 'Tcy;': '\u0422', 'Jcy;': '\u0419', 'wr;': '\u2240',
'Amacr;': '\u0100', 'gtrdot;': '\u22d7', 'rarrap;': '\u2975', 'boxtimes;': '\u22a0', 'nearr;': '\u2197',
'ecaron;': '\u011b', 'angmsdad;': '\u29ab', 'ropf;': '\U0001d563', 'uacute;': '\xfa', 'nsucc;': '\u2281',
'nvap;': '\u224d\u20d2', 'udblac;': '\u0171', 'range;': '\u29a5', 'udhar;': '\u296e', 'nwarr;': '\u2196',
'lneq;': '\u2a87', 'Uuml;': '\xdc', 'Tab;': '\t', 'Lmidot;': '\u013f', 'Tfr;': '\U0001d517',
'TScy;': '\u0426', 'nvge;': '\u2265\u20d2', 'mp;': '\u2213', 'gl;': '\u2277', 'YAcy;': '\u042f',
'CenterDot;': '\xb7', 'iopf;': '\U0001d55a', 'varsigma;': '\u03c2', 'lbrack;': '[', 'icy;': '\u0438',
'boxDR;': '\u2554', 'nsubseteq;': '\u2288', 'Ocy;': '\u041e', 'integers;': '\u2124', 'THORN': '\xde',
'cwint;': '\u2231', 'downharpoonright;': '\u21c2', 'capbrcup;': '\u2a49', 'nGtv;': '\u226b\u0338',
'nge;': '\u2271', 'angmsdac;': '\u29aa', 'ropar;': '\u2986', 'boxdl;': '\u2510', 'bigcup;': '\u22c3',
'lsim;': '\u2272', 'gtquest;': '\u2a7c', 'lrhar;': '\u21cb', 'Aring': '\xc5', 'Cap;': '\u22d2',
'twoheadrightarrow;': '\u21a0', 'ngsim;': '\u2275', 'plus;': '+', 'LeftArrowBar;': '\u21e4',
'lesseqqgtr;': '\u2a8b', 'softcy;': '\u044c', 'ne;': '\u2260', 'Agrave': '\xc0', 'SmallCircle;': '\u2218',
'andd;': '\u2a5c', 'LeftArrow;': '\u2190', 'napE;': '\u2a70\u0338', 'iuml': '\xef', 'Lscr;': '\u2112',
'gla;': '\u2aa5', 'yicy;': '\u0457', 'bsime;': '\u22cd', 'gtreqqless;': '\u2a8c', 'female;': '\u2640',
'cupdot;': '\u228d', 'pound': '\xa3', 'yacy;': '\u044f', 'varkappa;': '\u03f0', 'lambda;': '\u03bb',
'circledcirc;': '\u229a', 'circlearrowleft;': '\u21ba', 'Beta;': '\u0392', 'REG': '\xae',
'drbkarow;': '\u2910', 'boxhu;': '\u2534', 'xvee;': '\u22c1', 'boxv;': '\u2502', 'igrave;': '\xec',
'SquareSupersetEqual;': '\u2292', 'Afr;': '\U0001d504', 'lacute;': '\u013a', 'Yacute;': '\xdd',
'xrArr;': '\u27f9', 'mnplus;': '\u2213', 'shchcy;': '\u0449', 'Hopf;': '\u210d', 'ucirc': '\xfb',
'tau;': '\u03c4', 'TSHcy;': '\u040b', 'Icirc': '\xce', 'imath;': '\u0131', 'qprime;': '\u2057',
'uhblk;': '\u2580', 'lbarr;': '\u290c', 'Hstrok;': '\u0126', 'NotLessGreater;': '\u2278',
'vsubne;': '\u228a\ufe00', 'DoubleLeftRightArrow;': '\u21d4', 'larrtl;': '\u21a2',
'LessEqualGreater;': '\u22da', 'boxVl;': '\u2562', 'csupe;': '\u2ad2', 'gesdoto;': '\u2a82',
'lEg;': '\u2a8b', 'zhcy;': '\u0436', 'icirc': '\xee', 'rmoust;': '\u23b1', 'RoundImplies;': '\u2970',
'subE;': '\u2ac5', 'zwj;': '\u200d', 'VerticalLine;': '|', 'ell;': '\u2113', 'larrbfs;': '\u291f',
'OpenCurlyDoubleQuote;': '\u201c', 'Hfr;': '\u210c', 'ddotseq;': '\u2a77', 'orderof;': '\u2134',
'Element;': '\u2208', 'circledast;': '\u229b', 'larrpl;': '\u2939', 'longmapsto;': '\u27fc',
'lessapprox;': '\u2a85', 'nLtv;': '\u226a\u0338', 'ast;': '*', 'DiacriticalTilde;': '\u02dc',
'lrm;': '\u200e', 'imagpart;': '\u2111', 'Ropf;': '\u211d', 'scE;': '\u2ab4', 'deg': '\xb0',
'll;': '\u226a', 'mopf;': '\U0001d55e', 'ograve;': '\xf2', 'notnivc;': '\u22fd', 'prnap;': '\u2ab9',
'CircleDot;': '\u2299', 'blank;': '\u2423', 'NotLeftTriangleEqual;': '\u22ec', 'num;': '#',
'langle;': '\u27e8', 'scaron;': '\u0161', 'subne;': '\u228a', 'prE;': '\u2ab3', 'Tau;': '\u03a4',
'trie;': '\u225c', 'times': '\xd7', 'eg;': '\u2a9a', 'rightharpoonup;': '\u21c0', 'nearhk;': '\u2924',
'pointint;': '\u2a15', 'Pscr;': '\U0001d4ab', 'quot': '"', 'Iacute;': '\xcd', 'dcy;': '\u0434',
'upsi;': '\u03c5', 'MediumSpace;': '\u205f', 'DownLeftVectorBar;': '\u2956', 'supdsub;': '\u2ad8',
'Ccirc;': '\u0108', 'luruhar;': '\u2966', 'LT': '<', 'chcy;': '\u0447', 'lsimg;': '\u2a8f',
'ljcy;': '\u0459', 'complexes;': '\u2102', 'dagger;': '\u2020', 'isinv;': '\u2208', 'PartialD;': '\u2202',
'prod;': '\u220f', 'subplus;': '\u2abf', 'digamma;': '\u03dd', 'Ccedil': '\xc7', 'blacktriangle;': '\u25b4',
'veeeq;': '\u225a', 'lesdotor;': '\u2a83', 'gcy;': '\u0433', 'ntgl;': '\u2279', 'Ouml': '\xd6',
'eparsl;': '\u29e3', 'xsqcup;': '\u2a06', 'glE;': '\u2a92', 'bowtie;': '\u22c8',
'SquareIntersection;': '\u2293', 'RightFloor;': '\u230b', 'Efr;': '\U0001d508',
'DownLeftRightVector;': '\u2950', 'hercon;': '\u22b9', 'ecy;': '\u044d', 'DoubleDot;': '\xa8', 'rcub;': '}',
'asympeq;': '\u224d', 'NotTildeFullEqual;': '\u2247', 'Gg;': '\u22d9', 'gtreqless;': '\u22db',
'Sscr;': '\U0001d4ae', 'cularrp;': '\u293d', 'DoubleUpArrow;': '\u21d1', 'sect': '\xa7', 'map;': '\u21a6',
'Del;': '\u2207', 'ctdot;': '\u22ef', 'Umacr;': '\u016a', 'copf;': '\U0001d554', 'minus;': '\u2212',
'smte;': '\u2aac', 'zfr;': '\U0001d537', 'measuredangle;': '\u2221', 'male;': '\u2642',
'angrtvbd;': '\u299d', 'NestedGreaterGreater;': '\u226b', 'uuml;': '\xfc', 'ograve': '\xf2',
'Alpha;': '\u0391', 'QUOT;': '"', 'timesd;': '\u2a30', 'hyphen;': '\u2010', 'dopf;': '\U0001d555',
'Backslash;': '\u2216', 'utrif;': '\u25b4', 'ntrianglerighteq;': '\u22ed', 'Hat;': '^', 'between;': '\u226c',
'zacute;': '\u017a', 'geqslant;': '\u2a7e', 'elinters;': '\u23e7', 'lvertneqq;': '\u2268\ufe00',
'Yscr;': '\U0001d4b4', 'NotPrecedesEqual;': '\u2aaf\u0338', 'otilde': '\xf5', 'rtriltri;': '\u29ce',
'SucceedsSlantEqual;': '\u227d', 'bsim;': '\u223d', 'dscy;': '\u0455', 'cirmid;': '\u2aef',
'gnapprox;': '\u2a8a', 'uharl;': '\u21bf', 'sqsube;': '\u2291', 'YIcy;': '\u0407', 'forall;': '\u2200',
'ogt;': '\u29c1', 'Vopf;': '\U0001d54d', 'ffllig;': '\ufb04', 'loz;': '\u25ca', 'Atilde;': '\xc3',
'ntlg;': '\u2278', 'vangrt;': '\u299c', 'it;': '\u2062', 'GreaterTilde;': '\u2273', 'rarrhk;': '\u21aa',
'smid;': '\u2223', 'kappa;': '\u03ba', 'Diamond;': '\u22c4', 'ngeq;': '\u2271', 'DownArrowBar;': '\u2913',
'expectation;': '\u2130', 'sup3': '\xb3', 'frasl;': '\u2044', 'Bscr;': '\u212c', 'geqq;': '\u2267',
'lat;': '\u2aab', 'macr;': '\xaf', 'longrightarrow;': '\u27f6', 'Gcirc;': '\u011c', 'Wcirc;': '\u0174',
'horbar;': '\u2015', 'dharr;': '\u21c2', 'DownRightTeeVector;': '\u295f', 'GreaterEqual;': '\u2265',
'rBarr;': '\u290f', 'precsim;': '\u227e', 'iuml;': '\xef', 'ZHcy;': '\u0416', 'vnsub;': '\u2282\u20d2',
'UnderParenthesis;': '\u23dd', 'RuleDelayed;': '\u29f4', 'bull;': '\u2022', 'swArr;': '\u21d9',
'nrtri;': '\u22eb', 'apE;': '\u2a70', 'nLt;': '\u226a\u20d2', 'LeftDownVectorBar;': '\u2959',
'succnapprox;': '\u2aba', 'szlig': '\xdf', 'vcy;': '\u0432', 'wcirc;': '\u0175', 'utri;': '\u25b5',
'Zeta;': '\u0396', 'Hcirc;': '\u0124', 'NotRightTriangle;': '\u22eb', 'NotGreaterEqual;': '\u2271',
'larrb;': '\u21e4', 'ecolon;': '\u2255', 'ascr;': '\U0001d4b6', 'RightUpVectorBar;': '\u2954',
'divide': '\xf7', 'npolint;': '\u2a14', 'nexist;': '\u2204', 'plusb;': '\u229e', 'boxvl;': '\u2524',
'searhk;': '\u2925', 'oror;': '\u2a56', 'tdot;': '\u20db', 'bigotimes;': '\u2a02', 'phone;': '\u260e',
'Gscr;': '\U0001d4a2', 'bumpe;': '\u224f', 'ang;': '\u2220', 'ltquest;': '\u2a7b',
'rightharpoondown;': '\u21c1', 'rdca;': '\u2937', 'cross;': '\u2717', 'Kopf;': '\U0001d542',
'IEcy;': '\u0415', 'leq;': '\u2264', 'rarrw;': '\u219d', 'rcy;': '\u0440', 'Mu;': '\u039c',
'nopf;': '\U0001d55f', 'Aopf;': '\U0001d538', 'CloseCurlyDoubleQuote;': '\u201d', 'lbrace;': '{',
'triangleq;': '\u225c', 'curlyeqprec;': '\u22de', 'LeftDownTeeVector;': '\u2961', 'subset;': '\u2282',
'xscr;': '\U0001d4cd', 'brvbar;': '\xa6', 'nles;': '\u2a7d\u0338', 'circeq;': '\u2257', 'boxVH;': '\u256c',
'lE;': '\u2266', 'zeta;': '\u03b6', 'congdot;': '\u2a6d', 'emsp13;': '\u2004', 'uogon;': '\u0173',
'xcap;': '\u22c2', 'eta;': '\u03b7', 'lAarr;': '\u21da', 'thicksim;': '\u223c', 'boxDl;': '\u2556',
'rmoustache;': '\u23b1', 'Sopf;': '\U0001d54a', 'uarr;': '\u2191', 'Otimes;': '\u2a37', 'boxvH;': '\u256a',
'lparlt;': '\u2993', 'nsime;': '\u2244', 'sqcaps;': '\u2293\ufe00', 'SquareUnion;': '\u2294',
'Rsh;': '\u21b1', 'Zcy;': '\u0417', 'ycirc;': '\u0177', 'rbrkslu;': '\u2990', 'Proportional;': '\u221d',
'Sup;': '\u22d1', 'curlyvee;': '\u22ce', 'rceil;': '\u2309', 'Xfr;': '\U0001d51b', 'minusd;': '\u2238',
'angmsdab;': '\u29a9', 'DiacriticalDoubleAcute;': '\u02dd', 'par;': '\u2225', 'lpar;': '(', 'lcy;': '\u043b',
'Nu;': '\u039d', 'euml;': '\xeb', 'CircleMinus;': '\u2296', 'lfloor;': '\u230a', 'Rightarrow;': '\u21d2',
'rect;': '\u25ad', 'dzigrarr;': '\u27ff', 'tcy;': '\u0442', 'vartheta;': '\u03d1', 'Idot;': '\u0130',
'Lleftarrow;': '\u21da', 'GT;': '>', 'emsp14;': '\u2005', 'vert;': '|', 'boxHu;': '\u2567',
'Rarrtl;': '\u2916', 'nprcue;': '\u22e0', 'para': '\xb6', 'nsucceq;': '\u2ab0\u0338', 'nhArr;': '\u21ce',
'ClockwiseContourIntegral;': '\u2232', 'Downarrow;': '\u21d3', 'Otilde': '\xd5', 'umacr;': '\u016b',
'varsubsetneq;': '\u228a\ufe00', 'cup;': '\u222a', 'longleftrightarrow;': '\u27f7', 'gg;': '\u226b',
'Barv;': '\u2ae7', 'Map;': '\u2905', 'Im;': '\u2111', 'ltcir;': '\u2a79', 'gdot;': '\u0121',
'Cayleys;': '\u212d', 'timesbar;': '\u2a31', 'Gdot;': '\u0120', 'Ucirc': '\xdb', 'bigvee;': '\u22c1',
'QUOT': '"', 'lang;': '\u27e8', 'Yfr;': '\U0001d51c', 'Larr;': '\u219e', 'leg;': '\u22da', 'cuesc;': '\u22df',
'rArr;': '\u21d2', 'mumap;': '\u22b8', 'RightVector;': '\u21c0', 'nisd;': '\u22fa', 'crarr;': '\u21b5',
'leftthreetimes;': '\u22cb', 'Fcy;': '\u0424', 'xotime;': '\u2a02', 'odash;': '\u229d', 'agrave;': '\xe0',
'LeftFloor;': '\u230a', 'scpolint;': '\u2a13', 'Pfr;': '\U0001d513', 'nvHarr;': '\u2904', 'quot;': '"',
'comp;': '\u2201', 'imagline;': '\u2110', 'telrec;': '\u2315', 'Sqrt;': '\u221a', 'supsub;': '\u2ad4',
'rarr;': '\u2192', 'gvertneqq;': '\u2269\ufe00', 'nbumpe;': '\u224f\u0338', 'Uacute': '\xda',
'gsim;': '\u2273', 'coprod;': '\u2210', 'ncongdot;': '\u2a6d\u0338', 'sscr;': '\U0001d4c8',
'lstrok;': '\u0142', 'TripleDot;': '\u20db', 'topfork;': '\u2ada', 'yacute': '\xfd', 'nrightarrow;': '\u219b',
'VerticalBar;': '\u2223', 'LeftDownVector;': '\u21c3', 'angzarr;': '\u237c', 'nsupset;': '\u2283\u20d2',
'rdldhar;': '\u2969', 'deg;': '\xb0', 'DoubleRightArrow;': '\u21d2', 'macr': '\xaf', 'ldca;': '\u2936',
'jcirc;': '\u0135', 'uml;': '\xa8', 'cupor;': '\u2a45', 'egrave': '\xe8', 'boxur;': '\u2514',
'Esim;': '\u2a73', 'hybull;': '\u2043', 'DownBreve;': '\u0311', 'order;': '\u2134', 'Vscr;': '\U0001d4b1',
'ApplyFunction;': '\u2061', 'Mellintrf;': '\u2133', 'ufisht;': '\u297e', 'Ycirc;': '\u0176',
'nedot;': '\u2250\u0338', 'Ugrave;': '\xd9', 'npar;': '\u2226', 'RightArrowLeftArrow;': '\u21c4',
'xnis;': '\u22fb', 'sharp;': '\u266f', 'twixt;': '\u226c', 'midcir;': '\u2af0', 'real;': '\u211c',
'npr;': '\u2280', 'oopf;': '\U0001d560', 'Ouml;': '\xd6', 'urtri;': '\u25f9', 'SucceedsTilde;': '\u227f',
'ngeqslant;': '\u2a7e\u0338', 'Eopf;': '\U0001d53c', 'LowerLeftArrow;': '\u2199', 'sqsubseteq;': '\u2291',
'preccurlyeq;': '\u227c', 'RightTriangle;': '\u22b3', 'ReverseUpEquilibrium;': '\u296f',
'simplus;': '\u2a24', 'Aogon;': '\u0104', 'NotGreater;': '\u226f', 'rpargt;': '\u2994', 'curarrm;': '\u293c',
'THORN;': '\xde', 'smtes;': '\u2aac\ufe00', 'Ntilde': '\xd1', 'Zscr;': '\U0001d4b5', 'Nscr;': '\U0001d4a9',
'sigma;': '\u03c3', 'Atilde': '\xc3', 'checkmark;': '\u2713', 'spades;': '\u2660', 'ycy;': '\u044b',
'shortmid;': '\u2223', 'NotLeftTriangleBar;': '\u29cf\u0338', 'SuchThat;': '\u220b', 'amacr;': '\u0101',
'bigcirc;': '\u25ef', 'Gt;': '\u226b', 'xopf;': '\U0001d569', 'puncsp;': '\u2008', 'Fscr;': '\u2131',
'gel;': '\u22db', 'sect;': '\xa7', 'cudarrl;': '\u2938', 'Iuml': '\xcf', 'squarf;': '\u25aa',
'seswar;': '\u2929', 'Eacute': '\xc9', 'scy;': '\u0441', 'subnE;': '\u2acb', 'Sacute;': '\u015a',
'doublebarwedge;': '\u2306', 'rnmid;': '\u2aee', 'djcy;': '\u0452', 'Odblac;': '\u0150', 'duhar;': '\u296f',
'nVDash;': '\u22af', 'NotPrecedes;': '\u2280', 'frac45;': '\u2158', 'ubrcy;': '\u045e', 'empty;': '\u2205',
'nbsp;': '\xa0', 'comma;': ',', 'RightArrow;': '\u2192', 'notnivb;': '\u22fe', 'nrarrw;': '\u219d\u0338',
'downdownarrows;': '\u21ca', 'ngE;': '\u2267\u0338', 'lcub;': '{', 'Kscr;': '\U0001d4a6', 'Utilde;': '\u0168',
'pertenk;': '\u2031', 'sstarf;': '\u22c6', 'bdquo;': '\u201e', 'psi;': '\u03c8', 'NotLeftTriangle;': '\u22ea',
'Jscr;': '\U0001d4a5', 'UpEquilibrium;': '\u296e', 'succneqq;': '\u2ab6', 'drcrop;': '\u230c',
'csube;': '\u2ad1', 'plusdu;': '\u2a25', 'nvlArr;': '\u2902', 'RightTeeArrow;': '\u21a6', 'apos;': "'",
'squf;': '\u25aa', 'blacktriangledown;': '\u25be', 'ShortDownArrow;': '\u2193', 'boxuL;': '\u255b',
'Lambda;': '\u039b', 'Darr;': '\u21a1', 'sup3;': '\xb3', 'xcirc;': '\u25ef', 'nscr;': '\U0001d4c3',
'UpArrowDownArrow;': '\u21c5', 'Auml': '\xc4', 'nrArr;': '\u21cf', 'nges;': '\u2a7e\u0338',
'parallel;': '\u2225', 'LeftUpTeeVector;': '\u2960', 'uwangle;': '\u29a7', 'napprox;': '\u2249',
'sol;': '/', 'nRightarrow;': '\u21cf', 'squ;': '\u25a1', 'natur;': '\u266e', 'Escr;': '\u2130',
'nLl;': '\u22d8\u0338', 'DD;': '\u2145', 'Chi;': '\u03a7', 'lBarr;': '\u290e', 'emptyset;': '\u2205',
'iexcl': '\xa1', 'rarrtl;': '\u21a3', 'gE;': '\u2267', 'LeftTeeVector;': '\u295a',
'DoubleUpDownArrow;': '\u21d5', 'Icirc;': '\xce', 'Racute;': '\u0154', 'vee;': '\u2228', 'bot;': '\u22a5',
'nleftrightarrow;': '\u21ae', 'atilde': '\xe3', 'frac35;': '\u2157', 'mDDot;': '\u223a', 'eqcolon;': '\u2255',
'bsolb;': '\u29c5', 'lltri;': '\u25fa', 'bsemi;': '\u204f', 'because;': '\u2235', 'Oslash': '\xd8',
'nu;': '\u03bd', 'rightarrow;': '\u2192', 'rangle;': '\u27e9', 'TRADE;': '\u2122', 'llhard;': '\u296b',
'LeftAngleBracket;': '\u27e8', 'scnsim;': '\u22e9', 'ccirc;': '\u0109', 'Jsercy;': '\u0408',
'nvsim;': '\u223c\u20d2', 'nleftarrow;': '\u219a', 'hopf;': '\U0001d559', 'Ccedil;': '\xc7',
'rrarr;': '\u21c9', 'twoheadleftarrow;': '\u219e', 'erDot;': '\u2253', 'epsiv;': '\u03f5', 'xi;': '\u03be',
'ring;': '\u02da', 'tscy;': '\u0446', 'mu;': '\u03bc', 'Uacute;': '\xda', 'Lang;': '\u27ea', 'ovbar;': '\u233d',
'nleq;': '\u2270', 'gbreve;': '\u011f', 'cedil;': '\xb8', 'gneq;': '\u2a88', 'wopf;': '\U0001d568',
'frac18;': '\u215b', 'Oscr;': '\U0001d4aa', 'Egrave': '\xc8', 'Igrave;': '\xcc', 'varnothing;': '\u2205',
'divideontimes;': '\u22c7', 'dot;': '\u02d9', 'EqualTilde;': '\u2242', 'NotTilde;': '\u2241', 'els;': '\u2a95',
'easter;': '\u2a6e', 'swarhk;': '\u2926', 'vnsup;': '\u2283\u20d2', 'ETH': '\xd0', 'blacksquare;': '\u25aa',
'bcong;': '\u224c', 'ocy;': '\u043e', 'rbrksld;': '\u298e', 'lhard;': '\u21bd', 'gtrarr;': '\u2978',
'nharr;': '\u21ae', 'rharu;': '\u21c0', 'Mfr;': '\U0001d510', 'npre;': '\u2aaf\u0338', 'oslash;': '\xf8',
'GreaterSlantEqual;': '\u2a7e', 'Ifr;': '\u2111', 'Pi;': '\u03a0', 'lrarr;': '\u21c6', 'sce;': '\u2ab0',
'NotSquareSubsetEqual;': '\u22e2', 'beta;': '\u03b2', 'tcedil;': '\u0163', 'Int;': '\u222c', 'Conint;': '\u222f',
'kappav;': '\u03f0', 'varphi;': '\u03d5', 'subsim;': '\u2ac7', 'nGt;': '\u226b\u20d2', 'blk14;': '\u2591',
'IJlig;': '\u0132', 'LeftUpVector;': '\u21bf', 'epsilon;': '\u03b5', 'ReverseElement;': '\u220b',
'angmsdaa;': '\u29a8', 'starf;': '\u2605', 'sung;': '\u266a', 'udarr;': '\u21c5',
'RightUpTeeVector;': '\u295c', 'gne;': '\u2a88', 'nlArr;': '\u21cd', 'Lcedil;': '\u013b', 'ccedil': '\xe7',
'dtri;': '\u25bf', 'nap;': '\u2249', 'neArr;': '\u21d7', 'boxVR;': '\u2560', 'verbar;': '|', 'omicron;': '\u03bf',
'precapprox;': '\u2ab7', 'Lcaron;': '\u013d', 'ugrave;': '\xf9', 'eDDot;': '\u2a77', 'NotTildeEqual;': '\u2244',
'pitchfork;': '\u22d4', 'top;': '\u22a4', 'quaternions;': '\u210d', 'imped;': '\u01b5', 'SquareSubset;': '\u228f',
'rarrbfs;': '\u2920', 'NotSquareSuperset;': '\u2290\u0338', 'boxvR;': '\u255e', 'ni;': '\u220b', 'gcirc;': '\u011d',
'ffr;': '\U0001d523', 'numsp;': '\u2007', 'notinvb;': '\u22f7', 'MinusPlus;': '\u2213', 'preceq;': '\u2aaf',
'boxH;': '\u2550', 'lsqb;': '[', 'lagran;': '\u2112', 'lnsim;': '\u22e6', 'triplus;': '\u2a39',
'angmsdah;': '\u29af', 'iff;': '\u21d4', 'LT;': '<', 'amp;': '&', 'rightrightarrows;': '\u21c9',
'operp;': '\u29b9', 'imacr;': '\u012b', 'frac38;': '\u215c', 'cent;': '\xa2', 'NotHumpEqual;': '\u224f\u0338',
'zeetrf;': '\u2128', 'DownTee;': '\u22a4', 'Scedil;': '\u015e', 'ShortLeftArrow;': '\u2190',
'Bernoullis;': '\u212c', 'prurel;': '\u22b0', 'gEl;': '\u2a8c', 'late;': '\u2aad', 'notniva;': '\u220c',
'robrk;': '\u27e7', 'alefsym;': '\u2135', 'eng;': '\u014b', 'sext;': '\u2736', 'roang;': '\u27ed',
'Tcedil;': '\u0162', 'NotLessLess;': '\u226a\u0338', 'efDot;': '\u2252', 'cscr;': '\U0001d4b8',
'dashv;': '\u22a3', 'cularr;': '\u21b6', 'numero;': '\u2116', 'caron;': '\u02c7', 'suphsub;': '\u2ad7',
'boxUr;': '\u2559', 'ncup;': '\u2a42', 'lozenge;': '\u25ca', 'lowast;': '\u2217', 'Ufr;': '\U0001d518',
'Gcedil;': '\u0122', 'curren;': '\xa4', 'Ycy;': '\u042b', 'NegativeThickSpace;': '\u200b',
'ulcorner;': '\u231c', 'sdotb;': '\u22a1', 'rdquor;': '\u201d', 'nvltrie;': '\u22b4\u20d2',
'LeftUpDownVector;': '\u2951', 'cap;': '\u2229', 'PrecedesEqual;': '\u2aaf', 'Ecirc;': '\xca',
'bscr;': '\U0001d4b7', 'UpArrow;': '\u2191', 'simg;': '\u2a9e', 'notin;': '\u2209',
'RightDownTeeVector;': '\u295d', 'disin;': '\u22f2', 'oacute;': '\xf3', 'nsube;': '\u2288',
'iquest': '\xbf', 'ltrif;': '\u25c2', 'ccups;': '\u2a4c', 'Because;': '\u2235', 'otimes;': '\u2297',
'Zopf;': '\u2124', 'supedot;': '\u2ac4', 'ee;': '\u2147', 'NotSucceedsSlantEqual;': '\u22e1', 'scap;': '\u2ab8',
'TildeEqual;': '\u2243', 'Colon;': '\u2237', 'rcaron;': '\u0159', 'GJcy;': '\u0403', 'curvearrowright;': '\u21b7',
'Barwed;': '\u2306', 'scirc;': '\u015d', 'Lopf;': '\U0001d543', 'hoarr;': '\u21ff', 'NotLess;': '\u226e',
'afr;': '\U0001d51e', 'homtht;': '\u223b', 'subsup;': '\u2ad3', 'NotRightTriangleEqual;': '\u22ed',
'raemptyv;': '\u29b3', 'ltrPar;': '\u2996', 'upsih;': '\u03d2', 'ccupssm;': '\u2a50', 'Longrightarrow;': '\u27f9',
'NotGreaterFullEqual;': '\u2267\u0338', 'bnequiv;': '\u2261\u20e5', 'lrtri;': '\u22bf', 'setminus;': '\u2216',
'supplus;': '\u2ac0', 'Rscr;': '\u211b', 'Popf;': '\u2119', 'NotSuperset;': '\u2283\u20d2',
'looparrowright;': '\u21ac', 'odot;': '\u2299', 'laquo': '\xab', 'sqcup;': '\u2294', 'hairsp;': '\u200a',
'Gamma;': '\u0393', 'lbrksld;': '\u298f', 'uplus;': '\u228e', 'equivDD;': '\u2a78', 'el;': '\u2a99',
'CHcy;': '\u0427', 'rarrsim;': '\u2974', 'ffilig;': '\ufb03', 'thorn;': '\xfe', 'ngtr;': '\u226f',
'qopf;': '\U0001d562', 'nvle;': '\u2264\u20d2', 'omid;': '\u29b6', 'vrtri;': '\u22b3', 'curvearrowleft;': '\u21b6',
'DownRightVector;': '\u21c1', 'frac58;': '\u215d', 'Uopf;': '\U0001d54c', 'AMP;': '&', 'equest;': '\u225f',
'succapprox;': '\u2ab8', 'intercal;': '\u22ba', 'rthree;': '\u22cc', 'gimel;': '\u2137', 'Uparrow;': '\u21d1',
'Ll;': '\u22d8', 'dzcy;': '\u045f', 'dfisht;': '\u297f', 'frac12': '\xbd', 'submult;': '\u2ac1', 'rang;': '\u27e9',
'Wscr;': '\U0001d4b2', 'Kcedil;': '\u0136', 'leqslant;': '\u2a7d', 'urcrop;': '\u230e', 'SOFTcy;': '\u042c',
'hamilt;': '\u210b', 'AMP': '&', 'pscr;': '\U0001d4c5', 'egs;': '\u2a96', 'supE;': '\u2ac6', 'searr;': '\u2198',
'varpi;': '\u03d6', 'nlarr;': '\u219a', 'nearrow;': '\u2197', 'ldsh;': '\u21b2', 'gesl;': '\u22db\ufe00',
'rarrfs;': '\u291e', 'LessTilde;': '\u2272', 'boxUL;': '\u255d', 'And;': '\u2a53', 'LeftDoubleBracket;': '\u27e6',
'rAtail;': '\u291c', 'eogon;': '\u0119', 'bepsi;': '\u03f6', 'vDash;': '\u22a8', 'Coproduct;': '\u2210',
'ngeqq;': '\u2267\u0338', 'supne;': '\u228b', 'REG;': '\xae', 'kopf;': '\U0001d55c', 'cire;': '\u2257',
'boxhD;': '\u2565', 'cir;': '\u25cb', 'awconint;': '\u2233', 'LowerRightArrow;': '\u2198', 'Wfr;': '\U0001d51a',
'ssmile;': '\u2323', 'ic;': '\u2063', 'boxHd;': '\u2564', 'Oopf;': '\U0001d546', 'trisb;': '\u29cd',
'longleftarrow;': '\u27f5', 'vprop;': '\u221d', 'qfr;': '\U0001d52e', 'frac34;': '\xbe',
'vsubnE;': '\u2acb\ufe00', 'odiv;': '\u2a38', 'nvinfin;': '\u29de', 'boxminus;': '\u229f', 'efr;': '\U0001d522',
'ForAll;': '\u2200', 'lsaquo;': '\u2039', 'yen': '\xa5', 'lAtail;': '\u291b', 'tint;': '\u222d', 'ltri;': '\u25c3',
'DownTeeArrow;': '\u21a7', 'Tilde;': '\u223c', 'nsce;': '\u2ab0\u0338', 'larr;': '\u2190', 'supsup;': '\u2ad6',
'frac16;': '\u2159', 'eth;': '\xf0', 'acirc;': '\xe2', 'ddarr;': '\u21ca', 'Iscr;': '\u2110',
'triangleright;': '\u25b9', 'capand;': '\u2a44', 'HARDcy;': '\u042a', 'sup;': '\u2283',
'NotSubset;': '\u2282\u20d2', 'searrow;': '\u2198', 'nsc;': '\u2281', 'sup1': '\xb9', 'sup2': '\xb2',
'Breve;': '\u02d8', 'epar;': '\u22d5', 'clubsuit;': '\u2663', 'approx;': '\u2248', 'NotGreaterLess;': '\u2279',
'mapsto;': '\u21a6', 'scsim;': '\u227f', 'notinE;': '\u22f9\u0338', 'hcirc;': '\u0125',
'rightthreetimes;': '\u22cc', 'geq;': '\u2265', 'Kappa;': '\u039a', 'vdash;': '\u22a2', 'Congruent;': '\u2261',
'boxdr;': '\u250c', 'DoubleContourIntegral;': '\u222f', 'upuparrows;': '\u21c8', 'csub;': '\u2acf',
'PrecedesSlantEqual;': '\u227c', 'boxbox;': '\u29c9', 'zdot;': '\u017c', 'sub;': '\u2282', 'andand;': '\u2a55',
'laemptyv;': '\u29b4', 'dstrok;': '\u0111', 'perp;': '\u22a5', 'HumpDownHump;': '\u224e', 'int;': '\u222b',
'RightUpDownVector;': '\u294f', 'LongRightArrow;': '\u27f6', 'hstrok;': '\u0127', 'ngt;': '\u226f',
'lbrke;': '\u298b', 'Ograve': '\xd2', 'nvrtrie;': '\u22b5\u20d2', 'leqq;': '\u2266', 'intprod;': '\u2a3c',
'centerdot;': '\xb7', 'emptyv;': '\u2205', 'infintie;': '\u29dd', 'lbbrk;': '\u2772', 'Cacute;': '\u0106',
'rscr;': '\U0001d4c7', 'otilde;': '\xf5', 'DiacriticalGrave;': '`', 'supe;': '\u2287', 'rotimes;': '\u2a35',
'die;': '\xa8', 'mapstodown;': '\u21a7', 'fjlig;': 'fj', 'SquareSuperset;': '\u2290', 'curren': '\xa4',
'GreaterLess;': '\u2277', 'smile;': '\u2323', 'NotHumpDownHump;': '\u224e\u0338', 'ucirc;': '\xfb',
'vArr;': '\u21d5', 'boxV;': '\u2551', 'Tcaron;': '\u0164', 'not;': '\xac', 'mho;': '\u2127', 'sfrown;': '\u2322',
'ZeroWidthSpace;': '\u200b', 'Acirc': '\xc2', 'gneqq;': '\u2269', 'Euml': '\xcb', 'Ccaron;': '\u010c',
'Iacute': '\xcd', 'Yopf;': '\U0001d550', 'aogon;': '\u0105', 'rationals;': '\u211a', 'Bopf;': '\U0001d539',
'uopf;': '\U0001d566', 'acE;': '\u223e\u0333', 'ETH;': '\xd0', 'intcal;': '\u22ba', 'clubs;': '\u2663',
'plussim;': '\u2a26', 'olt;': '\u29c0', 'tprime;': '\u2034', 'iogon;': '\u012f', 'diamondsuit;': '\u2666',
'ltlarr;': '\u2976', 'frac14': '\xbc', 'fscr;': '\U0001d4bb', 'aacute': '\xe1', 'dollar;': '$', 'xmap;': '\u27fc',
'vscr;': '\U0001d4cb', 'ShortRightArrow;': '\u2192', 'Square;': '\u25a1', 'blk12;': '\u2592', 'triangle;': '\u25b5',
'eacute': '\xe9', 'angrt;': '\u221f', 'circlearrowright;': '\u21bb', 'UpTee;': '\u22a5', 'copy;': '\xa9',
'scnE;': '\u2ab6', 'aelig;': '\xe6', 'doteq;': '\u2250', 'parsl;': '\u2afd', 'Ugrave': '\xd9',
'lfr;': '\U0001d529', 'gvnE;': '\u2269\ufe00', 'rarrc;': '\u2933', 'Acy;': '\u0410', 'rbrace;': '}',
'ccedil;': '\xe7', 'nwarrow;': '\u2196', 'njcy;': '\u045a', 'UpperRightArrow;': '\u2197', 'dHar;': '\u2965',
'gt': '>', 'jscr;': '\U0001d4bf', 'rarrpl;': '\u2945', 'varrho;': '\u03f1', 'Ocirc;': '\xd4', 'lowbar;': '_',
'Yacute': '\xdd', 'nsub;': '\u2284', 'lessdot;': '\u22d6', 'NotGreaterGreater;': '\u226b\u0338',
'darr;': '\u2193', 'mcomma;': '\u2a29', 'Cedilla;': '\xb8', 'vartriangleright;': '\u22b3', 'vfr;': '\U0001d533',
'rfisht;': '\u297d', 'PlusMinus;': '\xb1', 'planck;': '\u210f', 'NotPrecedesSlantEqual;': '\u22e0',
'Egrave;': '\xc8', 'rightarrowtail;': '\u21a3', 'Prime;': '\u2033', 'gtrless;': '\u2277', 'thetasym;': '\u03d1',
'bbrktbrk;': '\u23b6', 'nle;': '\u2270', 'mlcp;': '\u2adb', 'larrsim;': '\u2973', 'jcy;': '\u0439',
'drcorn;': '\u231f', 'harrw;': '\u21ad', 'updownarrow;': '\u2195', 'ubreve;': '\u016d', 'pluse;': '\u2a72',
'UpTeeArrow;': '\u21a5', 'prime;': '\u2032', 'COPY;': '\xa9', 'CirclePlus;': '\u2295', 'Longleftarrow;': '\u27f8',
'dArr;': '\u21d3', 'xcup;': '\u22c3', 'AElig': '\xc6', 'leftharpoonup;': '\u21bc', 'Uarr;': '\u219f',
'lsquo;': '\u2018', 'nVdash;': '\u22ae', 'nwnear;': '\u2927', 'gescc;': '\u2aa9', 'rdsh;': '\u21b3',
'grave;': '`', 'blk34;': '\u2593', 'LeftVector;': '\u21bc', 'uharr;': '\u21be', 'isins;': '\u22f4',
'lescc;': '\u2aa8', 'eplus;': '\u2a71', 'jmath;': '\u0237', 'kscr;': '\U0001d4c0', 'nsim;': '\u2241',
'Aacute;': '\xc1', 'NotLessEqual;': '\u2270', 'tshcy;': '\u045b', 'plusmn': '\xb1', 'ecir;': '\u2256',
'nsmid;': '\u2224', 'lesdoto;': '\u2a81', 'nvdash;': '\u22ac', 'Lt;': '\u226a', 'DownRightVectorBar;': '\u2957',
'asymp;': '\u2248', 'ggg;': '\u22d9', 'szlig;': '\xdf', 'lneqq;': '\u2268', 'loplus;': '\u2a2d',
'ExponentialE;': '\u2147', 'profline;': '\u2312', 'DDotrahd;': '\u2911', 'rarrlp;': '\u21ac', 'Scy;': '\u0421',
'le;': '\u2264', 'auml;': '\xe4', 'roarr;': '\u21fe', 'fltns;': '\u25b1', 'vellip;': '\u22ee', 'apacir;': '\u2a6f',
'circledS;': '\u24c8', 'rfloor;': '\u230b', 'Cross;': '\u2a2f', 'DoubleLeftTee;': '\u2ae4', 'subsetneqq;': '\u2acb',
'ordf': '\xaa', 'rightleftharpoons;': '\u21cc', 'fllig;': '\ufb02', 'ntilde': '\xf1', 'emsp;': '\u2003',
'iacute;': '\xed', 'xfr;': '\U0001d535', 'fflig;': '\ufb00', 'xlarr;': '\u27f5', 'leftarrow;': '\u2190',
'urcorner;': '\u231d', 'dharl;': '\u21c3', 'reals;': '\u211d', 'Re;': '\u211c', 'bemptyv;': '\u29b0',
'angrtvb;': '\u22be', 'mdash;': '\u2014', 'dotsquare;': '\u22a1', 'omacr;': '\u014d', 'Vvdash;': '\u22aa',
'pm;': '\xb1', 'OverBar;': '\u203e', 'nldr;': '\u2025', 'target;': '\u2316', 'hksearow;': '\u2925',
'ecirc': '\xea', 'swnwar;': '\u292a', 'nfr;': '\U0001d52b', 'Copf;': '\u2102', 'Rarr;': '\u21a0',
'raquo;': '\xbb', 'oline;': '\u203e', 'utilde;': '\u0169', 'hookrightarrow;': '\u21aa', 'Or;': '\u2a54',
'origof;': '\u22b6', 'Theta;': '\u0398', 'kfr;': '\U0001d528', 'Sfr;': '\U0001d516', 'aopf;': '\U0001d552',
'lArr;': '\u21d0', 'equiv;': '\u2261', 'ord;': '\u2a5d', 'Sigma;': '\u03a3', 'DScy;': '\u0405',
'PrecedesTilde;': '\u227e', 'gnsim;': '\u22e7', 'colone;': '\u2254', 'boxhU;': '\u2568', 'Ntilde;': '\xd1',
'NotNestedGreaterGreater;': '\u2aa2\u0338', 'NotSucceeds;': '\u2281', 'larrfs;': '\u291d', 'models;': '\u22a7',
'DifferentialD;': '\u2146', 'toea;': '\u2928', 'Zdot;': '\u017b', 'zscr;': '\U0001d4cf', 'gtlPar;': '\u2995',
'ii;': '\u2148', 'Zcaron;': '\u017d', 'Leftarrow;': '\u21d0', 'ohbar;': '\u29b5', 'orv;': '\u2a5b',
'OverParenthesis;': '\u23dc', 'Upsilon;': '\u03a5', 'plusdo;': '\u2214', 'nis;': '\u22fc',
'Poincareplane;': '\u210c', 'tfr;': '\U0001d531', 'DownArrow;': '\u2193', 'Sub;': '\u22d0', 'Ncedil;': '\u0145',
'Iota;': '\u0399', 'InvisibleComma;': '\u2063', 'Ucy;': '\u0423', 'lnap;': '\u2a89', 'angst;': '\xc5',
'sube;': '\u2286', 'Gopf;': '\U0001d53e', 'Succeeds;': '\u227b', 'ap;': '\u2248', 'andv;': '\u2a5a',
'eDot;': '\u2251', 'angsph;': '\u2222', 'Dscr;': '\U0001d49f', 'boxHD;': '\u2566', 'gamma;': '\u03b3',
'RightTeeVector;': '\u295b', 'straightphi;': '\u03d5', 'ohm;': '\u03a9', 'frac15;': '\u2155',
'itilde;': '\u0129', 'jfr;': '\U0001d527', 'NJcy;': '\u040a', 'notinva;': '\u2209', 'frac25;': '\u2156',
'Epsilon;': '\u0395', 'xoplus;': '\u2a01', 'zcy;': '\u0437', 'Union;': '\u22c3', 'lesssim;': '\u2272',
'trpezium;': '\u23e2', 'bcy;': '\u0431', 'succsim;': '\u227f', 'boxDr;': '\u2553', 'beth;': '\u2136',
'prap;': '\u2ab7', 'bumpeq;': '\u224f', 'NotSquareSubset;': '\u228f\u0338', 'nhpar;': '\u2af2',
'vBar;': '\u2ae8', 'rbrke;': '\u298c', 'Dot;': '\xa8', 'ENG;': '\u014a', 'and;': '\u2227',
'nsupseteqq;': '\u2ac6\u0338', 'blacklozenge;': '\u29eb', 'boxdL;': '\u2555', 'odsold;': '\u29bc',
'bigsqcup;': '\u2a06', 'trade;': '\u2122', 'half;': '\xbd', 'elsdot;': '\u2a97', 'iota;': '\u03b9',
'diam;': '\u22c4', 'block;': '\u2588', 'parsim;': '\u2af3', 'KHcy;': '\u0425', 'Lstrok;': '\u0141',
'lesseqgtr;': '\u22da', 'div;': '\xf7', 'planckh;': '\u210e', 'rfr;': '\U0001d52f', 'loang;': '\u27ec',
'lnapprox;': '\u2a89', 'triangleleft;': '\u25c3', 'nvDash;': '\u22ad', 'oint;': '\u222e', 'ecirc;': '\xea',
'Lfr;': '\U0001d50f', 'eqsim;': '\u2242', 'emacr;': '\u0113', 'DownLeftVector;': '\u21bd', 'succeq;': '\u2ab0',
'yucy;': '\u044e', 'biguplus;': '\u2a04', 'plusmn;': '\xb1', 'smashp;': '\u2a33', 'cuvee;': '\u22ce',
'prec;': '\u227a', 'chi;': '\u03c7', 'angmsdag;': '\u29ae', 'backprime;': '\u2035', 'nbump;': '\u224e\u0338',
'Mcy;': '\u041c', 'subseteq;': '\u2286', 'gtrapprox;': '\u2a86', 'lmoustache;': '\u23b0', 'circledR;': '\xae',
'gsiml;': '\u2a90', 'subseteqq;': '\u2ac5', 'rbbrk;': '\u2773', 'inodot;': '\u0131', 'fpartint;': '\u2a0d',
'barvee;': '\u22bd', 'egsdot;': '\u2a98', 'fcy;': '\u0444', 'qint;': '\u2a0c', 'Gammad;': '\u03dc',
'upharpoonright;': '\u21be', 'NotEqual;': '\u2260', 'boxVL;': '\u2563', 'dotminus;': '\u2238', 'esim;': '\u2242',
'lotimes;': '\u2a34', 'Xopf;': '\U0001d54f', 'divide;': '\xf7', 'RightTriangleEqual;': '\u22b5', 'af;': '\u2061',
'tridot;': '\u25ec', 'lvnE;': '\u2268\ufe00', 'multimap;': '\u22b8', 'rsh;': '\u21b1', 'Ascr;': '\U0001d49c',
'hkswarow;': '\u2926', 'suplarr;': '\u297b', 'VDash;': '\u22ab', 'uscr;': '\U0001d4ca', 'sccue;': '\u227d',
'SHcy;': '\u0428', 'ndash;': '\u2013', 'YUcy;': '\u042e', 'rppolint;': '\u2a12', 'Equilibrium;': '\u21cc',
'boxvL;': '\u2561', 'nlt;': '\u226e', 'Euml;': '\xcb', 'IOcy;': '\u0401', 'times;': '\xd7', 'mapstoup;': '\u21a5',
'epsi;': '\u03b5', 'xlArr;': '\u27f8', 'cacute;': '\u0107', 'capcap;': '\u2a4b', 'ntriangleleft;': '\u22ea',
'sqsupseteq;': '\u2292', 'NotCupCap;': '\u226d', 'RightUpVector;': '\u21be', 'rpar;': ')', 'Xi;': '\u039e',
'tilde;': '\u02dc', 'auml': '\xe4', 'esdot;': '\u2250', 'nleqslant;': '\u2a7d\u0338', 'rhard;': '\u21c1',
'Delta;': '\u0394', 'gsime;': '\u2a8e', 'lt': '<', 'SHCHcy;': '\u0429', 'varsupsetneq;': '\u228b\ufe00',
'LeftUpVectorBar;': '\u2958', 'simne;': '\u2246', 'lozf;': '\u29eb', 'LeftTeeArrow;': '\u21a4',
'spadesuit;': '\u2660', 'Pr;': '\u2abb', 'Eacute;': '\xc9', 'boxVh;': '\u256b', 'Dashv;': '\u2ae4',
'ccaron;': '\u010d', 'setmn;': '\u2216', 'Aring;': '\xc5', 'plustwo;': '\u2a27', 'Rcaron;': '\u0158',
'sdote;': '\u2a66', 'ifr;': '\U0001d526', 'roplus;': '\u2a2e', 'qscr;': '\U0001d4c6', 'bernou;': '\u212c',
'Dstrok;': '\u0110', 'not': '\xac', 'backepsilon;': '\u03f6', 'Otilde;': '\xd5', 'langd;': '\u2991',
'lopf;': '\U0001d55d', 'KJcy;': '\u040c', 'infin;': '\u221e', 'uacute': '\xfa', 'Fopf;': '\U0001d53d',
'backsim;': '\u223d', 'ape;': '\u224a', 'LeftArrowRightArrow;': '\u21c6', 'Wedge;': '\u22c0',
'DownLeftTeeVector;': '\u295e', 'Ffr;': '\U0001d509', 'rtrif;': '\u25b8', 'gjcy;': '\u0453', 'supmult;': '\u2ac2',
'gt;': '>', 'swarr;': '\u2199', 'amalg;': '\u2a3f', 'rho;': '\u03c1', 'triminus;': '\u2a3a', 'or;': '\u2228',
'nesim;': '\u2242\u0338', 'sime;': '\u2243', 'larrlp;': '\u21ab', 'Sum;': '\u2211', 'khcy;': '\u0445',
'wscr;': '\U0001d4cc', 'caret;': '\u2041', 'agrave': '\xe0', 'Ocirc': '\xd4', 'Iopf;': '\U0001d540',
'bump;': '\u224e', 'ratail;': '\u291a', 'simgE;': '\u2aa0', 'precneqq;': '\u2ab5', 'varpropto;': '\u221d',
'yuml;': '\xff', 'ntrianglelefteq;': '\u22ec', 'ouml': '\xf6', 'lt;': '<', 'alpha;': '\u03b1',
'gopf;': '\U0001d558', 'smt;': '\u2aaa', 'doteqdot;': '\u2251', 'LessSlantEqual;': '\u2a7d', 'mid;': '\u2223',
'simeq;': '\u2243', 'tstrok;': '\u0167', 'GreaterEqualLess;': '\u22db', 'escr;': '\u212f', 'Nfr;': '\U0001d511',
'nGg;': '\u22d9\u0338', 'simlE;': '\u2a9f', 'apid;': '\u224b', 'nvrArr;': '\u2903', 'dotplus;': '\u2214',
'cirscir;': '\u29c2', 'LeftTee;': '\u22a3', 'lnE;': '\u2268', 'topcir;': '\u2af1', 'egrave;': '\xe8',
'demptyv;': '\u29b1', 'copysr;': '\u2117', 'Vdashl;': '\u2ae6', 'yen;': '\xa5', 'gap;': '\u2a86',
'thetav;': '\u03d1', 'bumpE;': '\u2aae', 'Ncaron;': '\u0147', 'blacktriangleright;': '\u25b8',
'olcir;': '\u29be', 'UnderBracket;': '\u23b5', 'nsimeq;': '\u2244', 'downarrow;': '\u2193', 'Assign;': '\u2254',
'opar;': '\u29b7', 'diams;': '\u2666', 'jsercy;': '\u0458', 'SubsetEqual;': '\u2286', 'bkarow;': '\u290d',
'square;': '\u25a1', 'ntriangleright;': '\u22eb', 'nrarr;': '\u219b', 'Udblac;': '\u0170', 'sqsubset;': '\u228f',
'sup1;': '\xb9', 'ldrdhar;': '\u2967', 'erarr;': '\u2971', 'frown;': '\u2322', 'cemptyv;': '\u29b2',
'rtri;': '\u25b9', 'Hscr;': '\u210b', 'Cconint;': '\u2230', 'Edot;': '\u0116', 'hardcy;': '\u044a',
'there4;': '\u2234', 'frac56;': '\u215a', 'Gbreve;': '\u011e', 'ldquo;': '\u201c', 'wedgeq;': '\u2259',
'ncong;': '\u2247', 'prop;': '\u221d', 'isinsv;': '\u22f3', 'hbar;': '\u210f', 'supseteq;': '\u2287',
'Abreve;': '\u0102', 'swarrow;': '\u2199', 'lfisht;': '\u297c', 'siml;': '\u2a9d', 'equals;': '=',
'lesges;': '\u2a93', 'phiv;': '\u03d5', 'Proportion;': '\u2237', 'Dcy;': '\u0414', 'edot;': '\u0117',
'CounterClockwiseContourIntegral;': '\u2233', 'shortparallel;': '\u2225', 'frac34': '\xbe', 'solbar;': '\u233f',
'sbquo;': '\u201a', 'LessLess;': '\u2aa1', 'harrcir;': '\u2948', 'Jfr;': '\U0001d50d', 'Xscr;': '\U0001d4b3',
'NotNestedLessLess;': '\u2aa1\u0338', 'zcaron;': '\u017e', 'abreve;': '\u0103', 'nacute;': '\u0144',
'ultri;': '\u25f8', 'Bcy;': '\u0411', 'ThickSpace;': '\u205f\u200a', 'questeq;': '\u225f',
'DoubleLongLeftArrow;': '\u27f8', 'ccaps;': '\u2a4d', 'rHar;': '\u2964', 'upharpoonleft;': '\u21bf',
'iacute': '\xed', 'cong;': '\u2245', 'yopf;': '\U0001d56a', 'nvlt;': '<\u20d2', 'bopf;': '\U0001d553',
'Supset;': '\u22d1', 'Subset;': '\u22d0', 'varsubsetneqq;': '\u2acb\ufe00', 'Omega;': '\u03a9',
'lsh;': '\u21b0', 'iiiint;': '\u2a0c', 'copy': '\xa9', 'gscr;': '\u210a', 'Star;': '\u22c6', 'boxHU;': '\u2569',
'circ;': '\u02c6', 'lap;': '\u2a85', 'rlhar;': '\u21cc', 'percnt;': '%', 'NotLessSlantEqual;': '\u2a7d\u0338',
'maltese;': '\u2720', 'looparrowleft;': '\u21ab', 'LeftVectorBar;': '\u2952', 'nLeftrightarrow;': '\u21ce',
'bsolhsub;': '\u27c8', 'nsubseteqq;': '\u2ac5\u0338', 'Rfr;': '\u211c', 'lgE;': '\u2a91',
'RightTriangleBar;': '\u29d0', 'Superset;': '\u2283', 'reg;': '\xae', 'frac14;': '\xbc', 'RBarr;': '\u2910',
'realpart;': '\u211c', 'zwnj;': '\u200c', 'nrarrc;': '\u2933\u0338', 'pluscir;': '\u2a22', 'lharul;': '\u296a',
'thickapprox;': '\u2248', 'lscr;': '\U0001d4c1', 'caps;': '\u2229\ufe00', 'supsim;': '\u2ac8',
'cirfnint;': '\u2a10', 'boxvh;': '\u253c', 'therefore;': '\u2234', 'Verbar;': '\u2016', 'nsqsube;': '\u22e2',
'latail;': '\u2919', 'propto;': '\u221d', 'boxuR;': '\u2558', 'Omacr;': '\u014c', 'ges;': '\u2a7e',
'Scaron;': '\u0160', 'oslash': '\xf8', 'oast;': '\u229b', 'phi;': '\u03c6', 'cuwed;': '\u22cf',
'oplus;': '\u2295', 'ncedil;': '\u0146', 'scnap;': '\u2aba', 'Iogon;': '\u012e', 'bne;': '=\u20e5',
'Oslash;': '\xd8', 'xuplus;': '\u2a04', 'precnsim;': '\u22e8', 'bigtriangledown;': '\u25bd', 'iprod;': '\u2a3c',
'ange;': '\u29a4', 'RightTee;': '\u22a2', 'tosa;': '\u2929', 'Iukcy;': '\u0406', 'leftrightarrows;': '\u21c6',
'DoubleLeftArrow;': '\u21d0', 'COPY': '\xa9', 'frac13;': '\u2153', 'middot': '\xb7', 'pr;': '\u227a',
'rhov;': '\u03f1', 'Qopf;': '\u211a', 'weierp;': '\u2118', 'ofr;': '\U0001d52c', 'lrhard;': '\u296d',
'commat;': '@', 'nesear;': '\u2928', 'sopf;': '\U0001d564', 'raquo': '\xbb', 'malt;': '\u2720',
'OElig;': '\u0152', 'Uscr;': '\U0001d4b0', 'eqslantless;': '\u2a95', 'LeftTriangleEqual;': '\u22b4',
'oacute': '\xf3', 'andslope;': '\u2a58', 'yfr;': '\U0001d536', 'nsup;': '\u2285', 'NotElement;': '\u2209',
'angmsdaf;': '\u29ad', 'nsccue;': '\u22e1', 'ge;': '\u2265', 'fallingdotseq;': '\u2252', 'rbarr;': '\u290d',
'DoubleLongLeftRightArrow;': '\u27fa', 'uparrow;': '\u2191', 'orarr;': '\u21bb', 'Rcy;': '\u0420',
'acute;': '\xb4', 'NewLine;': '\n', 'lmoust;': '\u23b0', 'NegativeMediumSpace;': '\u200b', 'Nacute;': '\u0143',
'aelig': '\xe6', 'prcue;': '\u227c', 'ensp;': '\u2002', 'utdot;': '\u22f0', 'napos;': '\u0149',
'DoubleLongRightArrow;': '\u27f9', 'Vfr;': '\U0001d519', 'xutri;': '\u25b3', 'awint;': '\u2a11',
'leftrightsquigarrow;': '\u21ad', 'plusacir;': '\u2a23', 'FilledVerySmallSquare;': '\u25aa', 'Mscr;': '\u2133',
'leftrightharpoons;': '\u21cb', 'sqcups;': '\u2294\ufe00', 'LJcy;': '\u0409', 'circleddash;': '\u229d',
'NoBreak;': '\u2060', 'nlsim;': '\u2274', 'Uogon;': '\u0172', 'NotRightTriangleBar;': '\u29d0\u0338',
'Ecy;': '\u042d', 'sdot;': '\u22c5', 'smeparsl;': '\u29e4', 'niv;': '\u220b', 'kcedil;': '\u0137',
'xrarr;': '\u27f6', 'isindot;': '\u22f5', 'xodot;': '\u2a00', 'gtdot;': '\u22d7', 'natural;': '\u266e',
'eqvparsl;': '\u29e5', 'gnap;': '\u2a8a', 'Psi;': '\u03a8', 'Rho;': '\u03a1', 'micro;': '\xb5',
'cylcty;': '\u232d', 'gesles;': '\u2a94', 'uHar;': '\u2963', 'CircleTimes;': '\u2297', 'sqsub;': '\u228f',
'ldrushar;': '\u294b', 'bsol;': '\\', 'rcedil;': '\u0157', 'nprec;': '\u2280', 'vltri;': '\u22b2',
'atilde;': '\xe3', 'prsim;': '\u227e', 'primes;': '\u2119', 'Omicron;': '\u039f', 'ocirc;': '\xf4',
'iiint;': '\u222d', 'quest;': '?', 'daleth;': '\u2138', 'nbsp': '\xa0', 'nwArr;': '\u21d6', 'gammad;': '\u03dd',
'heartsuit;': '\u2665', 'wedbar;': '\u2a5f', 'OverBrace;': '\u23de', 'spar;': '\u2225', 'brvbar': '\xa6',
'blacktriangleleft;': '\u25c2', 'lopar;': '\u2985', 'xwedge;': '\u22c0', 'iexcl;': '\xa1', 'boxul;': '\u2518',
'Imacr;': '\u012a', 'ominus;': '\u2296', 'eopf;': '\U0001d556', 'DotDot;': '\u20dc', 'Scirc;': '\u015c',
'succnsim;': '\u22e9', 'sigmaf;': '\u03c2', 'ReverseEquilibrium;': '\u21cb', 'DiacriticalDot;': '\u02d9',
'AElig;': '\xc6', 'zigrarr;': '\u21dd', 'NegativeThinSpace;': '\u200b', 'approxeq;': '\u224a', 'Gcy;': '\u0413',
'Vert;': '\u2016', 'NotSquareSupersetEqual;': '\u22e3', 'srarr;': '\u2192', 'rtrie;': '\u22b5',
'VeryThinSpace;': '\u200a', 'RightDoubleBracket;': '\u27e7', 'dfr;': '\U0001d521', 'Eogon;': '\u0118',
'Cscr;': '\U0001d49e', 'gnE;': '\u2269', 'nparallel;': '\u2226', 'lsime;': '\u2a8d', 'lceil;': '\u2308',
'ijlig;': '\u0133', 'RightCeiling;': '\u2309', 'Icy;': '\u0418', 'yuml': '\xff', 'exist;': '\u2203',
'DiacriticalAcute;': '\xb4', 'boxVr;': '\u255f', 'mscr;': '\U0001d4c2', 'NotGreaterSlantEqual;': '\u2a7e\u0338',
'leftrightarrow;': '\u2194', 'Wopf;': '\U0001d54e', 'supset;': '\u2283', 'DownArrowUpArrow;': '\u21f5',
'glj;': '\u2aa4', 'Colone;': '\u2a74', 'prnsim;': '\u22e8', 'Zfr;': '\u2128', 'lbrkslu;': '\u298d',
'scedil;': '\u015f', 'Dcaron;': '\u010e', 'coloneq;': '\u2254', 'CapitalDifferentialD;': '\u2145',
'nshortmid;': '\u2224', 'trianglelefteq;': '\u22b4', 'rarrb;': '\u21e5', 'ssetmn;': '\u2216', 'ufr;': '\U0001d532',
'Acirc;': '\xc2', 'LeftRightArrow;': '\u2194', 'varr;': '\u2195', 'eth': '\xf0', 'varsupsetneqq;': '\u2acc\ufe00',
'HilbertSpace;': '\u210b', 'diamond;': '\u22c4', 'npart;': '\u2202\u0338', 'Cfr;': '\u212d', 'slarr;': '\u2190',
'cwconint;': '\u2232', 'ncaron;': '\u0148', 'theta;': '\u03b8', 'NotSupersetEqual;': '\u2289',
'nsubset;': '\u2282\u20d2', 'EmptySmallSquare;': '\u25fb', 'Tstrok;': '\u0166', 'lg;': '\u2276', 'urcorn;': '\u231d',
'acy;': '\u0430', 'DoubleVerticalBar;': '\u2225', 'Phi;': '\u03a6', 'imof;': '\u22b7', 'angle;': '\u2220',
'supdot;': '\u2abe', 'timesb;': '\u22a0', 'bfr;': '\U0001d51f', 'dcaron;': '\u010f', 'Aacute': '\xc1',
'cent': '\xa2', 'rdquo;': '\u201d', 'jopf;': '\U0001d55b', 'sup2;': '\xb2', 'triangledown;': '\u25bf',
'lHar;': '\u2962', 'leftarrowtail;': '\u21a2', 'HorizontalLine;': '\u2500', 'duarr;': '\u21f5', 'cupcap;': '\u2a46',
'euml': '\xeb', 'shy': '\xad', 'curarr;': '\u21b7', 'larrhk;': '\u21a9', 'Kfr;': '\U0001d50e', 'olarr;': '\u21ba',
'nsupE;': '\u2ac6\u0338', 'colon;': ':', 'Eta;': '\u0397', 'dsol;': '\u29f6', 'LessGreater;': '\u2276',
'dblac;': '\u02dd', 'vopf;': '\U0001d567', 'incare;': '\u2105', 'wreath;': '\u2240', 'NotSucceedsEqual;': '\u2ab0\u0338',
'lcaron;': '\u013e', 'conint;': '\u222e', 'napid;': '\u224b\u0338', 'Equal;': '\u2a75', 'dscr;': '\U0001d4b9',
'Itilde;': '\u0128', 'iiota;': '\u2129', 'UpDownArrow;': '\u2195', 'Vcy;': '\u0412', 'lobrk;': '\u27e6',
'thksim;': '\u223c', 'Ucirc;': '\xdb', 'Rcedil;': '\u0156', 'tritime;': '\u2a3b', 'boxh;': '\u2500',
'Fouriertrf;': '\u2131', 'realine;': '\u211b', 'rightleftarrows;': '\u21c4', 'wp;': '\u2118', 'thkap;': '\u2248',
'sqsupset;': '\u2290', 'CloseCurlyQuote;': '\u2019', 'SquareSubsetEqual;': '\u2291', 'Iuml;': '\xcf',
'sqsup;': '\u2290', 'NotDoubleVerticalBar;': '\u2226', 'ugrave': '\xf9', 'acd;': '\u223f', 'oscr;': '\u2134',
'Qfr;': '\U0001d514', 'ncap;': '\u2a43', 'Vdash;': '\u22a9', 'nrtrie;': '\u22ed', 'lesdot;': '\u2a7f',
'nltri;': '\u22ea', 'ncy;': '\u043d', 'Hacek;': '\u02c7', 'radic;': '\u221a', 'frac78;': '\u215e',
'NotReverseElement;': '\u220c', 'Therefore;': '\u2234', 'lates;': '\u2aad\ufe00', 'varepsilon;': '\u03f5',
'ruluhar;': '\u2968', 'rsaquo;': '\u203a', 'Tscr;': '\U0001d4af', 'subsetneq;': '\u228a', 'UnderBrace;': '\u23df',
'Uring;': '\u016e', 'acirc': '\xe2', 'check;': '\u2713', 'rsquor;': '\u2019', 'tbrk;': '\u23b4',
'NotLessTilde;': '\u2274', 'vsupne;': '\u228b\ufe00', 'wfr;': '\U0001d534', 'hellip;': '\u2026', 'nless;': '\u226e',
'Yuml;': '\u0178', 'FilledSmallSquare;': '\u25fc', 'SucceedsEqual;': '\u2ab0', 'frac23;': '\u2154',
'OverBracket;': '\u23b4', 'SupersetEqual;': '\u2287', 'gesdot;': '\u2a80', 'excl;': '!', 'UpArrowBar;': '\u2912',
'barwed;': '\u2305', 'barwedge;': '\u2305', 'notinvc;': '\u22f6', 'uArr;': '\u21d1', 'lthree;': '\u22cb',
'risingdotseq;': '\u2253', 'Mopf;': '\U0001d544', 'yacute;': '\xfd', 'otimesas;': '\u2a36', 'capcup;': '\u2a47',
'ofcir;': '\u29bf', 'Upsi;': '\u03d2', 'Ecaron;': '\u011a', 'Qscr;': '\U0001d4ac', 'hookleftarrow;': '\u21a9',
'Ograve;': '\xd2', 'precnapprox;': '\u2ab9', 'Uarrocir;': '\u2949', 'part;': '\u2202', 'subsub;': '\u2ad5',
'lmidot;': '\u0140', 'DJcy;': '\u0402', 'nexists;': '\u2204', 'NotEqualTilde;': '\u2242\u0338',
'profalar;': '\u232e', 'sum;': '\u2211', 'Precedes;': '\u227a', 'Ofr;': '\U0001d512', 'fopf;': '\U0001d557',
'iecy;': '\u0435', 'ShortUpArrow;': '\u2191', 'nparsl;': '\u2afd\u20e5', 'boxUR;': '\u255a',
'exponentiale;': '\u2147', 'upsilon;': '\u03c5', 'Jopf;': '\U0001d541', 'VerticalSeparator;': '\u2758',
'Dfr;': '\U0001d507', 'NonBreakingSpace;': '\xa0', 'bottom;': '\u22a5', 'orslope;': '\u2a57', 'boxDL;': '\u2557',
'bigcap;': '\u22c2', 'Vbar;': '\u2aeb', 'pound;': '\xa3', 'boxvr;': '\u251c', 'Cup;': '\u22d3',
'bigtriangleup;': '\u25b3', 'RightAngleBracket;': '\u27e9', 'lesg;': '\u22da\ufe00', 'RightDownVector;': '\u21c2',
'Gfr;': '\U0001d50a', 'shy;': '\xad', 'supnE;': '\u2acc', 'cirE;': '\u29c3', 'angmsdae;': '\u29ac',
'Bumpeq;': '\u224e', 'delta;': '\u03b4', 'thinsp;': '\u2009', 'EmptyVerySmallSquare;': '\u25ab',
'leftleftarrows;': '\u21c7', 'les;': '\u2a7d', 'ltcc;': '\u2aa6', 'TildeFullEqual;': '\u2245', 'iocy;': '\u0451',
'supsetneqq;': '\u2acc', 'rharul;': '\u296c', 'hArr;': '\u21d4', 'amp': '&', 'Cdot;': '\u010a', 'rbrack;': ']',
'nspar;': '\u2226', 'pcy;': '\u043f', 'NotSucceedsTilde;': '\u227f\u0338', 'acute': '\xb4', 'dlcrop;': '\u230d',
'subdot;': '\u2abd', 'UnionPlus;': '\u228e', 'mapstoleft;': '\u21a4', 'DoubleRightTee;': '\u22a8',
'sigmav;': '\u03c2', 'sfr;': '\U0001d530', 'Igrave': '\xcc', 'euro;': '\u20ac', 'complement;': '\u2201',
'profsurf;': '\u2313', 'nabla;': '\u2207', 'para;': '\xb6', 'Dopf;': '\U0001d53b', 'cdot;': '\u010b',
'sim;': '\u223c', 'popf;': '\U0001d561', 'ImaginaryI;': '\u2148', 'notni;': '\u220c', 'RightArrowBar;': '\u21e5',
'intlarhk;': '\u2a17', 'gtcir;': '\u2a7a', 'llcorner;': '\u231e', 'Bfr;': '\U0001d505', 'Rang;': '\u27eb',
'ddagger;': '\u2021', 'vBarv;': '\u2ae9', 'forkv;': '\u2ad9', 'angmsd;': '\u2221', 'ouml;': '\xf6',
'nvgt;': '>\u20d2', 'Dagger;': '\u2021', 'lharu;': '\u21bc', 'Exists;': '\u2203', 'LeftTriangleBar;': '\u29cf',
'ratio;': '\u2236', 'TildeTilde;': '\u2248', 'minusb;': '\u229f', 'race;': '\u223d\u0331', 'rAarr;': '\u21db',
'bigoplus;': '\u2a01', 'rangd;': '\u2992', 'micro': '\xb5', 'osol;': '\u2298', 'strns;': '\xaf',
'Longleftrightarrow;': '\u27fa', 'boxUl;': '\u255c', 'Sc;': '\u2abc', 'ocirc': '\xf4', 'ac;': '\u223e',
'nsubE;': '\u2ac5\u0338', 'DotEqual;': '\u2250', 'zopf;': '\U0001d56b', 'llarr;': '\u21c7', 'permil;': '\u2030',
'Topf;': '\U0001d54b', 'UpperLeftArrow;': '\u2196', 'ulcorn;': '\u231c', 'curlyeqsucc;': '\u22df',
'aleph;': '\u2135', 'image;': '\u2111', 'igrave': '\xec', 'NestedLessLess;': '\u226a', 'LongLeftRightArrow;': '\u27f7',
'sqsupe;': '\u2292', 'midast;': '*', 'dwangle;': '\u29a6', 'uring;': '\u016f', 'becaus;': '\u2235',
'GreaterFullEqual;': '\u2267', 'dd;': '\u2146', 'kcy;': '\u043a', 'Laplacetrf;': '\u2112', 'marker;': '\u25ae',
'simrarr;': '\u2972', 'Agrave;': '\xc0', 'bNot;': '\u2aed', 'ocir;': '\u229a', 'supsetneq;': '\u228b',
'fork;': '\u22d4', 'pi;': '\u03c0', 'topbot;': '\u2336', 'xharr;': '\u27f7', 'Jukcy;': '\u0404',
'naturals;': '\u2115', 'csup;': '\u2ad0', 'ltimes;': '\u22c9', 'mcy;': '\u043c', 'lessgtr;': '\u2276',
'uuml': '\xfc', 'iquest;': '\xbf', 'boxhd;': '\u252c', 'nsupe;': '\u2289', 'leftharpoondown;': '\u21bd',
'Lacute;': '\u0139', 'Emacr;': '\u0112', 'Vee;': '\u22c1', 'cupcup;': '\u2a4a', 'backsimeq;': '\u22cd',
'dlcorn;': '\u231e', 'bprime;': '\u2035', 'HumpEqual;': '\u224f', 'simdot;': '\u2a6a', 'oelig;': '\u0153',
'ntilde;': '\xf1', 'xdtri;': '\u25bd', 'hscr;': '\U0001d4bd', 'cups;': '\u222a\ufe00', 'pre;': '\u2aaf',
'yscr;': '\U0001d4ce', 'boxplus;': '\u229e', 'Jcirc;': '\u0134', 'suphsol;': '\u27c9', 'Nopf;': '\u2115',
'DZcy;': '\u040f', 'flat;': '\u266d', 'ldquor;': '\u201e', 'Leftrightarrow;': '\u21d4', 'veebar;': '\u22bb',
'Rrightarrow;': '\u21db', 'compfn;': '\u2218', 'succ;': '\u227b', 'NegativeVeryThinSpace;': '\u200b',
'cupbrcap;': '\u2a48', 'notindot;': '\u22f5\u0338', 'supseteqq;': '\u2ac6', 'plankv;': '\u210f', 'ordm': '\xba',
'nsupseteq;': '\u2289', 'sacute;': '\u015b', 'ordm;': '\xba', 'dtdot;': '\u22f1', 'NotSubsetEqual;': '\u2288',
'subedot;': '\u2ac3', 'curlywedge;': '\u22cf', 'GreaterGreater;': '\u2aa2', 'dbkarow;': '\u290f',
'quatint;': '\u2a16', 'ContourIntegral;': '\u222e', 'LeftTriangle;': '\u22b2', 'lrcorner;': '\u231f',
'RightVectorBar;': '\u2953', 'nequiv;': '\u2262', 'ltrie;': '\u22b4', 'divonx;': '\u22c7', 'topf;': '\U0001d565',
'cuepr;': '\u22de', 'LeftRightVector;': '\u294e', 'rtimes;': '\u22ca', 'LeftCeiling;': '\u2308', 'iukcy;': '\u0456',
'ordf;': '\xaa', 'OpenCurlyQuote;': '\u2018', 'fnof;': '\u0192', 'thorn': '\xfe', 'star;': '\u2606',
'lne;': '\u2a87', 'hearts;': '\u2665', 'dash;': '\u2010', 'vartriangleleft;': '\u22b2', 'shcy;': '\u0448',
'hfr;': '\U0001d525', 'uuarr;': '\u21c8', 'isin;': '\u2208', 'tcaron;': '\u0165', 'bigodot;': '\u2a00',
'lurdshar;': '\u294a', 'ucy;': '\u0443', 'nmid;': '\u2224', 'semi;': ';', 'laquo;': '\xab', 'bullet;': '\u2022',
'hslash;': '\u210f', 'gtrsim;': '\u2273', 'InvisibleTimes;': '\u2062', 'cfr;': '\U0001d520', 'tscr;': '\U0001d4c9',
'nltrie;': '\u22ec', 'succcurlyeq;': '\u227d', 'ogon;': '\u02db', 'NotExists;': '\u2204', 'kgreen;': '\u0138',
'seArr;': '\u21d8', 'Product;': '\u220f', 'sqcap;': '\u2293', 'rx;': '\u211e', 'nLeftarrow;': '\u21cd',
'Updownarrow;': '\u21d5', 'Ecirc': '\xca', 'Lcy;': '\u041b', 'icirc;': '\xee', 'bigstar;': '\u2605',
'gtcc;': '\u2aa7', 'olcross;': '\u29bb', 'in;': '\u2208', 'VerticalTilde;': '\u2240', 'filig;': '\ufb01',
'rightsquigarrow;': '\u219d', 'pfr;': '\U0001d52d', 'Intersection;': '\u22c2', 'Not;': '\u2aec', 'rsqb;': ']',
'Ncy;': '\u041d', 'period;': '.', 'xhArr;': '\u27fa', 'phmmat;': '\u2133', 'NotCongruent;': '\u2262',
'boxdR;': '\u2552', 'kjcy;': '\u045c', 'bigwedge;': '\u22c0', 'NotGreaterTilde;': '\u2275', 'nsqsupe;': '\u22e3',
'aring;': '\xe5', 'prnE;': '\u2ab5', 'LessFullEqual;': '\u2266', 'eqcirc;': '\u2256', 'downharpoonleft;': '\u21c3',
'rlarr;': '\u21c4', 'smallsetminus;': '\u2216', 'omega;': '\u03c9', 'mldr;': '\u2026', 'vzigzag;': '\u299a',
'nleqq;': '\u2266\u0338', 'ulcrop;': '\u230f', 'straightepsilon;': '\u03f5', 'Auml;': '\xc4', 'LongLeftArrow;': '\u27f5'}
def substitute_entity(match):
ent = match.group(2) + match.group(3)
res = ""
        while ent not in html5 and not ent.endswith(";") and match.group(1) != "#":
            # '&' sometimes appears as a literal argument inside URLs in the
            # data; peel trailing characters off until a known entity remains.
            try:
                res = ent[-1] + res
                ent = ent[:-1]
            except IndexError:
                break
if match.group(1) == "#":
ent = unichr(int(ent.replace(";","")))
return ent.encode('utf-8')
else:
cp = html5.get(ent)
if cp:
return cp.decode("unicode-escape").encode('utf-8') + res
else:
return match.group()
return entity_re.subn(substitute_entity, data)[0]
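# Illustrative usage only: `entity_re` is defined earlier in the original
# file and is assumed to capture (optional '#', entity name, optional ';').
#   >>> entity_re.subn(substitute_entity, "Tom &amp; Jerry &lt; R&D")[0]
#   'Tom & Jerry < R&D'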
| gpl-3.0 | -1,300,616,030,855,172,400 | 83.053549 | 134 | 0.492533 | false | 2.658234 | false | false | false |
CyrilWaechter/pyRevitMEP | pyRevitMEP.tab/Create.panel/BatchCreation.pulldown/BatchDependentViewCreation.pushbutton/script.py | 1 | 2538 | # coding: utf8
import rpw
# noinspection PyUnresolvedReferences
from rpw import revit, DB
from pyrevit.forms import WPFWindow
from pyrevit import script
from pyrevitmep.workset import Workset
# noinspection PyUnresolvedReferences
from System.Collections.ObjectModel import ObservableCollection
__doc__ = "Batch create dependent views corresponding to existing Scope Boxes for selected views"
__title__ = "DependentViews"
__author__ = "Cyril Waechter"
__context__ = "selection"
doc = rpw.revit.doc
logger = script.get_logger()
class Gui(WPFWindow):
def __init__(self, xaml_file_name):
WPFWindow.__init__(self, xaml_file_name)
volume_of_interest = DB.FilteredElementCollector(doc).OfCategory(DB.BuiltInCategory.OST_VolumeOfInterest)
self.data_grid_content = ObservableCollection[object](volume_of_interest)
self.datagrid.ItemsSource = self.data_grid_content
image_dict = {
"plus_img": "icons8-plus-32.png",
"minus_img": "icons8-minus-32.png",
"import_img": "icons8-import-32.png",
"ok_img": "icons8-checkmark-32.png"
}
for k, v in image_dict.items():
self.set_image_source(getattr(self, k), v)
# noinspection PyUnusedLocal
def ok_click(self, sender, e):
for view_id in rpw.uidoc.Selection.GetElementIds():
view = doc.GetElement(view_id)
try:
with rpw.db.Transaction("BatchCreateDependentViews"):
for volume_of_interest in self.data_grid_content:
new_view_id = view.Duplicate(DB.ViewDuplicateOption.AsDependent)
new_view = doc.GetElement(new_view_id)
parameter = new_view.get_Parameter(DB.BuiltInParameter.VIEWER_VOLUME_OF_INTEREST_CROP)
parameter.Set(volume_of_interest.Id)
except AttributeError as e:
print("{} doesn't seem to be a view".format(view))
logger.debug("{}".format(e.message))
# noinspection PyUnusedLocal
def load_from_file_click(self, sender, e):
for workset in Workset.read_from_txt():
self.data_grid_content.Add(workset)
# noinspection PyUnusedLocal
def add(self, sender, e):
self.data_grid_content.Add(Workset(""))
# noinspection PyUnusedLocal
def remove(self, sender, e):
for item in list(self.datagrid.SelectedItems):
self.data_grid_content.Remove(item)
gui = Gui("WPFWindow.xaml")
gui.ShowDialog()
| gpl-3.0 | 2,052,743,929,366,040,600 | 35.782609 | 113 | 0.644208 | false | 3.636103 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/topology_resource.py | 1 | 1570 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyResource(Model):
"""The network resource topology information for the given resource group.
:param name: Name of the resource.
:type name: str
:param id: ID of the resource.
:type id: str
:param location: Resource location.
:type location: str
:param associations: Holds the associations the resource has with other
resources in the resource group.
:type associations:
list[~azure.mgmt.network.v2017_06_01.models.TopologyAssociation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
}
def __init__(self, **kwargs):
super(TopologyResource, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.id = kwargs.get('id', None)
self.location = kwargs.get('location', None)
self.associations = kwargs.get('associations', None)
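# Illustrative construction only; the values below are placeholders, not real
# Azure resource identifiers:
#   resource = TopologyResource(
#       name='web-vm',
#       id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/...',
#       location='westus',
#       associations=[])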
| mit | -4,338,701,136,379,454,000 | 36.380952 | 81 | 0.591083 | false | 4.361111 | false | false | false |
eliben/code-for-blog | 2018/type-inference/parser.py | 1 | 7046 | # EBNF specification for micro-ML. { x } means zero or more repetitions of x.
#
# The top-level is decl.
#
# decl: ID { ID } '=' expr
#
# expr: INT
# | bool
# | ID
# | ID '(' { expr ',' } ')'
# | '(' expr ')'
# | expr op expr
# | 'if' expr 'then' expr 'else' expr
# | 'lambda' { ID } '->' expr
#
# op: + | * | - | == | > | >= | <= | < | !=
# bool: 'true' | 'false'
#
# ID: identifier
# INT: an integer
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
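#
# For illustration, a decl such as
#
#   square x = x * x
#
# parses to:
#
#   Decl('square', LambdaExpr(['x'],
#        OpExpr('*', Identifier('x'), Identifier('x'))))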
import ast
import lexer
class ParseError(Exception):
pass
class Parser:
"""Parser for micro-ML.
The only public method here is parse_decl that parses a 'decl' from a
string. Usage:
p = Parser()
decl = p.parse_decl(<some micro-ML code>)
# decl is now an ast.Decl node
parse_decl() can be called multiple times with the same parser to parse
multiple decls (state is wiped out between calls).
"""
def __init__(self):
lex_rules = (
('if', 'IF'),
('then', 'THEN'),
('else', 'ELSE'),
('true', 'TRUE'),
('false', 'FALSE'),
('lambda', 'LAMBDA'),
('\d+', 'INT'),
('->', 'ARROW'),
('!=', '!='),
('==', '=='),
('>=', '>='),
('<=', '<='),
('<', '<'),
('>', '>'),
('\+', '+'),
('\-', '-'),
('\*', '*'),
('\(', '('),
('\)', ')'),
('=', '='),
(',', ','),
('[a-zA-Z_]\w*', 'ID'),
)
self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
self.cur_token = None
self.operators = {'!=', '==', '>=', '<=', '<', '>', '+', '-', '*'}
def parse_decl(self, text):
"""Parse declaration given in text and return an AST node for it."""
self.lexer.input(text)
self._get_next_token()
decl = self._decl()
        if self.cur_token.type is not None:
self._error('Unexpected token "{}" (at #{})'.format(
self.cur_token.val, self.cur_token.pos))
return decl
def _error(self, msg):
raise ParseError(msg)
def _get_next_token(self):
"""Advances the parser's internal lexer to the next token.
This method doesn't return anything; it assigns self.cur_token to the
next token in the input stream.
"""
try:
self.cur_token = self.lexer.token()
if self.cur_token is None:
self.cur_token = lexer.Token(None, None, None)
except lexer.LexerError as e:
self._error('Lexer error at position {}: {}'.format(e.pos, e))
def _match(self, type):
""" The 'match' primitive of RD parsers.
* Verifies that the current token is of the given type
* Returns the value of the current token
* Reads in the next token
"""
if self.cur_token.type == type:
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format(type,
self.cur_token.type))
def _decl(self):
name = self._match('ID')
argnames = []
# If we have arguments, collect them. Only IDs allowed here.
while self.cur_token.type == 'ID':
argnames.append(self.cur_token.val)
self._get_next_token()
self._match('=')
expr = self._expr()
if len(argnames) > 0:
return ast.Decl(name, ast.LambdaExpr(argnames, expr))
else:
return ast.Decl(name, expr)
def _expr(self):
"""Parse an expr of the form:
expr op expr
We only allow a single operator between expressions. Additional
operators should be nested using parens, e.g. x + (y * z)
"""
node = self._expr_component()
if self.cur_token.type in self.operators:
op = self.cur_token.type
self._get_next_token()
rhs = self._expr_component()
return ast.OpExpr(op, node, rhs)
else:
return node
def _expr_component(self):
"""Parse an expr component (components can be separated by an operator).
"""
curtok = self.cur_token
if self.cur_token.type == 'INT':
self._get_next_token()
return ast.IntConstant(curtok.val)
elif self.cur_token.type in ('FALSE', 'TRUE'):
self._get_next_token()
return ast.BoolConstant(curtok.val)
elif self.cur_token.type == 'ID':
self._get_next_token()
if self.cur_token.type == '(':
# ID followed by '(' is function application
return self._app(curtok.val)
else:
return ast.Identifier(curtok.val)
elif self.cur_token.type == '(':
self._get_next_token()
expr = self._expr()
self._match(')')
return expr
elif self.cur_token.type == 'IF':
return self._ifexpr()
elif self.cur_token.type == 'LAMBDA':
return self._lambda()
else:
self._error("Don't support {} yet".format(curtok.type))
def _ifexpr(self):
self._match('IF')
ifexpr = self._expr()
self._match('THEN')
thenexpr = self._expr()
self._match('ELSE')
elseexpr = self._expr()
return ast.IfExpr(ifexpr, thenexpr, elseexpr)
def _lambda(self):
self._match('LAMBDA')
argnames = []
while self.cur_token.type == 'ID':
argnames.append(self.cur_token.val)
self._get_next_token()
if len(argnames) < 1:
self._error('Expected non-empty argument list for lambda')
self._match('ARROW')
expr = self._expr()
return ast.LambdaExpr(argnames, expr)
def _app(self, name):
self._match('(')
args = []
while self.cur_token.type != ')':
args.append(self._expr())
if self.cur_token.type == ',':
self._get_next_token()
elif self.cur_token.type == ')':
pass # the loop will break
else:
self._error("Unexpected {} in application".format(
self.cur_token.val))
self._match(')')
return ast.AppExpr(ast.Identifier(name), args)
| unlicense | -3,593,171,190,825,320,400 | 31.925234 | 80 | 0.453591 | false | 4.106061 | false | false | false |
nsi-iff/nsi_site | apps/news/migrations/0002_auto.py | 1 | 6169 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field project on 'New'
db.delete_table('news_new_project')
# Adding M2M table for field projects_relateds on 'New'
db.create_table('news_new_projects_relateds', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('new', models.ForeignKey(orm['news.new'], null=False)),
('project', models.ForeignKey(orm['projects.project'], null=False))
))
db.create_unique('news_new_projects_relateds', ['new_id', 'project_id'])
def backwards(self, orm):
# Adding M2M table for field project on 'New'
db.create_table('news_new_project', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('new', models.ForeignKey(orm['news.new'], null=False)),
('project', models.ForeignKey(orm['projects.project'], null=False))
))
db.create_unique('news_new_project', ['new_id', 'project_id'])
# Removing M2M table for field projects_relateds on 'New'
db.delete_table('news_new_projects_relateds')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'news.new': {
'Meta': {'object_name': 'New'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'projects_relateds': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sponsor': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['news']
| mit | 7,173,289,495,667,929,000 | 61.94898 | 182 | 0.556006 | false | 3.734262 | false | false | false |
fifengine/fifengine | tests/fife_test/scripts/test.py | 1 | 3991 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import print_function
from builtins import object
import os
class TestManager(object):
def __init__(self, engine, application, settings):
self._engine = engine
self._application = application
self._settings = settings
self._running = None
self._testdir = "tests"
self._tests = []
files = []
for f in os.listdir(self._testdir):
path = os.path.join(self._testdir, f)
if os.path.isfile(path) and os.path.splitext(f)[1] == ".py" and f != "__init__.py":
files.append(os.path.splitext(f)[0])
for f in files:
importtest = self._settings.get("Tests", f, False)
if importtest:
try:
print("Importing test plugin: ", f)
exec("import " + self._testdir + "." + f)
test = eval(self._testdir + "." + f + "." + f + "()")
if isinstance(test, Test) is False:
print(f + " is not an instance of Test!")
else:
self._tests.append(test)
except BaseException as error:
print("Error: ", error)
print("Invalid test: ", f)
else:
print("Not importing test: ", f)
self._settings.set("Tests", f, importtest)
def _getRunningTest(self):
return self._running
def runTest(self, test):
if test in self._tests and not self._running:
self._running = test
self._running.create(self._engine, self._application)
self._running.run()
def stopTest(self):
if self._running:
if self._running.isRunning():
self._running.stop()
self._running.destroy()
self._running = None
def resetTest(self):
if self._running:
if self._running.isRunning():
self._running.stop()
self._running.destroy()
self._running.create(self._engine, self._application)
self._running.run()
def _getTests(self):
return self._tests
def _getTestNameList(self):
namelist = []
for t in self._tests:
namelist.append(t.getName())
return namelist
tests = property(_getTests)
testnames = property(_getTestNameList)
runningtest = property(_getRunningTest)
class Test(object):
""" The base calss for all tests. All tests must override these functions! """
def create(self, engine, application):
raise NotImplementedError("Test has not implemented the init() function!")
def destroy(self):
raise NotImplementedError("Test has not implemented the destroy() function!")
def run(self):
raise NotImplementedError("Test has not implemented the run() function!")
def stop(self):
raise NotImplementedError("Test has not implemented the stop() function!")
def isRunning(self):
raise NotImplementedError("Test has not implemented the isRunning() function!")
def getName(self):
raise NotImplementedError("Test has not implemented the getName() function!")
def getAuthor(self):
return "unknown"
def getDescription(self):
return "none"
def getHelp(self):
return "You're on your own for this one!"
def onConsoleCommand(self, cmd):
return cmd[0] + ": not found."
def pump(self):
pass
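
# A minimal sketch (not part of the shipped plugin set) showing the contract
# TestManager expects from a plugin module in the "tests" directory; the file
# name must match the class name. All names below are hypothetical.
class ExampleTest(Test):
	def __init__(self):
		self._running = False
	def create(self, engine, application):
		# Keep references; a real test would set up its scene here.
		self._engine = engine
		self._application = application
	def destroy(self):
		self._engine = None
		self._application = None
	def run(self):
		self._running = True
	def stop(self):
		self._running = False
	def isRunning(self):
		return self._running
	def getName(self):
		return "ExampleTest"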
| lgpl-2.1 | 1,703,994,664,542,917,000 | 27.105634 | 86 | 0.656978 | false | 3.585804 | true | false | false |
Patrick-Cole/pygmi | pygmi/clust/graphtool.py | 1 | 26477 | # -----------------------------------------------------------------------------
# Name: graph_tool.py (part of PyGMI)
#
# Author: Patrick Cole
# E-Mail: [email protected]
#
# Copyright: (c) 2013 Council for Geoscience
# Licence: GPL-3.0
#
# This file is part of PyGMI
#
# PyGMI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyGMI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Multi-function graphing tool for use with cluster analysis."""
import numpy as np
from PyQt5 import QtWidgets, QtCore
from matplotlib.figure import Figure
from matplotlib import cm
from matplotlib.artist import Artist
from matplotlib.patches import Polygon
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.ticker import NullFormatter
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
class GraphHist(FigureCanvasQTAgg):
"""Graph Hist."""
def __init__(self, parent=None):
self.figure = Figure()
super().__init__(self.figure)
self.setParent(parent)
self.nullfmt = NullFormatter()
self.pntxy = None
self.polyi = None
self.axhistx = None
self.axhisty = None
self.axscatter = None
self.histx = None
self.histy = None
self.xcoord = None
self.ycoord = None
self.data = []
self.cindx = [0, 1, 0]
self.cdata = []
self.csp = None
def get_hist(self, bins):
"""
Routine to get the scattergram with histogram overlay.
Parameters
----------
bins : int
Number of bins.
Returns
-------
xymahist : numpy array
Output data.
"""
        xxx = self.xcoord.compressed()
        yyy = self.ycoord.compressed()
        # histogram2d returns (counts, xedges, yedges); only counts are kept
        xyhist = np.histogram2d(xxx, yyy, bins + 1)[0]
        xymahist = np.ma.masked_equal(xyhist, 0)
return xymahist
def get_clust_scat(self, bins, dattmp, ctmp):
"""
Routine to get the scattergram with cluster overlay.
Parameters
----------
bins : int
Number of bins.
dattmp : list
Data.
ctmp : list
Cluster indices.
Returns
-------
xymahist : numpy array
Output data.
"""
clust = np.ma.array(dattmp[ctmp[2] - 1].data.flatten())
clust.mask = np.ma.getmaskarray(self.xcoord)
clust = clust.compressed()
xxx = self.xcoord.compressed()
yyy = self.ycoord.compressed()
xyhist = np.zeros((bins + 1, bins + 1))
xyhist[xxx, yyy] = (clust + 1)
xymahist = np.ma.masked_equal(xyhist, 0)
return xymahist
def init_graph(self):
"""
Initialize the Graph.
Returns
-------
None.
"""
self.figure.clf()
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = bottom + height + 0.02
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
self.axscatter = self.figure.add_axes(rect_scatter, label='s')
self.axhistx = self.figure.add_axes(rect_histx, label='x')
self.axhisty = self.figure.add_axes(rect_histy, label='y')
# Setup the coordinates
self.setup_coords()
# setup 1d histograms
self.setup_hist()
# Compressed eliminates the masked values so that hist
xymahist = self.get_hist(50)
self.axscatter.get_xaxis().set_visible(False)
self.axscatter.get_yaxis().set_visible(False)
self.csp = self.axscatter.imshow(xymahist.T, interpolation='nearest',
cmap=cm.get_cmap('jet'),
aspect='auto')
self.csp.set_clim(xymahist.min(), xymahist.max())
self.csp.changed()
self.figure.canvas.draw()
def polyint(self):
"""
Polygon Interactor routine.
Returns
-------
None.
"""
pntxy = np.transpose([self.xcoord, self.ycoord])
self.polyi = PolygonInteractor(self.axscatter, pntxy)
self.polyi.ishist = True
def setup_coords(self):
"""
Routine to setup the coordinates for the scattergram.
Returns
-------
None.
"""
self.xcoord = self.data[self.cindx[0]].data.flatten()
self.ycoord = self.data[self.cindx[1]].data.flatten()
self.xcoord -= self.xcoord.min()
self.ycoord -= self.ycoord.min()
xptp = self.xcoord.ptp()
yptp = self.ycoord.ptp()
xstep = xptp / 50
ystep = yptp / 50
self.xcoord /= xstep
self.ycoord /= ystep
self.xcoord = self.xcoord.astype(int)
self.ycoord = self.ycoord.astype(int)
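        # xcoord/ycoord now hold integer bin indices in [0, 50], matching the
        # 50-step binning used by get_hist and get_clust_scat.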
def setup_hist(self):
"""
Routine to setup the 1D histograms.
Returns
-------
None.
"""
self.axhistx.xaxis.set_major_formatter(self.nullfmt)
self.axhisty.yaxis.set_major_formatter(self.nullfmt)
self.axhistx.yaxis.set_major_formatter(self.nullfmt)
self.axhisty.xaxis.set_major_formatter(self.nullfmt)
xrng = [self.xcoord.min(), self.xcoord.max()]
yrng = [self.ycoord.min(), self.ycoord.max()]
self.histx = self.axhistx.hist(self.xcoord.compressed(), 50)
self.histy = self.axhisty.hist(self.ycoord.compressed(), 50,
orientation='horizontal')
self.axhistx.set_xlim(xrng)
self.axhisty.set_ylim(yrng[::-1])
def update_graph(self, clearaxis=False):
"""
Draw Routine.
Parameters
----------
clearaxis : bool, optional
True to clear the axis. The default is False.
Returns
-------
None.
"""
if clearaxis is True:
self.axhistx.cla()
self.axhisty.cla()
self.setup_coords()
self.polyi.pntxy = np.array([self.xcoord, self.ycoord]).T
self.setup_hist()
if self.cindx[2] > 0:
xymahist = self.get_clust_scat(50, self.cdata, self.cindx)
else:
xymahist = self.get_hist(50)
if self.csp is None:
return
self.csp.set_data(xymahist.T)
self.csp.set_clim(xymahist.min(), xymahist.max())
self.csp.changed()
self.figure.canvas.draw()
self.polyi.draw_callback()
class GraphMap(FigureCanvasQTAgg):
"""
Graph Map.
Attributes
----------
parent : parent
reference to the parent routine
"""
def __init__(self, parent=None):
self.figure = Figure()
super().__init__(self.figure)
self.setParent(parent)
self.parent = parent
self.polyi = None
self.data = []
self.cdata = []
self.mindx = [0, 0]
self.csp = None
self.subplot = None
def init_graph(self):
"""
Initialize the Graph.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]]
self.figure.clf()
self.subplot = self.figure.add_subplot(111)
self.subplot.get_xaxis().set_visible(False)
self.subplot.get_yaxis().set_visible(False)
self.csp = self.subplot.imshow(dat.data, cmap=cm.get_cmap('jet'))
self.subplot.figure.colorbar(self.csp)
self.figure.canvas.draw()
def polyint(self):
"""
        Polygon Interactor routine.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]].data
xtmp = np.arange(dat.shape[1])
ytmp = np.arange(dat.shape[0])
xmesh, ymesh = np.meshgrid(xtmp, ytmp)
xmesh = np.ma.array(xmesh, dtype=float, mask=dat.mask)
ymesh = np.ma.array(ymesh, dtype=float, mask=dat.mask)
xmesh = xmesh.flatten()
ymesh = ymesh.flatten()
xmesh = xmesh.filled(np.nan)
ymesh = ymesh.filled(np.nan)
pntxy = np.transpose([xmesh, ymesh])
self.polyi = PolygonInteractor(self.subplot, pntxy)
self.polyi.ishist = False
def update_graph(self):
"""
Draw routine.
Returns
-------
None.
"""
mtmp = self.mindx
dat = self.data[mtmp[0]]
if mtmp[1] > 0:
cdat = self.cdata[mtmp[1] - 1].data
self.csp.set_data(cdat)
self.csp.set_clim(cdat.min(), cdat.max())
else:
self.csp.set_data(dat.data)
self.csp.set_clim(dat.data.min(), dat.data.max())
self.csp.changed()
self.figure.canvas.draw()
self.polyi.draw_callback()
class PolygonInteractor(QtCore.QObject):
"""Polygon Interactor."""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
polyi_changed = QtCore.pyqtSignal(list)
def __init__(self, axtmp, pntxy):
super().__init__()
self.ax = axtmp
self.poly = Polygon([(1, 1)], animated=True)
self.ax.add_patch(self.poly)
self.canvas = self.poly.figure.canvas
self.poly.set_alpha(0.5)
self.pntxy = pntxy
self.ishist = True
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
xtmp, ytmp = list(zip(*self.poly.xy))
self.line = Line2D(xtmp, ytmp, marker='o', markerfacecolor='r',
color='y', animated=True)
self.ax.add_line(self.line)
self.poly.add_callback(self.poly_changed)
self._ind = None # the active vert
self.canvas.mpl_connect('button_press_event',
self.button_press_callback)
self.canvas.mpl_connect('button_release_event',
self.button_release_callback)
self.canvas.mpl_connect('motion_notify_event',
self.motion_notify_callback)
def draw_callback(self):
"""
Draw callback.
Returns
-------
None.
"""
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
QtWidgets.QApplication.processEvents()
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
def new_poly(self, npoly):
"""
Create new Polygon.
Parameters
----------
npoly : list
New polygon coordinates.
Returns
-------
None.
"""
self.poly.set_xy(npoly)
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.draw()
self.update_plots()
def poly_changed(self, poly):
"""
Polygon changed.
Parameters
----------
        poly : matplotlib.patches.Polygon
            Polygon whose artist properties are mirrored onto the line.
Returns
-------
None.
"""
# this method is called whenever the polygon object is called
# only copy the artist props to the line (except visibility)
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis) # don't use the poly visibility state
def get_ind_under_point(self, event):
"""
Get the index of vertex under point if within epsilon tolerance.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse event with the display coordinates of the pointer.
Returns
-------
ind : int or None
Index of vertex under point.
"""
# display coords
xytmp = np.asarray(self.poly.xy)
xyt = self.poly.get_transform().transform(xytmp)
xtt, ytt = xyt[:, 0], xyt[:, 1]
dtt = np.sqrt((xtt - event.x) ** 2 + (ytt - event.y) ** 2)
indseq = np.nonzero(np.equal(dtt, np.amin(dtt)))[0]
ind = indseq[0]
if dtt[ind] >= self.epsilon:
ind = None
return ind
def button_press_callback(self, event):
"""
Button press callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Button press event.
Returns
-------
None.
"""
if event.inaxes is None:
return
if event.button != 1:
return
if self.ax.get_navigate_mode() is not None:
return
self._ind = self.get_ind_under_point(event)
if self._ind is None:
xys = self.poly.get_transform().transform(self.poly.xy)
ptmp = self.poly.get_transform().transform([event.xdata,
event.ydata])
# ptmp = event.x, event.y # display coords
if len(xys) == 1:
self.poly.xy = np.array(
[(event.xdata, event.ydata)] +
[(event.xdata, event.ydata)])
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
return
dmin = -1
imin = -1
for i in range(len(xys) - 1):
s0tmp = xys[i]
s1tmp = xys[i + 1]
dtmp = dist_point_to_segment(ptmp, s0tmp, s1tmp)
if dmin == -1:
dmin = dtmp
imin = i
elif dtmp < dmin:
dmin = dtmp
imin = i
i = imin
if np.array_equal(self.poly.xy, np.ones((2, 2))):
self.poly.set_xy([[event.xdata, event.ydata]])
else:
self.poly.xy = np.array(list(self.poly.xy[:i + 1]) +
[(event.xdata, event.ydata)] +
list(self.poly.xy[i + 1:]))
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
def button_release_callback(self, event):
"""
Button release callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Button release event.
Returns
-------
None.
"""
if event.button != 1:
return
self._ind = None
self.update_plots()
def update_plots(self):
"""
Update plots.
Returns
-------
None.
"""
polymask = Path(self.poly.xy).contains_points(self.pntxy)
self.polyi_changed.emit(polymask.tolist())
def motion_notify_callback(self, event):
"""
        Motion notify callback.
Parameters
----------
        event : matplotlib.backend_bases.MouseEvent
            Mouse motion event.
Returns
-------
None.
"""
if self._ind is None:
return
if event.inaxes is None:
return
if event.button != 1:
return
xtmp, ytmp = event.xdata, event.ydata
self.poly.xy[self._ind] = xtmp, ytmp
if self._ind == 0:
self.poly.xy[-1] = xtmp, ytmp
self.line.set_data(list(zip(*self.poly.xy)))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.update()
class ScatterPlot(QtWidgets.QDialog):
"""
Main Graph Tool Routine.
Attributes
----------
parent : parent
reference to the parent routine
indata : dictionary
dictionary of input datasets
outdata : dictionary
dictionary of output datasets
"""
def __init__(self, parent=None):
super().__init__(parent)
self.indata = {}
self.outdata = {}
self.parent = parent
self.m1 = 0
self.c = [0, 1, 0]
self.m = [0, 0]
self.dat_tmp = None
if parent is None:
self.showprocesslog = print
else:
self.showprocesslog = parent.showprocesslog
self.map = GraphMap(self)
self.hist = GraphHist(self)
self.cp_dpoly = QtWidgets.QPushButton('Delete Polygon')
self.cp_combo = QtWidgets.QComboBox()
self.cp_combo2 = QtWidgets.QComboBox()
self.cp_combo3 = QtWidgets.QComboBox()
self.map_dpoly = QtWidgets.QPushButton('Delete Polygon')
self.map_combo = QtWidgets.QComboBox()
self.map_combo2 = QtWidgets.QComboBox()
self.setupui()
self.hist.cindx = self.c
self.map.mindx = self.m
def setupui(self):
"""
Set up UI.
Returns
-------
None.
"""
grid_main = QtWidgets.QGridLayout(self)
group_cp = QtWidgets.QGroupBox('Cross Plot Settings')
grid_left = QtWidgets.QGridLayout(group_cp)
group_map = QtWidgets.QGroupBox('Map Settings')
grid_right = QtWidgets.QGridLayout(group_map)
self.setWindowTitle('Graph Window')
lbl_combo_left = QtWidgets.QLabel('X Data Band:')
lbl_combo2_left = QtWidgets.QLabel('Y Data Band:')
lbl_combo3_left = QtWidgets.QLabel('Cluster Overlay:')
lbl_combo_right = QtWidgets.QLabel('Data Band:')
lbl_combo2_right = QtWidgets.QLabel('Cluster Overlay:')
grid_left.addWidget(lbl_combo_left, 0, 0, 1, 1)
grid_left.addWidget(lbl_combo2_left, 1, 0, 1, 1)
grid_left.addWidget(lbl_combo3_left, 2, 0, 1, 1)
grid_left.addWidget(self.cp_dpoly, 0, 2, 1, 1)
grid_left.addWidget(self.cp_combo, 0, 1, 1, 1)
grid_left.addWidget(self.cp_combo2, 1, 1, 1, 1)
grid_left.addWidget(self.cp_combo3, 2, 1, 1, 1)
grid_right.addWidget(lbl_combo_right, 0, 0, 1, 1)
grid_right.addWidget(lbl_combo2_right, 1, 0, 1, 1)
grid_right.addWidget(self.map_dpoly, 0, 2, 1, 1)
grid_right.addWidget(self.map_combo, 0, 1, 1, 1)
grid_right.addWidget(self.map_combo2, 1, 1, 1, 1)
grid_main.addWidget(self.hist, 0, 0, 1, 1)
grid_main.addWidget(self.map, 0, 1, 1, 1)
grid_main.addWidget(group_cp, 1, 0, 1, 1)
grid_main.addWidget(group_map, 1, 1, 1, 1)
self.cp_dpoly.clicked.connect(self.on_cp_dpoly)
self.map_dpoly.clicked.connect(self.on_map_dpoly)
def on_cp_dpoly(self):
"""
On cp dpoly.
Returns
-------
None.
"""
self.hist.polyi.new_poly([[10, 10]])
mtmp = self.map_combo.currentIndex()
mask = self.indata['Raster'][mtmp].data.mask
dattmp = self.map.csp.get_array()
dattmp.mask = mask
self.map.csp.changed()
self.map.figure.canvas.draw()
def on_map_dpoly(self):
"""
On map dpoly.
Returns
-------
None.
"""
self.map.polyi.new_poly([[10, 10]])
dattmp = self.hist.csp.get_array()
dattmp.mask = np.ma.getmaskarray(np.ma.masked_equal(dattmp.data, 0.))
self.hist.csp.changed()
self.hist.figure.canvas.draw()
def on_cp_combo(self):
"""
On cp combo.
Returns
-------
None.
"""
gstmp = self.cp_combo.currentIndex()
if gstmp != self.c[0]:
self.c[0] = gstmp
self.hist.update_graph(clearaxis=True)
self.map.polyi.update_plots()
def on_cp_combo2(self):
"""
On cp combo 2.
Returns
-------
None.
"""
gstmp = self.cp_combo2.currentIndex()
if gstmp != self.c[1]:
self.c[1] = gstmp
self.hist.update_graph(clearaxis=True)
self.map.polyi.update_plots()
def on_cp_combo3(self):
"""
On cp combo 3.
Returns
-------
None.
"""
self.c[2] = self.cp_combo3.currentIndex()
self.hist.update_graph()
self.map.polyi.update_plots()
def on_map_combo(self):
"""
On map combo.
Returns
-------
None.
"""
self.m[0] = self.map_combo.currentIndex()
self.map.update_graph()
self.hist.polyi.update_plots()
def on_map_combo2(self):
"""
On map combo 2.
Returns
-------
None.
"""
self.m[1] = self.map_combo2.currentIndex()
self.map.update_graph()
self.hist.polyi.update_plots()
def settings(self, nodialog=False):
"""
Run.
Returns
-------
bool
True if successful, False otherwise.
"""
if 'Raster' not in self.indata:
self.showprocesslog('Error: You must have a multi-band raster '
'dataset in addition to your cluster analysis'
' results')
return False
self.dat_tmp = self.indata['Raster']
self.map.data = self.indata['Raster']
self.hist.data = self.indata['Raster']
bands = [i.dataid for i in self.indata['Raster']]
self.cp_combo.clear()
self.cp_combo2.clear()
self.map_combo.clear()
self.cp_combo.addItems(bands)
self.cp_combo2.addItems(bands)
self.map_combo.addItems(bands)
self.cp_combo2.setCurrentIndex(1)
self.cp_combo.currentIndexChanged.connect(self.on_cp_combo)
self.cp_combo2.currentIndexChanged.connect(self.on_cp_combo2)
self.map_combo.currentIndexChanged.connect(self.on_map_combo)
cbands = ['Scatter Amplitudes']
mbands = ['None']
if 'Cluster' in self.indata:
self.hist.cdata = self.indata['Cluster']
self.map.cdata = self.indata['Cluster']
cbands += [i.dataid for i in self.indata['Cluster']]
mbands += [i.dataid for i in self.indata['Cluster']]
self.cp_combo3.clear()
self.map_combo2.clear()
self.cp_combo3.addItems(cbands)
self.map_combo2.addItems(mbands)
self.cp_combo3.currentIndexChanged.connect(self.on_cp_combo3)
self.map_combo2.currentIndexChanged.connect(self.on_map_combo2)
self.hist.init_graph()
self.map.init_graph()
self.show()
self.hist.polyint()
self.map.polyint()
self.hist.polyi.polyi_changed.connect(self.update_map)
self.map.polyi.polyi_changed.connect(self.update_hist)
self.hist.update_graph(clearaxis=True)
self.map.update_graph()
return True
def loadproj(self, projdata):
"""
Load project data into class.
Parameters
----------
projdata : dictionary
Project data loaded from JSON project file.
Returns
-------
chk : bool
A check to see if settings was successfully run.
"""
return False
def saveproj(self):
"""
Save project data from class.
Returns
-------
projdata : dictionary
Project data to be saved to JSON project file.
"""
projdata = {}
# projdata['ftype'] = '2D Mean'
return projdata
def update_map(self, polymask):
"""
Update map.
Parameters
----------
polymask : numpy array
Polygon mask.
Returns
-------
None.
"""
        if not any(polymask):
return
mtmp = self.map_combo.currentIndex()
mask = self.indata['Raster'][mtmp].data.mask
polymask = np.array(polymask)
polymask.shape = mask.shape
polymask = np.logical_or(~polymask, mask)
dattmp = self.map.csp.get_array()
dattmp.mask = polymask
self.map.csp.changed()
self.map.figure.canvas.draw()
def update_hist(self, polymask):
"""
Update histogram.
Parameters
----------
polymask : numpy array
Polygon mask.
Returns
-------
None.
"""
        if not any(polymask):
return
polymask = np.array(polymask)
dattmp = self.hist.csp.get_array()
atmp = np.array([self.hist.xcoord[polymask],
self.hist.ycoord[polymask]]).T
dattmp.mask = np.ones_like(np.ma.getmaskarray(dattmp))
for i in atmp:
dattmp.mask[i[1], i[0]] = False
self.hist.csp.changed()
self.hist.figure.canvas.draw()
def dist_point_to_segment(p, s0, s1):
"""
Dist point to segment.
Reimplementation of Matplotlib's dist_point_to_segment, after it was
    deprecated. Follows http://geomalgorithms.com/a02-_lines.html
Parameters
----------
p : numpy array
Point.
s0 : numpy array
Start of segment.
s1 : numpy array
End of segment.
Returns
-------
numpy array
Distance of point to segment.
"""
p = np.array(p)
s0 = np.array(s0)
s1 = np.array(s1)
v = s1 - s0
w = p - s0
c1 = np.dot(w, v)
if c1 <= 0:
return np.linalg.norm(p - s0)
c2 = np.dot(v, v)
if c2 <= c1:
return np.linalg.norm(p - s1)
b = c1/c2
pb = s0 + b*v
return np.linalg.norm(p - pb)
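# Minimal sketch of the three branches above (hand-checked values,
# illustrative only):
#   dist_point_to_segment([1, 1], [0, 0], [2, 0])  -> 1.0  (interior projection)
#   dist_point_to_segment([-1, 0], [0, 0], [2, 0]) -> 1.0  (c1 <= 0, closest to s0)
#   dist_point_to_segment([3, 0], [0, 0], [2, 0])  -> 1.0  (c2 <= c1, closest to s1)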
| gpl-3.0 | 3,424,058,039,443,024,000 | 25.583333 | 79 | 0.526721 | false | 3.744449 | false | false | false |
aspose-pdf/Aspose.Pdf-for-Java | Plugins/Aspose_Pdf_Java_for_Python/WorkingWithDocumentObject/__init__.py | 1 | 9774 | __author__ = 'fahadadeel'
import jpype
import re
import datetime
class AddJavascript:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.JavascriptAction=jpype.JClass("com.aspose.pdf.JavascriptAction")
def main(self):
        # Open a pdf document.
        doc = self.Document(self.dataDir + 'Template.pdf')
        # Adding JavaScript at Document Level
        # Instantiate JavascriptAction with desired JavaScript statement
        javaScript = self.JavascriptAction("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
        # Assign JavascriptAction object to desired action of Document
        doc.setOpenAction(javaScript)
        # Adding JavaScript at Page Level
        js = self.JavascriptAction("app.alert('page 2 is opened')")
        doc.getPages().get_Item(2).getActions().setOnOpen(js)
doc.getPages().get_Item(2).getActions().setOnClose(self.JavascriptAction("app.alert('page 2 is closed')"))
# Save PDF Document
doc.save(self.dataDir + "JavaScript-Added.pdf")
print "Added JavaScript Successfully, please check the output file."
class AddToc:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.TocInfo=jpype.JClass("com.aspose.pdf.TocInfo")
self.TextFragment=jpype.JClass("com.aspose.pdf.TextFragment")
self.TextSegment=jpype.JClass("com.aspose.pdf.TextSegment")
self.Heading=jpype.JClass("com.aspose.pdf.Heading")
def main(self):
        # Open a pdf document.
        doc = self.Document(self.dataDir + 'input1.pdf')
        # Insert a blank page at the beginning of the document to hold the TOC
toc_page = doc.getPages().insert(1)
# Create object to represent TOC information
toc_info = self.TocInfo()
title = self.TextFragment("Table Of Contents")
title.getTextState().setFontSize(20)
# Set the title for TOC
toc_info.setTitle(title)
toc_page.setTocInfo(toc_info)
# Create string objects which will be used as TOC elements
titles = ["First page", "Second page"]
i = 0;
while (i < 2):
# Create Heading object
heading2 = self.Heading(1);
            segment2 = self.TextSegment()
heading2.setTocPage(toc_page)
heading2.getSegments().add(segment2)
# Specify the destination page for heading object
heading2.setDestinationPage(doc.getPages().get_Item(i + 2))
# Destination page
heading2.setTop(doc.getPages().get_Item(i + 2).getRect().getHeight())
# Destination coordinate
segment2.setText(titles[i])
# Add heading to page containing TOC
toc_page.getParagraphs().add(heading2)
i +=1;
# Save PDF Document
doc.save(self.dataDir + "TOC.pdf")
print "Added TOC Successfully, please check the output file."
class GetDocumentWindow:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get different document properties
# Position of document's window - Default: false
print "CenterWindow :- " + str(doc.getCenterWindow())
# Predominant reading order; determine the position of page
# when displayed side by side - Default: L2R
print "Direction :- " + str(doc.getDirection())
# Whether window's title bar should display document title.
# If false, title bar displays PDF file name - Default: false
print "DisplayDocTitle :- " + str(doc.getDisplayDocTitle())
#Whether to resize the document's window to fit the size of
#first displayed page - Default: false
print "FitWindow :- " + str(doc.getFitWindow())
# Whether to hide menu bar of the viewer application - Default: false
print "HideMenuBar :-" + str(doc.getHideMenubar())
# Whether to hide tool bar of the viewer application - Default: false
print "HideToolBar :-" + str(doc.getHideToolBar())
# Whether to hide UI elements like scroll bars
# and leaving only the page contents displayed - Default: false
print "HideWindowUI :-" + str(doc.getHideWindowUI())
# The document's page mode. How to display document on exiting full-screen mode.
print "NonFullScreenPageMode :-" + str(doc.getNonFullScreenPageMode())
# The page layout i.e. single page, one column
print "PageLayout :-" + str(doc.getPageLayout())
#How the document should display when opened.
print "pageMode :-" + str(doc.getPageMode())
class GetPdfFileInfo:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get document information
doc_info = doc.getInfo();
# Show document information
print "Author:-" + str(doc_info.getAuthor())
print "Creation Date:-" + str(doc_info.getCreationDate())
print "Keywords:-" + str(doc_info.getKeywords())
print "Modify Date:-" + str(doc_info.getModDate())
print "Subject:-" + str(doc_info.getSubject())
print "Title:-" + str(doc_info.getTitle())
class GetXMPMetadata:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get properties
print "xmp:CreateDate: " + str(doc.getMetadata().get_Item("xmp:CreateDate"))
print "xmp:Nickname: " + str(doc.getMetadata().get_Item("xmp:Nickname"))
print "xmp:CustomProperty: " + str(doc.getMetadata().get_Item("xmp:CustomProperty"))
class Optimize:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
# self.OptimizationOptions=jpype.JClass("com.aspose.pdf.Document.OptimizationOptions")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Optimize for web
doc.optimize();
#Save output document
doc.save(self.dataDir + "Optimized_Web.pdf")
print "Optimized PDF for the Web, please check output file."
class RemoveMetadata:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
        # note: the '/.../' slashes were Ruby-style regex delimiters and would
        # never match in Python; search the metadata's string form instead
        if re.findall('pdfaid:part', str(doc.getMetadata())):
            doc.getMetadata().removeItem("pdfaid:part")
        if re.findall('dc:format', str(doc.getMetadata())):
            doc.getMetadata().removeItem("dc:format")
# save update document with new information
doc.save(self.dataDir + "Remove_Metadata.pdf")
print "Removed metadata successfully, please check output file."
class SetExpiration:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
self.JavascriptAction=jpype.JClass("com.aspose.pdf.JavascriptAction")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
javascript = self.JavascriptAction(
"var year=2014; var month=4;today = new Date();today = new Date(today.getFullYear(), today.getMonth());expiry = new Date(year, month);if (today.getTime() > expiry.getTime())app.alert('The file is expired. You need a new one.');");
doc.setOpenAction(javascript);
# save update document with new information
doc.save(self.dataDir + "set_expiration.pdf");
print "Update document information, please check output file."
class SetPdfFileInfo:
def __init__(self, dataDir):
self.dataDir = dataDir
self.Document = jpype.JClass("com.aspose.pdf.Document")
def main(self):
        doc = self.Document(self.dataDir + 'input1.pdf')
# Get document information
doc_info = doc.getInfo();
doc_info.setAuthor("Aspose.Pdf for java");
        doc_info.setCreationDate(datetime.datetime.today().strftime("%m/%d/%Y"))
doc_info.setKeywords("Aspose.Pdf, DOM, API");
        doc_info.setModDate(datetime.datetime.today().strftime("%m/%d/%Y"))
doc_info.setSubject("PDF Information");
doc_info.setTitle("Setting PDF Document Information");
# save update document with new information
doc.save(self.dataDir + "Updated_Information.pdf")
print "Update document information, please check output file."
| mit | -4,455,166,005,678,652,000 | 32.907143 | 246 | 0.593513 | false | 4.032178 | false | false | false |
andr3wmac/metaTower | mt/EventManager.py | 1 | 1710 | """
* metaTower v0.4.5
* http://www.metatower.com
*
* Copyright 2012, Andrew Mac
* http://www.andrewmac.ca
* Licensed under GPL v3.
* See license.txt
* or http://www.metatower.com/license.txt
"""
import mt, inspect
class EventManager:
class EventItem:
def __init__(self, event, function, source):
self.event = event
self.function = function
self.source = source
def __init__(self):
self.events = []
def register(self, event, function):
source = mt.utils.getSource()
newEvent = self.EventItem(event, function, source)
self.events.append(newEvent)
def clear(self, function = None):
if ( function == None ): self.events = []
else:
new_list = []
for e in self.events:
if e.function != function: new_list.append(e)
self.events = new_list
def clearSource(self, source):
new_list = []
for e in self.events:
if e.source != source: new_list.append(e)
self.events = new_list
def trigger(self, event, *args):
#print "Triggering event: " + event + " with " + str(len(args)) + " arg(s)"
result = None
for e in self.events:
if e.event == event:
arg_count = len(inspect.getargspec(e.function).args)
if ( arg_count == 0 ) : result = e.function()
if ( arg_count > 0 ):
if ( arg_count == len(args) ):
result = e.function(*args)
if ( arg_count < len(args) ):
                        result = e.function(*args[:arg_count])
return result
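# Minimal usage sketch (illustrative): trigger() passes as many positional
# arguments as the handler's signature accepts.
#   events = EventManager()
#   events.register('ping', lambda payload: payload.upper())
#   events.trigger('ping', 'hello')   # -> 'HELLO'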
| gpl-3.0 | -5,570,807,352,983,404,000 | 30.090909 | 83 | 0.523392 | false | 3.808463 | false | false | false |
andreagrandi/workshopvenues | workshopvenues/venues/migrations/0007_auto__add_country__chg_field_address_country__add_index_address_countr.py | 1 | 3797 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table(u'venues_country', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal(u'venues', ['Country'])
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
def backwards(self, orm):
# Deleting model 'Country'
db.delete_table(u'venues_country')
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.country': {
'Meta': {'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.image': {
'Meta': {'object_name': 'Image'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['venues'] | bsd-3-clause | -1,636,155,754,172,928,000 | 50.324324 | 141 | 0.54148 | false | 3.595644 | false | false | false |
bswartz/cinder | cinder/volume/drivers/netapp/dataontap/fc_cmode.py | 1 | 5282 | # Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ConsistencyGroupVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
| apache-2.0 | -1,633,660,446,188,067,300 | 39.320611 | 78 | 0.65373 | false | 4.129789 | false | false | false |
cloudbrain/cloudbrain_examples | sandbox/print_data.py | 1 | 1223 | import time
from cloudbrain.subscribers.rabbitmq import PikaSubscriber
from cloudbrain_examples.settings import (base_routing_key, metric_name, num_channels, buffer_size,
rabbitmq_address, rabbitmq_user, rabbitmq_pwd)
def _print_callback(unused_ch, unused_method, unused_properties, body):
print "==> %s" % body
def main():
# Setup the subscriber
subscriber = PikaSubscriber(base_routing_key=base_routing_key,
rabbitmq_address=rabbitmq_address,
rabbitmq_user=rabbitmq_user,
rabbitmq_pwd=rabbitmq_pwd)
subscriber.connect()
subscriber.register(metric_name, num_channels)
time.sleep(1) # Leave it some time to register
# Get one message at a time
one_message = subscriber.get_one_message(metric_name)
print "\n==> Got one message: %s\n" % one_message
time.sleep(2) # Give people time to read the message
# Get message continuously
print "==> Subscribing ..."
try:
subscriber.subscribe(metric_name, _print_callback)
except KeyboardInterrupt:
subscriber.disconnect()
if __name__ == '__main__':
main()
| agpl-3.0 | -5,175,418,729,232,379,000 | 28.829268 | 99 | 0.623876 | false | 4.009836 | false | false | false |
rokuz/pygeom | vec2.py | 1 | 4197 | import math
import copy
import geom_exceptions
import functions
import vec2_gen
class Vec2(vec2_gen.GenVec2):
"""2D Vector."""
def __init__(self, x=0.0, y=0.0):
vec2_gen.GenVec2.__init__(self, x, y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise ValueError("Integer key in the range [0;1] required")
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise ValueError("Integer key in the range [0;1] required")
def __len__(self):
return 2
def __str__(self):
return 'Vec2({}; {})'.format(self.x, self.y)
def __copy__(self):
return Vec2(self.x, self.y)
def __deepcopy__(self, memodict={}):
return Vec2(self.x, self.y)
def __add__(self, other):
return Vec2(self.x + other[0], self.y + other[1])
def __iadd__(self, other):
self.x += other[0]
self.y += other[1]
return self
def __sub__(self, other):
return Vec2(self.x - other[0], self.y - other[1])
def __isub__(self, other):
self.x -= other[0]
self.y -= other[1]
return self
def __mul__(self, scalar):
return Vec2(self.x * scalar, self.y * scalar)
def __imul__(self, scalar):
self.x *= scalar
self.y *= scalar
return self
def __div__(self, scalar):
return Vec2(self.x / scalar, self.y / scalar)
def __truediv__(self, scalar):
return Vec2(self.x / scalar, self.y / scalar)
def __idiv__(self, scalar):
self.x /= scalar
self.y /= scalar
return self
def __itruediv__(self, scalar):
self.x /= scalar
self.y /= scalar
return self
def __neg__(self):
return Vec2(-self.x, -self.y)
def __eq__(self, other):
return functions.almost_equal(self.x, other[0]) and functions.almost_equal(self.y, other[1])
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if functions.almost_equal(self.x, other[0]):
return self.y < other[1]
return self.x < other[0]
def __gt__(self, other):
if functions.almost_equal(self.x, other[0]):
return self.y > other[1]
return self.x > other[0]
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def length_squared(self):
"""Calculates squared length of a vector."""
return self.x * self.x + self.y * self.y
def length(self):
"""Calculates length of a vector."""
return math.sqrt(self.length_squared())
def normalize(self):
"""Performs vector normalization. Raises VectorException in case of zero length."""
ls = self.length_squared()
if ls == 0.0:
raise geom_exceptions.VectorException("Zero-length normalization")
l = math.sqrt(ls)
self.x /= l
self.y /= l
def get_normalized(self):
"""Returns normalized copy of a vector. Raises VectorException in case of zero length."""
c = copy.copy(self)
c.normalize()
return c
def dot(self, v2):
"""Calculated dot product of current vector and vector v2."""
return self.x * v2[0] + self.y * v2[1]
def cross(self, v2):
"""Calculates cross product. It's a scalar which absolute value equals to
square of a parallelogram constructed on the current vector and vector v2.
The sign tells either v2 is on the left side (positive value) of the current
vector or on the right side (negative value)."""
return self.x * v2[1] - self.y * v2[0]
@property
def left_normal(self):
"""Calculates left normal vector to the current vector."""
return Vec2(-self.y, self.x)
@property
def right_normal(self):
"""Calculates right normal vector to the current vector."""
return Vec2(self.y, -self.x)
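    # Minimal usage sketch (illustrative):
    #   v = Vec2(3.0, 4.0)
    #   v.length()            # -> 5.0
    #   v.dot((1.0, 0.0))     # -> 3.0
    #   v.cross((0.0, 1.0))   # -> 3.0 (positive: (0, 1) is left of v)
    #   v.left_normal         # -> Vec2(-4.0; 3.0)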
| mit | 6,090,678,753,408,154,000 | 27.358108 | 100 | 0.554205 | false | 3.675131 | false | false | false |
eddienigma/rpi-rht | GraphIndexTH.py | 1 | 4632 | #
# pull data from sql, plot using matplotlib
# see http://stackoverflow.com/questions/18663746/matplotlib-multiple-lines-with-common-date-on-x-axis-solved
#
# rev 1.0 12/02/2013 WPNS built from GraphAirmuxSD.py V1.1
# rev 1.1 12/02/2013 WPNS remove large delta values
# rev 1.2 12/02/2013 WPNS remove -0.1 values (failed to read)
# rev 1.3 12/02/2013 WPNS show count of anomalies
# rev 1.4 12/03/2013 WPNS cleanup, release
# rev 1.5 12/03/2013 WPNS better label
# rev 1.6 12/03/2013 WPNS bugfix, release
# rev 1.69 12/04/2013 WPNS release to Instructables
# rev 2.0-JAS 1/11/2014 JAS adjusted graph ranges for current conditions and to use SQLite3 instead of MySQL
import sys
import os
import time
import math
import datetime
import numpy
import sqlite3 as lite
# so matplotlib has to have some of the setup parameters _before_ pyplot
import matplotlib
matplotlib.use('agg')
#matplotlib.rcParams['figure.dpi'] = 100
#matplotlib.rcParams['figure.figsize'] = [10.24, 7.68]
matplotlib.rcParams['lines.linewidth'] = 1
matplotlib.rcParams['axes.color_cycle'] = ['r','g','b','k']
matplotlib.rcParams['axes.labelsize'] = 'large'
matplotlib.rcParams['font.size'] = 8
matplotlib.rcParams['grid.linestyle']='-'
import matplotlib.pyplot as plt
anomalies = 0
print "GraphTH.py V1.69 12/04/2013 WPNS",time.asctime(),
print "GraphTH.py V1.0-JAS 12/22/2013 JAS"
# open the database connection, read the last <many> seconds of data, put them in a Numpy array called Raw
DBconn = lite.connect('/var/rht/db/rht.db')
cursor = DBconn.cursor()
sql = "select ComputerTime,TempF,Humidity from rht where ComputerTime >= (strftime('%s','now')-(60*60*24))"
cursor.execute(sql)
cursor2 = DBconn.cursor()
sql2 = "SELECT datetime(ComputerTime,'unixepoch','localtime'),TempF,Humidity FROM rht WHERE ComputerTime = (select max(ComputerTime) from rht)"
cursor2.execute(sql2)
lastRow = cursor2.fetchone()
Raw = numpy.fromiter(cursor.fetchall(), count=-1, dtype=[('', numpy.float)]*3)
Raw = Raw.view(numpy.float).reshape(-1, 3)
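# After the reshape, Raw holds one row per sample (illustrative, assumed
# data): columns are [unix epoch seconds, TempF, Humidity], e.g.
#   Raw[0]  ->  array([1.3865e+09, 68.2, 41.5])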
(samples,ports)=Raw.shape
print 'Samples: {}, DataPoints: {}'.format(samples,ports),
plotme=numpy.zeros((samples,ports-1)) # make an array the same shape minus the epoch numbers
for y in range(ports-1):
# print y
for x in range(samples-1): # can't do last one, there's no (time) delta from previous sample
        plotme[x,y] = Raw[x,y+1]  # copy the sample into the plot array
plotme[samples-1,y] = None # set last sample to "do not plot"
for x in range(samples-1): # go thru the dataset again
if (Raw[x+1,1] == -0.1): # if values are "reading failed" flag
plotme[x+1,0] = plotme[x,0] # copy current sample over it
plotme[x+1,1] = plotme[x,1] # for temperature and humidity both
anomalies += 1
if (abs(Raw[x+1,1]-Raw[x,1]) > 10): # if temperature jumps more than 10 degrees in a minute
plotme[x+1,0] = plotme[x,0] # copy current sample over it
plotme[x+1,1] = plotme[x,1] # for temperature and humidity both
anomalies += 1
print "Anomalies: ",anomalies,
#print plotme
# get an array of adatetime objects (askewchan from stackoverflow, above)
dts = map(datetime.datetime.fromtimestamp, Raw[:,0])
# set up the plot details we want
plt.grid(True)
plt.ylabel('Temp F, RH %')
plt.axis(ymax=100,ymin=10)
plt.xlabel(time.asctime())
plt.title("Outdoor: Temperature (Red), Humidity (Green)")
plt.hold(True)
# and some fiddly bits around formatting the X (date) axis
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%m/%d %H:%M'))
plt.gca().xaxis.set_major_locator(matplotlib.dates.HourLocator())
lines = plt.plot(dts,plotme)
plt.gcf().autofmt_xdate()
FileName = '/var/rht/images/TH.png'
plt.savefig(FileName)
web = open('/var/www/index.html', 'w')
web.write('<HTML>\n')
web.write('<HEAD>\n')
web.write('<meta http-equiv=\"refresh\" content=\"60\">\n')
web.write('<TITLE>Raspberry Pi Temperature and Humidity Readings</TITLE>\n')
web.write('</HEAD>\n')
web.write('\n')
web.write('<BODY BGCOLOR="#FFFFFF">\n')
web.write('<CENTER>\n')
web.write('<IMG SRC="/images/TH.png">\n')
web.write('<BR><BR>\n')
web.write('<FONT COLOR=\"#FF0000\" SIZE=+2>Temp: ' + str(lastRow[1]) + 'F </FONT> <FONT COLOR=\"#00FF00\" SIZE=+2>Humidity: ' + str(lastRow[2]) + '% </FONT><BR>\n')
web.write('<FONT SIZE=+2>Time: ' + str(lastRow[0]) + '</FONT><BR>\n')
web.write('</CENTER>\n')
web.write('</BODY>\n')
web.write('\n')
web.write('</HTML\n')
print 'Done at',time.asctime()
| gpl-3.0 | -4,583,286,232,077,759,000 | 36.354839 | 185 | 0.676166 | false | 2.927939 | false | false | false |
chagaz/SamSpecCoEN | code/setupCV_computeNetworks.py | 1 | 4177 | # @Author
# Chloe-Agathe Azencott
# [email protected]
# April 2016
import argparse
import h5py
import numpy as np
import os
import sys
import CoExpressionNetwork
def main():
""" Create sample-specific co-expression networks for one fold and one repeat
of a cross-validation for which fold indices have already been computed.
The data will be stored under
<data_dir>/repeat<repeat idx>
with the following structure:
edges.gz:
Gzipped file containing the list of edges of the co-expression networks.
Each line is an undirected edge, formatted as:
<index of gene 1> <index of gene 2>
By convention, the index of gene 1 is smaller than that of gene 2.
For k=0..(numFolds-1):
<k>/lioness/edge_weights.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the LIONESS co-expression networks
for the training samples.
<k>/lioness/edge_weights_te.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the LIONESS co-expression networks
for the test samples.
<k>/regline/edge_weights.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the Regline co-expression networks
for the training samples.
<k>/regline/edge_weights_te.gz:
gzipped file containing the (self.numSamples, numEdges) array
describing the edge weights of the Regline co-expression networks
for the test samples.
Parameters
----------
aces_dir: path
Path to the ACES folder.
data_dir: path
Path to the folder containing fold indices (under <data_dir>/repeat<repeat_idx>/fold<fold_idx>).
fold: int
Fold index.
repeat: int
Repeat index.
Example
-------
$ python setUpSubTypeStratifiedCV_computeNetworks.py ACES outputs/U133A_combat_RFS/subtype_stratified 0 0
Reference
---------
Allahyar, A., and Ridder, J. de (2015).
FERAL: network-based classifier with application to breast cancer outcome prediction.
Bioinformatics 31, i311--i319.
"""
parser = argparse.ArgumentParser(description="Build sample-specific co-expression networks" + \
"for a 10-fold sub-type stratified CV on the RFS data",
add_help=True)
parser.add_argument("aces_dir", help="Path to ACES data")
parser.add_argument("data_dir", help="Path to the fold indices")
parser.add_argument("fold", help="Index of the fold", type=int)
parser.add_argument("repeat", help="Index of the repeat", type=int)
args = parser.parse_args()
outDir = '%s/repeat%d' % (args.data_dir, args.repeat)
# Get expression data, sample labels.
# Do not normalize the data while loading it (so as not to use test data for normalization).
f = h5py.File("%s/experiments/data/U133A_combat.h5" % args.aces_dir)
expressionData = np.array(f['U133A_combat_RFS']['ExpressionData'])
sampleLabels = np.array(f['U133A_combat_RFS']['PatientClassLabels'])
f.close()
foldNr = args.fold
# Output directory
foldDir = "%s/fold%d" % (outDir, foldNr)
# Read train indices from file
trIndicesF = '%s/train.indices' % foldDir
trIndices = np.loadtxt(trIndicesF, dtype=int)
sys.stdout.write("Read training indices for fold %d from %s\n" % (foldNr, trIndicesF))
# Read test indices from file
teIndicesF = '%s/test.indices' % foldDir
teIndices = np.loadtxt(teIndicesF, dtype=int)
sys.stdout.write("Read training indices for fold %d from %s\n" % (foldNr, teIndicesF))
print teIndices
print teIndices.shape
# Create networks
CoExpressionNetwork.run_whole_data(expressionData, sampleLabels, foldDir,
trIndices=trIndices, teIndices=teIndices)
if __name__ == "__main__":
main()
| mit | -4,337,070,075,003,851,000 | 38.037383 | 113 | 0.639454 | false | 3.860444 | false | false | false |
anaran/olympia | services/update.py | 1 | 14315 | import smtplib
import sys
import traceback
from email.Utils import formatdate
from email.mime.text import MIMEText
from time import time
from urlparse import parse_qsl
from django.utils.http import urlencode
import settings_local as settings
# This has to be imported after the settings so statsd knows where to log to.
from django_statsd.clients import statsd
import commonware.log
import MySQLdb as mysql
import sqlalchemy.pool as pool
try:
from compare import version_int
except ImportError:
from apps.versions.compare import version_int
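# Illustrative sketch of how version_int() is used below (assumed behavior):
# it maps a dotted version string onto a single comparable integer so that
# plain integer comparisons respect version ordering, e.g.
#   version_int('10') < version_int('16.0.1') < version_int('24.*')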
from constants import applications, base
from utils import (APP_GUIDS, get_mirror, log_configure, PLATFORMS,
STATUSES_PUBLIC)
# Go configure the log.
log_configure()
good_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
<RDF:li resource="urn:mozilla:%(type)s:%(guid)s:%(version)s"/>
</RDF:Seq>
</em:updates>
</RDF:Description>
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s:%(version)s">
<em:version>%(version)s</em:version>
<em:targetApplication>
<RDF:Description>
<em:id>%(appguid)s</em:id>
<em:minVersion>%(min)s</em:minVersion>
<em:maxVersion>%(max)s</em:maxVersion>
<em:updateLink>%(url)s</em:updateLink>
%(if_update)s
%(if_hash)s
</RDF:Description>
</em:targetApplication>
</RDF:Description>
</RDF:RDF>"""
bad_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
</RDF:RDF>"""
no_updates_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
</RDF:Seq>
</em:updates>
</RDF:Description>
</RDF:RDF>"""
timing_log = commonware.log.getLogger('z.timer')
error_log = commonware.log.getLogger('z.services')
def getconn():
db = settings.SERVICES_DATABASE
return mysql.connect(host=db['HOST'], user=db['USER'],
passwd=db['PASSWORD'], db=db['NAME'])
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5, recycle=300)
class Update(object):
def __init__(self, data, compat_mode='strict'):
self.conn, self.cursor = None, None
self.data = data.copy()
self.data['row'] = {}
self.version_int = 0
self.compat_mode = compat_mode
def is_valid(self):
        # If you are accessing this from unit tests, then before calling
        # is_valid, you can assign your own cursor.
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
data = self.data
# Version can be blank.
data['version'] = data.get('version', '')
for field in ['reqVersion', 'id', 'appID', 'appVersion']:
if field not in data:
return False
data['app_id'] = APP_GUIDS.get(data['appID'])
if not data['app_id']:
return False
sql = """SELECT id, status, addontype_id, guid FROM addons
WHERE guid = %(guid)s AND
inactive = 0 AND
status != %(STATUS_DELETED)s
LIMIT 1;"""
self.cursor.execute(sql, {'guid': self.data['id'],
'STATUS_DELETED': base.STATUS_DELETED})
result = self.cursor.fetchone()
if result is None:
return False
data['id'], data['addon_status'], data['type'], data['guid'] = result
data['version_int'] = version_int(data['appVersion'])
if 'appOS' in data:
for k, v in PLATFORMS.items():
if k in data['appOS']:
data['appOS'] = v
break
else:
data['appOS'] = None
return True
def get_update(self):
data = self.data
data.update(STATUSES_PUBLIC)
data['STATUS_BETA'] = base.STATUS_BETA
sql = ["""
SELECT
addons.guid as guid, addons.addontype_id as type,
addons.inactive as disabled_by_user,
applications.guid as appguid, appmin.version as min,
appmax.version as max, files.id as file_id,
files.status as file_status, files.hash,
files.filename, versions.id as version_id,
files.datestatuschanged as datestatuschanged,
files.strict_compatibility as strict_compat,
versions.releasenotes, versions.version as version,
addons.premium_type
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN applications
ON applications_versions.application_id = applications.id
AND applications.id = %(app_id)s
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
INNER JOIN files
ON files.version_id = versions.id AND (files.platform_id = 1
"""]
if data.get('appOS'):
sql.append(' OR files.platform_id = %(appOS)s')
sql.append("""
)
-- Find a reference to the user's current version, if it exists.
-- These should never be inner joins. We need results even if we
-- can't find the current version.
LEFT JOIN versions curver
ON curver.addon_id = addons.id AND curver.version = %(version)s
LEFT JOIN files curfile
ON curfile.version_id = curver.id
WHERE
-- Note that the WHEN clauses here will evaluate to the same
-- thing for each row we examine. The JOINs above narrow the
-- rows matched by the WHERE clause to versions of a specific
-- add-on, and the ORDER BY and LIMIT 1 clauses below make it
-- unlikely that we'll be examining a large number of rows,
-- so this is fairly cheap.
CASE
WHEN curfile.status = %(STATUS_BETA)s
THEN
-- User's current version is a known beta version.
--
-- Serve only beta updates. Serving a full version here
-- will forever kick users out of the beta update channel.
--
-- If the add-on does not have full review, serve no
-- updates.
addons.status = %(STATUS_PUBLIC)s AND
files.status = %(STATUS_BETA)s
WHEN addons.status IN (%(STATUS_LITE)s,
%(STATUS_LITE_AND_NOMINATED)s)
AND (curfile.id IS NULL OR curfile.status = %(STATUS_LITE)s)
THEN
-- Add-on is prelim, and user's current version is either a
-- known prelim, or an unknown version.
--
-- Serve only prelim versions. Serving a full version here
-- will prevent users from receiving further updates until
-- the add-on achieves full review.
files.status = %(STATUS_LITE)s
ELSE
-- Anything else, including:
--
-- * Add-on has full review
-- * User's current version has full review, regardless
-- of add-on status
--
-- Serve only full-reviewed updates.
files.status = %(STATUS_PUBLIC)s
END
""")
sql.append('AND appmin.version_int <= %(version_int)s ')
if self.compat_mode == 'ignore':
pass # no further SQL modification required.
elif self.compat_mode == 'normal':
# When file has strict_compatibility enabled, or file has binary
# components, default to compatible is disabled.
sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_max = applications.D2C_MAX_VERSIONS.get(data['app_id'])
if d2c_max:
data['d2c_max_version'] = version_int(d2c_max)
sql.append("AND appmax.version_int >= %(d2c_max_version)s ")
# Filter out versions found in compat overrides
sql.append("""AND
NOT versions.id IN (
SELECT version_id FROM incompatible_versions
WHERE app_id=%(app_id)s AND
(min_app_version='0' AND
max_app_version_int >= %(version_int)s) OR
(min_app_version_int <= %(version_int)s AND
max_app_version='*') OR
(min_app_version_int <= %(version_int)s AND
max_app_version_int >= %(version_int)s)) """)
else: # Not defined or 'strict'.
sql.append('AND appmax.version_int >= %(version_int)s ')
# Special case for bug 1031516.
if data['guid'] == '[email protected]':
app_version = data['version_int']
hotfix_version = data['version']
if version_int('10') <= app_version <= version_int('16.0.1'):
if hotfix_version < '20121019.01':
sql.append("AND versions.version = '20121019.01' ")
elif hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
elif version_int('16.0.2') <= app_version <= version_int('24.*'):
if hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
sql.append('ORDER BY versions.id DESC LIMIT 1;')
self.cursor.execute(''.join(sql), data)
result = self.cursor.fetchone()
if result:
row = dict(zip([
'guid', 'type', 'disabled_by_user', 'appguid', 'min', 'max',
'file_id', 'file_status', 'hash', 'filename', 'version_id',
'datestatuschanged', 'strict_compat', 'releasenotes',
'version', 'premium_type'],
list(result)))
row['type'] = base.ADDON_SLUGS_UPDATE[row['type']]
row['url'] = get_mirror(data['addon_status'],
data['id'], row)
data['row'] = row
return True
return False
def get_bad_rdf(self):
return bad_rdf
def get_rdf(self):
if self.is_valid():
if self.get_update():
rdf = self.get_good_rdf()
else:
rdf = self.get_no_updates_rdf()
else:
rdf = self.get_bad_rdf()
self.cursor.close()
if self.conn:
self.conn.close()
return rdf
def get_no_updates_rdf(self):
name = base.ADDON_SLUGS_UPDATE[self.data['type']]
return no_updates_rdf % ({'guid': self.data['guid'], 'type': name})
def get_good_rdf(self):
data = self.data['row']
data['if_hash'] = ''
if data['hash']:
data['if_hash'] = ('<em:updateHash>%s</em:updateHash>' %
data['hash'])
data['if_update'] = ''
if data['releasenotes']:
data['if_update'] = ('<em:updateInfoURL>%s%s%s/%%APP_LOCALE%%/'
'</em:updateInfoURL>' %
(settings.SITE_URL, '/versions/updateInfo/',
data['version_id']))
return good_rdf % data
def format_date(self, secs):
return '%s GMT' % formatdate(time() + secs)[:25]
def get_headers(self, length):
return [('Content-Type', 'text/xml'),
('Cache-Control', 'public, max-age=3600'),
('Last-Modified', self.format_date(0)),
('Expires', self.format_date(3600)),
('Content-Length', str(length))]
def mail_exception(data):
if settings.EMAIL_BACKEND != 'django.core.mail.backends.smtp.EmailBackend':
return
msg = MIMEText('%s\n\n%s' % (
'\n'.join(traceback.format_exception(*sys.exc_info())), data))
msg['Subject'] = '[Update] ERROR at /services/update'
msg['To'] = ','.join([a[1] for a in settings.ADMINS])
msg['From'] = settings.DEFAULT_FROM_EMAIL
conn = smtplib.SMTP(getattr(settings, 'EMAIL_HOST', 'localhost'),
getattr(settings, 'EMAIL_PORT', '25'))
conn.sendmail(settings.DEFAULT_FROM_EMAIL, msg['To'], msg.as_string())
conn.close()
def log_exception(data):
    (typ, value, tb) = sys.exc_info()  # avoid shadowing the traceback module
error_log.error(u'Type: %s, %s. Query: %s' % (typ, value, data))
def application(environ, start_response):
status = '200 OK'
with statsd.timer('services.update'):
data = dict(parse_qsl(environ['QUERY_STRING']))
compat_mode = data.pop('compatMode', 'strict')
try:
update = Update(data, compat_mode)
output = update.get_rdf()
start_response(status, update.get_headers(len(output)))
except:
#mail_exception(data)
log_exception(data)
raise
return [output]
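# Minimal sketch for serving this WSGI app locally (illustrative only):
#   from wsgiref.simple_server import make_server
#   make_server('', 8000, application).serve_forever()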
| bsd-3-clause | -7,104,861,223,561,318,000 | 36.473822 | 79 | 0.533147 | false | 4.066761 | false | false | false |
pLeBlanc93/ArcREST | src/arcrest/manageorg/_portals.py | 1 | 82353 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from ..security import PortalServerSecurityHandler
from ..manageags import AGSAdministration
from ..hostedservice import Services
from ..common.general import local_time_to_online
from .._abstract.abstract import BaseAGOLClass
import os
from ..packages.six.moves import urllib_parse as urlparse
from . import _parameters as parameters
import json
########################################################################
class Portals(BaseAGOLClass):
"""
A multitenant portal contains multiple portals, each one of which is
owned by and represents an organization. Each user in the multitenant
portal belongs to one of these organizational portals or to a default
portal that includes all users who do not belong to an organization.
The Portals Root resource is a root placeholder resource that covers
all the portals contained in the multitenant portal.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_culture = None
_region = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.lower().endswith("/portals"):
self._url = url
else:
self._url = "%s/portals" % url
self._securityHandler = securityHandler
self._proxy_port = proxy_port
self._proxy_url = proxy_url
#----------------------------------------------------------------------
@property
def root(self):
"""gets the classes url"""
return self._url
#----------------------------------------------------------------------
@property
def regions(self):
"""gets the regions value"""
url = "%s/regions" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def languages(self):
"""returns the site's languages"""
url = "%s/languages" % self.root
params = {'f': "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def info(self):
"""gets the sharing api information"""
url = "%s/info" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def portalSelf(self):
"""The portal to which the current user belongs. This is an
organizational portal if the user belongs to an organization or the
default portal if the user does not belong to one"""
url = "%s/self" % self.root
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
)
#----------------------------------------------------------------------
def portal(self, portalID=None):
"""returns a specific reference to a portal"""
if portalID is None:
portalID = self.portalSelf.id
url = "%s/%s" % (self.root, portalID)
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=True)
#----------------------------------------------------------------------
@property
def portalId(self):
"""gets the portal Id"""
return self.portalSelf.id
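# Hedged usage sketch (illustrative names only): a Portals object is the
# usual entry point into this module, e.g.
#   from arcrest.security import AGOLTokenSecurityHandler
#   sh = AGOLTokenSecurityHandler(username="user", password="pass")
#   portals = Portals(url="https://www.arcgis.com/sharing/rest",
#                     securityHandler=sh)
#   org = portals.portalSelf    # Portal object for the caller's org
# AGOLTokenSecurityHandler is assumed to be the token-based handler from
# arcrest.security; substitute whichever handler your deployment uses.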
########################################################################
class Portal(BaseAGOLClass):
"""
Portal returns information on your organization and is accessible to
administrators. Publishers and information workers can view users and
resources of the organization.
"""
_bingKey = None
_authorizedCrossOriginDomains = None
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_canSharePublic = None
_defaultExtent = None
_supportsHostedServices = None
_homePageFeaturedContentCount = None
_supportsOAuth = None
_portalName = None
_databaseUsage = None
_culture = None
_helpBase = None
_galleryTemplatesGroupQuery = None
_commentsEnabled = None
_databaseQuota = None
_id = None
_canSearchPublic = None
_customBaseUrl = None
_allSSL = None
_httpPort = None
_featuredGroupsId = None
_defaultBasemap = None
_created = None
_access = None
_platform = None
_isPortal = None
_canSignInArcGIS = None
_disableSignup = None
_httpsPort = None
_units = None
_backgroundImage = None
_mfaEnabled = None
_featuredGroups = None
_thumbnail = None
_featuredItemsGroupQuery = None
_canSignInIDP = None
_useStandardizedQuery = None
_rotatorPanels = None
_description = None
_homePageFeaturedContent = None
_helperServices = None
_canProvisionDirectPurchase = None
_canListData = None
_user = None
_helpMap = None
_canListPreProvisionedItems = None
_colorSetsGroupQuery = None
_canListApps = None
_portalProperties = None
_isWindows = None
_name = None
_supportsSceneServices = None
_stylesGroupQuery = None
_samlEnabled = None
_symbolSetsGroupQuery = None
_portalLocalHttpPort = None
_storageQuota = None
_canShareBingPublic = None
_maxTokenExpirationMinutes = None
_layerTemplatesGroupQuery = None
_staticImagesUrl = None
_modified = None
_portalHostname = None
_showHomePageDescription = None
_availableCredits = None
_portalMode = None
_portalLocalHttpsPort = None
_hostedServerHostedFolder = None
_storageUsage = None
_templatesGroupQuery = None
_portalLocalHostname = None
_basemapGalleryGroupQuery = None
_mfaAdmins = None
_portalId = None
_subscriptionInfo = None
_urlKey = None
_metadataEditable = None
_portalThumbnail = None
_metadataFormats = None
_ipCntryCode = None
_livingAtlasGroupQuery = None
_region = None
_contacts = None
_appInfo = None
_creditAssignments = None
_updateUserProfileDisabled = None
    _analysisLayersGroupQuery = None
    _defaultUserCreditAssignment = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "json"
}
json_dict = self._get(url=self.root,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
setattr(self, k, v)
print( k, " - attribute not implemented in Portal class.")
#----------------------------------------------------------------------
def _findPortalId(self):
"""gets the portal id for a site if not known."""
if not self.root.lower().endswith("/self"):
url = self.root + "/self"
else:
url = self.root
params = {
"f" : "json"
}
res = self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'id' in res:
return res['id']
return None
    @property
    def analysisLayersGroupQuery(self):
        '''gets the property value for analysisLayersGroupQuery'''
        if self._analysisLayersGroupQuery is None:
            self.__init()
        return self._analysisLayersGroupQuery
    #----------------------------------------------------------------------
    @property
    def defaultUserCreditAssignment(self):
        '''gets the property value for defaultUserCreditAssignment'''
        if self._defaultUserCreditAssignment is None:
            self.__init()
        return self._defaultUserCreditAssignment
    #----------------------------------------------------------------------
@property
def updateUserProfileDisabled(self):
'''gets the property value for updateUserProfileDisabled'''
if self._updateUserProfileDisabled is None:
self.__init()
return self._updateUserProfileDisabled
#----------------------------------------------------------------------
@property
def bingKey(self):
'''gets the property value for bingKey'''
if self._bingKey is None:
self.__init()
return self._bingKey
#----------------------------------------------------------------------
@property
def subscriptionInfo(self):
'''gets the property value for subscriptionInfo'''
if self._subscriptionInfo is None:
self.__init()
return self._subscriptionInfo
#----------------------------------------------------------------------
@property
def authorizedCrossOriginDomains(self):
""" gets the authorizedCrossOriginDomains property """
if self._authorizedCrossOriginDomains is None:
self.__init()
return self._authorizedCrossOriginDomains
#----------------------------------------------------------------------
@property
def appInfo(self):
'''gets the property value for appInfo'''
if self._appInfo is None:
self.__init()
return self._appInfo
#----------------------------------------------------------------------
@property
def contacts(self):
'''gets the property value for contacts'''
if self._contacts is None:
self.__init()
return self._contacts
#----------------------------------------------------------------------
@property
def urlKey(self):
'''gets the property value for urlKey'''
if self._urlKey is None:
self.__init()
return self._urlKey
#----------------------------------------------------------------------
@property
def metadataEditable(self):
'''gets the property value for metadataEditable'''
if self._metadataEditable is None:
self.__init()
return self._metadataEditable
#----------------------------------------------------------------------
@property
def portalThumbnail(self):
'''gets the property value for portalThumbnail'''
if self._portalThumbnail is None:
self.__init()
return self._portalThumbnail
#----------------------------------------------------------------------
@property
def metadataFormats(self):
'''gets the property value for metadataFormats'''
if self._metadataFormats is None:
self.__init()
return self._metadataFormats
#----------------------------------------------------------------------
@property
def ipCntryCode(self):
'''gets the property value for ipCntryCode'''
if self._ipCntryCode is None:
self.__init()
return self._ipCntryCode
#----------------------------------------------------------------------
@property
def livingAtlasGroupQuery(self):
'''gets the property value for livingAtlasGroupQuery'''
if self._livingAtlasGroupQuery is None:
self.__init()
return self._livingAtlasGroupQuery
#----------------------------------------------------------------------
@property
def region(self):
'''gets the property value for region'''
if self._region is None:
self.__init()
return self._region
#----------------------------------------------------------------------
@property
def portalId(self):
"""gets the portal Id"""
if self._portalId is None:
self._portalId = self._findPortalId()
return self._portalId
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._url
#----------------------------------------------------------------------
@property
def canSharePublic(self):
'''gets the property value for canSharePublic'''
if self._canSharePublic is None:
self.__init()
return self._canSharePublic
#----------------------------------------------------------------------
@property
def defaultExtent(self):
'''gets the property value for defaultExtent'''
if self._defaultExtent is None:
self.__init()
return self._defaultExtent
#----------------------------------------------------------------------
@property
def supportsHostedServices(self):
'''gets the property value for supportsHostedServices'''
if self._supportsHostedServices is None:
self.__init()
return self._supportsHostedServices
#----------------------------------------------------------------------
@property
def homePageFeaturedContentCount(self):
'''gets the property value for homePageFeaturedContentCount'''
if self._homePageFeaturedContentCount is None:
self.__init()
return self._homePageFeaturedContentCount
#----------------------------------------------------------------------
@property
def supportsOAuth(self):
'''gets the property value for supportsOAuth'''
if self._supportsOAuth is None:
self.__init()
return self._supportsOAuth
#----------------------------------------------------------------------
@property
def portalName(self):
'''gets the property value for portalName'''
if self._portalName is None:
self.__init()
return self._portalName
#----------------------------------------------------------------------
@property
def databaseUsage(self):
'''gets the property value for databaseUsage'''
if self._databaseUsage is None:
self.__init()
return self._databaseUsage
#----------------------------------------------------------------------
@property
def culture(self):
'''gets the property value for culture'''
if self._culture is None:
self.__init()
return self._culture
#----------------------------------------------------------------------
@property
def helpBase(self):
'''gets the property value for helpBase'''
if self._helpBase is None:
self.__init()
return self._helpBase
#----------------------------------------------------------------------
@property
def galleryTemplatesGroupQuery(self):
'''gets the property value for galleryTemplatesGroupQuery'''
if self._galleryTemplatesGroupQuery is None:
self.__init()
return self._galleryTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def commentsEnabled(self):
'''gets the property value for commentsEnabled'''
if self._commentsEnabled is None:
self.__init()
return self._commentsEnabled
#----------------------------------------------------------------------
@property
def databaseQuota(self):
'''gets the property value for databaseQuota'''
if self._databaseQuota is None:
self.__init()
return self._databaseQuota
#----------------------------------------------------------------------
@property
def id(self):
'''gets the property value for id'''
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def canSearchPublic(self):
'''gets the property value for canSearchPublic'''
if self._canSearchPublic is None:
self.__init()
return self._canSearchPublic
#----------------------------------------------------------------------
@property
def customBaseUrl(self):
'''gets the property value for customBaseUrl'''
if self._customBaseUrl is None:
self.__init()
return self._customBaseUrl
#----------------------------------------------------------------------
@property
def allSSL(self):
'''gets the property value for allSSL'''
if self._allSSL is None:
self.__init()
return self._allSSL
#----------------------------------------------------------------------
@property
def httpPort(self):
'''gets the property value for httpPort'''
if self._httpPort is None:
self.__init()
return self._httpPort
#----------------------------------------------------------------------
@property
def featuredGroupsId(self):
'''gets the property value for featuredGroupsId'''
if self._featuredGroupsId is None:
self.__init()
return self._featuredGroupsId
#----------------------------------------------------------------------
@property
def defaultBasemap(self):
'''gets the property value for defaultBasemap'''
if self._defaultBasemap is None:
self.__init()
return self._defaultBasemap
#----------------------------------------------------------------------
@property
def created(self):
'''gets the property value for created'''
if self._created is None:
self.__init()
return self._created
#----------------------------------------------------------------------
@property
def access(self):
'''gets the property value for access'''
if self._access is None:
self.__init()
return self._access
#----------------------------------------------------------------------
@property
def platform(self):
'''gets the property value for platform'''
if self._platform is None:
self.__init()
return self._platform
#----------------------------------------------------------------------
@property
def isPortal(self):
'''gets the property value for isPortal'''
if self._isPortal is None:
self.__init()
return self._isPortal
#----------------------------------------------------------------------
@property
def canSignInArcGIS(self):
'''gets the property value for canSignInArcGIS'''
if self._canSignInArcGIS is None:
self.__init()
return self._canSignInArcGIS
#----------------------------------------------------------------------
@property
def disableSignup(self):
'''gets the property value for disableSignup'''
if self._disableSignup is None:
self.__init()
return self._disableSignup
#----------------------------------------------------------------------
@property
def httpsPort(self):
'''gets the property value for httpsPort'''
if self._httpsPort is None:
self.__init()
return self._httpsPort
#----------------------------------------------------------------------
@property
def units(self):
'''gets the property value for units'''
if self._units is None:
self.__init()
return self._units
#----------------------------------------------------------------------
@property
def backgroundImage(self):
'''gets the property value for backgroundImage'''
if self._backgroundImage is None:
self.__init()
return self._backgroundImage
#----------------------------------------------------------------------
@property
def mfaEnabled(self):
'''gets the property value for mfaEnabled'''
if self._mfaEnabled is None:
self.__init()
return self._mfaEnabled
#----------------------------------------------------------------------
@property
def featuredGroups(self):
'''gets the property value for featuredGroups'''
if self._featuredGroups is None:
self.__init()
return self._featuredGroups
#----------------------------------------------------------------------
@property
def thumbnail(self):
'''gets the property value for thumbnail'''
if self._thumbnail is None:
self.__init()
return self._thumbnail
#----------------------------------------------------------------------
@property
def featuredItemsGroupQuery(self):
'''gets the property value for featuredItemsGroupQuery'''
if self._featuredItemsGroupQuery is None:
self.__init()
return self._featuredItemsGroupQuery
#----------------------------------------------------------------------
@property
def canSignInIDP(self):
'''gets the property value for canSignInIDP'''
if self._canSignInIDP is None:
self.__init()
return self._canSignInIDP
#----------------------------------------------------------------------
@property
def useStandardizedQuery(self):
'''gets the property value for useStandardizedQuery'''
if self._useStandardizedQuery is None:
self.__init()
return self._useStandardizedQuery
#----------------------------------------------------------------------
@property
def rotatorPanels(self):
'''gets the property value for rotatorPanels'''
if self._rotatorPanels is None:
self.__init()
return self._rotatorPanels
#----------------------------------------------------------------------
@property
def description(self):
'''gets the property value for description'''
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def homePageFeaturedContent(self):
'''gets the property value for homePageFeaturedContent'''
if self._homePageFeaturedContent is None:
self.__init()
return self._homePageFeaturedContent
#----------------------------------------------------------------------
@property
def helperServices(self):
'''gets the property value for helperServices'''
if self._helperServices is None:
self.__init()
return self._helperServices
#----------------------------------------------------------------------
@property
def canProvisionDirectPurchase(self):
'''gets the property value for canProvisionDirectPurchase'''
if self._canProvisionDirectPurchase is None:
self.__init()
return self._canProvisionDirectPurchase
#----------------------------------------------------------------------
@property
def canListData(self):
'''gets the property value for canListData'''
if self._canListData is None:
self.__init()
return self._canListData
#----------------------------------------------------------------------
@property
def user(self):
'''gets the property value for user'''
if self._user is None:
self.__init()
return self._user
#----------------------------------------------------------------------
@property
def helpMap(self):
'''gets the property value for helpMap'''
if self._helpMap is None:
self.__init()
return self._helpMap
#----------------------------------------------------------------------
@property
def canListPreProvisionedItems(self):
'''gets the property value for canListPreProvisionedItems'''
if self._canListPreProvisionedItems is None:
self.__init()
return self._canListPreProvisionedItems
#----------------------------------------------------------------------
@property
def colorSetsGroupQuery(self):
'''gets the property value for colorSetsGroupQuery'''
if self._colorSetsGroupQuery is None:
self.__init()
return self._colorSetsGroupQuery
#----------------------------------------------------------------------
@property
def canListApps(self):
'''gets the property value for canListApps'''
if self._canListApps is None:
self.__init()
return self._canListApps
#----------------------------------------------------------------------
@property
def portalProperties(self):
'''gets the property value for portalProperties'''
if self._portalProperties is None:
self.__init()
return self._portalProperties
#----------------------------------------------------------------------
@property
def isWindows(self):
'''gets the property value for isWindows'''
if self._isWindows is None:
self.__init()
return self._isWindows
#----------------------------------------------------------------------
@property
def name(self):
'''gets the property value for name'''
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def supportsSceneServices(self):
'''gets the property value for supportsSceneServices'''
if self._supportsSceneServices is None:
self.__init()
return self._supportsSceneServices
#----------------------------------------------------------------------
@property
def stylesGroupQuery(self):
'''gets the property value for stylesGroupQuery'''
if self._stylesGroupQuery is None:
self.__init()
return self._stylesGroupQuery
#----------------------------------------------------------------------
@property
def samlEnabled(self):
'''gets the property value for samlEnabled'''
if self._samlEnabled is None:
self.__init()
return self._samlEnabled
#----------------------------------------------------------------------
@property
def symbolSetsGroupQuery(self):
'''gets the property value for symbolSetsGroupQuery'''
if self._symbolSetsGroupQuery is None:
self.__init()
return self._symbolSetsGroupQuery
#----------------------------------------------------------------------
@property
def portalLocalHttpPort(self):
'''gets the property value for portalLocalHttpPort'''
if self._portalLocalHttpPort is None:
self.__init()
return self._portalLocalHttpPort
#----------------------------------------------------------------------
@property
def storageQuota(self):
'''gets the property value for storageQuota'''
if self._storageQuota is None:
self.__init()
return self._storageQuota
#----------------------------------------------------------------------
@property
def canShareBingPublic(self):
'''gets the property value for canShareBingPublic'''
if self._canShareBingPublic is None:
self.__init()
return self._canShareBingPublic
#----------------------------------------------------------------------
@property
def maxTokenExpirationMinutes(self):
'''gets the property value for maxTokenExpirationMinutes'''
if self._maxTokenExpirationMinutes is None:
self.__init()
return self._maxTokenExpirationMinutes
#----------------------------------------------------------------------
@property
def layerTemplatesGroupQuery(self):
'''gets the property value for layerTemplatesGroupQuery'''
if self._layerTemplatesGroupQuery is None:
self.__init()
return self._layerTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def staticImagesUrl(self):
'''gets the property value for staticImagesUrl'''
if self._staticImagesUrl is None:
self.__init()
return self._staticImagesUrl
#----------------------------------------------------------------------
@property
def modified(self):
'''gets the property value for modified'''
if self._modified is None:
self.__init()
return self._modified
#----------------------------------------------------------------------
@property
def portalHostname(self):
'''gets the property value for portalHostname'''
if self._portalHostname is None:
self.__init()
return self._portalHostname
#----------------------------------------------------------------------
@property
def showHomePageDescription(self):
'''gets the property value for showHomePageDescription'''
if self._showHomePageDescription is None:
self.__init()
return self._showHomePageDescription
#----------------------------------------------------------------------
@property
def availableCredits(self):
'''gets the property value for availableCredits'''
if self._availableCredits is None:
self.__init()
return self._availableCredits
#----------------------------------------------------------------------
@property
def portalMode(self):
'''gets the property value for portalMode'''
if self._portalMode is None:
self.__init()
return self._portalMode
#----------------------------------------------------------------------
@property
def portalLocalHttpsPort(self):
'''gets the property value for portalLocalHttpsPort'''
if self._portalLocalHttpsPort is None:
self.__init()
return self._portalLocalHttpsPort
#----------------------------------------------------------------------
@property
def hostedServerHostedFolder(self):
'''gets the property value for hostedServerHostedFolder'''
if self._hostedServerHostedFolder is None:
self.__init()
return self._hostedServerHostedFolder
#----------------------------------------------------------------------
@property
def storageUsage(self):
'''gets the property value for storageUsage'''
if self._storageUsage is None:
self.__init()
return self._storageUsage
#----------------------------------------------------------------------
@property
def templatesGroupQuery(self):
'''gets the property value for templatesGroupQuery'''
if self._templatesGroupQuery is None:
self.__init()
return self._templatesGroupQuery
#----------------------------------------------------------------------
@property
def portalLocalHostname(self):
'''gets the property value for portalLocalHostname'''
if self._portalLocalHostname is None:
self.__init()
return self._portalLocalHostname
#----------------------------------------------------------------------
@property
def basemapGalleryGroupQuery(self):
'''gets the property value for basemapGalleryGroupQuery'''
if self._basemapGalleryGroupQuery is None:
self.__init()
return self._basemapGalleryGroupQuery
#----------------------------------------------------------------------
@property
def mfaAdmins(self):
'''gets the property value for mfaAdmins'''
if self._mfaAdmins is None:
self.__init()
return self._mfaAdmins
#----------------------------------------------------------------------
@property
def creditAssignments(self):
'''gets the property value for creditAssignments'''
if self._creditAssignments is None:
self.__init()
return self._creditAssignments
#----------------------------------------------------------------------
@property
def urls(self):
"""gets the urls for a portal"""
url = "%s/urls" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def featureServers(self):
"""gets the hosting feature AGS Server"""
services = []
if self.urls == {}:
return {}
urls = self.urls
if 'https' in urls['urls']['features']:
res = urls['urls']['features']['https']
else:
res = urls['urls']['features']['http']
for https in res:
if self.isPortal:
url = "%s/admin" % https
services.append(AGSAdministration(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
else:
url = "https://%s/%s/ArcGIS/admin" % (https, self.portalId)
services.append(Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
return services
#----------------------------------------------------------------------
@property
def tileServers(self):
"""
Returns the objects to manage site's tile hosted services/servers. It returns
AGSAdministration object if the site is Portal and it returns a
hostedservice.Services object if it is AGOL.
"""
services = []
ishttps = False
if self.urls == {}:
return {}
urls = self.urls["urls"]['tiles']
if 'https' in urls:
res = urls['https']
ishttps = True
else:
res = urls['http']
for https in res:
if ishttps:
scheme = "https"
else:
scheme = "http"
            if not self.isPortal:
url = "%s://%s/tiles/%s/arcgis/admin/services" % (scheme, https, self.portalId)
services.append(Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
else:
url = "%s/admin" % https
servers = self.servers
for server in servers.servers:
url = server.adminUrl
sh = PortalServerSecurityHandler(tokenHandler=self._securityHandler,
serverUrl=url,
referer=server.name.split(":")[0]
)
services.append(
AGSAdministration(url=url,
securityHandler=sh,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
)
return services
#----------------------------------------------------------------------
@property
def purchases(self):
"""gets the portal's purchases"""
url = "%s/purchases" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def customers(self):
"""gets the site's customers"""
url = "%s/customers" % self.root
params = {"f":"json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def exportCustomers(self, outPath):
"""exports customer list to a csv file
Input:
outPath - save location of the customer list
"""
url = "%s/customers/export" % self.root
params = {"f":"csv"}
dirPath = None
fileName = None
if outPath is not None:
dirPath = os.path.dirname(outPath)
fileName = os.path.basename(outPath)
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler, proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
out_folder=dirPath,
file_name=fileName)
#----------------------------------------------------------------------
def update(self,
updatePortalParameters,
clearEmptyFields=False):
"""
The Update operation allows administrators only to update the
organization information such as name, description, thumbnail, and
featured groups.
Inputs:
updatePortalParamters - parameter.PortalParameters object that holds information to update
clearEmptyFields - boolean that clears all whitespace from fields
"""
url = self.root + "/update"
params = {
"f" : "json",
"clearEmptyFields" : clearEmptyFields
}
if isinstance(updatePortalParameters, parameters.PortalParameters):
params.update(updatePortalParameters.value)
elif isinstance(updatePortalParameters, dict):
for k,v in updatePortalParameters.items():
params[k] = v
else:
            raise AttributeError("updatePortalParameters must be of type parameters.PortalParameters or a dict")
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateUserRole(self,
user,
role):
"""
        The Update User Role operation allows the administrator of an
        organization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
"""
url = self._url + "/updateuserrole"
params = {
"f" : "json",
"user" : user,
"role" : role
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
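    # Hedged example (illustrative username):
    #   portal.updateUserRole(user="jdoe", role="org_publisher")
    # 'portal' is assumed to be a Portal instance created elsewhere.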
#----------------------------------------------------------------------
def removeUser(self, users):
"""
The Remove Users operation allows the administrator to remove users
from a portal. Before the administrator can remove the user, all of
the user's content and groups must be reassigned or deleted.
Inputs:
users - Comma-separated list of usernames to remove.
"""
url = self._url + "/removeusers"
params = {
"f" : "json",
"users" : users
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def isServiceNameAvailable(self,
name,
serviceType):
"""
Checks to see if a given service name and type are available for
publishing a new service. true indicates that the name and type is
not found in the organization's services and is available for
publishing. false means the requested name and type are not available.
Inputs:
name - requested name of service
serviceType - type of service allowed values: Feature Service or
Map Service
"""
_allowedTypes = ['Feature Service', "Map Service"]
url = self._url + "/isServiceNameAvailable"
params = {
"f" : "json",
"name" : name,
"type" : serviceType
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
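    # Hedged example (illustrative service name):
    #   portal.isServiceNameAvailable(name="Parcels",
    #                                 serviceType="Feature Service")
    # 'portal' is assumed to be a Portal instance created elsewhere.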
#----------------------------------------------------------------------
@property
def servers(self):
"""gets the federated or registered servers for Portal"""
url = "%s/servers" % self.root
return Servers(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def assignUserCredits(self, usernames, credits):
"""
assigns credit to a user.
Inputs:
usernames - list of users
credits - number of credits to assign to the users
Ouput:
dictionary
"""
userAssignments = []
for name in usernames:
userAssignments.append(
{
"username" : name,
"credits" : credits
}
)
params = {
"userAssignments" : userAssignments,
"f" : "json"
}
url = self.root + "/assignUserCredits"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
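    # Hedged example (illustrative values): grant 100 credits each to two
    # named members, assuming 'portal' is a Portal instance:
    #   portal.assignUserCredits(usernames=["jdoe", "asmith"], credits=100)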
#----------------------------------------------------------------------
def users(self,
start=1,
num=10,
sortField="fullName",
sortOrder="asc",
role=None):
"""
Lists all the members of the organization. The start and num paging
parameters are supported.
Inputs:
start - The number of the first entry in the result set response.
The index number is 1-based.
The default value of start is 1 (that is, the first
search result).
The start parameter, along with the num parameter, can
be used to paginate the search results.
num - The maximum number of results to be included in the result
set response.
The default value is 10, and the maximum allowed value is
100.The start parameter, along with the num parameter, can
be used to paginate the search results.
sortField - field to sort on
sortOrder - asc or desc on the sortField
role - name of the role or role id to search
Output:
list of User classes
"""
users = []
url = self._url + "/users"
params = {
"f" : "json",
"start" : start,
"num" : num
}
        if role is not None:
            params['role'] = role
        if sortField is not None:
            params['sortField'] = sortField
        if sortOrder is not None:
            params['sortOrder'] = sortOrder
from ._community import Community
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if "users" in res:
if len(res['users']) > 0:
parsed = urlparse.urlparse(self._url)
if parsed.netloc.lower().find('arcgis.com') == -1:
cURL = "%s://%s/%s/sharing/rest/community" % (parsed.scheme,
parsed.netloc,
parsed.path[1:].split('/')[0])
else:
cURL = "%s://%s/sharing/rest/community" % (parsed.scheme,
parsed.netloc)
com = Community(url=cURL,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
for r in res['users']:
users.append(
com.users.user(r["username"])
)
res['users'] = users
return res
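    # Hedged example (illustrative values): paging through every member of
    # the organization with users() above; 'portal' is assumed to be a
    # Portal instance, and a nextStart of -1 marks the last page.
    #   start = 1
    #   while start != -1:
    #       page = portal.users(start=start, num=100)
    #       for member in page['users']:
    #           print(member.username)
    #       start = page['nextStart']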
#----------------------------------------------------------------------
def createRole(self, name, description):
"""
creates a role for a portal/agol site.
Inputs:
names - name of the role
description - brief text string stating the nature of this
role.
Ouput:
dictionary
"""
params = {
"name" : name,
"description" : description,
"f" : "json"
}
url = self.root + "/createRole"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def roles(self):
"""gets the roles class that allows admins to manage custom roles
on portal"""
return Roles(url="%s/roles" % self.root,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def cost(self,
tileStorage=0,
fileStorage=0,
featureStorage=0,
generatedTileCount=0,
loadedTileCount=0,
enrichVariableCount=0,
enrichReportCount=0,
serviceAreaCount=0,
geocodeCount=0):
"""
returns the cost values for a given portal
        Inputs:
           tileStorage - int - number of tiles to store in MBs
           fileStorage - int - size of file to store in MBs
           featureStorage - int - size in MBs
           generatedTileCount - int - number of tiles to generate on site
           loadedTileCount - int - cost to host a certain number of tiles
           enrichVariableCount - int - cost to enrich data
           enrichReportCount - int - cost to generate an enrichment report
           serviceAreaCount - int - cost to generate x number of service
                              areas
           geocodeCount - int - cost to generate x number of addresses
"""
params = {
"f" : "json",
"tileStorage": tileStorage,
"fileStorage": fileStorage,
"featureStorage": featureStorage,
"generatedTileCount": generatedTileCount,
"loadedTileCount":loadedTileCount,
"enrichVariableCount": enrichVariableCount,
"enrichReportCount" : enrichReportCount,
"serviceAreaCount" : serviceAreaCount,
"geocodeCount" : geocodeCount
}
url = self._url + "/cost"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def resources(self,
start=1,
num=10):
"""
Resources lists all file resources for the organization. The start
and num paging parameters are supported.
Inputs:
start - the number of the first entry in the result set response
The index number is 1-based and the default is 1
num - the maximum number of results to be returned as a whole #
"""
url = self._url + "/resources"
params = {
"f" : "json",
"start" : start,
"num" : num
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def addResource(self, key, filePath, text):
"""
The add resource operation allows the administrator to add a file
resource, for example, the organization's logo or custom banner.
The resource can be used by any member of the organization. File
resources use storage space from your quota and are scanned for
viruses.
Inputs:
key - The name the resource should be stored under.
filePath - path of file to upload
text - Some text to be written (for example, JSON or JavaScript)
directly to the resource from a web client.
"""
url = self.root + "/addresource"
params = {
"f": "json",
"token" : self._securityHandler.token,
"key" : key,
"text" : text
}
files = {}
files['file'] = filePath
res = self._post(url=url,
param_dict=params,
files=files,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res
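    # Hedged example (illustrative key and path): storing an organization
    # logo with addResource(); pass text="" when only the file matters.
    #   portal.addResource(key="orgLogo.png",
    #                      filePath=r"C:\temp\logo.png",
    #                      text="")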
#----------------------------------------------------------------------
def removeResource(self, key):
"""
The Remove Resource operation allows the administrator to remove a
file resource.
Input:
key - name of resource to delete
"""
url = self._url + "/removeresource"
params = {
"key" : key,
"f" : "json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def securityPolicy(self):
"""gets the object to manage the portal's security policy"""
url = "%s/securityPolicy" % self.root
params = {'f': 'json'}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def resetSecurityPolicy(self):
"""resets the security policy to default install"""
params = {"f" : "json"}
url = "%s/securityPolicy/reset" % self.root
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateSecurityPolicy(self,
minLength=8,
minUpper=None,
minLower=None,
minLetter=None,
minDigit=None,
minOther=None,
expirationInDays=None,
historySize=None):
"""updates the Portals security policy"""
params = {
"f" : "json",
"minLength" : minLength,
"minUpper": minUpper,
"minLower": minLower,
"minLetter": minLetter,
"minDigit": minDigit,
"minOther": minOther,
"expirationInDays" : expirationInDays,
"historySize": historySize
}
url = "%s/securityPolicy/update" % self.root
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
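    # Hedged example (illustrative values): require 10+ character passwords
    # with at least one digit, expire them every 90 days, and disallow reuse
    # of the previous 5 passwords.
    #   portal.updateSecurityPolicy(minLength=10, minDigit=1,
    #                               expirationInDays=90, historySize=5)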
#----------------------------------------------------------------------
@property
def portalAdmin(self):
"""gets a reference to a portal administration class"""
from ..manageportal import PortalAdministration
return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False)
#----------------------------------------------------------------------
def addUser(self, invitationList,
subject, html):
"""
adds a user without sending an invitation email
Inputs:
invitationList - InvitationList class used to add users without
sending an email
subject - email subject
html - email message sent to users in invitation list object
"""
url = self._url + "/invite"
params = {"f" : "json"}
if isinstance(invitationList, parameters.InvitationList):
            params['invitationList'] = invitationList.value
params['html'] = html
params['subject'] = subject
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def inviteByEmail(self,
emails,
subject,
text,
html,
role="org_user",
mustApprove=True,
expiration=1440):
"""Invites a user or users to a site.
        Inputs:
           emails - comma-separated list of emails
           subject - title of email
           text - email text
           html - email text in html
           role - site role (can't be administrator)
           mustApprove - determines whether a joining user must be
                         approved by an administrator
           expiration - time in minutes. Default is 1 day (1440)
"""
url = self.root + "/inviteByEmail"
params = {
"f" : "json",
"emails": emails,
"subject": subject,
"text": text,
"html" : html,
"role" : role,
"mustApprove": mustApprove,
"expiration" : expiration
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def invitations(self):
"""gets all the invitations to the current portal"""
params = {"f": "json"}
url = "%s/invitations" % self.root
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def usage(self, startTime, endTime, vars=None, period=None,
groupby=None, name=None, stype=None, etype=None,
appId=None, deviceId=None, username=None, appOrgId=None,
userOrgId=None, hostOrgId=None):
"""
returns the usage statistics value
"""
url = self.root + "/usage"
startTime = str(int(local_time_to_online(dt=startTime)))
endTime = str(int(local_time_to_online(dt=endTime)))
params = {
'f' : 'json',
'startTime' : startTime,
'endTime' : endTime,
'vars' : vars,
'period' : period,
'groupby' : groupby,
'name' : name,
'stype' : stype,
'etype' : etype,
'appId' : appId,
'deviceId' : deviceId,
'username' : username,
'appOrgId' : appOrgId,
'userOrgId' : userOrgId,
'hostOrgId' : hostOrgId,
}
params = {key:item for key,item in params.items() if item is not None}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
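    # Hedged example (illustrative values): credit usage for the last week
    # grouped by event type; startTime/endTime are local datetimes that the
    # method converts with local_time_to_online() before the request.
    #   import datetime
    #   end = datetime.datetime.now()
    #   start = end - datetime.timedelta(days=7)
    #   report = portal.usage(startTime=start, endTime=end,
    #                         vars="credits", period="1d", groupby="etype")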
#----------------------------------------------------------------------
@property
def IDP(self):
"""gets the IDP information for the portal/agol"""
url = "%s/idp" % self.root
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class Servers(BaseAGOLClass):
"""This resource lists the ArcGIS Server sites that have been federated
    with the portal. This resource is not applicable to ArcGIS Online; it is
only applicable to Portal for ArcGIS.
"""
_servers = None
_surl = None
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
########################################################################
    class Server(BaseAGOLClass):
        """represents a single server instance registered with the portal"""
        _surl = None
        _url = None
        _id = None
        _name = None
        _adminUrl = None
        _isHosted = None
        _serverKey = None
        _serverType = None
        _securityHandler = None
        _proxy_url = None
        _proxy_port = None
        _json = None
        _json_dict = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
self._surl = url
self._securityHandler = securityHandler
            if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "pjson"
}
json_dict = self._get(url=self._surl,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in Servers.Server class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._url
#----------------------------------------------------------------------
@property
def id(self):
"""gets the server id"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def name(self):
"""gets the server name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def adminUrl(self):
"""gets the adminURL for the server"""
if self._adminUrl is None:
self.__init()
return self._adminUrl
#----------------------------------------------------------------------
@property
def url(self):
"""gets the url for the server"""
if self._url is None:
self.__init()
return self._url
#----------------------------------------------------------------------
@property
def isHosted(self):
"""gets the isHosted value"""
if self._isHosted is None:
self.__init()
return self._isHosted
#----------------------------------------------------------------------
@property
def serverKey(self):
"""gets the server key"""
if self._serverKey is None:
self.__init()
return self._serverKey
#----------------------------------------------------------------------
@property
def serverType(self):
"""gets the server type"""
if self._serverType is None:
self.__init()
return self._serverType
#----------------------------------------------------------------------
def unregister(self):
"""
This operation unregisters an ArcGIS Server site from the portal.
The server is no longer federated with the portal after this
operation completes.
After this operation completes, you must invoke the Update Security
Configuration operation on your ArcGIS Server site to specify how
you want the server to work with users and roles.
            """
            url = self._surl + "/unregister"
params = {
"f" : "json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def update(self,
name,
url,
adminUrl,
isHosted,
serverType):
"""
This operation updates the properties of an ArcGIS Server site that
has been registered, or federated, with the portal. For example,
you can use this operation to change the federated site that acts
as the portal's hosting server.
Inputs:
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of the ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or will not be allowed to host services
(false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
url = self._url + "/update"
params = {
"name" : name,
"url" : url,
"adminUrl" : adminUrl,
"isHosted" : isHosted,
"serverType" : serverType
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None,
initalize=False):
"""Constructor"""
        if not url.lower().endswith('/servers'):
url = url + "/servers"
self._surl = url
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initalize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the property data into the class"""
params = {
"f" : "json"
}
json_dict = self._get(url=self._surl,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in Servers class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns class as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""iterates through raw JSON"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def root(self):
"""returns classes URL"""
return self._surl
#----------------------------------------------------------------------
def register(self,
name,
url,
adminUrl,
isHosted,
serverType):
"""
You can optionally register (or "federate") an ArcGIS Server site
with your Portal for ArcGIS deployment. This provides the
following benefits:
The server and the portal share the same user store (that of
the portal). This results in a convenient single sign-on
experience.
Any items you publish to the server are automatically shared
on the portal.
You can optionally allow the server to host tiled map services
and feature services published by portal users.
After you register a server with your portal, you must invoke the
Update Security Configuration operation on the ArcGIS Server site
and configure the site's security store to take advantage of users
and roles from the portal.
This operation is only applicable to Portal for ArcGIS; it is not
supported with ArcGIS Online.
Inputs:
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of your ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or not be allowed to host services (false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
url = self.root + "/register"
params = {
"f" : "json",
"url" : url,
"adminUrl" : adminUrl,
"isHosted" : isHosted,
"name" : name,
"serverType" : serverType
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def servers(self):
"""gets all the server resources"""
self.__init()
items = []
for k,v in self._json_dict.items():
if k == "servers":
for s in v:
if 'id' in s:
url = "%s/%s" % (self.root, s['id'])
items.append(
self.Server(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
del k,v
return items
########################################################################
class Roles(BaseAGOLClass):
"""Handles the searching, creation, deletion and updating of roles on
AGOL or Portal.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.find('/roles') < 0:
url = url + "/roles"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
#----------------------------------------------------------------------
def __str__(self):
"""returns the roles as a string"""
nextCount = 0
start = 0
num = 100
results = []
while nextCount != -1:
res = self.roles(start=start + nextCount, num=num)
results = results + res['roles']
nextCount = int(res['nextStart'])
return json.dumps(results)
#----------------------------------------------------------------------
def __iter__(self):
"""iterator to loop through role entries"""
nextCount = 0
start = 0
num = 100
results = []
while nextCount != -1:
res = self.roles(start=start + nextCount, num=num)
for r in res['roles']:
yield r
nextCount = int(res['nextStart'])
#----------------------------------------------------------------------
    def roles(self, start=1, num=100):
        """
        lists the custom roles on the AGOL/Portal site
        Input:
           start - number of the first role to return; default 1
           num - maximum number of roles to return; default 100
        """
url = self._url
params = {
"f" : "json",
"start" : start,
"num" : num
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def deleteRole(self, roleID):
"""
deletes a role by ID
"""
url = self._url + "/%s/delete" % roleID
params = {
"f" : "json"
}
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateRole(self, roleID, name, description):
"""allows for the role name or description to be modified"""
params = {
"name" : name,
"description" : description,
"f" : "json"
}
url = self._url + "/%s/update"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
    def info(self, roleID):
        """returns information about a role, given its ID"""
url = self._url + "/%s" % roleID
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def findRoleID(self, name):
"""searches the roles by name and returns the role's ID"""
for r in self:
if r['name'].lower() == name.lower():
return r['id']
del r
return None
#----------------------------------------------------------------------
    def privileges(self, roleID):
        """returns the assigned privileges for a given custom role"""
url = self._url + "/%s/privileges" % roleID
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def setPrivileges(self, roleID, privileges):
"""
assigns a role a set of actions that the role can perform on the
AGOL or Portal site.
Input:
roleID - unique id of the role
privileges - list of privileges to assign to role.
"""
params = {
"f" : "json",
"privileges" : {"privileges": privileges},
"id": roleID
}
url = self._url + "/%s/setPrivileges" % roleID
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
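#----------------------------------------------------------------------
# Minimal usage sketch for the Roles manager above. The portal URL and the
# security handler are placeholders: pass whatever AGOL/Portal token handler
# your install provides. This helper is illustrative, not part of the API.
def _example_print_role_privileges(securityHandler,
                                   url="https://www.arcgis.com/sharing/rest/portals/self"):
    """prints the privileges of the 'Viewer' role, if present"""
    roles = Roles(url=url, securityHandler=securityHandler)
    roleID = roles.findRoleID("Viewer")  # case-insensitive lookup by name
    if roleID is not None:
        print(roles.privileges(roleID=roleID))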
| apache-2.0 | 4,190,805,069,658,481,000 | 37.992898 | 101 | 0.445108 | false | 5.53113 | false | false | false |
avanc/mopidy-usbplaylist | mopidy_usbplaylist/playlists.py | 1 | 1414 | from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from mopidy import backend
from mopidy.models import Playlist
from mopidy.models import Track
import os
import fnmatch
import glob
def find_files(path):
matches = glob.glob(os.path.join(path,'*.mp3'))
return matches
def find_files2(path):
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.mp3'):
matches.append(os.path.join(root, filename))
return matches
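# Quick, self-contained check of the helpers above: find_files() only
# matches *.mp3 in the top level of `path`, while find_files2() walks
# subdirectories as well. The mount point below is an assumption.
if __name__ == '__main__':
    demo_path = '/media/usb'  # hypothetical USB mount point
    print('%d top-level tracks' % len(find_files(demo_path)))
    print('%d tracks including subfolders' % len(find_files2(demo_path)))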
class USBPlaylistProvider(backend.PlaylistsProvider):
def create(self, name):
pass
def delete(self, uri):
pass
def lookup(self, uri):
path=self.backend.config['usbplaylist']['path']
for playlist in self.playlists:
if playlist.uri == uri:
files=find_files2(path)
tracks =[]
for file in files:
tracks.append(Track(uri='file:'+file, name="USB-File"))
return playlist.copy(tracks=tracks)
def refresh(self):
playlists=[]
uri="usb://playall"
playlist = Playlist(uri=uri, name="USB")
playlists.append(playlist)
self.playlists = playlists
backend.BackendListener.send('playlists_loaded')
def save(self, playlist):
pass
| apache-2.0 | -1,820,384,431,323,083,800 | 24.25 | 75 | 0.603253 | false | 4.063218 | false | false | false |
TetraAsh/baruwa2 | baruwa/forms/accounts.py | 1 | 6737 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""accounts forms"""
from wtforms import PasswordField, validators, DecimalField, RadioField
from wtforms import BooleanField, TextField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from pylons.i18n.translation import lazy_ugettext as _
from sqlalchemy.orm.exc import NoResultFound
from baruwa.forms import Form
from baruwa.model.accounts import User
from baruwa.model.domains import Domain
from baruwa.model.meta import Session
from baruwa.forms.organizations import check_pw_strength
from baruwa.forms import TIMEZONE_TUPLES, REQ_MSG, EMAIL_MSG
from baruwa.forms.messages import MultiCheckboxField
ACCOUNT_TYPES = (
('3', _('User')),
('2', _('Domain admin')),
('1', _('Administrator')),
)
def check_password(form, field):
"check password strength"
check_pw_strength(field.data)
def check_domain(form, field):
"check domain"
domain = field.data.split('@')[1]
try:
Session.query(Domain).filter(Domain.name == domain).one()
except NoResultFound:
raise validators.ValidationError(
_(u'The domain: %(dom)s is not local')
% dict(dom=domain)
)
def check_account(form, field):
"check account"
if field.data == 3 and not form.domains.data:
raise validators.ValidationError(
            _(u'Please select at least one domain')
)
def can_reset(form, field):
"check account is legible to reset"
try:
user = Session.query(User)\
.filter(User.email == field.data)\
.one()
if user.account_type != 3:
raise validators.ValidationError(
_("Admin accounts cannot be reset via the web"))
except NoResultFound:
raise validators.ValidationError(_("Account not found"))
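# Sketch only: exercise one of the validators above without building a form.
def _example_is_local_address(address):
    """Illustrative helper (not part of the original module): runs the
    check_domain validator directly. It needs a configured Baruwa database
    session, so it is meant for interactive use or tests only."""
    class _Field(object):
        pass
    field = _Field()
    field.data = address
    try:
        check_domain(None, field)
        return True
    except validators.ValidationError:
        return False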
class AddUserForm(Form):
"""Add user"""
username = TextField(_('Username'),
[validators.Required(message=REQ_MSG),
validators.Length(min=4, max=254)])
firstname = TextField(_('First name'),
[validators.Length(max=254)])
lastname = TextField(_('Last name'),
[validators.Length(max=254)])
password1 = PasswordField(_('New Password'), [check_password,
validators.Required(message=REQ_MSG),
validators.EqualTo('password2',
message=_('Passwords must match'))])
password2 = PasswordField(_('Retype Password'),
[validators.Required(message=REQ_MSG)])
email = TextField(_('Email address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG)])
timezone = SelectField(_('Timezone'), choices=TIMEZONE_TUPLES)
account_type = SelectField(_('Account type'),
choices=list(ACCOUNT_TYPES))
domains = QuerySelectMultipleField(_('Domains'),
get_label='name',
allow_blank=True)
active = BooleanField(_('Enabled'))
send_report = BooleanField(_('Send reports'))
spam_checks = BooleanField(_('Enable spam checks'), default=True)
low_score = DecimalField(_('Probable spam score'), places=1, default=0)
high_score = DecimalField(_('Definite spam score'), places=1, default=0)
def validate_domains(form, field):
if int(form.account_type.data) == 3 and not field.data:
raise validators.ValidationError(
                _(u'Please select at least one domain'))
class EditUserForm(Form):
"""Edit user"""
username = TextField(_('Username'), [validators.Required(message=REQ_MSG),
validators.Length(min=4, max=254)])
firstname = TextField(_('First name'), [validators.Length(max=254)])
lastname = TextField(_('Last name'), [validators.Length(max=254)])
email = TextField(_('Email address'),
[validators.Required(message=REQ_MSG)])
timezone = SelectField(_('Timezone'), choices=TIMEZONE_TUPLES)
domains = QuerySelectMultipleField(_('Domains'), get_label='name',
allow_blank=False)
active = BooleanField(_('Enabled'))
send_report = BooleanField(_('Send reports'))
spam_checks = BooleanField(_('Enable spam checks'))
low_score = DecimalField(_('Spam low score'), places=1)
high_score = DecimalField(_('Spam high score'), places=1)
class BulkDelUsers(Form):
"""Bulk account delete form"""
accountid = MultiCheckboxField('')
whatdo = RadioField('', choices=[('delete', _('delete'),),
('disable', _('disable'),),
('enable', _('enable'),),])
class AddressForm(Form):
"""Add alias address"""
address = TextField(_('Email Address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG), check_domain])
enabled = BooleanField(_('Enabled'))
class ChangePasswordForm(Form):
"""Admin change user password"""
password1 = PasswordField(_('New Password'),
[check_password, validators.Required(message=REQ_MSG),
validators.EqualTo('password2',
message=_('Passwords must match'))])
password2 = PasswordField(_('Retype Password'),
[validators.Required(message=REQ_MSG)])
class UserPasswordForm(ChangePasswordForm):
"""User password change"""
password3 = PasswordField(_('Old Password'),
[validators.Required(message=REQ_MSG)])
class ResetPwForm(Form):
"""User reset password form"""
email = TextField(_('Email Address'),
[validators.Required(message=REQ_MSG),
validators.Email(message=EMAIL_MSG),
can_reset])
| gpl-3.0 | 4,717,546,960,922,763,000 | 38.629412 | 78 | 0.614962 | false | 4.332476 | false | false | false |
Schille/weimar-graphstore | weimar.py | 1 | 1485 | '''
Created on Mar 17, 2014
@author: mschilonka
'''
import argparse, sys
from remote import server as Server
from remote import worker as Worker
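# Example invocations (illustrative; the address and port are assumptions):
#   python weimar.py --worker --threads 4
#   python weimar.py --server --hyperdex-ip 127.0.0.1 --hyperdex-port 1982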
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--worker", help="Starts a weimar worker instance.", action="store_true")
parser.add_argument("-t", "--threads",type=int, dest='threads', help="The number of threads running in one a worker (Default=3).")
parser.add_argument("-s", "--server", help="Starts a weimar graph server.", action="store_true")
parser.add_argument("-i", "--hyperdex-ip",type=str ,dest='hyperdex_ip', help='The HyperDex coordinator IP address. Must be specified if a server is started.')
parser.add_argument("-p", "--hyperdex-port",type=int ,dest='hyperdex_port', help="The HyperDex coordinator port number. Must be specified if a server is started.")
args = parser.parse_args()
if args.worker:
if(args.threads is None):
args.threads = 3
Worker.start_worker(args.threads)
elif args.server:
if(args.hyperdex_ip is None or args.hyperdex_port is None):
            print('When starting a Weimar server, please specify the HyperDex coordinator\'s IP address and port.')
parser.print_help()
sys.exit(1)
if(args.threads is not None):
            print('--threads applies only to a worker process and will be ignored.')
Server.start_server(args.hyperdex_ip, args.hyperdex_port) | mit | 6,257,446,607,517,198,000 | 46.935484 | 167 | 0.665993 | false | 3.788265 | false | true | false |
yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/notification/describe_notification_items.py | 1 | 2183 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeNotificationItemsAction(BaseAction):
action = 'DescribeNotificationItems'
command = 'describe-notification-items'
usage = '%(prog)s [-i --notification_items...] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-i', '--notification-items', dest='notification_items',
action='store', type=str, default=None,
help='An array including IDs of notification items.')
parser.add_argument('-l', '--notification-list', dest='notification_list',
action='store', type=str, default=None,
help='The ID of notification list.')
parser.add_argument('-t', '--notification-item-type', dest='notification_item_type',
action='store', type=str, default=None,
help='The type of notification item, including email, phone and webhook.')
@classmethod
def build_directive(cls, options):
directive = {
"notification_items": options.notification_items,
"notification_list": options.notification_list,
"notification_item_type": options.notification_item_type
}
return directive
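# Illustrative only: the CLI front-end normally parses the flags registered
# above and passes the resulting options object to build_directive(). The
# ID values below are made-up placeholders.
if __name__ == '__main__':
    from argparse import Namespace
    _opts = Namespace(notification_items="nis-xxxx",
                      notification_list="nl-xxxx",
                      notification_item_type="email")
    print(DescribeNotificationItemsAction.build_directive(_opts))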
| apache-2.0 | 3,015,553,966,601,937,400 | 45.446809 | 102 | 0.584059 | false | 4.905618 | false | false | false |
hack4impact/Givology | mainSite/source/proj/giv/captcha.py | 1 | 4110 | import urllib2, urllib
from proj.settings import *
API_SSL_SERVER="https://www.google.com/recaptcha/api"
API_SERVER="http://www.google.com/recaptcha/api"
VERIFY_SERVER="www.google.com"
class RecaptchaResponse(object):
def __init__(self, is_valid, error_code=None):
self.is_valid = is_valid
self.error_code = error_code
def displayhtml (public_key,
use_ssl = False,
error = None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
return """<script type="text/javascript" src="%(ApiServer)s/challenge?k=%(PublicKey)s%(ErrorParam)s"></script>
<noscript>
<iframe src="%(ApiServer)s/noscript?k=%(PublicKey)s%(ErrorParam)s" height="300" width="500" frameborder="0"></iframe><br />
<textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
<input type='hidden' name='recaptcha_response_field' value='manual_challenge' />
</noscript>
""" % {
'ApiServer' : server,
'PublicKey' : public_key,
'ErrorParam' : error_param,
}
def submit (recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
recaptcha_response_field -- The value of recaptcha_response_field from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len (recaptcha_response_field) and len (recaptcha_challenge_field)):
return RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol')
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode ({
'privatekey': encode_if_necessary(private_key),
'remoteip' : encode_if_necessary(remoteip),
'challenge': encode_if_necessary(recaptcha_challenge_field),
'response' : encode_if_necessary(recaptcha_response_field),
})
request = urllib2.Request (
url = "http://%s/recaptcha/api/verify" % VERIFY_SERVER,
data = params,
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "reCAPTCHA Python"
}
)
httpresp = urllib2.urlopen (request)
    return_values = httpresp.read().splitlines()
    httpresp.close()
return_code = return_values [0]
if (return_code == "true"):
return RecaptchaResponse (is_valid=True)
else:
return RecaptchaResponse (is_valid=False, error_code = return_values [1])
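# Minimal usage sketch, assuming a Django-style request cycle; the POST keys
# match the form fields emitted by displayhtml(), while the private key and
# client IP are placeholders supplied by the caller.
def _example_verify(post_data, remote_ip, private_key):
    response = submit(post_data.get('recaptcha_challenge_field'),
                      post_data.get('recaptcha_response_field'),
                      private_key,
                      remote_ip)
    return response.is_valid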
def check_captcha(request):
captcha_challenge = request.POST.get('recaptcha_challenge_field')
captcha_response = request.POST.get('recaptcha_response_field')
captcha_result = None
ip = None
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR']
elif 'REMOTE_ADDR' in request.META:
ip = request.META['REMOTE_ADDR']
if captcha_response is not None and captcha_challenge is not None:
captcha_result = submit(captcha_challenge,
captcha_response,
recaptcha_private_key,
ip)
return captcha_result
def new_captcha_html(captcha_result):
if captcha_result is None:
captcha_html = displayhtml(recaptcha_public_key, use_ssl=True)
else:
captcha_html = displayhtml(recaptcha_public_key, use_ssl=True, error = captcha_result.error_code)
return captcha_html
| mit | 701,470,126,479,050,600 | 32.414634 | 125 | 0.62871 | false | 3.736364 | false | false | false |
meta-it/misc-addons | web_debranding/models/web_planner.py | 1 | 1046 | # -*- coding: utf-8 -*-
import re
from openerp import models, api
class Planner(models.Model):
_inherit = 'web.planner'
@api.model
def render(self, template_id, planner_app):
res = super(Planner, self).render(template_id, planner_app)
params = self.env['ir.config_parameter'].get_debranding_parameters()
planner_footer = params.get('web_debranding.planner_footer')
planner_footer = '<p>' + str(planner_footer) + '</p>'
res = re.sub(r'<p>[^<]*to contact our accounting experts by using the[\s\S]*?</div>', planner_footer, res)
res = re.sub(r'<p>[^<]*If you need help, do not hesitate to contact our experts[\s\S]*?</div>', planner_footer, res)
res = re.sub(r'<h4>Don\'t hesitate to[\s\S]*logo.png"/>', '', res)
res = re.sub(r'<p>Once it\'s fully working[\s\S]*odoo_logo.png"/>', planner_footer, res)
res = re.sub(r'<div class="mt32">[\s\S]*Fabien Pinckaers, Founder[\s\S]*?</div>', planner_footer, res)
return self.env['ir.translation']._debrand(res)
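    # Tiny standalone illustration of the substitution style used above
    # (assumed input; not executed by Odoo):
    #
    #   import re
    #   html = '<p>... to contact our accounting experts by using the'\
    #          ' planner</p></div>'
    #   footer = '<p>Custom footer</p>'
    #   re.sub(r'<p>[^<]*to contact our accounting experts by using the'
    #          r'[\s\S]*?</div>', footer, html)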
| lgpl-3.0 | -2,570,573,885,413,056,500 | 48.809524 | 124 | 0.616635 | false | 2.921788 | false | false | false |
radjkarl/dataArtist | dataArtist/items/GridROI.py | 1 | 14750 | # coding=utf-8
from __future__ import division
from __future__ import absolute_import
import pyqtgraph_karl as pg
import numpy as np
from math import cos, sin, pi
import cv2
from qtpy import QtCore
from .PseudoSquareROI import PseudoSquareROI
from dataArtist.items.QPainterPath import QPainterPath
class GridROI(pg.ROI):
'''
An ROI displaying mini ROIs of different shapes as a grid
'''
    # Immutable (tuple) defaults are used below to avoid the shared
    # mutable-default pitfall: default values are evaluated only once,
    # at function definition time.
    def __init__(self, pos=(20, 20), size=(20, 20), grid=(4, 5),
                 shape='Rect', gap=(0, 0), subgrid=([], []),
                 subgrid_width=0.05, pen='w', **kwargs):
'''
shape = ['Rect', 'Square', 'Circular', 'Pseudosquare']
'''
self.opts = {'shape': shape,
'grid': np.asarray(grid),
'gap': np.asfarray(gap),
'subgrid': subgrid,
'subgrid_width': subgrid_width
}
# TODO: limit max cell size while rescale
self.maxCellSize = size / self.opts['grid']
self.cells = []
self._createCells()
self._createSubgrid()
# cannot set brush at the moment, so:
if 'brush' in kwargs:
kwargs.pop('brush')
pg.ROI.__init__(self, pos, size, pen=pen, **kwargs)
self.translatable = False
self.mouseHovering = False
self._setCellSize(self.state['size'])
self._setCellPos(pos)
self.layout_rescaling = False
self.addScaleHandle([1, 1], [0, 0])
self.addScaleHandle([0, 0], [1, 1])
self.addScaleHandle([1, 0], [0, 1])
self.addScaleHandle([0, 1], [1, 0])
self.addRotateHandle([0.5, 1], [0.5, 0.5])
def getCellParameters(self, array, fn=np.mean):
out = np.arange(len(self.cells),
dtype=float).reshape(self.opts['grid'])
s = array.shape
for (i, j), n in np.ndenumerate(out):
m = self.cells[int(n)].getMask(s)
out[i, j] = fn(array[m])
return out
def saveState(self):
s = pg.ROI.saveState(self)
o = self.opts
s['gap'] = tuple(o['gap'])
s['grid'] = tuple(o['grid'])
s['shape'] = o['shape']
return s
def painterPath(self):
'''
Return a qpainterpath including all cells
'''
p = self.cells[0].painterPath()
for c in self.cells[1:]:
p.addPath(c.painterPath())
return p
def _createCells(self):
grid = self.opts['grid']
cellClass = {'Rect': RectROI,
'Circle': CircleROI,
'Pseudosquare': CellPseudoSquareROI}[self.opts['shape']]
self.layout_rescaling = True
for c in self.cells:
self.vb.removeItem(c)
self.cells = [cellClass(pos=[1, 1]) for _ in range(grid[0] * grid[1])]
i_scaleCell = -(grid[0] * grid[1] - grid[1] + 1)
self._scaleCell = c = self.cells[i_scaleCell]
c.setScaleCell()
c.sigRegionChanged.connect(self._cellResized)
def _createSubgrid(self):
for c in self.cells:
for line in c.subgrid:
self.vb.removeItem(line)
s = self.opts['subgrid']
w = self.opts['subgrid_width']
for c in self.cells:
for pos in s[0]:
c.subgrid.append(SubLine(c, orientation=0, pos=pos,
thickness=w))
for pos in s[1]:
c.subgrid.append(SubLine(c, orientation=1, pos=pos,
thickness=w))
for n, line in enumerate(self._scaleCell.subgrid):
line.setScaleLine()
line.sigRegionChanged.connect(lambda line, n=n:
self._lineResized(line, n))
def setPen(self, pen):
pg.ROI.setPen(self, pen)
for c in self.cells:
c.setPen(pen)
for line in c.subgrid:
line.setPen(pen)
def setBrush(self, pen):
pass
# TODO
# pg.ROI.setB(pen)
# for c in self.cells:
# c.setBrush(pen)
# #raises: AttributeError: 'RectROI' object has no attribute 'setBrush'
def getMask(self, shape):
m = self.cells[0].getMask(shape)
for c in self.cells[1:]:
m += c.getMask(shape)
return m
def __iter__(self):
return iter(self.cells)
def __len__(self):
return len(self.cells)
def _lineResized(self, line, n):
if not self.layout_rescaling:
#size = line.state['size']
pos = line.state['pos']
thick, pos = line.fromState()
for c in self.cells:
ln = c.subgrid[n]
if ln != line:
ln.thickness = thick
ln.pos = pos
ln.updatePos()
ln.updateSize()
def _cellResized(self, cell):
if not self.layout_rescaling:
size = cell.state['size']
self.opts['gap'] = (self.state['size'] - (
size * self.opts['grid'])) / (self.opts['grid'] - 1)
for c in self.cells:
if c != cell:
c.setSize(size)
self._setCellPos(self.state['pos'], True)
def setAngle(self, angle, **kwargs):
for c in self.cells:
c.setAngle(angle, **kwargs)
for line in c.subgrid:
line.setAngle(angle, **kwargs)
self._setCellPos(self.state['pos'])
pg.ROI.setAngle(self, angle, **kwargs)
def setPos(self, pos, **kwargs):
pg.ROI.setPos(self, pos, **kwargs)
self._setCellPos(pos)
def setSubGrid(self, s):
self.opts['subgrid'] = s
self.refresh()
def setGrid(self, x=None, y=None):
g = self.opts['grid']
if x is not None:
g[0] = x
if y is not None:
g[1] = y
self.refresh()
def setCellShape(self, shape):
self.opts['shape'] = shape
self.refresh()
def refresh(self):
self._createCells()
self._setCellSize(self.state['size'])
self._setCellPos(self.state['pos'])
[self.vb.addItem(c) for c in self.cells]
self._createSubgrid()
[[self.vb.addItem(line) for line in c.subgrid] for c in self.cells]
def setSize(self, size, update=True, finish=True):
pg.ROI.setSize(self, size, update, finish)
self.layout_rescaling = True
self._setCellSize(size)
self._setCellPos(self.state['pos'])
self.layout_rescaling = False
self.maxCellSize = size / self.opts['grid']
def _setCellSize(self, size):
size_cell = (size - (self.opts['grid'] - 1)
* self.opts['gap']) / self.opts['grid']
for c in self.cells:
c.setSize(size_cell)
for line in c.subgrid:
line.updateSize()
@staticmethod
def _rotatePoint(point, angle, center):
if angle == 0:
return point
x = point[0]
y = point[1]
cx = center[0]
cy = center[1]
point[0] = cos(angle) * (x - cx) - sin(angle) * (y - cy) + cx
point[1] = sin(angle) * (x - cx) + cos(angle) * (y - cy) + cy
def _setCellPos(self, pos, ignoreScaleCell=False):
size_cell = self._scaleCell.state['size']
rad = self.state['angle'] * pi / 180
# center of rotation:
c = self.state['pos']
if self.handles:
# centre defined by both edges:
c += 0.5 * self.handles[1]['item'].pos()
n = 0
for x in range(self.opts['grid'][0]):
for y in range(self.opts['grid'][1]):
cell = self.cells[n]
n += 1
if ignoreScaleCell and cell == self._scaleCell:
for line in cell.subgrid:
line.updatePos()
continue
p = pos + [x, y] * (size_cell + self.opts['gap'])
self._rotatePoint(p, rad, c)
cell.setPos(p)
for line in cell.subgrid:
line.updatePos()
def setViewBox(self, v):
'''
add grid and its cells to the ViewBox
'''
self.vb = v
v.addItem(self)
[v.addItem(c) for c in self.cells]
[[self.vb.addItem(line) for line in c.subgrid] for c in self.cells]
def show(self):
[c.show() for c in self.cells]
[[line.show() for line in c.subgrid] for c in self.cells]
pg.ROI.show(self)
def hide(self):
[c.hide() for c in self.cells]
[[line.hide() for line in c.subgrid] for c in self.cells]
pg.ROI.hide(self)
def close(self):
[self.vb.removeItem(c) for c in self.cells]
self.vb.removeItem(self)
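# Minimal usage sketch for GridROI (commented out because it needs a live
# pyqtgraph ViewBox `vb` and a 2D numpy array `img`; both are placeholders):
#
#   roi = GridROI(pos=[10, 10], size=[80, 60], grid=[3, 4], shape='Circle')
#   roi.setViewBox(vb)                   # adds the grid and cells to the view
#   means = roi.getCellParameters(img)   # per-cell np.mean over the masks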
class _CellBase(object):
'''
Base class for all cells in a grid
'''
def __init__(self, *args, **kwargs):
self.subgrid = []
self.translatable = False
self.mouseHovering = False
class SubLine(pg.ROI):
'''
one line for the subgrid
'''
def __init__(self, cell, orientation, pos, thickness):
pg.ROI.__init__(self, pos=(1, 1), size=(1, 1))
self.translatable = False
self.mouseHovering = False
self.pos = pos
self.thickness = thickness
if orientation == 0:
self.i = 0
self.j = 1
else:
self.i = 1
self.j = 0
self.cell = cell
def fromState(self):
'''
update thickness and position from current state
'''
j = self.j
s = self.state
cs = self.cell.state
p = self.pos = (s['pos'][j] - cs['pos'][j]) / cs['size'][j]
t = self.thickness = s['size'][j] / cs['size'][j]
return t, p
def setScaleLine(self):
self.addScaleHandle([0.5, 1], [0.5, 0])
self.addScaleHandle([0.5, 0], [0.5, 1])
def updateSize(self):
s = self.cell.state['size']
pg.ROI.setSize(self, (s[self.i], self.thickness * s[self.j]))
def updatePos(self):
p = self.cell.state['pos'].copy()
s = self.cell.state['size']
j = self.j
p[j] += s[j] * self.pos
pg.ROI.setPos(self, p)
class RectROI(pg.ROI, _CellBase):
def __init__(self, *args, **kwargs):
pg.ROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
def setScaleCell(self):
self.addScaleHandle([1, 0], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
a = self.boundingRect()
a.moveTo(self.state['pos'])
p.addRect(a)
return p
def getMask(self, shape):
p = self.state['pos']
s = self.state['size']
center = p + s / 2
a = self.state['angle']
# opencv convention:
shape = (shape[1], shape[0])
arr = np.zeros(shape, dtype=np.uint8)
# draw rotated rectangle:
vertices = np.int0(cv2.boxPoints((center, s, a)))
cv2.drawContours(arr, [vertices],
0,
color=1,
thickness=-1)
return arr.astype(bool).T
class CircleROI(_CellBase, pg.EllipseROI):
def __init__(self, *args, **kwargs):
pg.ROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
self._ratioEllispeRectangle = 1 # only changed in CellPseudoSquareROI
def setScaleCell(self):
self.addScaleHandle([cos(1), sin(0)], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
a = self.boundingRect()
a.moveTo(self.state['pos'])
p.addEllipse(a)
return p
def getMask(self, shape):
'''
returns bool array
'''
p = self.state['pos']
s = self.state['size']
center = p + s / 2
a = self.state['angle']
# opencv convention:
shape = (shape[1], shape[0])
arr = np.zeros(shape, dtype=np.uint8)
# draw ellipse:
cv2.ellipse(arr,
(int(center[0]), int(center[1])),
(int(s[0] / 2 * self._ratioEllispeRectangle),
int(s[1] / 2 * self._ratioEllispeRectangle)),
int(a),
startAngle=0,
endAngle=360,
color=1,
thickness=-1)
return arr.astype(bool).T
class CellPseudoSquareROI(_CellBase, PseudoSquareROI):
def __init__(self, *args, **kwargs):
PseudoSquareROI.__init__(self, *args, **kwargs)
_CellBase.__init__(self, *args, **kwargs)
def setScaleCell(self):
self.addScaleHandle([1, 0], [0, 1])
self.setPen('y')
def painterPath(self):
p = QPainterPath()
roundness = int(99 * float(self._alen) / 16 / 90)
r = QtCore.QRectF(self._rect)
r.moveTo(self.state['pos'])
p.addRoundRect(r, roundness)
return p
if __name__ == '__main__':
from pyqtgraph.Qt import QtGui
    app = QtGui.QApplication([])
w = pg.GraphicsWindow(size=(1000, 800), border=True)
w.setWindowTitle('pyqtgraph example: ROI Examples')
w1 = w.addLayout(row=0, col=0)
#label1 = w1.addLabel('test', row=1, col=0)
v = w1.addViewBox(row=1, col=0, lockAspect=True)
v2 = w1.addViewBox(row=2, col=0, lockAspect=True)
img1b = pg.ImageItem()
v2.addItem(img1b)
v3 = w1.addViewBox(row=3, col=0, lockAspect=True)
img1c = pg.ImageItem()
v3.addItem(img1c)
# Create image to display
arr = np.ones((100, 100), dtype=float)
arr[45:55, 45:55] = 0
arr[25, :] = 5
arr[:, 25] = 5
arr[75, :] = 5
arr[:, 75] = 5
arr[50, :] = 10
arr[:, 50] = 10
arr += np.sin(np.linspace(0, 20, 100)).reshape(1, 100)
arr += np.random.normal(size=(100, 100))
img1a = pg.ImageItem(arr)
v.addItem(img1a)
r = GridROI([20, 20], [20, 20], pen=(0, 9),
subgrid=([0.3, 0.5, 1], []), shape='Pseudosquare')
r.setViewBox(v)
cell = r.cells[0]
v.autoRange(False)
def update(roi):
img1b.setImage(roi.getArrayRegion(arr, img1a), levels=(0, arr.max()))
img1c.setImage(np.int0(r.getMask(arr.shape)))
# cell.sigRegionChanged.connect(update)
# update(cell)
app.exec_()
| gpl-3.0 | -2,420,680,918,911,564,000 | 28.324056 | 118 | 0.51722 | false | 3.46651 | false | false | false |
jekahy/EIASR | src/canny.py | 1 | 3842 | # coding: utf8
from math import pi
import numpy as np
from scipy.signal import convolve2d
SOBEL_X = np.array([
[ 1, 0, -1],
[ 2, 0, -2],
[ 1, 0, -1],
])
SOBEL_Y = np.array([
[ 1, 2, 1],
[ 0, 0, 0],
[-1, -2, -1],
])
class GradientImage(object):
def __init__(self, magnitudes, angles):
self.magnitudes = magnitudes
self.angles = angles
@property
def w(self):
return self.magnitudes.shape[0]
@property
def h(self):
return self.magnitudes.shape[1]
@classmethod
def from_partials(cls, dxs, dys):
magnitudes = np.sqrt(dxs ** 2 + dys ** 2)
angles = np.arctan2(dys, dxs)
return cls(magnitudes, angles)
def gradient(in_):
dxs = convolve2d(in_, SOBEL_X, 'same', 'symm')
dys = convolve2d(in_, SOBEL_Y, 'same', 'symm')
return GradientImage.from_partials(dxs, dys)
def thin_nonmaximum(gradient_image):
thinned = np.copy(gradient_image.magnitudes)
for idx, s in np.ndenumerate(gradient_image.magnitudes):
s_nl = _neighbour_in_direction(
gradient_image.magnitudes, idx,
gradient_image.angles[idx])
s_nr = _neighbour_in_direction(
gradient_image.magnitudes, idx,
gradient_image.angles[idx] + pi)
# TODO: consider angle at nl, nr
if s < s_nl or s < s_nr:
thinned[idx] = 0
return GradientImage(thinned, gradient_image.angles)
def thin_hysteresis(gradient_image, t_high=0.2, t_low=0.1):
# 8 pixel neighborhood
x = [-1, 0, 1, -1, 1, -1, 0, 1]
y = [-1, -1, -1, 0, 0, 1, 1, 1]
magnitudes = gradient_image.magnitudes
# Dimensions
xdim, ydim = magnitudes.shape
# Max magnitude
max_magn = magnitudes.max()
# Pixels > t_high are kept automatically
thinned = np.where(magnitudes > (t_high * max_magn), magnitudes, 0)
# Pixels > t_low will be ad ded later if they prove to be
# adjacent to another pixel which has been included in the thinned list
cands = np.where(magnitudes > (t_low * max_magn), magnitudes, 0)
# Create an initial list of strong edge pixels
prevx, prevy = thinned.nonzero()
# If the previous loop of testing found no new pixels to move from
# the cands list to the edge list, then stop
while len(prevx) != 0:
newx, newy = [], []
# Loop over new edge pixels discovered on previous iteration
for ii in range(len(prevx)):
# Loop through 8 pixel neighborhood
for ij in range(len(x)):
xidx = prevx[ii] + x[ij]
yidx = prevy[ii] + y[ij]
# Check if pixel index falls within image boundary
if xidx >= 0 and xidx < xdim and yidx >= 0 and yidx < ydim:
# Check if pixel is on the cands list but has not yet been added to the thinned list
if cands[xidx][yidx] and not thinned[xidx][yidx]:
# Transfer to thinned list
thinned[xidx][yidx] = cands[xidx][yidx]
# Keep track of indices for next loop iteration
newx.append(xidx)
newy.append(yidx)
# Update for next iteration
prevx = newx
prevy = newy
return GradientImage(thinned, gradient_image.angles)
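# Convenience wrapper (a sketch, not part of the original module): chain the
# steps above into a Canny-style pipeline. Gaussian smoothing of `image` is
# assumed to have happened upstream.
def canny_edges(image, t_high=0.2, t_low=0.1):
    g = gradient(image)
    thinned = thin_nonmaximum(g)
    return thin_hysteresis(thinned, t_high, t_low).magnitudes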
NEIGHBOURS = [
( 0, 1),
( 1, 1),
( 1, 0),
( 1, -1),
( 0, -1),
(-1, -1),
(-1, 0),
(-1, 1),
]
def _neighbour_in_direction(a, idx, direction):
    x, y = idx
    w, h = a.shape
ndir = len(NEIGHBOURS)
discrete_direction = int((direction / (2*pi) * ndir + 0.5 * ndir) % ndir)
dx, dy = NEIGHBOURS[discrete_direction]
nx, ny = x + dx, y + dy
if not (0 <= nx < w and 0 <= ny < h):
return 0
return a[nx, ny]
| mit | -4,143,740,423,269,810,000 | 27.671642 | 104 | 0.558303 | false | 3.16214 | false | false | false |
volpino/Yeps-EURAC | lib/galaxy/jobs/runners/sge.py | 1 | 13219 | import os, logging, threading, time
from Queue import Queue, Empty
from galaxy import model
from paste.deploy.converters import asbool
import pkg_resources
try:
pkg_resources.require( "DRMAA_python" )
DRMAA = __import__( "DRMAA" )
except:
DRMAA = None
log = logging.getLogger( __name__ )
if DRMAA is not None:
DRMAA_state = {
DRMAA.Session.UNDETERMINED: 'process status cannot be determined',
DRMAA.Session.QUEUED_ACTIVE: 'job is queued and waiting to be scheduled',
DRMAA.Session.SYSTEM_ON_HOLD: 'job is queued and in system hold',
DRMAA.Session.USER_ON_HOLD: 'job is queued and in user hold',
DRMAA.Session.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
DRMAA.Session.RUNNING: 'job is running',
DRMAA.Session.SYSTEM_SUSPENDED: 'job is system suspended',
DRMAA.Session.USER_SUSPENDED: 'job is user suspended',
DRMAA.Session.DONE: 'job finished normally',
DRMAA.Session.FAILED: 'job finished, but failed',
}
sge_template = """#!/bin/sh
#$ -S /bin/sh
GALAXY_LIB="%s"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd %s
%s
"""
class SGEJobState( object ):
def __init__( self ):
"""
Encapsulates state related to a job that is being run via SGE and
that we need to monitor.
"""
self.job_wrapper = None
self.job_id = None
self.old_state = None
self.running = False
self.job_file = None
self.ofile = None
self.efile = None
self.runner_url = None
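# Illustration of how sge_template above is rendered at submit time; the
# three values below are placeholders for galaxy_lib_dir, the job's working
# directory and the tool command line:
#
#   script = sge_template % ("/galaxy/lib",
#                            "/galaxy/database/job_working_directory/42",
#                            "python tool_wrapper.py ...")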
class SGEJobRunner( object ):
"""
    Job runner that submits Galaxy jobs to a Sun Grid Engine cluster via
    DRMAA and tracks them with a background monitor thread. FIFO scheduling.
"""
STOP_SIGNAL = object()
def __init__( self, app ):
"""Initialize this job runner and start the monitor thread"""
# Check if SGE was importable, fail if not
if DRMAA is None:
raise Exception( "SGEJobRunner requires DRMAA_python which was not found" )
self.app = app
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.queue = Queue()
self.default_cell = self.determine_sge_cell( self.app.config.default_cluster_job_runner )
self.ds = DRMAA.Session()
self.ds.init( self.default_cell )
self.monitor_thread = threading.Thread( target=self.monitor )
self.monitor_thread.start()
log.debug( "ready" )
def determine_sge_cell( self, url ):
"""Determine what SGE cell we are using"""
url_split = url.split("/")
if url_split[0] == 'sge:':
return url_split[2]
# this could happen if sge is started, but is not the default runner
else:
return ''
def determine_sge_queue( self, url ):
"""Determine what SGE queue we are submitting to"""
url_split = url.split("/")
queue = url_split[3]
if queue == "":
# None == server's default queue
queue = None
return queue
def queue_job( self, job_wrapper ):
"""Create SGE script for a job and submit it to the SGE queue"""
try:
job_wrapper.prepare()
command_line = job_wrapper.get_command_line()
except:
job_wrapper.fail( "failure preparing job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
runner_url = job_wrapper.tool.job_runner
# This is silly, why would we queue a job with no command line?
if not command_line:
job_wrapper.finish( '', '' )
return
# Check for deletion before we change state
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
job_wrapper.cleanup()
return
# Change to queued state immediately
job_wrapper.change_state( model.Job.states.QUEUED )
if self.determine_sge_cell( runner_url ) != self.default_cell:
# TODO: support multiple cells
log.warning( "(%s) Using multiple SGE cells is not supported. This job will be submitted to the default cell." % job_wrapper.job_id )
sge_queue_name = self.determine_sge_queue( runner_url )
# define job attributes
ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.job_id)
efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.job_id)
jt = self.ds.createJobTemplate()
jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
jt.outputPath = ":%s" % ofile
jt.errorPath = ":%s" % efile
if sge_queue_name is not None:
jt.setNativeSpecification( "-q %s" % sge_queue_name )
script = sge_template % (job_wrapper.galaxy_lib_dir, os.path.abspath( job_wrapper.working_directory ), command_line)
fh = file( jt.remoteCommand, "w" )
fh.write( script )
fh.close()
os.chmod( jt.remoteCommand, 0750 )
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
self.cleanup( ( ofile, efile, jt.remoteCommand ) )
job_wrapper.cleanup()
return
galaxy_job_id = job_wrapper.job_id
log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
# runJob will raise if there's a submit problem
job_id = self.ds.runJob(jt)
if sge_queue_name is None:
log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
else:
log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, sge_queue_name, job_id) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_runner( runner_url, job_id )
# Store SGE related state information for job
sge_job_state = SGEJobState()
sge_job_state.job_wrapper = job_wrapper
sge_job_state.job_id = job_id
sge_job_state.ofile = ofile
sge_job_state.efile = efile
sge_job_state.job_file = jt.remoteCommand
sge_job_state.old_state = 'new'
sge_job_state.running = False
sge_job_state.runner_url = runner_url
# delete the job template
self.ds.deleteJobTemplate( jt )
# Add to our 'queue' of jobs to monitor
self.queue.put( sge_job_state )
def monitor( self ):
"""
        Watches jobs currently in the SGE queue and deals with state changes
(queued to running) and job completion
"""
while 1:
# Take any new watched jobs and put them on the monitor list
try:
while 1:
sge_job_state = self.queue.get_nowait()
if sge_job_state is self.STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.ds.exit()
return
self.watched.append( sge_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
self.check_watched_items()
# Sleep a bit before the next state check
time.sleep( 1 )
def check_watched_items( self ):
"""
Called by the monitor thread to look at each watched job and deal
with state changes.
"""
new_watched = []
for sge_job_state in self.watched:
job_id = sge_job_state.job_id
galaxy_job_id = sge_job_state.job_wrapper.job_id
old_state = sge_job_state.old_state
try:
state = self.ds.getJobProgramStatus( job_id )
except DRMAA.InvalidJobError:
# we should only get here if an orphaned job was put into the queue at app startup
log.debug("(%s/%s) job left SGE queue" % ( galaxy_job_id, job_id ) )
self.finish_job( sge_job_state )
continue
except Exception, e:
# so we don't kill the monitor thread
log.exception("(%s/%s) Unable to check job status" % ( galaxy_job_id, job_id ) )
log.warning("(%s/%s) job will now be errored" % ( galaxy_job_id, job_id ) )
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
continue
if state != old_state:
log.debug("(%s/%s) state change: %s" % ( galaxy_job_id, job_id, DRMAA_state[state] ) )
if state == DRMAA.Session.RUNNING and not sge_job_state.running:
sge_job_state.running = True
sge_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
if state == DRMAA.Session.DONE:
self.finish_job( sge_job_state )
continue
if state == DRMAA.Session.FAILED:
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
sge_job_state.job_wrapper.cleanup()
continue
sge_job_state.old_state = state
new_watched.append( sge_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
def finish_job( self, sge_job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the SGE temporary files.
"""
ofile = sge_job_state.ofile
efile = sge_job_state.efile
job_file = sge_job_state.job_file
# collect the output
try:
ofh = file(ofile, "r")
efh = file(efile, "r")
stdout = ofh.read()
stderr = efh.read()
except:
stdout = ''
stderr = 'Job output not returned from cluster'
log.debug(stderr)
try:
sge_job_state.job_wrapper.finish( stdout, stderr )
except:
log.exception("Job wrapper finish method failed")
# clean up the sge files
self.cleanup( ( ofile, efile, job_file ) )
def cleanup( self, files ):
if not asbool( self.app.config.get( 'debug', False ) ):
for file in files:
if os.access( file, os.R_OK ):
os.unlink( file )
def put( self, job_wrapper ):
"""Add a job to the queue (by job identifier)"""
self.queue_job( job_wrapper )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "sending stop signal to worker threads" )
self.queue.put( self.STOP_SIGNAL )
log.info( "sge job runner stopped" )
def stop_job( self, job ):
"""Attempts to delete a job from the SGE queue"""
try:
self.ds.control( job.job_runner_external_id, DRMAA.Session.TERMINATE )
log.debug( "(%s/%s) Removed from SGE queue at user's request" % ( job.id, job.job_runner_external_id ) )
except DRMAA.InvalidJobError:
log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.id, job.job_runner_external_id ) )
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
sge_job_state = SGEJobState()
sge_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.id)
sge_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.id)
sge_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.id)
sge_job_state.job_id = str( job.job_runner_external_id )
sge_job_state.runner_url = job_wrapper.tool.job_runner
job_wrapper.command_line = job.command_line
sge_job_state.job_wrapper = job_wrapper
if job.state == model.Job.states.RUNNING:
log.debug( "(%s/%s) is still in running state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.RUNNING
sge_job_state.running = True
self.queue.put( sge_job_state )
elif job.state == model.Job.states.QUEUED:
log.debug( "(%s/%s) is still in SGE queued state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.QUEUED
sge_job_state.running = False
self.queue.put( sge_job_state )
| mit | -2,320,704,567,637,543,400 | 40.180685 | 146 | 0.577956 | false | 3.590168 | false | false | false |
sharad/calibre | src/calibre/gui2/dbus_export/menu.py | 1 | 14852 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Support for exporting Qt's MenuBars/Menus over DBUS. The API is defined in
# dbus-menu.xml from the libdbusmenu project https://launchpad.net/libdbusmenu
import dbus
from PyQt5.Qt import (
QApplication, QMenu, QIcon, QKeySequence, QObject, QEvent, QTimer, pyqtSignal, Qt)
from calibre.utils.dbus_service import Object, BusName, method as dbus_method, dbus_property, signal as dbus_signal
from calibre.gui2.dbus_export.utils import (
setup_for_cli_run, swap_mnemonic_char, key_sequence_to_dbus_shortcut, icon_to_dbus_menu_icon)
null = object()
def PropDict(mapping=()):
return dbus.Dictionary(mapping, signature='sv')
def create_properties_for_action(ac, previous=None):
ans = PropDict()
if ac.isSeparator():
ans['type'] = 'separator'
if not ac.isVisible():
ans['visible'] = False
return ans
text = ac.text() or ac.iconText()
if text:
ans['label'] = swap_mnemonic_char(text)
if not ac.isEnabled():
ans['enabled'] = False
if not ac.isVisible() or ac.property('blocked') is True:
ans['visible'] = False
if ac.menu() is not None:
ans['children-display'] = 'submenu'
if ac.isCheckable():
exclusive = ac.actionGroup() is not None and ac.actionGroup().isExclusive()
ans['toggle-type'] = 'radio' if exclusive else 'checkmark'
ans['toggle-state'] = int(ac.isChecked())
shortcuts = ac.shortcuts()
if shortcuts:
sc = dbus.Array(signature='as')
for s in shortcuts:
if not s.isEmpty():
for x in key_sequence_to_dbus_shortcut(s):
sc.append(dbus.Array(x, signature='s'))
if sc:
ans['shortcut'] = sc[:1] # Unity fails to display the shortcuts at all if more than one is specified
if ac.isIconVisibleInMenu():
icon = ac.icon()
if previous and previous.get('x-qt-icon-cache-key') == icon.cacheKey():
for x in 'icon-data x-qt-icon-cache-key'.split():
ans[x] = previous[x]
else:
data = icon_to_dbus_menu_icon(ac.icon())
if data is not None:
ans['icon-data'] = data
ans['x-qt-icon-cache-key'] = icon.cacheKey()
return ans
def menu_actions(menu):
try:
return menu.actions()
except TypeError:
if isinstance(menu, QMenu):
return QMenu.actions(menu)
raise
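# Small illustration (commented out; it assumes a running QApplication and
# the action below is a stand-in): inspect the dbusmenu properties generated
# for a QAction by the helper above.
#
#   from PyQt5.Qt import QAction
#   ac = QAction('&Save', None)
#   ac.setShortcut(QKeySequence('Ctrl+S'))
#   print(create_properties_for_action(ac))  # label '_Save', shortcut, ...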
class DBusMenu(QObject):
handle_event_signal = pyqtSignal(object, object, object, object)
def __init__(self, object_path, parent=None, bus=None):
QObject.__init__(self, parent)
        # Unity barfs if the Event DBUS method does not return immediately, so
# handle it asynchronously
self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
self.set_status = self.dbus_api.set_status
self._next_id = 0
self.action_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
self.layout_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
self.init_maps()
@property
def object_path(self):
return self.dbus_api._object_path
def init_maps(self, qmenu=None):
self.action_changes = set()
self.layout_changes = set()
self.qmenu = qmenu
self._id_to_action, self._action_to_id = {}, {}
self._action_properties = {}
@property
def next_id(self):
self._next_id += 1
return self._next_id
def id_to_action(self, action_id):
if self.qmenu is None:
return None
return self._id_to_action.get(action_id)
def action_to_id(self, action):
if self.qmenu is None:
return None
return self._action_to_id.get(action)
def action_properties(self, action_id, restrict_to=None):
if self.qmenu is None:
return {}
ans = self._action_properties.get(action_id, PropDict())
if restrict_to:
ans = PropDict({k:v for k, v in ans.iteritems() if k in restrict_to})
return ans
def publish_new_menu(self, qmenu=None):
self.init_maps(qmenu)
if qmenu is not None:
qmenu.destroyed.connect(lambda obj=None:self.publish_new_menu())
ac = qmenu.menuAction()
self.add_action(ac)
self.dbus_api.LayoutUpdated(self.dbus_api.revision, 0)
def set_visible(self, visible):
ac = self.id_to_action(0)
if ac is not None and self.qmenu is not None:
changed = False
blocked = not visible
for ac in menu_actions(ac.menu()):
ac_id = self.action_to_id(ac)
if ac_id is not None:
old = ac.property('blocked')
if old is not blocked:
ac.setProperty('blocked', blocked)
self.action_changes.add(ac_id)
changed = True
if changed:
self.action_changed_timer.start()
def add_action(self, ac):
ac_id = 0 if ac.menu() is self.qmenu else self.next_id
self._id_to_action[ac_id] = ac
self._action_to_id[ac] = ac_id
self._action_properties[ac_id] = create_properties_for_action(ac)
if ac.menu() is not None:
self.add_menu(ac.menu())
def add_menu(self, menu):
menu.installEventFilter(self)
for ac in menu_actions(menu):
self.add_action(ac)
def eventFilter(self, obj, ev):
ac = getattr(obj, 'menuAction', lambda : None)()
ac_id = self.action_to_id(ac)
if ac_id is not None:
etype = ev.type()
if etype == QEvent.ActionChanged:
ac_id = self.action_to_id(ev.action())
self.action_changes.add(ac_id)
self.action_changed_timer.start()
elif etype == QEvent.ActionAdded:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.add_action(ev.action())
elif etype == QEvent.ActionRemoved:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.action_removed(ev.action())
return False
def actions_changed(self):
updated_props = dbus.Array(signature='(ia{sv})')
removed_props = dbus.Array(signature='(ias)')
for ac_id in self.action_changes:
ac = self.id_to_action(ac_id)
if ac is None:
continue
old_props = self.action_properties(ac_id)
new_props = self._action_properties[ac_id] = create_properties_for_action(ac, old_props)
removed = set(old_props) - set(new_props)
if removed:
removed_props.append((ac_id, dbus.Array(removed, signature='as')))
updated = PropDict({k:v for k, v in new_props.iteritems() if v != old_props.get(k, null)})
if updated:
updated_props.append((ac_id, updated))
self.action_changes = set()
if updated_props or removed_props:
self.dbus_api.ItemsPropertiesUpdated(updated_props, removed_props)
return updated_props, removed_props
def layouts_changed(self):
changes = set()
for ac_id in self.layout_changes:
if ac_id in self._id_to_action:
changes.add(ac_id)
self.layout_changes = set()
if changes:
self.dbus_api.revision += 1
for change in changes:
self.dbus_api.LayoutUpdated(self.dbus_api.revision, change)
return changes
def action_is_in_a_menu(self, ac):
all_menus = {ac.menu() for ac in self._action_to_id}
all_menus.discard(None)
return bool(set(ac.associatedWidgets()).intersection(all_menus))
def action_removed(self, ac):
if not self.action_is_in_a_menu(ac):
ac_id = self._action_to_id.pop(ac, None)
self._id_to_action.pop(ac_id, None)
self._action_properties.pop(ac_id, None)
def get_layout(self, parent_id, depth, property_names):
# Ensure any pending updates are done, as they are needed now
self.actions_changed()
self.layouts_changed()
property_names = property_names or None
props = self.action_properties(parent_id, property_names)
return parent_id, props, self.get_layout_children(parent_id, depth, property_names)
def get_layout_children(self, parent_id, depth, property_names):
ans = dbus.Array(signature='(ia{sv}av)')
ac = self.id_to_action(parent_id)
if ac is not None and depth != 0 and ac.menu() is not None:
for child in menu_actions(ac.menu()):
child_id = self.action_to_id(child)
if child_id is not None:
props = self.action_properties(child_id, property_names)
ans.append((child_id, props, self.get_layout_children(child_id, depth - 1, property_names)))
return ans
def get_properties(self, ids=None, property_names=None):
property_names = property_names or None
ans = dbus.Array(signature='(ia{sv})')
for action_id in (ids or self._id_to_action):
ans.append((action_id, self.action_properties(action_id, property_names)))
return ans
def handle_event(self, action_id, event, data, timestamp):
ac = self.id_to_action(action_id)
if event == 'clicked':
if ac.isCheckable():
ac.toggle()
ac.triggered.emit(ac.isCheckable() and ac.isChecked())
def handle_about_to_show(self, ac):
child_ids = {self.action_to_id(x) for x in menu_actions(ac.menu())}
child_ids.discard(None)
ac_id = self.action_to_id(ac)
ac.menu().aboutToShow.emit()
if ac_id in self.layout_changes or child_ids.intersection(self.action_changes):
return True
return False
class DBusMenuAPI(Object):
IFACE = 'com.canonical.dbusmenu'
def __init__(self, menu, object_path, bus=None):
if bus is None:
bus = dbus.SessionBus()
Object.__init__(self, bus, object_path)
self.status = 'normal'
self.menu = menu
self.revision = 0
@dbus_property(IFACE, signature='u')
def Version(self):
return 3 # GTK 3 uses 3, KDE 4 uses 2
@dbus_property(IFACE, signature='s', emits_changed_signal=True)
def Status(self):
return self.status
def set_status(self, normal=True):
self.status = 'normal' if normal else 'notice'
self.PropertiesChanged(self.IFACE, {'Status': self.status}, [])
@dbus_property(IFACE, signature='s')
def TextDirection(self):
return 'ltr' if QApplication.instance().isLeftToRight() else 'rtl'
@dbus_property(IFACE, signature='as')
def IconThemePath(self):
return dbus.Array(signature='s')
@dbus_method(IFACE, in_signature='iias', out_signature='u(ia{sv}av)')
def GetLayout(self, parentId, recursionDepth, propertyNames):
layout = self.menu.get_layout(parentId, recursionDepth, propertyNames)
return self.revision, layout
@dbus_method(IFACE, in_signature='aias', out_signature='a(ia{sv})')
def GetGroupProperties(self, ids, propertyNames):
return self.menu.get_properties(ids, propertyNames)
@dbus_method(IFACE, in_signature='is', out_signature='v')
def GetProperty(self, id, name):
return self.menu.action_properties(id).get(name, '')
@dbus_method(IFACE, in_signature='isvu', out_signature='')
def Event(self, id, eventId, data, timestamp):
''' This is called by the applet to notify the application an event happened on a
menu item. eventId can be one of the following::
* "clicked"
* "hovered"
* "opened"
* "closed"
Vendor specific events can be added by prefixing them with "x-<vendor>-"'''
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
@dbus_method(IFACE, in_signature='a(isvu)', out_signature='ai')
def EventGroup(self, events):
        ''' Used to pass a set of events as a single message for possibly
several different menuitems. This is done to optimize DBus traffic.
Should return a list of ids that are not found. events is a list of
events in the same format as used for the Event method.'''
missing = dbus.Array(signature='u')
for id, eventId, data, timestamp in events:
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
else:
missing.append(id)
return missing
@dbus_method(IFACE, in_signature='i', out_signature='b')
def AboutToShow(self, id):
ac = self.menu.id_to_action(id)
if ac is not None and ac.menu() is not None:
return self.menu.handle_about_to_show(ac)
return False
@dbus_method(IFACE, in_signature='ai', out_signature='aiai')
def AboutToShowGroup(self, ids):
updates_needed = dbus.Array(signature='i')
id_errors = dbus.Array(signature='i')
for ac_id in ids:
            ac = self.menu.id_to_action(ac_id)
if ac is not None and ac.menu() is not None:
if self.menu.handle_about_to_show(ac):
updates_needed.append(ac_id)
else:
id_errors.append(ac_id)
return updates_needed, id_errors
@dbus_signal(IFACE, 'a(ia{sv})a(ias)')
def ItemsPropertiesUpdated(self, updatedProps, removedProps):
pass
@dbus_signal(IFACE, 'ui')
def LayoutUpdated(self, revision, parent):
pass
@dbus_signal(IFACE, 'iu')
def ItemActivationRequested(self, id, timestamp):
pass
def test():
setup_for_cli_run()
app = QApplication([])
bus = dbus.SessionBus()
dbus_name = BusName('com.calibre-ebook.TestDBusMenu', bus=bus, do_not_queue=True)
m = QMenu()
ac = m.addAction(QIcon(I('window-close.png')), 'Quit', app.quit)
ac.setShortcut(QKeySequence('Ctrl+Q'))
menu = DBusMenu('/Menu', bus=bus)
menu.publish_new_menu(m)
app.exec_()
del dbus_name
if __name__ == '__main__':
test()
| gpl-3.0 | 4,996,963,048,490,460,000 | 37.677083 | 115 | 0.597832 | false | 3.609235 | false | false | false |
tensorflow/agents | tf_agents/networks/nest_map.py | 1 | 8386 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network layer that allows mapping multiple inputs."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
import copy
import typing
import tensorflow.compat.v2 as tf
from tf_agents.networks import network
from tf_agents.networks import sequential
from tf_agents.typing import types
from tf_agents.utils import nest_utils
def NestFlatten() -> tf.keras.layers.Layer: # pylint: disable=invalid-name
"""Returns a Keras layer that takes a nest of inputs, and returns a list.
Useful in combination with `NestMap` to combine processed inputs:
```python
# Process inputs in dictionary {"inp1": ..., "inp2": ...}, then
# flatten the resulting tensors into a list, and finally pass this
# list to tf.keras.layers.Add() to sum the values element-wise.
net = tf_agents.networks.Sequence([
NestMap({"inp1": layer1, "inp2": layer2}),
NestFlatten(),
tf.keras.layers.Add(),
])
combined_outputs, next_state = net({"inp1": inp1, "inp2": inp2}, state)
```
"""
return tf.keras.layers.Lambda(tf.nest.flatten)
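# A concrete (if minimal) instance of the combiner pattern described above;
# the layer widths and batch shapes are arbitrary illustration choices, not
# values this module prescribes.
def _example_combine():
  net = sequential.Sequential([
      NestMap({'inp1': tf.keras.layers.Dense(4),
               'inp2': tf.keras.layers.Dense(4)}),
      NestFlatten(),
      tf.keras.layers.Add(),
  ])
  inputs = {'inp1': tf.ones([2, 3]), 'inp2': tf.ones([2, 5])}
  outputs, _ = net(inputs)  # element-wise sum of the two Dense outputs
  return outputs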
class NestMap(network.Network):
"""The `NestMap` network processes nested inputs via nested layers.
It is a TF-Agents network that can be used to process nested inputs.
Stateful Keras layers (e.g. LSTMCell, RNN, LSTM, TF-Agents DynamicUnroll)
are all supported. The `state_spec` of `NestMap` has a structure matching
that of `nested_layers`.
`NestMap` can be used in conjunction with `NestFlatten` and a combiner
(e.g. `tf.keras.layers.Add` or `tf.keras.layers.Concatenate`) to process
and aggregate in a preprocessing step.
Usage:
```python
net = NestMap({"inp1": layer1, "inp2": layer2})
outputs, next_state = net({"inp1": inp1, "inp2": inp2}, state)
```
"""
def __init__(self,
nested_layers: types.NestedLayer,
input_spec: typing.Optional[types.NestedTensorSpec] = None,
name: typing.Optional[typing.Text] = None):
"""Create a Sequential Network.
Args:
nested_layers: A nest of layers and/or networks. These will be used
to process the inputs (input nest structure will have to match this
structure). Any layers that are subclasses of
`tf.keras.layers.{RNN,LSTM,GRU,...}` are wrapped in
`tf_agents.keras_layers.RNNWrapper`.
input_spec: (Optional.) A nest of `tf.TypeSpec` representing the
input observations. The structure of `input_spec` must match
that of `nested_layers`.
name: (Optional.) Network name.
Raises:
TypeError: If any of the layers are not instances of keras `Layer`.
ValueError: If `input_spec` is provided but its nest structure does
not match that of `nested_layers`.
RuntimeError: If not `tf.executing_eagerly()`; as this is required to
be able to create deep copies of layers in `layers`.
"""
if not tf.executing_eagerly():
raise RuntimeError(
'Not executing eagerly - cannot make deep copies of `nested_layers`.')
flat_nested_layers = tf.nest.flatten(nested_layers)
for layer in flat_nested_layers:
if not isinstance(layer, tf.keras.layers.Layer):
raise TypeError(
'Expected all layers to be instances of keras Layer, but saw'
': \'{}\''.format(layer))
if input_spec is not None:
nest_utils.assert_same_structure(
nested_layers, input_spec,
message=(
'`nested_layers` and `input_spec` do not have matching structures'
))
flat_input_spec = tf.nest.flatten(input_spec)
else:
flat_input_spec = [None] * len(flat_nested_layers)
# Wrap in Sequential if necessary.
flat_nested_layers = [
sequential.Sequential([m], s) if not isinstance(m, network.Network)
else m
for (s, m) in zip(flat_input_spec, flat_nested_layers)
]
flat_nested_layers_state_specs = [m.state_spec for m in flat_nested_layers]
nested_layers = tf.nest.pack_sequence_as(nested_layers, flat_nested_layers)
# We use flattened layers and states here instead of tf.nest.map_structure
# for several reason. One is that we perform several operations against
# the layers and we want to avoid calling into tf.nest.map* multiple times.
# But the main reason is that network states have a different *structure*
# than the layers; e.g., `nested_layers` may just be tf.keras.layers.LSTM,
# but the states would then have structure `[.,.]`. Passing these in
# as args to tf.nest.map_structure causes it to fail. Instead we would
# have to use nest.map_structure_up_to -- but that function is not part
# of the public TF API. However, if we do everything in flatland and then
# use pack_sequence_as, we bypass the more rigid structure tests.
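    # For example (hypothetical specs): if nested_layers is
    # {'obs': lstm_network} and lstm_network.state_spec is [h_spec, c_spec],
    # then the flat lists are [lstm_network] and [[h_spec, c_spec]], and
    # pack_sequence_as rebuilds {'obs': [h_spec, c_spec]} as the state_spec.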
state_spec = tf.nest.pack_sequence_as(
nested_layers, flat_nested_layers_state_specs)
super(NestMap, self).__init__(input_tensor_spec=input_spec,
state_spec=state_spec,
name=name)
self._nested_layers = nested_layers
@property
def nested_layers(self) -> types.NestedNetwork:
# Return a shallow copy so users don't modify the layers list.
return tf.nest.map_structure(lambda m: m, self._nested_layers)
def copy(self, **kwargs) -> 'NestMap':
"""Make a copy of a `NestMap` instance.
**NOTE** A copy of a `NestMap` instance always performs a deep copy
of the underlying layers, so the new instance will not share weights
with the original - but it will start with the same weights.
Args:
**kwargs: Args to override when recreating this network. Commonly
overridden args include 'name'.
Returns:
A deep copy of this network.
"""
new_kwargs = dict(self._saved_kwargs, **kwargs)
if 'nested_layers' not in new_kwargs:
new_nested_layers = [copy.deepcopy(m) for m in self._nested_layers]
new_kwargs['nested_layers'] = new_nested_layers
return type(self)(**new_kwargs)
def call(self, inputs, network_state=(), **kwargs):
nest_utils.assert_same_structure(
self._nested_layers, inputs,
allow_shallow_nest1=True,
message=(
'`self.nested_layers` and `inputs` do not have matching structures')
)
if network_state:
nest_utils.assert_same_structure(
self.state_spec, network_state,
allow_shallow_nest1=True,
message=(
'network_state and state_spec do not have matching structure'))
nested_layers_state = network_state
else:
nested_layers_state = tf.nest.map_structure(
lambda _: (), self._nested_layers)
# Here we must use map_structure_up_to because nested_layers_state has a
# "deeper" structure than self._nested_layers. For example, an LSTM
# layer's state is composed of a list with two tensors. The
# tf.nest.map_structure function would raise an error if two
# "incompatible" structures are passed in this way.
def _mapper(inp, layer, state): # pylint: disable=invalid-name
return layer(inp, network_state=state, **kwargs)
outputs_and_next_state = nest_utils.map_structure_up_to(
self._nested_layers, _mapper,
inputs, self._nested_layers, nested_layers_state)
flat_outputs_and_next_state = nest_utils.flatten_up_to(
self._nested_layers, outputs_and_next_state)
flat_outputs, flat_next_state = zip(*flat_outputs_and_next_state)
outputs = tf.nest.pack_sequence_as(
self._nested_layers, flat_outputs)
next_network_state = tf.nest.pack_sequence_as(
self._nested_layers, flat_next_state)
return outputs, next_network_state
| apache-2.0 | 7,011,251,411,921,298,000 | 38.744076 | 80 | 0.673503 | false | 3.796288 | false | false | false |
AmI-2014/Python-Lab1 | fibonacci.py | 1 | 1295 | '''
Created on Mar 18, 2014
@author: Dario Bonino <[email protected]>
Copyright (c) 2014 Dario Bonino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
def fib(order):
# initialization, we use a tuple
a = (0, 1)
# the resulting array
fibonacci = []
# init while variable
i = 0
# fill the array
while i < order:
a = (a[1], a[0] + a[1])
fibonacci.append(a[0])
        i += 1
return fibonacci
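# For example, fib(5) returns [1, 1, 2, 3, 5]: each iteration shifts the
# pair (a, b) to (b, a + b) and records the first element.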
if __name__ == '__main__':
# get the series order as a string
order_as_string = raw_input("Insert the Fibonacci's series order:\n>")
# convert the string to an integer number
order = int(order_as_string)
# get the Fibonacci's series value
values = fib(order)
# print the values
print values
| apache-2.0 | -4,332,876,673,079,846,400 | 24.9 | 74 | 0.657143 | false | 3.678977 | false | false | false |
janusnic/dj-21v | unit_07/mysite/blog/models.py | 1 | 1898 | from django.db import models
import datetime
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
description = models.TextField(max_length=4096)
def __str__(self):
return '%s' % (self.name)
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
def __str__(self):
return '%s' % (self.name)
@python_2_unicode_compatible
class Article(models.Model):
ARTICLE_STATUS = (
('D', 'Not Reviewed'),
('P', 'Published'),
('E', 'Expired'),
)
title = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
content = models.TextField()
status = models.CharField(max_length=1, choices=ARTICLE_STATUS, default='D')
category = models.ForeignKey(Category, verbose_name="the related category")
tags = models.ManyToManyField(Tag, verbose_name="the related tags", related_name="keyword_set", blank=True)
views = models.IntegerField(default=0)
publish_date = models.DateTimeField(auto_now=True, editable=False, help_text="Please use the following format: <em>YYYY-MM-DD</em>.")
created_date = models.DateTimeField(auto_now_add=True, editable=False)
def was_published_recently(self):
return self.publish_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'publish_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return '%s' % (self.title)
| mit | 9,192,965,130,482,229,000 | 39.382979 | 137 | 0.68862 | false | 3.615238 | false | false | false |
dgjnpr/py-junos-eznc | lib/jnpr/junos/factory/viewfields.py | 1 | 1923 | class ViewFields(object):
"""
Used to dynamically create a field dictionary used with the
RunstatView class
"""
def __init__(self):
self._fields = dict()
def _prockvargs(self, field, name, **kvargs):
if not len(kvargs):
return
field[name].update(kvargs)
@property
def end(self):
return self._fields
def str(self, name, xpath=None, **kvargs):
""" field is a string """
if xpath is None:
xpath = name
field = {name: {'xpath': xpath}}
self._prockvargs(field, name, **kvargs)
self._fields.update(field)
return self
def astype(self, name, xpath=None, astype=int, **kvargs):
"""
field string value will be passed to function :astype:
This is typically used to do simple type conversions,
but also works really well if you set :astype: to
        a function that does a basic conversion, like looking
        at the value and turning it into True/False. For
example:
astype=lambda x: True if x == 'enabled' else False
"""
if xpath is None:
xpath = name
field = {
name: {'xpath': xpath, 'astype': astype}
}
self._prockvargs(field, name, **kvargs)
self._fields.update(field)
return self
def int(self, name, xpath=None, **kvargs):
""" field is an integer """
return self.astype(name, xpath, int, **kvargs)
def flag(self, name, xpath=None, **kvargs):
"""
field is a flag, results in True/False if the xpath element exists or
not. Model this as a boolean type <bool>
"""
return self.astype(name, xpath, bool, **kvargs)
def table(self, name, table):
""" field is a RunstatTable """
self._fields.update({
name: {'table': table}
})
return self
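# A minimal usage sketch (hypothetical field and xpath names):
#   fields = (ViewFields()
#             .str('name')
#             .int('rx_packets', 'input-packets')
#             .flag('is_up', 'oper-status')
#             .end)
#   # -> {'name': {'xpath': 'name'},
#   #     'rx_packets': {'xpath': 'input-packets', 'astype': int},
#   #     'is_up': {'xpath': 'oper-status', 'astype': bool}}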
| apache-2.0 | 5,510,952,686,028,726,000 | 28.136364 | 77 | 0.558502 | false | 4.056962 | false | false | false |
missionpinball/mpf | setup.py | 1 | 3976 | """Mission Pinball Framework (mpf) setup.py."""
import re
from setuptools import setup
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSIONFILE = "mpf/_version.py"
VERSION_STRING_LONG = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
_MO = re.search(VSRE, VERSION_STRING_LONG, re.M)
if _MO:
VERSION_STRING = _MO.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
pin2dmd_requires = [
'pyusb==1.1.0'
]
linux_i2c_requires = [
'smbus2_asyncio==0.0.5'
]
rpi_requires = [
'apigpio-mpf==0.0.3'
]
cli_requires = [
'prompt_toolkit==3.0.8',
'asciimatics==1.12.0',
'terminaltables==3.1.0',
]
osc_requires = [
'python-osc==1.7.4'
]
irc_requires = [
'irc==19.0.1'
]
vpe_requires = [
'grpcio_tools==1.34.0',
'grpcio==1.34.0',
'protobuf==3.14.0',
]
crash_reporter_requires = [
'requests==2.22.0'
]
all_requires = (pin2dmd_requires + cli_requires + linux_i2c_requires + rpi_requires + osc_requires + irc_requires +
vpe_requires + crash_reporter_requires)
setup(
name='mpf',
version=VERSION_STRING,
description='Mission Pinball Framework',
long_description='''Let's build a pinball machine!
The Mission Pinball Framework (MPF) is an open source, cross-platform,
Python-based software framework for powering real pinball machines.
MPF is written in Python. It can run on Windows, OS X, and Linux
with the same code and configurations.
MPF interacts with real, physical pinball machines via modern pinball
controller hardware such as a Multimorphic P-ROC or P3-ROC, a FAST Pinball
controller, or Open Pinball Project hardware controllers. You can use MPF to
power your own custom-built machine or to update the software in existing
Williams, Bally, Stern, or Data East machines.
MPF is a work-in-progress that is not yet complete, though we're actively
developing it and checking in several commits a week. It's MIT licensed,
actively developed by fun people, and supported by a vibrant, pinball-loving
community.''',
url='https://missionpinball.org',
author='The Mission Pinball Framework Team',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment :: Arcade'
],
keywords='pinball',
include_package_data=True,
package_data={'': ['*.yaml', '*.png', '*.so', '*.pyd', '*.ogg', '*.wav']},
# MANIFEST.in picks up the rest
packages=['mpf'],
zip_safe=False,
install_requires=['ruamel.yaml==0.15.100',
'pyserial==3.5',
'pyserial-asyncio==0.4;platform_system=="Windows"',
'pyserial-asyncio==0.5;platform_system!="Windows"',
'sortedcontainers==2.3.0',
'psutil==5.7.3',
],
extras_require={
'all': all_requires,
'pin2dmd': pin2dmd_requires,
'linux_i2c': linux_i2c_requires,
'rpi': rpi_requires,
'cli': cli_requires,
'osc': osc_requires,
'irc': irc_requires,
'vpe': vpe_requires,
'crash_reporter': crash_reporter_requires,
},
tests_require=[],
test_suite="mpf.tests",
entry_points={
'console_scripts': [
'mpf = mpf.commands:run_from_command_line',
]
}
)
| mit | 4,193,858,183,606,862,000 | 27.604317 | 115 | 0.617203 | false | 3.427586 | false | false | false |
jddeal/python-cmr | tests/test_collection.py | 1 | 1877 | import unittest
from cmr.queries import CollectionQuery
class TestCollectionClass(unittest.TestCase):
def test_archive_center(self):
query = CollectionQuery()
query.archive_center("LP DAAC")
self.assertIn("archive_center", query.params)
self.assertEqual(query.params["archive_center"], "LP DAAC")
def test_keyword(self):
query = CollectionQuery()
query.keyword("AST_*")
self.assertIn("keyword", query.params)
self.assertEqual(query.params["keyword"], "AST_*")
def test_valid_formats(self):
query = CollectionQuery()
formats = [
"json", "xml", "echo10", "iso", "iso19115",
"csv", "atom", "kml", "native", "dif", "dif10",
"opendata", "umm_json", "umm_json_v1_1" "umm_json_v1_9"]
for _format in formats:
query.format(_format)
self.assertEqual(query._format, _format)
def test_invalid_format(self):
query = CollectionQuery()
with self.assertRaises(ValueError):
query.format("invalid")
query.format("jsonn")
query.format("iso19116")
def test_valid_concept_id(self):
query = CollectionQuery()
query.concept_id("C1299783579-LPDAAC_ECS")
self.assertEqual(query.params["concept_id"], ["C1299783579-LPDAAC_ECS"])
query.concept_id(["C1299783579-LPDAAC_ECS", "C1441380236-PODAAC"])
self.assertEqual(query.params["concept_id"], ["C1299783579-LPDAAC_ECS", "C1441380236-PODAAC"])
def test_invalid_concept_id(self):
query = CollectionQuery()
with self.assertRaises(ValueError):
query.concept_id("G1327299284-LPDAAC_ECS")
with self.assertRaises(ValueError):
query.concept_id(["C1299783579-LPDAAC_ECS", "G1327299284-LPDAAC_ECS"])
| mit | 3,611,592,017,431,405,600 | 31.929825 | 102 | 0.605221 | false | 3.582061 | true | false | false |
jreback/pandas | pandas/plotting/_matplotlib/hist.py | 1 | 11983 | from typing import TYPE_CHECKING
import numpy as np
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex
from pandas.core.dtypes.missing import isna, remove_na_arraylike
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import LinePlot, MPLPlot
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
set_ticks_props,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class HistPlot(LinePlot):
_kind = "hist"
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if is_integer(self.bins):
# create common bin edge
values = self.data._convert(datetime=True)._get_numeric_data()
values = np.ravel(values)
values = values[~isna(values)]
_, self.bins = np.histogram(
values, bins=self.bins, range=self.kwds.get("range", None)
)
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@classmethod
def _plot(
cls,
ax,
y,
style=None,
bins=None,
bottom=0,
column_num=0,
stacking_id=None,
**kwds,
):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
y = y[~isna(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# ignore style
n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
def _make_plot(self):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label)
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
# We allow weights to be a multi-dimensional array, e.g. a (10, 2) array,
# and each sub-array (10,) will be called in each iteration. If users only
# provide 1D array, we assume the same weights is used for all iterations
weights = kwds.get("weights", None)
if weights is not None and np.ndim(weights) != 1:
kwds["weights"] = weights[:, i]
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
# y is required for KdePlot
kwds["bottom"] = self.bottom
kwds["bins"] = self.bins
return kwds
def _post_plot_logic(self, ax: "Axes", data):
if self.orientation == "horizontal":
ax.set_xlabel("Frequency")
else:
ax.set_ylabel("Frequency")
@property
def orientation(self):
if self.kwds.get("orientation", None) == "horizontal":
return "horizontal"
else:
return "vertical"
class KdePlot(HistPlot):
_kind = "kde"
orientation = "vertical"
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
# np.nanmax() and np.nanmin() ignores the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
1000,
)
elif is_integer(self.ind):
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
self.ind,
)
else:
ind = self.ind
return ind
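        # For example, if y spans [0, 10] (so sample_range == 10) and
        # self.ind is None, the evaluation grid runs from -5 to 15 with
        # 1000 points.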
@classmethod
def _plot(
cls,
ax,
y,
style=None,
bw_method=None,
ind=None,
column_num=None,
stacking_id=None,
**kwds,
):
from scipy.stats import gaussian_kde
y = remove_na_arraylike(y)
gkde = gaussian_kde(y, bw_method=bw_method)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds, y):
kwds["bw_method"] = self.bw_method
kwds["ind"] = self._get_ind(y)
return kwds
def _post_plot_logic(self, ax, data):
ax.set_ylabel("Density")
def _grouped_plot(
plotf,
data,
column=None,
by=None,
numeric_only=True,
figsize=None,
sharex=True,
sharey=True,
layout=None,
rot=0,
ax=None,
**kwargs,
):
if figsize == "default":
# allowed to specify mpl default with 'default'
raise ValueError(
"figsize='default' is no longer supported. "
"Specify figure size by tuple instead"
)
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = create_subplots(
naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout
)
_axes = flatten_axes(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, ABCDataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_hist(
data,
column=None,
by=None,
ax=None,
bins=50,
figsize=None,
layout=None,
sharex=False,
sharey=False,
rot=90,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
legend=False,
**kwargs,
):
"""
Grouped histogram
Parameters
----------
data : Series/DataFrame
column : object, optional
by : object, optional
ax : axes, optional
bins : int, default 50
figsize : tuple, optional
layout : optional
sharex : bool, default False
sharey : bool, default False
rot : int, default 90
grid : bool, default True
legend: : bool, default False
kwargs : dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
collection of Matplotlib Axes
"""
if legend:
assert "label" not in kwargs
if data.ndim == 1:
kwargs["label"] = data.name
elif column is None:
kwargs["label"] = data.columns
else:
kwargs["label"] = column
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
if legend:
ax.legend()
if xrot is None:
xrot = rot
fig, axes = _grouped_plot(
plot_group,
data,
column=column,
by=by,
sharex=sharex,
sharey=sharey,
ax=ax,
figsize=figsize,
layout=layout,
rot=rot,
)
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
fig.subplots_adjust(
bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3
)
return axes
def hist_series(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
legend: bool = False,
**kwds,
):
import matplotlib.pyplot as plt
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is None:
if kwds.get("layout", None) is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop(
"figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)
)
if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError("passed axis not bound to passed figure")
values = self.dropna().values
if legend:
kwds["label"] = self.name
ax.hist(values, bins=bins, **kwds)
if legend:
ax.legend()
ax.grid(grid)
axes = np.array([ax])
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
else:
if "figure" in kwds:
raise ValueError(
"Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance will be created"
)
axes = _grouped_hist(
self,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
if hasattr(axes, "ndim"):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def hist_frame(
data,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
legend: bool = False,
**kwds,
):
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is not None:
axes = _grouped_hist(
data,
column=column,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
sharex=sharex,
sharey=sharey,
layout=layout,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndex)):
column = [column]
data = data[column]
# GH32590
data = data.select_dtypes(
include=(np.number, "datetime64", "datetimetz"), exclude="timedelta"
)
naxes = len(data.columns)
if naxes == 0:
raise ValueError(
"hist method requires numerical or datetime columns, nothing to plot."
)
fig, axes = create_subplots(
naxes=naxes,
ax=ax,
squeeze=False,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
)
_axes = flatten_axes(axes)
can_set_label = "label" not in kwds
for i, col in enumerate(data.columns):
ax = _axes[i]
if legend and can_set_label:
kwds["label"] = col
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
if legend:
ax.legend()
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
| bsd-3-clause | 6,666,785,963,615,121,000 | 25.106754 | 88 | 0.548026 | false | 3.672387 | false | false | false |
vedmathai/dateCorroborator | corroboratorPOC.py | 1 | 6019 | from subprocess import *
import re
import treetaggerwrapper
import sparqlQuerypy
from bs4 import BeautifulSoup
CONSTANTKEYVERBS="die, died, death, born, birth, sworn in" #Set of words that if present in the sentence, then don't discard the sentence, we are interested.
tagger = treetaggerwrapper.TreeTagger(TAGLANG = 'en', TAGDIR = '/home/vedu29/python/Gsoc/treetagger')
def jarWrapper(*args): # The helper function to use the jar file.
process = Popen(['java', '-jar']+list(args), stdout=PIPE, stderr=PIPE)
ret=[]
while process.poll() is None:
line = process.stdout.readline()
if line != '' and line.endswith('\n'):
ret.append(line[:-1])
stdout, stderr = process.communicate()
ret += stdout.split('\n')
if stderr != '':
ret += stderr.split('\n')
ret.remove('')
return ret
def returnProperty(word): #helper function to map the verb to a property. This will be small considering the number of date properties in DBpedia.
if word in ['death', 'die']: return 'http://dbpedia.org/ontology/deathDate'
if word in ['birth', 'born', 'bear']: return 'http://dbpedia.org/ontology/birthDate'
def normalizeAnnotations(sentence): # helper function to remove the references annotation, that appear as square brackets at the end of the sentence.
return re.sub(r'\[[0-9]*\]', ' ', sentence)
def sentenceSplitter(sentence): # helper regular function to correctly find end of sentences.
return re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', sentence)
def normaliseResult(result):
normRes=[]
for sentence in result:
sent=normalizeAnnotations(sentence)
normRes += sentenceSplitter(sent)
return normRes
def findAndGenericAnnotateTime(sentence): #Replacing heidelTime tagged Timex tags to a generic 'TIME' so that treeTagger can work its magic without hiccups.
return re.sub('<TIMEX3((?!<TIMEX3).)*</TIMEX3>', 'TIME', sentence)
def treetag(sentence, encoding = None): # TreeTagger helper function.
if encoding != None:
return treetaggerwrapper.make_tags(tagger.tag_text(unicode(sentence, "utf-8")))
else:
return treetaggerwrapper.make_tags(tagger.tag_text(sentence))
def returnKeyverbs(): #formats the key verbs above.
return '|'.join(verb for verb in CONSTANTKEYVERBS.split(', '))
def findSubVerbsTime(tagsentence): # The main helper function that figures out the subject in the sentence and finds the correct core verbs marked by an '*'
pos=[]
pos2=[]
seenSubject=False
seenVerb=False
lastfew=0
for i, tags in enumerate(tagsentence):
if tags.pos=='NP' or tags.pos=='PP':
pos += [tags.word]
seenSubject=True
lastfew+=1
if re.match(u'V..|V.', tags.pos) != None and seenSubject:
if not seenVerb:
subject = pos[-lastfew:]
pos2 += [[subject]]
if re.match(u'VB.', tags.pos) != None:
pos2[-1] += [tags.word]
else:
pos2[-1] += [tags.word+'*']
seenVerb=True
if re.match(u'V..|V.', tags.pos) == None and seenVerb:
seenVerb=False
seenSubject=False
lastfew=0
return pos2
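# A sketch of the output shape (hypothetical tags): for "John was born TIME",
# with 'John' tagged NP and 'was'/'born' tagged VBD/VVN, the result is
# [[['John'], 'was', 'born*']] -- the subject tokens first, then the verb
# group, with the core (non-VB.) verb marked by '*'.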
def lemmatizeMainVerb(item):
for verb in item[1:]:
if '*' in verb:
return treetag(verb)[0].lemma
def listTimes(sentence): # uses beautiful soup to get the date information.
soup = BeautifulSoup(sentence, 'html.parser')
return soup.find_all('timex3')
def main(args):
result = jarWrapper(*args)
for sentence in normaliseResult(result):
sent=findAndGenericAnnotateTime(sentence)
m = re.match(r"(?P<first_part>.*) (?P<predicate>%s) (?P<second_part>.*)"%(returnKeyverbs()), sent) #scans the sentences for this pattern.
if m!=None:
left=treetag(m.group('first_part'), "utf-8")
middle=treetag(m.group('predicate'), "utf-8")
right=treetag(m.group('second_part'), "utf-8")
tagsentence = left + middle + right
if 'TIME' in m.group('first_part') or 'TIME' in m.group('second_part'): #Skip sentence if not date details.
subVerbTime = findSubVerbsTime(tagsentence)
for item in subVerbTime:
subject=" ".join(thing for thing in item[0])
if subject.lower() in ['he','she', 'it']:
subject=previousSubject
annotate = sparqlQuerypy.findAnnotation(subject)
annotatedSubject = annotate[0]['s']['value']
previousSubject = subject #heuristic that subject of this pronoun is in deed the previous subject, (not well thought through!)
                    verbLemma = lemmatizeMainVerb(item)
                    if verbLemma is None:
                        continue  # no core verb found; skip this item
                    prop = returnProperty(verbLemma)
timexList = listTimes(sentence)
                    i = 0
                    while timexList[i]['type'] not in ["DATE", "TIME"]:
                        i += 1
                    time = timexList[i]['value']
                    date = sparqlQuerypy.findDate(annotatedSubject, prop)
if len(date) != 0:
date= date[0]['z']['value']
print '- - - - - - - - - - - - - - - - \n \n'
print sentence
print ' '
print 'The subject is:', subject
print 'The annotated subject is:', annotatedSubject
print 'The property is:', prop
print 'Date according to dbpedia:', date
print 'Date mined from the text:', time
print '\n \n'
if __name__ == '__main__':
    args = ['de.unihd.dbs.heideltime.standalone.jar', 'input']
    main(args)
| gpl-3.0 | -6,094,858,273,225,851,000 | 39.668919 | 157 | 0.587805 | false | 3.816741 | false | false | false |
fluggo/Canvas | fluggo/editor/model/connectors.py | 1 | 9949 | # This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2012 Brian J. Crowell <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from fluggo import logging
from fluggo.editor import plugins
from fluggo.editor.model import sources
_log = logging.getLogger(__name__)
class VideoSourceRefConnector(plugins.VideoStream):
'''Resolves a reference into a video stream.
This class publishes alerts for any error that happens when finding the
stream.'''
def __init__(self, asset_list, ref, model_obj=None):
plugins.VideoStream.__init__(self)
self.asset_list = asset_list
self.ref = ref
self.model_obj = model_obj
self.asset = None
self.source = None
self.stream = None
self._error = None
self.connect()
# TODO: Handle sources appearing, disappearing, and going online/offline
# TODO: Also potentially handle transforms
def set_ref(self, ref):
self.ref = ref
self.connect()
def _clear(self):
self.set_base_filter(None, new_range=(None, None))
self.set_format(None)
def connect(self):
try:
if self.asset:
self.asset = None
if self.source:
self.unfollow_alerts(self.source)
self.source = None
if self.stream:
self.unfollow_alerts(self.stream)
self.stream = None
if self._error:
self.hide_alert(self._error)
self._error = None
if not self.ref:
self._clear()
return
# TODO: Handle ad-hoc sources
if not isinstance(self.ref, sources.AssetStreamRef):
self._clear()
return
# Handle missing sources, failure to bring online, and missing streams
try:
self.asset = self.asset_list[self.ref.asset_path]
except KeyError:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '", which doesn\'t exist.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
if not self.asset.is_source:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '" which is not a video source.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
                self.show_alert(self._error)
                return
try:
self.source = self.asset.get_source()
except:
self._clear()
self._error = plugins.Alert('Error while getting source from asset',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.source)
if self.source.offline:
try:
self.source.bring_online()
except:
self._clear()
self._error = plugins.Alert('Error while bringing source online',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
return
if self.source.offline:
self._clear()
if not len(self.source.alerts):
self._error = plugins.Alert('Unable to bring source "' + self.ref.asset_path + '" online.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
try:
self.stream = self.source.get_stream(self.ref.stream)
except KeyError:
self._clear()
self._error = plugins.Alert('Can\'t find stream "' + self.ref.stream + '" in source "' + self.ref.asset_path + '".',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.stream)
self.set_format(None)
self.set_base_filter(self.stream, new_range=self.stream.defined_range)
self.set_format(self.stream.format)
except:
_log.debug('Error while resolving reference', exc_info=True)
self._clear()
self._error = plugins.Alert('Error while resolving reference', model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
class AudioSourceRefConnector(plugins.AudioStream):
# Really, this has almost the exact same behavior as the above; maybe
# combine the two
'''Resolves a reference into an audio stream.
This class publishes alerts for any error that happens when finding the
stream.'''
def __init__(self, asset_list, ref, model_obj=None):
plugins.AudioStream.__init__(self)
self.asset_list = asset_list
self.ref = ref
self.model_obj = model_obj
        self.asset = None
        self.source = None
        self.stream = None
self._error = None
self.connect()
# TODO: Handle sources appearing, disappearing, and going online/offline
# TODO: Also potentially handle transforms
def set_ref(self, ref):
self.ref = ref
self.connect()
def _clear(self):
self.set_base_filter(None, new_range=(None, None))
self.set_format(None)
def connect(self):
try:
if self.asset:
self.asset = None
if self.source:
self.unfollow_alerts(self.source)
self.source = None
if self.stream:
self.unfollow_alerts(self.stream)
self.stream = None
if self._error:
self.hide_alert(self._error)
self._error = None
if not self.ref:
self._clear()
return
# TODO: Handle ad-hoc sources
if not isinstance(self.ref, sources.AssetStreamRef):
self._clear()
return
# Handle missing sources, failure to bring online, and missing streams
try:
self.asset = self.asset_list[self.ref.asset_path]
except KeyError:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '", which doesn\'t exist.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
if not self.asset.is_source:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '" which is not an audio source.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
                self.show_alert(self._error)
                return
try:
self.source = self.asset.get_source()
except:
self._clear()
self._error = plugins.Alert('Error while getting source from asset',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.source)
if self.source.offline:
try:
self.source.bring_online()
except:
self._clear()
self._error = plugins.Alert('Error while bringing source online',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
return
if self.source.offline:
self._clear()
if not len(self.source.alerts):
self._error = plugins.Alert('Unable to bring source "' + self.ref.asset_path + '" online.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
try:
self.stream = self.source.get_stream(self.ref.stream)
except KeyError:
self._clear()
self._error = plugins.Alert('Can\'t find stream "' + self.ref.stream + '" in source "' + self.ref.asset_path + '".',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.stream)
self.set_format(None)
self.set_base_filter(self.stream, new_range=self.stream.defined_range)
self.set_format(self.stream.format)
except:
_log.debug('Error while resolving reference', exc_info=True)
self._clear()
self._error = plugins.Alert('Error while resolving reference', model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
| gpl-3.0 | -7,344,997,437,159,359,000 | 35.577206 | 145 | 0.555232 | false | 4.335076 | false | false | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/utils/version.py | 1 | 2539 | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# major = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
major = get_major_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(major + sub)
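# Examples of the mapping (assuming these VERSION tuples):
#   (1, 8, 1, 'final', 0) -> '1.8.1'
#   (1, 9, 0, 'rc', 1)    -> '1.9c1'
#   (1, 9, 0, 'alpha', 0) -> '1.9.devYYYYMMDDHHMMSS' when a git checkout is
#                            available, else '1.9'.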
def get_major_version(version=None):
"Returns major version from VERSION."
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
major = '.'.join(str(x) for x in version[:parts])
return major
def get_complete_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| mit | -5,298,214,203,842,086,000 | 29.7375 | 79 | 0.624655 | false | 3.835347 | false | false | false |
dorianpula/learn-django | charleston/models.py | 1 | 5628 | from datetime import datetime
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.encoding import smart_str
from tagging.fields import TagField
from markdown import markdown
class Category(models.Model):
"""Categories of stories."""
title = models.CharField(max_length=250, help_text="Maximum 250 characters.")
slug = models.SlugField(unique=True,
help_text="Suggested value automatically generated from title. Must be unique.")
description = models.TextField()
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/categories/{}/".format(self.slug)
def live_entry_set(self):
return self.entry_set.filter(status=Entry.LIVE_STATUS)
class LiveEntryManager(models.Manager):
"""Gets only the entries that have a live status."""
def get_queryset(self):
return super(LiveEntryManager, self).get_queryset().filter(status=self.model.LIVE_STATUS)
class Entry(models.Model):
"""Entry or blog post model."""
title = models.CharField(max_length=250)
excerpt = models.TextField(blank=True)
body = models.TextField()
pub_date = models.DateTimeField(default=datetime.now)
slug = models.SlugField(unique_for_date='pub_date')
# Authors, comments and the like.
author = models.ForeignKey(User)
enable_comments = models.BooleanField(default=True)
featured = models.BooleanField(default=False)
# Status to enable different types of entries
LIVE_STATUS = 1
DRAFT_STATUS = 2
HIDDEN_STATUS = 3
STATUS_CHOICES = (
(LIVE_STATUS, 'Live'),
(DRAFT_STATUS, 'Draft'),
(HIDDEN_STATUS, 'Hidden'),
)
status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE_STATUS)
# Now for the categories and tags
categories = models.ManyToManyField(Category)
tags = TagField()
# Separate HTML rendered entries to allow for fast loading. (Space vs. processor tradeoff)
excerpt_html = models.TextField(editable=False, blank=True)
body_html = models.TextField(editable=False, blank=True)
# Hook in the nice manager we've written above.
live = LiveEntryManager()
objects = models.Manager()
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""Recreate the HTML from Markdown before saving the entry."""
self.body_html = markdown(self.body)
if self.excerpt:
self.excerpt_html = markdown(self.excerpt)
super(Entry, self).save(force_insert, force_update, using, update_fields)
def __unicode__(self):
return self.title
def get_absolute_url(self):
"""Gets the absolute URL for an entry."""
return reverse("charleston_entry_detail",
kwargs={"year": self.pub_date.strftime("%Y"),
"month": self.pub_date.strftime("%b").lower(),
"day": self.pub_date.strftime("%d"),
"slug": self.slug})
class Meta:
verbose_name_plural = "Entries"
ordering = ["-pub_date"]
class Link(models.Model):
"""Links model hyperlinks to various URLs both external and internal."""
title = models.CharField(max_length=250)
description = models.TextField(blank=True)
description_html = models.TextField(blank=True)
url = models.URLField(unique=True)
posted_by = models.ForeignKey(User)
pub_date = models.DateTimeField(default=datetime.now)
slug = models.SlugField(unique_for_date='pub_date')
tags = TagField()
# Allow for commenting and posting to external sites
enable_comments = models.BooleanField(default=True)
post_elsewhere = models.BooleanField(default=True)
# Extra link metadata
via_name = models.CharField('Via', max_length=250, blank=True,
help_text='The name of the person whose site you spotted the link on. Optional.')
via_url = models.URLField('Via URL', blank=True,
help_text='The URL of the site where you spotted the link. Optional.')
class Meta:
ordering = ['-pub_date']
def __unicode__(self):
return self.title
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
"""
Saves a link. Updates the rendered description HTML and make sure the link gets posted elsewhere.
"""
if self.description:
self.description_html = markdown(self.description)
# Update delicious
if not self.id and self.post_elsewhere:
import pydelicious
pydelicious.add(settings.DELICIOUS_USER, settings.DELICIOUS_PASSWORD,
smart_str(self.url), smart_str(self.title), smart_str(self.tags))
super(Link, self).save(force_insert=force_insert, force_update=force_update, using=using,
update_fields=update_fields)
def get_absolute_url(self):
"""Gets the absolute URL of the link."""
return reverse("charleston_link_detail",
kwargs={"year": self.pub_date.strftime("%Y"),
"month": self.pub_date.strftime("%b").lower(),
"day": self.pub_date.strftime("%d"),
"slug": self.slug})
| bsd-2-clause | 5,318,329,303,083,099,000 | 33.740741 | 114 | 0.63344 | false | 4.14128 | false | false | false |
The-Compiler/qutebrowser | qutebrowser/completion/models/listcategory.py | 1 | 3679 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion category that uses a list of tuples as a data source."""
import re
from typing import Iterable, Tuple
from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QWidget
from qutebrowser.completion.models import util
from qutebrowser.utils import qtutils, log
class ListCategory(QSortFilterProxyModel):
"""Expose a list of items as a category for the CompletionModel."""
def __init__(self,
name: str,
items: Iterable[Tuple[str, ...]],
sort: bool = True,
delete_func: util.DeleteFuncType = None,
parent: QWidget = None):
super().__init__(parent)
self.name = name
self.srcmodel = QStandardItemModel(parent=self)
self._pattern = ''
# ListCategory filters all columns
self.columns_to_filter = [0, 1, 2]
self.setFilterKeyColumn(-1)
for item in items:
self.srcmodel.appendRow([QStandardItem(x) for x in item])
self.setSourceModel(self.srcmodel)
self.delete_func = delete_func
self._sort = sort
def set_pattern(self, val):
"""Setter for pattern.
Args:
val: The value to set.
"""
self._pattern = val
val = re.sub(r' +', r' ', val) # See #1919
val = re.escape(val)
val = val.replace(r'\ ', '.*')
rx = QRegExp(val, Qt.CaseInsensitive)
self.setFilterRegExp(rx)
self.invalidate()
sortcol = 0
self.sort(sortcol)
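        # For example, the pattern "do ba" escapes to r"do\ ba" and then
        # becomes the regex "do.*ba", so it matches "download bar"
        # case-insensitively.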
def lessThan(self, lindex, rindex):
"""Custom sorting implementation.
Prefers all items which start with self._pattern. Other than that, uses
normal Python string sorting.
Args:
lindex: The QModelIndex of the left item (*left* < right)
rindex: The QModelIndex of the right item (left < *right*)
Return:
True if left < right, else False
"""
qtutils.ensure_valid(lindex)
qtutils.ensure_valid(rindex)
left = self.srcmodel.data(lindex)
right = self.srcmodel.data(rindex)
if left is None or right is None: # pragma: no cover
log.completion.warning("Got unexpected None value, "
"left={!r} right={!r} "
"lindex={!r} rindex={!r}"
.format(left, right, lindex, rindex))
return False
leftstart = left.startswith(self._pattern)
rightstart = right.startswith(self._pattern)
if leftstart and not rightstart:
return True
elif rightstart and not leftstart:
return False
elif self._sort:
return left < right
else:
return False
| gpl-3.0 | 2,326,568,685,677,522,000 | 33.064815 | 79 | 0.613754 | false | 4.051762 | false | false | false |
abhipec/pec | emailApp/emailApp/migrations/0001_squashed_0009_dashboard.py | 1 | 2395 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'emailApp', '0001_initial'), (b'emailApp', '0002_email_textcleaned'), (b'emailApp', '0003_email_removedcontent'), (b'emailApp', '0004_auto_20150329_0757'), (b'emailApp', '0005_auto_20150329_1216'), (b'emailApp', '0006_auto_20150329_1251'), (b'emailApp', '0007_auto_20150329_1252'), (b'emailApp', '0008_auto_20150403_1346'), (b'emailApp', '0009_dashboard')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('messageId', models.SlugField(unique=True, max_length=100)),
('sender', models.EmailField(max_length=254)),
('timeStamp', models.DateTimeField()),
('subject', models.CharField(max_length=998, null=True)),
('textPlain', models.TextField(null=True, blank=True)),
('textHtml', models.TextField(null=True, blank=True)),
('removedContentHtml', models.TextField(null=True, blank=True)),
('removedContentPlain', models.TextField(null=True, blank=True)),
('textCleanHtml', models.TextField(null=True, blank=True)),
('textCleanPlain', models.TextField(null=True, blank=True)),
('category', models.CharField(default=b'', max_length=15, choices=[(b'NULL', b'Not categorized'), (b'promotional', b'Promotional'), (b'spam', b'Spam'), (b'human', b'Human'), (b'notification', b'Notification'), (b'others', b'Others')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dashboard',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', models.TextField(null=True, blank=True)),
('timeStamp', models.DateTimeField()),
('validTill', models.DateTimeField()),
('source', models.OneToOneField(to='emailApp.Email')),
],
options={
},
bases=(models.Model,),
),
]
| mit | 4,397,749,009,530,095,600 | 48.895833 | 374 | 0.56952 | false | 3.945634 | false | false | false |
mwalercz/virus-total-helper | server/dispatcher.py | 1 | 1180 | import inspect
from server.customhttp import HTTPResponse
class NoSuchUrl(Exception):
def __init__(self, url):
self.url = url
class Dispatcher:
def __init__(self, urls, scheduler, deque):
self.deque = deque
self.urls = urls
self.scheduler = scheduler
def dispatch(self, request):
fun = self._pick_handler_function(request.command, request.path)
return self._execute_handler_function(request, fun)
def _pick_handler_function(self, command, path):
key = command + path
if key in self.urls:
return self.urls[key]
else:
raise NoSuchUrl(key)
def _execute_handler_function(self, request, fun):
parameter_number = len(inspect.signature(fun).parameters)
if parameter_number == 2:
request.scheduler = self.scheduler
request.deque = self.deque
return fun(request, HTTPResponse())
else:
raise ArgumentLookupError(fun)
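# A minimal usage sketch (hypothetical handler, url table and request):
#   def list_tasks(request, response):   # two params -> receives HTTPResponse
#       return response
#   dispatcher = Dispatcher({'GET/tasks': list_tasks}, scheduler, deque)
#   dispatcher.dispatch(request)          # request.command='GET', path='/tasks'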
class ArgumentLookupError(Exception):
def __init__(self, fun):
self.fun = fun
def __str__(self):
        return repr("can't find proper params in " + str(self.fun))
| bsd-3-clause | 8,067,938,445,552,639,000 | 26.44186 | 72 | 0.616949 | false | 4.244604 | false | false | false |
blitzmann/Pyfa | gui/builtinContextMenus/spoolUp.py | 1 | 3085 | # noinspection PyPackageRequirements
import wx
import eos.config
import gui.mainFrame
from eos.utils.spoolSupport import SpoolType, SpoolOptions
from gui import globalEvents as GE
from gui.contextMenu import ContextMenu
from service.settings import ContextMenuSettings
from service.fit import Fit
class SpoolUp(ContextMenu):
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.settings = ContextMenuSettings.getInstance()
self.cycleMap = {}
self.resetId = None
def display(self, srcContext, selection):
if not self.settings.get('spoolup'):
return False
if srcContext not in ("fittingModule") or self.mainFrame.getActiveFit() is None:
return False
self.mod = selection[0]
return self.mod.item.group.name in ("Precursor Weapon", "Mutadaptive Remote Armor Repairer")
def getText(self, itmContext, selection):
return "Spoolup Cycles"
def getSubMenu(self, context, selection, rootMenu, i, pitem):
m = wx.Menu()
if "wxMSW" in wx.PlatformInfo:
bindmenu = rootMenu
else:
bindmenu = m
isNotDefault = self.mod.spoolType is not None and self.mod.spoolAmount is not None
cycleDefault = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], True))[0]
cycleCurrent = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], False))[0]
cycleMin = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, 0, True))[0]
cycleMax = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, 1, True))[0]
for cycle in range(cycleMin, cycleMax + 1):
menuId = ContextMenu.nextID()
# Show default only for current value and when not overriden
if not isNotDefault and cycle == cycleDefault:
text = "{} (default)".format(cycle)
else:
text = "{}".format(cycle)
item = wx.MenuItem(m, menuId, text, kind=wx.ITEM_CHECK)
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
item.Check(isNotDefault and cycle == cycleCurrent)
self.cycleMap[menuId] = cycle
self.resetId = ContextMenu.nextID()
item = wx.MenuItem(m, self.resetId, "Reset")
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
return m
def handleSpoolChange(self, event):
if event.Id == self.resetId:
self.mod.spoolType = None
self.mod.spoolAmount = None
elif event.Id in self.cycleMap:
cycles = self.cycleMap[event.Id]
self.mod.spoolType = SpoolType.CYCLES
self.mod.spoolAmount = cycles
fitID = self.mainFrame.getActiveFit()
Fit.getInstance().recalc(fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
SpoolUp.register()
| gpl-3.0 | -3,779,389,863,283,970,600 | 36.621951 | 153 | 0.65705 | false | 3.681384 | false | false | false |
SpectoLabs/hoverpy | hoverpy/decorators.py | 1 | 1513 | from .hp import HoverPy
class capture(object):
def __init__(self, dbpath="requests.db", capture=True, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class simulate(object):
def __init__(self, dbpath="requests.db", capture=False, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class spy(object):
def __init__(self, dbpath="requests.db", capture=False, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(spy=True, capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class modify(object):
def __init__(self, middleware, **kwargs):
self.middleware = middleware
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(modify=True, middleware=self.middleware, **self.kwargs):
return f(*args)
return wrapped_f
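# A minimal usage sketch (hypothetical test function):
#   @capture(dbpath='session.db')
#   def test_api():
#       requests.get('http://example.com')   # traffic recorded to session.db
# Swapping @capture for @simulate(dbpath='session.db') replays the recorded
# traffic instead of hitting the network.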
| apache-2.0 | 4,016,808,002,218,339,300 | 27.54717 | 92 | 0.576999 | false | 3.820707 | false | false | false |
christ2go/pycal | src/tree/treeprint.py | 1 | 1219 | def write(x):
print(x,end="")
class treeprinter:
"""
Prints an abstract syntax tree
as a XML-Document
tree has to be instaneceof node
(tree is the AST to be printed)
"""
def __init__(self,tree):
self.tree = tree
def printt(self):
        if not self.tree:
            raise ValueError("Tree Exception - Tree not initialized")
self.recprint(self.tree)
def writeattr(self,node):
for key in node.attr:
write(" "+key+"='"+str(node.attr[key])+"'")
def recprint(self,node,ident=0):
if node != None:
delim = "\t"
write(delim*ident)
write("<")
write(node.name.replace(" ","_"))
write("")
self.writeattr(node)
if len(node.children) != 0:
write(">")
write("\n")
for item in node.children:
self.recprint(item,ident+1)
# write("\n")
write(delim*ident)
write("</"+node.name.replace(" ","_")+">")
write("\n")
else:
write(" />"+"\n")
| apache-2.0 | -6,523,374,612,603,958,000 | 28.731707 | 58 | 0.437244 | false | 4.262238 | false | false | false |
aksampath123/Python_Development_Projects | Dungeon_game/character.py | 1 | 1226 | import random
from combat import Combat
class Character(Combat):
attack_limit = 10
experience = 0
base_hit_points = 10
def attack(self):
roll = random.randint(1, self.attack_limit)
if self.weapon == 'sword':
roll += 1
elif self.weapon == 'axe':
roll += 2
elif self.weapon == 'bow':
roll += 0
return roll > 4
def get_weapon(self):
weapon_choice = input("Enter weapon of choice, [S]word, [A]xe or [B]ow: ").lower()
if weapon_choice in 'sab':
if weapon_choice == 's':
return 'sword'
elif weapon_choice == 'a':
return 'axe'
else:
return 'bow'
else:
return self.get_weapon()
def __init__(self, **kwargs):
self.name = input("Enter name: ")
self.weapon = self.get_weapon()
self.hit_points = self.base_hit_points
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "Character Name: {}, Weapon: {}, HP: {}, XP: {}".format(self.name, self.weapon, self.hit_points, self.experience)
def rest(self):
if self.hit_points < self.base_hit_points:
self.hit_points +=1
def level_up(self):
return self.experience >= 5
| unlicense | -2,503,536,181,508,784,600 | 23.039216 | 124 | 0.579119 | false | 3.340599 | false | false | false |
OSGConnect/bandwidth-monitors | submit/testnetwork.py | 1 | 2452 | #!/usr/bin/env python
import urllib
import urllib2
import time
import getopt
import sys
import os
import timeit
import platform
import subprocess
import re
REFERENCE_URL = 'http://stash.osgconnect.net/+sthapa/100MB_ref'
WSGI_URL = 'http://web-dev.ci-connect.net/~sthapa/record_network_test.wsgi'
def download_file():
"""
Download file and then remove it
"""
webref = urllib2.urlopen(REFERENCE_URL)
foo = webref.read()
def get_host_info():
"""
GET host information
"""
host_info = {}
if 'OSG_SITE_NAME' in os.environ:
host_info['site'] = os.environ['OSG_SITE_NAME']
    elif 'GLIDEIN_RESOURCE_NAME' in os.environ:
        host_info['site'] = os.environ['GLIDEIN_RESOURCE_NAME']
host_info['hostname'] = platform.node()
return host_info
def send_record(test_record = None):
"""
Send record to wsgi
"""
if test_record is None:
return
try:
temp = test_record.copy()
if 'latency' in temp:
del temp['latency']
bandwidth_req = WSGI_URL + '?' + urllib.urlencode(temp)
req = urllib2.urlopen(bandwidth_req)
temp = test_record.copy()
if 'bandwidth' in temp:
del temp['bandwidth']
latency_req = WSGI_URL + '?' + urllib.urlencode(temp)
req = urllib2.urlopen(latency_req)
except Exception, e:
pass
def get_latency():
"""
Test ping time latency to stash
"""
try:
ping_output = subprocess.check_output(['/bin/ping', '-c', '10', 'stash.osgconnect.net'])
except AttributeError:
process = subprocess.Popen(['/bin/ping', '-c', '10', 'stash.osgconnect.net'], stdout=subprocess.PIPE)
ping_output = process.communicate()[0]
ping_regex = re.compile(r'rtt.*=\s+[\d.]+/([\d.]+)')
match = ping_regex.search(ping_output)
if match:
return float(match.group(1))
return 0.0
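

# Note on get_latency(): the regex above matches ping's summary line, e.g.
# (illustrative values) "rtt min/avg/max/mdev = 18.454/19.103/20.221/0.624 ms",
# where group(1) captures the second field -- the average round-trip time in ms.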
def main():
test_record = get_host_info()
test_record['date'] = time.time()
download_times = timeit.Timer('download_file()', "from __main__ import download_file").repeat(repeat = 5, number = 1)
avg_time = 0.0
records = 0
for x in download_times:
if x < 0.005:
continue
avg_time += x
records += 1
    if records > 0:
        test_record['bandwidth'] = float(100 * 2**20) / (avg_time / float(records))
test_record['latency'] = get_latency()
send_record(test_record)
if __name__ == "__main__":
main()
| apache-2.0 | -1,105,823,344,387,599,200 | 27.511628 | 121 | 0.597879 | false | 3.448664 | true | false | false |
miltonsarria/dsp-python | images/5_tf_deepNet_text.py | 1 | 5124 | import os
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
# suppress TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import pickle
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
#files
neg_file='/home/sarria/data/lexicon/neg.txt'
pos_file='/home/sarria/data/lexicon/pos.txt'
def create_lexicon(pos,neg):
lexicon = []
for fi in [pos,neg]:
with open(fi,'r') as fh:
contents = fh.readlines()
for l in contents[:hm_lines]:
all_words=word_tokenize(l.lower())
lexicon +=list(all_words)
lexicon=[lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
l2=[]
for w in w_counts:
if 1000>w_counts[w]>50:
l2.append(w)
return l2
def sample_handling(sample,lexicon,classification):
featureset=[]
with open(sample,'r') as fh:
contents = fh.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value]+=1
featureset.append([features,classification])
return featureset
def create_feature_sets_labels(pos_file,neg_file,test_size=0.1):
lexicon = create_lexicon(pos_file,neg_file)
print('Lexicon: ', len(lexicon))
features=[]
features+=sample_handling(pos_file,lexicon,[1,0])
features+=sample_handling(neg_file,lexicon,[0,1])
random.shuffle(features)
features = np.array(features)
testing_size=int(test_size*len(features))
train_x=list(features[:,0][:-testing_size])
train_y=list(features[:,1][:-testing_size])
test_x=list(features[:,0][-testing_size:])
test_y=list(features[:,1][-testing_size:])
return train_x,train_y,test_x,test_y
#####################################################################################
#####################################################################################
#####################################################################################
print('Preparing data...')
train_x,train_y,test_x,test_y=create_feature_sets_labels(pos_file,neg_file)
#with open ('/home/sarria/data/lexicon/sentimen_set.pickle','wb') as fh:
# pickle.dump([train_x,train_y,test_x,test_y],fh)
print('Done!!')
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_clases = 2
batch_size = 100
#dims = 28 x 28 = 784
x = tf.placeholder('float',[None,len(train_x[0])])
y = tf.placeholder('float')
def nn_model(data):
    # input*weights + biases
hidden_1_layer={'weights':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_clases])),
'biases':tf.Variable(tf.random_normal([n_clases]))}
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']),hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']),hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']),hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.add(tf.matmul(l3,output_layer['weights']),output_layer['biases'])
return output
def train_nn(x):
out = nn_model(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out,labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
epochs=10
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
epoch_loss = 0
i=0
while i<len(train_x):
start =i
end = i+batch_size
e_x = np.array(train_x[start:end])
e_y = np.array(train_y[start:end])
_,c=sess.run([optimizer,cost],feed_dict={x: e_x, y: e_y})
epoch_loss +=c
i+=batch_size
            print('Epoch', epoch, ' complete, loss: ', epoch_loss)
correct=tf.equal(tf.argmax(out,1),tf.argmax(y,1))
acc=tf.reduce_mean(tf.cast(correct,'float'))
print('Accuracy: ', acc.eval({x:test_x,y:test_y}))
train_nn(x)
| mit | 5,958,327,614,851,862,000 | 31.846154 | 92 | 0.556206 | false | 3.355599 | true | false | false |
EvilCult/moviecatcher | View/ResultView.py | 1 | 4486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter
import tkinter.messagebox
from Bl import Play
from Lib import Tools
class GUI :
def __init__ (self, master) :
self.master = master
self.Tools = Tools.Tools()
self.listRst = ''
self.resRst = ''
self.getDetail = ''
def showList (self, searchKey) :
rstWindow = tkinter.Toplevel()
		rstWindow.title('Resource List')
rstWindow.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
rstWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
titleFrame = tkinter.Frame(rstWindow, bd = 0, bg="#444")
titleFrame.pack(expand = True, fill = 'both')
		titleLabel = tkinter.Label(titleFrame, text = 'Search results for keyword "' + searchKey + '"', fg = '#ddd', bg="#444", font = ("Helvetica", "12"))
titleLabel.grid(row = 1, column = 1, pady = 10)
titleFrame.grid_columnconfigure(0, weight=1)
titleFrame.grid_columnconfigure(2, weight=1)
self.frame = tkinter.Frame(rstWindow, bd = 0, bg="#222")
self.frame.pack(expand = True, fill = 'both')
self.window = tkinter.Listbox(self.frame, height = 14, width = 40, bd = 0, bg="#222", fg = '#ddd', selectbackground = '#116cd6', highlightthickness = 0)
self.window.grid(row = 0, column = 0, padx = 10, pady = 10)
self.window.bind('<Double-Button-1>', self.__getMovDetails)
try :
self.window.delete(0, 100)
except :
pass
def updateList (self) :
if self.listRst != '' :
idx = 0
for x in self.listRst :
self.window.insert(idx, x['title'])
idx += 1
else :
self.timer = self.frame.after(50, self.updateList)
def showRes (self) :
self.resWindow = tkinter.Toplevel()
self.resWindow.title(self.target['title'])
self.resWindow.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
self.resWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
self.resWindow.config(background='#444')
self.resFrame = tkinter.Frame(self.resWindow, bd = 0, bg="#444")
self.resFrame.grid(row = 0, column = 0, sticky = '')
btnZone = tkinter.Frame(self.resWindow, bd = 10, bg="#444")
btnZone.grid(row = 1, column = 0, sticky = '')
self.resList = tkinter.Listbox(self.resFrame, height = 8, width = 50, bd = 0, bg="#222", fg = '#ddd',selectbackground = '#116cd6', highlightthickness = 0)
self.resList.grid(row = 0, sticky = '')
		viewBtn = tkinter.Button(btnZone, text = 'View Link', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskShow)
viewBtn.grid(row = 0, column = 0, padx = 5)
		watchBtn = tkinter.Button(btnZone, text = 'Watch Online', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskWatch)
watchBtn.grid(row = 0, column = 1, padx = 5)
		dlBtn = tkinter.Button(btnZone, text = 'Offline Download', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskDownload)
dlBtn.grid(row = 0, column = 2, padx = 5)
def updateRes (self) :
if self.resRst != '' :
if len(self.resRst) > 0:
idx = 0
for x in self.resRst :
self.resList.insert(idx, x[0])
idx += 1
else :
				self.resList.insert(0, 'This resource has been taken down and cannot be played at the moment.')
else :
self.timer = self.resFrame.after(50, self.updateRes)
def __getMovDetails (self, event) :
idx = int(self.window.curselection()[0])
self.target = self.listRst[idx]
self.getDetail(self.target)
def __getChoose (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
def __taskWatch (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.watchLink(target)
def __taskShow (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.showLink(target)
def __taskDownload (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.dlLink(target)
| mit | -3,049,180,664,325,927,000 | 29.906475 | 156 | 0.649674 | false | 2.656772 | false | false | false |
ramineni/myironic | ironic/tests/drivers/ilo/test_power.py | 1 | 9414 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for IloPower module."""
import mock
from oslo.utils import importutils
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
@mock.patch.object(ilo_common, 'get_ilo_object')
class IloPowerInternalMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPowerInternalMethodsTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = db_utils.create_test_node(
driver='fake_ilo',
driver_info=driver_info,
instance_uuid='instance_uuid_123')
CONF.set_override('power_retry', 2, 'ilo')
CONF.set_override('power_wait', 0, 'ilo')
def test__get_power_state(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
self.assertEqual(
states.POWER_ON, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'OFF'
self.assertEqual(
states.POWER_OFF, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'ERROR'
self.assertEqual(states.ERROR, ilo_power._get_power_state(self.node))
def test__get_power_state_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_client.IloError('error')
ilo_mock_object.get_host_power_status.side_effect = exc
self.assertRaises(exception.IloOperationError,
ilo_power._get_power_state,
self.node)
ilo_mock_object.get_host_power_status.assert_called_once_with()
def test__set_power_state_invalid_state(self, get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
ilo_power._set_power_state,
task,
states.ERROR)
def test__set_power_state_reboot_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_client.IloError('error')
ilo_mock_object.reset_server.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task,
states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
def test__set_power_state_reboot_ok(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.side_effect = ['ON', 'OFF', 'ON']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
def test__set_power_state_off_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task,
states.POWER_OFF)
ilo_mock_object.get_host_power_status.assert_called_with()
ilo_mock_object.hold_pwr_btn.assert_called_once_with()
def test__set_power_state_on_ok(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.side_effect = ['OFF', 'ON']
target_state = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, target_state)
ilo_mock_object.get_host_power_status.assert_called_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
def test__attach_boot_iso(self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
class IloPowerTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPowerTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo',
driver_info=driver_info)
def test_get_properties(self):
expected = ilo_common.COMMON_PROPERTIES
expected.update(ilo_deploy.COMMON_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info')
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(ilo_common, 'parse_driver_info')
def test_validate_fail(self, mock_drvinfo):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch.object(ilo_power, '_get_power_state')
def test_get_power_state(self, mock_get_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_get_power.return_value = states.POWER_ON
self.assertEqual(states.POWER_ON,
task.driver.power.get_power_state(task))
mock_get_power.assert_called_once_with(task.node)
@mock.patch.object(ilo_power, '_set_power_state')
def test_set_power_state(self, mock_set_power):
mock_set_power.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
mock_set_power.assert_called_once_with(task, states.POWER_ON)
@mock.patch.object(ilo_power, '_set_power_state')
@mock.patch.object(ilo_power, '_get_power_state')
def test_reboot(self, mock_get_power, mock_set_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
mock_get_power.return_value = states.POWER_ON
mock_set_power.return_value = states.POWER_ON
task.driver.power.reboot(task)
mock_get_power.assert_called_once_with(task.node)
mock_set_power.assert_called_once_with(task, states.REBOOT)
| apache-2.0 | -3,160,510,754,953,371,000 | 44.259615 | 79 | 0.621521 | false | 3.718009 | true | false | false |
matt77hias/Clipping | src/surfacearea.py | 1 | 2427 | import numpy as np
###############################################################################
## Surface Area
## ---------------------------------
## Planar polygon
###############################################################################
# Green's theorem
#------------------------------------------------------------------------------
# integral_contour(L dx + M dy) = integral_area((dM/dx - dL/dy) dx dy)
# contour = oriented, piecewise smooth, simple closed curve in a plane
# area = region bounded by perimeter
# L, M = functions of (x,y) defined on an open region containing area with continuous partial derivatives
#
# Application:
# Planimeter
# integral_contour(-y dx + x dy) = integral_area((dx/dx - -dy/dy) dx dy) = 2 area
def area(p_vs, n=None):
if (len(p_vs) < 3):
return 0.0
dim = p_vs[0].shape[0]
if dim == 2:
return _area2D(p_vs)
elif dim == 3:
return _area3D(p_vs, n=n)
def _area2D(p_vs):
area = 0.0
nb_p_vs = len(p_vs)
#for j in range(nb_p_vs):
# p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
# p_v2 = p_vs[j]
# area += + p_v1[0]*p_v2[1] - p_v2[0]*p_v1[1]
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[0] * (p_v3[1] - p_v1[1])
return 0.5 * abs(area)
def _area3D(p_vs, n):
area = 0.0
nb_p_vs = len(p_vs)
ax = abs(n[0])
ay = abs(n[1])
az = abs(n[2])
if (ax > ay and ax > az): lca = 0
elif (ay > az): lca = 1
else: lca = 2
an = np.sqrt(ax*ax + ay*ay + az*az)
if lca == 0:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[1] * (p_v3[2] - p_v1[2])
area *= (an / n[0])
elif lca == 1:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[2] * (p_v3[0] - p_v1[0])
area *= (an / n[1])
else:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[0] * (p_v3[1] - p_v1[1])
area *= (an / n[2])
return 0.5 * abs(area)
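

if __name__ == "__main__":
    # Quick sanity check (not part of the original module): the unit square,
    # listed counter-clockwise, should have area 1.0.
    square = [np.array([0.0, 0.0]), np.array([1.0, 0.0]),
              np.array([1.0, 1.0]), np.array([0.0, 1.0])]
    print(area(square))  # -> 1.0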
| gpl-3.0 | -9,147,105,581,252,938,000 | 28.962963 | 113 | 0.409971 | false | 2.502062 | false | false | false |
CCBatIIT/AlGDock | Pipeline/prep_complex_for_AlGDock.py | 1 | 3734 | # Prepares a complex for AlGDock
try:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ligand_mol2', default=None,
help='Input mol2 of the ligand with sybyl atom types')
parser.add_argument('receptor_pdb', default=None,
help='Input PDB of the receptor with AMBER atom types')
parser.add_argument('complex_tarball', default=None,
help='Prefix for the complex prmtop and inpcrd files')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
except:
  import sys
  class args:
    ligand_mol2 = sys.argv[1]
    receptor_pdb = sys.argv[2]
    complex_tarball = sys.argv[3]
    debug = False
import os
for FN in [args.ligand_mol2, args.receptor_pdb]:
if not os.path.isfile(FN):
raise Exception('Input file %s not found!'%FN)
args.ligand_mol2 = os.path.abspath(args.ligand_mol2)
args.receptor_pdb = os.path.abspath(args.receptor_pdb)
args.complex_tarball = os.path.abspath(args.complex_tarball)
import os, inspect
dirs = {'script':os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))}
execfile(os.path.join(dirs['script'],'_external_paths.py'))
command_paths = findPaths(['sander'])
dirs['amber'] = os.path.abspath(os.path.dirname(command_paths['sander'])[:-4])
dirs['temp'] = args.complex_tarball + '.tmp'
if not os.path.isdir(dirs['temp']):
os.system('mkdir -p '+dirs['temp'])
os.chdir(dirs['temp'])
ligand_prefix = '.'.join(os.path.dirname(args.ligand_mol2).split('/')[-1].split('.')[:-1]) \
+ '.' + os.path.basename(args.ligand_mol2)[:-5]
# The receptor file name ends with '.pdb2pqr_amber.pqr',
# which is 18 characters long
receptor_prefix = os.path.basename(args.receptor_pdb)[:-18]
complex_prefix = ligand_prefix + '-' + receptor_prefix
if not os.path.isfile(ligand_prefix+'.mol2'):
print '\n*** Writing mol2 file with amber atom types ***'
command = dirs['amber']+'/bin/antechamber' + \
' -i {0} -fi mol2 -o {1}.mol2 -fo mol2 -rn LIG'.format(\
args.ligand_mol2,ligand_prefix)
os.system(command)
if not os.path.isfile(ligand_prefix+'.mol2'):
print command
raise Exception('Could not write mol2 file')
if not os.path.isfile(ligand_prefix+'.frcmod'):
print '\n*** Generating frcmod file ***'
command = dirs['amber']+'/bin/parmchk' +\
' -i {0}.mol2 -f mol2 -o {0}.frcmod -a Y -w Y'.format(ligand_prefix)
os.system(command)
if not (os.path.isfile(os.path.join(complex_prefix+'.prmtop')) and \
os.path.isfile(os.path.join(complex_prefix+'.inpcrd')) and \
os.path.isfile(os.path.join(complex_prefix+'.pdb'))):
print '\n*** Generating prmtop and inpcrd and pdb files ***'
tleap_F = open(complex_prefix+'.tleap','w')
tleap_F.write("""
source leaprc.ff14SB
set default PBRadii mbondi2
# Receptor
receptor = loadpdb {0}
# Ligand
source leaprc.gaff2
loadamberparams {1}.frcmod
ligand = loadmol2 {1}.mol2
saveoff ligand {1}.lib
loadoff {1}.lib
# Complex
complex = combine {{receptor, ligand}}
saveamberparm complex {2}.prmtop {2}.inpcrd
savepdb complex {2}.pdb
quit
""".format(args.receptor_pdb, ligand_prefix, complex_prefix))
tleap_F.close()
command = dirs['amber']+'/bin/tleap -f {0}.tleap'.format(complex_prefix)
os.system(command)
if os.path.isfile(os.path.join(complex_prefix+'.pdb')):
print '\n*** Setting fixed atoms in pdb file ***'
command = 'python {0}/label_fixed_atoms.py {1}'
command = command.format(dirs['script'], os.path.join(complex_prefix+'.pdb'))
os.system(command)
# Compresses the complex files in a tarball
import tarfile
tarF = tarfile.open(args.complex_tarball,'w:gz')
tarF_contents = [complex_prefix+'.'+ext for ext in ['prmtop', 'inpcrd', 'pdb']]
for FN in tarF_contents:
tarF.add(FN)
tarF.close()
os.chdir('..')
if not args.debug:
os.system('rm -rf '+dirs['temp'])
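
# Typical invocation (file names are illustrative; the receptor file name must
# end with '.pdb2pqr_amber.pqr', as assumed by the prefix handling above):
#   python prep_complex_for_AlGDock.py ligand.mol2 receptor.pdb2pqr_amber.pqr complex.tar.gz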
| mit | 8,426,347,556,647,662,000 | 32.63964 | 92 | 0.686931 | false | 2.872308 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/metric_trigger.py | 1 | 4030 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricTrigger(Model):
"""The trigger that results in a scaling action.
All required parameters must be populated in order to send to Azure.
:param metric_name: Required. the name of the metric that defines what the
rule monitors.
:type metric_name: str
:param metric_resource_uri: Required. the resource identifier of the
resource the rule monitors.
:type metric_resource_uri: str
:param time_grain: Required. the granularity of metrics the rule monitors.
Must be one of the predefined values returned from metric definitions for
the metric. Must be between 12 hours and 1 minute.
:type time_grain: timedelta
:param statistic: Required. the metric statistic type. How the metrics
from multiple instances are combined. Possible values include: 'Average',
'Min', 'Max', 'Sum'
:type statistic: str or ~azure.mgmt.monitor.models.MetricStatisticType
:param time_window: Required. the range of time in which instance data is
collected. This value must be greater than the delay in metric collection,
which can vary from resource-to-resource. Must be between 12 hours and 5
minutes.
:type time_window: timedelta
:param time_aggregation: Required. time aggregation type. How the data
that is collected should be combined over time. The default value is
Average. Possible values include: 'Average', 'Minimum', 'Maximum',
'Total', 'Count'
:type time_aggregation: str or
~azure.mgmt.monitor.models.TimeAggregationType
:param operator: Required. the operator that is used to compare the metric
data and the threshold. Possible values include: 'Equals', 'NotEquals',
'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'
:type operator: str or ~azure.mgmt.monitor.models.ComparisonOperationType
:param threshold: Required. the threshold of the metric that triggers the
scale action.
:type threshold: float
"""
_validation = {
'metric_name': {'required': True},
'metric_resource_uri': {'required': True},
'time_grain': {'required': True},
'statistic': {'required': True},
'time_window': {'required': True},
'time_aggregation': {'required': True},
'operator': {'required': True},
'threshold': {'required': True},
}
_attribute_map = {
'metric_name': {'key': 'metricName', 'type': 'str'},
'metric_resource_uri': {'key': 'metricResourceUri', 'type': 'str'},
'time_grain': {'key': 'timeGrain', 'type': 'duration'},
'statistic': {'key': 'statistic', 'type': 'MetricStatisticType'},
'time_window': {'key': 'timeWindow', 'type': 'duration'},
'time_aggregation': {'key': 'timeAggregation', 'type': 'TimeAggregationType'},
'operator': {'key': 'operator', 'type': 'ComparisonOperationType'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(self, **kwargs):
super(MetricTrigger, self).__init__(**kwargs)
self.metric_name = kwargs.get('metric_name', None)
self.metric_resource_uri = kwargs.get('metric_resource_uri', None)
self.time_grain = kwargs.get('time_grain', None)
self.statistic = kwargs.get('statistic', None)
self.time_window = kwargs.get('time_window', None)
self.time_aggregation = kwargs.get('time_aggregation', None)
self.operator = kwargs.get('operator', None)
self.threshold = kwargs.get('threshold', None)
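

if __name__ == "__main__":
    # Illustrative construction only; the resource URI below is a placeholder,
    # not a real Azure resource identifier.
    from datetime import timedelta
    trigger = MetricTrigger(
        metric_name='Percentage CPU',
        metric_resource_uri='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>',
        time_grain=timedelta(minutes=1),
        statistic='Average',
        time_window=timedelta(minutes=10),
        time_aggregation='Average',
        operator='GreaterThan',
        threshold=70.0)
    print(trigger.metric_name, trigger.threshold)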
| mit | 7,079,760,800,057,811,000 | 46.411765 | 86 | 0.644913 | false | 4.269068 | false | false | false |
robe16/kiosk.grandparent-message-board | src/axiscare/url_process.py | 1 | 2732 | from multiprocessing import Process
from bs4 import BeautifulSoup
import time
from google.google_gmail import get_gmail_lists, get_gmail_message_mime, delete_gmail_message
from config.cfg import put_config_axiscare_url
from log.log import log_general, log_error
def eml_list():
return get_gmail_lists()
def get_ids(id_list):
ids = []
for l in id_list:
ids.append(l['id'])
return ids
def get_emails(ids):
emls = []
for id in ids:
e = get_gmail_message_mime(id)
emls.append({'id': id, 'email': e})
return emls
def extract_url(eml):
#
for p in eml.get_payload():
if not isinstance(p.get_payload(), str):
for p2 in p.get_payload():
for h in p2._headers:
if h[0]== 'Content-Type' and h[1].startswith('text/html'):
payload = p2.get_payload()
soup = BeautifulSoup(payload, "html.parser")
a_all = soup.findAll("a")
for a in a_all:
href = a.attrs['href'].replace('3D', '').replace('\"', '')
if href.startswith('https://1000.axiscare.com'):
#Assumption that html version appears before pdf version
return href
#
return False
def process_emls(emls):
#
for e in emls:
#
url = extract_url(e['email'])
#
if url:
put_config_axiscare_url(url)
#Delete email
delete_gmail_message(e['id'])
return True
return False
def url_updater():
#
    while True:
        #
        # reset per iteration so that a previous success is not re-reported
        updatestatus = False
        try:
eml_lists = eml_list()
#
if len(eml_lists) > 0:
#
eml_ids = get_ids(eml_lists)
#
if len(eml_ids) > 0:
#
emls = get_emails(eml_ids)
updatestatus = process_emls(emls)
#
if updatestatus:
msg_success = 'the url stored in config.json has been updated'
else:
msg_success = 'no new urls received'
log_general('Axiscare URL updater process completed - {msg_success}'.format(msg_success=msg_success))
#
except Exception as e:
log_error('Could not process emails to check for new URL notification - {error}'.format(error=e))
#
time.sleep(300) #5mins
def start_url_updater():
process_urlupdater = Process(target=url_updater)
process_urlupdater.start()
log_general('Axiscare URL updater process started')
| gpl-3.0 | -8,363,137,163,987,299,000 | 27.458333 | 113 | 0.515739 | false | 4.023564 | false | false | false |
dariocorral/panoanda | panoanda/hourOffset.py | 1 | 1849 | """
Created on Sun Sep 17 07:26:03 2017
@author: dariocorral
"""
from datetime import datetime, date, timedelta
import pytz
class Hour(object):
"""
Auxiliary class for converting GMT - NY - local time hours
"""
#Local time hour property
@property
def current_local(self):
"""
Returns local current hour
:return:integer
"""
return datetime.now().hour
#New York current hour property
@property
def current_NY(self):
"""
Returns New York current hour
:return:integer
"""
return datetime.now(tz=pytz.timezone('US/Eastern')).hour
#GMT current hour property
@property
def current_GMT(self):
"""
Returns GMT current hour
:return:integer
"""
return datetime.now(tz=pytz.timezone('utc')).hour
#New York hour - GMT hour
@property
def offset_NY_GMT(self):
"""
Returns New York current hour GMT current hour difference
:return: integer
"""
return self.current_NY - self.current_GMT
#New York hour - GMT hour
@property
def offset_local_GMT(self):
"""
Returns Local current hour vs GMT current hour difference
:return: integer
"""
return self.current_local - self.current_GMT
def hour_offset_calculate(self, hour, delta):
"""
Operate with hours
"""
year = date.today().year
month = date.today().month
day = date.today().day
dt_hour = datetime(year, month, day, hour)
dt_hour_offset = dt_hour + timedelta(hours= delta)
return dt_hour_offset.hour
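

if __name__ == "__main__":
    # Small demo (not part of the original module): adding a +2 hour offset to
    # 23:00 wraps around midnight to 01:00 thanks to the timedelta arithmetic.
    h = Hour()
    print(h.hour_offset_calculate(23, 2))  # -> 1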
| mit | -2,291,861,256,432,731,100 | 20.511628 | 66 | 0.53272 | false | 4.610973 | false | false | false |
olcf/pcircle | legacy/pcircle/path.py | 1 | 2783 | import os
class CopyType:
""" A fake enum, define three type of copy job """
FILE2FILE = 0
FILE2DIR = 1
DIR2DIR = 2
def copytype2str(t):
if t == CopyType.FILE2FILE:
return "file to file"
elif t == CopyType.FILE2DIR:
return "file(s) to dir"
elif t == CopyType.DIR2DIR:
return "dir to dir"
else:
return "Unknown type"
def cleanup_path(paths, removedir=True):
""" remove unreable files and directories from the input path collection,
skipped include two type of elements: unwanted directories if removedir is True
or unaccessible files/directories
"""
checked = []
skipped = []
for ele in paths:
ele = os.path.abspath(ele)
if os.path.exists(ele) and os.access(ele, os.R_OK):
if os.path.isdir(ele) and removedir:
skipped.append(ele)
else:
checked.append(ele)
else:
skipped.append(ele)
return checked, skipped
def identify_copytype(isrc, idest):
""" verify and return target destination
    case 1: source: multiple files
            destination is an existing directory
            copytype: FILE2DIR
    case 2: source: a file
            destination can either be:
                a file that does not exist but is writable
                a file that exists
            then FILE2FILE
    case 3: source: a directory
            destination: a directory that does not exist, but is writable
            then DIR2DIR
case 3 used to be the only mode FCP supports.
"""
if not os.path.isabs(idest):
idest = os.path.abspath(idest)
single_src_file = True if len(isrc) == 1 and os.path.isfile(isrc[0]) else False
single_src_dir = True if len(isrc) == 1 and os.path.isdir(isrc[0]) else False
dest_exist_dir = False
dest_exist_file = False
    dest_parent_writable = False
    copytype = None
if os.path.exists(idest):
if not os.access(idest, os.W_OK):
raise ValueError("Can't access %s" % idest)
if os.path.isfile(idest):
dest_exist_file = True
else:
dest_exist_dir = True
else:
# idest doesn't exist, check its parent
idest_parent = os.path.dirname(idest)
if os.path.exists(idest_parent) and os.access(idest_parent, os.W_OK):
dest_parent_writable = True
if single_src_file and (dest_exist_file or dest_parent_writable):
copytype = CopyType.FILE2FILE
elif single_src_dir and (dest_exist_dir or dest_parent_writable):
copytype = CopyType.DIR2DIR
elif not (single_src_dir or single_src_file) and dest_exist_dir:
copytype = CopyType.FILE2DIR
if copytype is None:
raise ValueError("Can't decide the type of copy operations")
return copytype
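

if __name__ == "__main__":
    # Small self-check (illustrative): directories are skipped by default, and
    # paths that do not exist are skipped as unreadable.
    print(copytype2str(CopyType.FILE2FILE))  # -> "file to file"
    checked, skipped = cleanup_path([os.getcwd(), "no-such-file"])
    print(checked, skipped)  # checked is empty; both inputs land in skipped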
| apache-2.0 | -2,806,440,448,711,826,000 | 29.922222 | 83 | 0.614445 | false | 3.750674 | false | false | false |
phase-dev/phase | libmproxy/protocol/__init__.py | 1 | 3448 | from ..proxy import ServerConnection, AddressPriority
KILL = 0 # const for killed requests
class ConnectionTypeChange(Exception):
"""
    Gets raised if the connection type has been changed (e.g. after HTTP/1.1 101 Switching Protocols).
It's up to the raising ProtocolHandler to specify the new conntype before raising the exception.
"""
pass
class ProtocolHandler(object):
def __init__(self, c):
self.c = c
"""@type: libmproxy.proxy.ConnectionHandler"""
def handle_messages(self):
"""
This method gets called if a client connection has been made. Depending on the proxy settings,
a server connection might already exist as well.
"""
raise NotImplementedError # pragma: nocover
def handle_error(self, error):
"""
This method gets called should there be an uncaught exception during the connection.
This might happen outside of handle_messages, e.g. if the initial SSL handshake fails in transparent mode.
"""
raise error # pragma: nocover
class TemporaryServerChangeMixin(object):
"""
This mixin allows safe modification of the target server,
without any need to expose the ConnectionHandler to the Flow.
"""
def change_server(self, address, ssl):
if address == self.c.server_conn.address():
return
priority = AddressPriority.MANUALLY_CHANGED
if self.c.server_conn.priority > priority:
self.log("Attempt to change server address, "
"but priority is too low (is: %s, got: %s)" % (self.server_conn.priority, priority))
return
self.log("Temporarily change server connection: %s:%s -> %s:%s" % (
self.c.server_conn.address.host,
self.c.server_conn.address.port,
address.host,
address.port
))
if not hasattr(self, "_backup_server_conn"):
self._backup_server_conn = self.c.server_conn
self.c.server_conn = None
else: # This is at least the second temporary change. We can kill the current connection.
self.c.del_server_connection()
self.c.set_server_address(address, priority)
if ssl:
self.establish_ssl(server=True)
def restore_server(self):
if not hasattr(self, "_backup_server_conn"):
return
self.log("Restore original server connection: %s:%s -> %s:%s" % (
self.c.server_conn.address.host,
self.c.server_conn.address.port,
            self._backup_server_conn.address.host,
            self._backup_server_conn.address.port
))
self.c.del_server_connection()
self.c.server_conn = self._backup_server_conn
del self._backup_server_conn
from . import http, tcp
protocols = {
'http': dict(handler=http.HTTPHandler, flow=http.HTTPFlow),
'tcp': dict(handler=tcp.TCPHandler)
} # PyCharm type hinting behaves bad if this is a dict constructor...
def _handler(conntype, connection_handler):
if conntype in protocols:
return protocols[conntype]["handler"](connection_handler)
raise NotImplementedError # pragma: nocover
def handle_messages(conntype, connection_handler):
return _handler(conntype, connection_handler).handle_messages()
def handle_error(conntype, connection_handler, error):
return _handler(conntype, connection_handler).handle_error(error)
| gpl-3.0 | -1,467,776,239,148,009,500 | 32.803922 | 114 | 0.645012 | false | 4.104762 | false | false | false |
Davideddu/garden.namedboxes | __init__.py | 1 | 3796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __init__.py
# Copyright (C) 2014 Davide Depau <[email protected]>
#
# NamedBoxes is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NamedBoxes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""A simple box layout with a label on top of it.
Use it like you would use a BoxLayout, but don't change the orientation
property. Add another layout inside of it instead, and change its
orientation.
You can change the title of the box, though (title property), the background
color (background_color property) and the horizontal alignment of the label
(title_align property).
"""
from kivy.lang import Builder
from kivy.properties import Property, NumericProperty, BoundedNumericProperty,\
ObjectProperty, StringProperty, DictProperty,\
ListProperty, OptionProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
kv = """
<NamedBox>:
orientation: "vertical"
content: content
padding: ["10dp", 0, "10dp", "10dp"]
canvas.before:
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
BoxLayout:
size_hint_y: None
height: lab.height
Label:
id: lab
size_hint_y: None
height: dp(45)#, self.texture_size[1] + dp(10))
markup: root.markup
text: root.title
text_size: self.width, self.height
valign: 'middle'
halign: root.title_align
BoxLayout:
orientation: "vertical"
id: content
"""
Builder.load_string(kv)
class NamedBox(BoxLayout):
"""BoxLayout with a background color and with a label on top of it.
Use it like you would use a BoxLayout, but don't change the orientation
property. Add another layout inside of it instead, and change its
orientation."""
background_color = ListProperty([.1, .1, .1, .5])
"""The background color for the box, in RGBA.
background_color is a ListProperty, defaults to [.1, .1, .1, .5].
"""
title_align = OptionProperty("center", options=("left", "right", "center"))
"""The horizontal alignment of the text in the title of the box.
title_align is an OptionProperty, defaults to "center" and can be one
of "left", "right", "center".
"""
title = StringProperty("<No title set>")
"""The title of the named box.
title is a StringProperty, defaults to "<No title set>".
"""
markup = BooleanProperty(False)
"""Sets whether the markup should be enabled for the title.
markup is a BooleanProperty, defaults to False.
"""
content = ObjectProperty(None)
def add_widget(self, widget):
if self.content:
self.content.add_widget(widget)
else:
super(NamedBox, self).add_widget(widget)
if __name__ == "__main__":
from kivy.app import App
from kivy.uix.button import Button
class NamedBoxApp(App):
def build(self):
root = BoxLayout(padding="100dp", spacing="100dp")
box = NamedBox(title="Named box")
box.add_widget(Button(text="Button"))
root.add_widget(box)
return root
    NamedBoxApp().run()
| mit | 8,088,089,841,062,187,000 | 29.620968 | 79 | 0.644889 | false | 4.055556 | false | false | false |
BlackLight/evesp | evesp/event/__init__.py | 1 | 2976 | class Event(object):
"""
Base class for events
Fabio Manganiello, 2015 <[email protected]>
"""
def __init__(self, component=None, **kwargs):
"""
Constructor
kwargs -- key-value associations for the attributes of the object
"""
self.__kwargs = kwargs
self.component = component
vars(self).update(kwargs)
def get(self, attr):
" Get an event attribute by name. Return None if the attribute doesn't exist "
return self.__kwargs[attr] if attr in self.__kwargs else None
def serialize(self):
" Serialize the event using pickle "
import pickle
return pickle.dumps(self)
@classmethod
def deserialize(cls, event):
" Deserialize and return the event object using pickle "
import pickle
obj = pickle.loads(event)
assert isinstance(obj, cls)
return obj
def to_json(self):
" Serialize as JSON "
import json
attrs = self.__kwargs
return json.dumps(attrs)
@classmethod
def from_json(cls, attrs):
" Deserialize and initialize from JSON "
import json
attrs = dict(json.loads(attrs))
return Event(**attrs)
def __eq__(self, event):
"""
Return true if event equals self.
Two events are considered "equal" if:
- Their types are the same, or one is a direct subclass of the other;
- All of their constructor parameters are equal, unless a certain attribute is an instance of AttributeValueAny.
"""
if not self.__same_classes(self, event):
return False
for (attr, value) in self.__kwargs.items():
if not self.__same_values(value, event.__kwargs[attr]):
return False
return True
@classmethod
def __same_classes(cls, obj1, obj2):
return True \
if (type(obj1) == Event or type(obj2) == Event) \
else type(obj1) == type(obj2)
@classmethod
def __same_values(cls, value1, value2):
if not cls.__same_classes(value1, value2) \
and not isinstance(value1, AttributeValueAny) \
and not isinstance(value2, AttributeValueAny):
return False
return value1 == value2
class StopEvent(Event):
"""
A special event used to asynchronously stop components, workers and sockets
Fabio Manganiello, 2015 <[email protected]>
"""
class AttributeValueAny(object):
"""
When an event attribute type is AttributeValueAny,
that attribute won't be taken into account when
two events are compared through == operator or
explicit __eq__ method invocation.
Fabio Manganiello, 2015 <[email protected]>
"""
def __eq__(self, value):
""" Always return True. Any value equals "any" """
return True
def __repr__(self):
return "__ANY__"
# vim:sw=4:ts=4:et:
| apache-2.0 | 6,722,185,896,443,257,000 | 27.615385 | 120 | 0.600806 | false | 4.300578 | false | false | false |
Shrews/PyGerrit | webapp/django/core/handlers/wsgi.py | 1 | 8460 | from threading import Lock
from pprint import pformat
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
STATUS_CODE_TEXT = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
}
def safe_copyfileobj(fsrc, fdst, length=16*1024, size=0):
"""
A version of shutil.copyfileobj that will not read more than 'size' bytes.
This makes it safe from clients sending more than CONTENT_LENGTH bytes of
data in the body.
"""
if not size:
return
while size > 0:
buf = fsrc.read(min(length, size))
if not buf:
break
fdst.write(buf)
size -= len(buf)
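

# Illustrative example (not part of Django): copy at most 5 bytes between two
# file-like objects, no matter how much data the source actually holds.
#
#     src, dst = StringIO("hello world"), StringIO()
#     safe_copyfileobj(src, dst, size=5)
#     dst.getvalue()  # -> "hello"
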
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = base.get_script_name(environ)
path_info = force_unicode(environ.get('PATH_INFO', u'/'))
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = u'/'
self.environ = environ
self.path_info = path_info
self.path = '%s%s' % (script_name, path_info)
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return '<WSGIRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(get, post, cookies, meta)
def get_full_path(self):
return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + self.environ.get('QUERY_STRING', '')) or '')
def is_secure(self):
return 'wsgi.url_scheme' in self.environ \
and self.environ['wsgi.url_scheme'] == 'https'
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method == 'POST':
if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
self._raw_post_data = ''
self._post, self._files = self.parse_file_upload(self.META, self.environ['wsgi.input'])
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
else:
self._post, self._files = http.QueryDict('', encoding=self._encoding), datastructures.MultiValueDict()
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_raw_post_data(self):
try:
return self._raw_post_data
except AttributeError:
buf = StringIO()
try:
# CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
content_length = int(self.environ.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length > 0:
safe_copyfileobj(self.environ['wsgi.input'], buf,
size=content_length)
self._raw_post_data = buf.getvalue()
buf.close()
return self._raw_post_data
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
from django.conf import settings
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.initLock.acquire()
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
self.initLock.release()
set_script_prefix(base.get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(environ)
except UnicodeDecodeError:
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
signals.request_finished.send(sender=self.__class__)
try:
status_text = STATUS_CODE_TEXT[response.status_code]
except KeyError:
status_text = 'UNKNOWN STATUS CODE'
status = '%s %s' % (response.status_code, status_text)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append(('Set-Cookie', str(c.output(header=''))))
start_response(status, response_headers)
return response
| apache-2.0 | -8,111,166,442,822,824,000 | 34.546218 | 134 | 0.590662 | false | 3.925754 | false | false | false |
petewarden/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py | 1 | 68032 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import convolutional
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name":
"test_simple_tokens_int_mode",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name":
"test_simple_tokens_int_mode_hard_cap",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 6,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name":
"test_special_tokens_int_mode",
# Mask tokens in the vocab data should be ingored, and mapped to 0 in
# from the input data.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
[""], [""], [""], ["[UNK]"], ["[UNK]"], ["[UNK]"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], [""], ["wind"], ["[UNK]"], ["and"], [""],
["fire"], ["and"], ["[UNK]"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],
},
{
"testcase_name":
"test_documents_int_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind and"], ["fire fire"], ["and earth"],
["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.INT
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name":
"test_documents_1d_input_int_mode",
"vocab_data":
np.array([
"fire earth earth", "earth earth", "wind wind", "and wind and"
]),
"input_data":
np.array([["earth wind and"], ["fire fire"], ["and earth"],
["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.INT
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name":
"test_simple_tokens_binary_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.BINARY
},
"expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
[0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_binary_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.BINARY
},
"expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1],
[1, 1, 0, 0, 0]],
},
{
"testcase_name":
"test_simple_tokens_count_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.COUNT
},
"expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
[0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_count_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.COUNT
},
"expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 2],
[1, 1, 0, 0, 0]],
},
{
"testcase_name":
"test_tokens_idf_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.TFIDF
},
"expected_output": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],
[0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],
[0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],
[0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_idf_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.TFIDF
},
"expected_output": [[0., 0.847298, 0.847298, 0., 0.],
[0., 0., 0., 1.098612, 0.],
[0., 0., 0., 0., 2.197225],
[0.972955, 0.847298, 0., 0., 0.]],
},
)
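  # Note on the TFIDF expected values above: the weights appear to follow
  # idf = ln(1 + num_docs / (1 + doc_freq)) (inferred from the numbers, not
  # from documentation). In "test_tokens_idf_mode", "earth" occurs in 4 of
  # the 10 one-token vocab documents, giving ln(1 + 10 / 5) = 1.098612; the
  # OOV bucket gets the average of the in-vocab weights, 1.402368.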
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
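  # (For illustration: "test_simple_tokens_int_mode" is kept as-is and also
  # duplicated as "test_simple_tokens_int_mode_with_dataset".)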
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationLayerTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
use_dataset, expected_output):
cls = text_vectorization.TextVectorization
if kwargs.get("output_mode") == text_vectorization.INT:
expected_output_dtype = dtypes.int64
else:
expected_output_dtype = dtypes.float32
input_shape = input_data.shape
if use_dataset:
# Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# TextVectorization), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing layer
# into other keras layers instead of predicting it directly. A workaround
# for these unit tests is to have the dataset only contain one batch, so
# no concatenation needs to happen with the result. For consistency with
# numpy input, we should make `predict` join differently shaped results
# together sensibly, with 0 padding.
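      # (Illustration of the failure mode, assumed from the description
      # above: with output_mode=INT and no fixed output_sequence_length,
      # one batch may predict to shape (batch, 3) and the next to
      # (batch, 5), and concatenating those along the batch axis fails.)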
input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
input_shape[0])
vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0])
output_data = testing_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=dtypes.string,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data)
self.assertAllClose(expected_output, output_data)
def test_list_inputs_1d(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_tensor_inputs(self):
vocab_data = constant_op.constant(
["two two two", "two three three", "three four four five"])
input_data = constant_op.constant(["two three", "four five"])
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_list_inputs_2d(self):
vocab_data = [
["two two two"], ["two three three"], ["three four four five"]]
input_data = [["two three"], ["four five"]]
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_dataset_of_single_strings(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
vocab_ds = dataset_ops.Dataset.from_tensor_slices(vocab_data) # unbatched
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_ds)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0", "a", "b", "c", "d", "e", "a", "b", "c", "d", "f"],
"expected": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]
},
{
"testcase_name": "2d",
"data": [["0", "a", "b", "c", "d"], ["e", "a", "b", "c", "d"], ["f"]],
"expected": [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 0, 0, 0, 0]]
},
{
"testcase_name":
"3d",
"data": [[["0", "a", "b"], ["c", "d"]], [["e", "a"], ["b", "c", "d"]],
[["f"]]],
"expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]]]
},
)
def test_layer_dimensionality_handling(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None, standardize=None, split=None, pad_to_max_tokens=False)
vectorization.set_vocabulary(vocab)
output = vectorization(ragged_factory_ops.constant(data))
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0 a b c d e a b c d f"],
"expected": [[1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]]
},
{
"testcase_name":
"3d",
"data": [[["0 a b"], ["c d"]], [["e a"], ["b c d"]], [["f"]]],
"expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]]]
},
)
def test_layer_dimensionality_handling_with_split(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
pad_to_max_tokens=False)
vectorization.set_vocabulary(vocab)
output = vectorization(ragged_factory_ops.constant(data, inner_shape=(1,)))
self.assertAllEqual(expected, output)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationPreprocessingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_summary_before_adapt(self):
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=text_vectorization.TFIDF)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# We are testing that model.summary() can be called without erroring out.
# (b/145726907)
model.summary()
def test_normalization(self):
input_array = np.array([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"]])
expected_output = np.array([[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_normalization_ragged_inputs(self):
input_array = ragged_factory_ops.constant([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}"]])
expected_output = [[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth"]]
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_normalization(self):
input_array = np.array([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"]])
expected_output = np.array(
[[b"earth", b"wind", b"and", b"fire"],
[b"fire|", b"an<>d", b"{earth}", b"michigan@%$"]])
custom_standardization = gen_string_ops.string_lower
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardization,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_splitting(self):
input_array = np.array([["earth wind and fire"],
["\tfire\tand\nearth michigan "]])
expected_output = [[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_string_splitting(self):
input_array = np.array([["earth>wind>and fire"],
["\tfire>and\nearth>michigan"]])
expected_output = [[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"]]
custom_split = lambda x: ragged_string_ops.string_split_v2(x, sep=">")
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=custom_split,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value_ragged_inputs(self):
input_array = ragged_factory_ops.constant([["earth", "wind", "and", "fire"],
["fire", "and", "earth"]])
# pyformat: disable
expected_output = [[b"earth", b"wind", b"and", b"fire",
b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire", b"and", b"earth",
b"fire and", b"and earth",
b"fire and earth"]]
# pyformat: enable
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value(self):
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[b"earth", b"wind", b"and", b"fire",
b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire", b"and", b"earth", b"michigan",
b"fire and", b"and earth", b"earth michigan",
b"fire and earth", b"and earth michigan"]]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_multiple_ngram_values(self):
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire and", b"and earth", b"earth michigan",
b"fire and earth", b"and earth michigan"]]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=(2, 3),
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_multiple_preprocessing_steps(self):
input_array = np.array([["earth wInD and firE"],
["\tfire\tand\nearth!! michig@n "]])
expected_output = [[
b"earth",
b"wind",
b"and",
b"fire",
b"earth wind",
b"wind and",
b"and fire",
],
[
b"fire",
b"and",
b"earth",
b"michign",
b"fire and",
b"and earth",
b"earth michign",
]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=text_vectorization.SPLIT_ON_WHITESPACE,
ngrams=2,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_splitting_with_non_1d_array_fails(self):
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=None)
with self.assertRaisesRegex(RuntimeError,
".*tokenize strings, the innermost dime.*"):
_ = layer(input_data)
def test_string_splitting_with_non_1d_raggedarray_fails(self):
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
vocabulary=["a"],
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=None)
with self.assertRaisesRegex(RuntimeError,
".*tokenize strings, the innermost dime.*"):
_ = layer(input_data)
def test_standardization_with_invalid_standardize_arg(self):
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(vocabulary=["a"])
layer._standardize = "unsupported"
with self.assertRaisesRegex(ValueError,
".*is not a supported standardization.*"):
_ = layer(input_data)
def test_splitting_with_invalid_split_arg(self):
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(vocabulary=["a"])
layer._split = "unsupported"
with self.assertRaisesRegex(ValueError, ".*is not a supported splitting.*"):
_ = layer(input_data)
def test_vocab_setting_via_init(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_init_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_setter(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_with_oov_via_setter(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution_strategy_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
strategy = one_device_strategy.OneDeviceStrategy("/cpu:0")
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationOutputTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_int_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4 in
# the second. This should output a 2x5 tensor with a padding value in the
# second example.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5], [5, 4, 2, 1, 0]]
# This test doesn't explicitly set an output shape, so the 2nd dimension
# should stay 'None'.
expected_output_shape = [None, None]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x6 tensor with padding values in both
    # examples, since output_sequence_length is set to 6.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5, 0], [5, 4, 2, 1, 0, 0]]
output_sequence_length = 6
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_strips(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x3 tensor, since both examples are
    # truncated to output_sequence_length 3.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_dynamically_strips_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x3 tensor, since both examples are
    # truncated to output_sequence_length 3.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
    # Create an input array that has 1 element in the first example and 2 in
    # the second. This should output a 2x3 tensor with padding values in both
    # examples, since output_sequence_length is set to 3.
input_array_2 = np.array([["wind"], ["fire and"]])
expected_output_2 = [[3, 0, 0], [5, 4, 0]]
output_dataset = model.predict(input_array_2)
self.assertAllEqual(expected_output_2, output_dataset)
def test_binary_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_binary_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_set_vocabulary_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.set_vocabulary(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_adapt_after_build(self):
vocab_data = np.array([
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
])
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_set_state_variables_after_build(self):
state_variables = {
text_vectorization._VOCAB_NAME: ["earth", "wind", "and", "fire"]
}
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer._set_state_variables(state_variables)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_multiple_adapts(self):
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
adapt_data = ["earth", "earth", "earth", "earth", "wind", "wind", "wind"]
first_expected_output = [
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
]
second_adapt_data = [
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
]
second_expected_output = [
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Test the first adapt
layer.adapt(adapt_data)
first_output = model.predict(input_array)
# Test the second adapt
layer.adapt(second_adapt_data)
second_output = model.predict(input_array)
self.assertAllEqual(first_expected_output, first_output)
self.assertAllEqual(second_expected_output, second_output)
def test_bag_output_soft_maximum_set_state_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.build(input_data.shape)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_soft_maximum_set_vocabulary_after_call_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.adapt(vocab_data)
_ = layer(input_data)
with self.assertRaisesRegex(RuntimeError, "vocabulary cannot be changed"):
layer.set_vocabulary(vocab_data)
def test_bag_output_soft_maximum_set_state_variables_after_call_fails(self):
state_variables = {
text_vectorization._VOCAB_NAME: ["earth", "wind", "and", "fire"]
}
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.adapt(["earth", "wind"])
_ = layer(input_data)
with self.assertRaisesRegex(RuntimeError, "vocabulary cannot be changed"):
layer._set_state_variables(state_variables)
def test_count_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0],
[2, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0],
[2, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_tfidf_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0, 0],
[ 1, .4, 0, 0, .6, 0]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_tfidf_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ 1, .4, 0, 0, .6]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_tfidf_output_set_oov_weight(self):
vocab_data = ["[UNK]", "earth", "wind", "and", "fire"]
idf_weights = [.1, .4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ .2, .4, 0, 0, .6]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_accept_1D_input(self):
input_array = np.array(["earth wind and fire",
"fire and earth michigan"])
layer = text_vectorization.TextVectorization(
standardize=None, split=None, output_mode="int")
layer.adapt(input_array)
_ = layer(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationModelBuildingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(
{
"testcase_name": "count_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.COUNT
}, {
"testcase_name": "count_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.COUNT
}, {
"testcase_name": "binary_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.BINARY
}, {
"testcase_name": "binary_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.BINARY
}, {
"testcase_name": "tfidf_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.TFIDF
}, {
"testcase_name": "tfidf_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.TFIDF
})
def test_end_to_end_bagged_modeling(self, output_mode, pad_to_max_tokens):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [.5, .25, .2, .125]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=output_mode,
pad_to_max_tokens=pad_to_max_tokens)
if output_mode == text_vectorization.TFIDF:
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
else:
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
def test_end_to_end_vocab_modeling(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
output_sequence_length = 6
max_tokens = 5
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
embedded_data = embeddings.Embedding(
input_dim=max_tokens + 1, output_dim=32)(
int_data)
output_data = convolutional.Conv1D(
250, 3, padding="valid", activation="relu", strides=1)(
embedded_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationErrorTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_too_long_vocab_fails_in_single_setting(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(
max_tokens=4,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
with self.assertRaisesRegex(ValueError,
"vocabulary larger than the maximum vocab.*"):
layer.set_vocabulary(vocab_data)
def test_setting_vocab_without_idf_weights_fails_in_tfidf_mode(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be set if output_mode is TFIDF"):
layer.set_vocabulary(vocab_data)
def test_idf_weights_length_mismatch_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be the same length as vocab"):
layer.set_vocabulary(vocab_data, idf_weights)
def test_set_tfidf_in_non_tfidf_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3, 4]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY)
with self.assertRaisesRegex(ValueError,
"`idf_weights` should only be set if"):
layer.set_vocabulary(vocab_data, idf_weights)
def test_zero_max_tokens_fails(self):
with self.assertRaisesRegex(ValueError, "max_tokens.*"):
_ = text_vectorization.TextVectorization(max_tokens=0)
def test_non_string_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "dtype of string.*"):
_ = text_vectorization.TextVectorization(dtype=dtypes.int64)
def test_unknown_standardize_arg_fails(self):
with self.assertRaisesRegex(ValueError,
"standardize arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(standardize="unsupported_value")
def test_unknown_split_arg_fails(self):
with self.assertRaisesRegex(ValueError, "split arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(split="unsupported_value")
def test_unknown_output_mode_arg_fails(self):
with self.assertRaisesRegex(ValueError,
"output_mode arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(output_mode="unsupported_value")
def test_unknown_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*unsupported_value"):
_ = text_vectorization.TextVectorization(ngrams="unsupported_value")
def test_float_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*2.9"):
_ = text_vectorization.TextVectorization(ngrams=2.9)
def test_float_tuple_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*(1.3, 2.9)"):
_ = text_vectorization.TextVectorization(ngrams=(1.3, 2.9))
def test_non_int_output_sequence_length_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "output_sequence_length.*2.0"):
_ = text_vectorization.TextVectorization(
output_mode="int", output_sequence_length=2.0)
def test_non_none_output_sequence_length_fails_if_output_type_not_int(self):
with self.assertRaisesRegex(ValueError,
"`output_sequence_length` must not be set"):
_ = text_vectorization.TextVectorization(
output_mode="count", output_sequence_length=2)
# Custom functions for the custom callable serialization test. Declared here
# to avoid multiple registrations from run_all_keras_modes().
@generic_utils.register_keras_serializable(package="Test")
def custom_standardize_fn(x):
return gen_string_ops.string_lower(x)
@generic_utils.register_keras_serializable(package="Test")
def custom_split_fn(x):
return ragged_string_ops.string_split_v2(x, sep=">")
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationSavingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def tearDown(self):
keras.backend.clear_session()
gc.collect()
super(TextVectorizationSavingTest, self).tearDown()
def test_saving(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
# TODO(b/149526183): Can't clear session when TF2 is disabled.
if tf2.enabled():
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_when_nested(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
outer_input = keras.Input(shape=(None,), dtype=dtypes.string)
outer_output = model(outer_input)
outer_model = keras.Model(inputs=outer_input, outputs=outer_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
outer_model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
# TODO(b/149526183): Can't clear session when TF2 is disabled.
if tf2.enabled():
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_with_tfidf(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ 1, .4, 0, 0, .6]]
vocab_data = ["earth", "wind", "and", "fire"]
# pylint: enable=bad-whitespace
# pyformat: enable
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllClose(new_output_dataset, expected_output)
def test_serialization_with_custom_callables(self):
input_array = np.array([["earth>wind>and Fire"],
["\tfire>And\nearth>michigan"]])
expected_output = [[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardize_fn,
split=custom_split_fn,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
serialized_model_data = model.get_config()
new_model = keras.Model.from_config(serialized_model_data)
new_output_dataset = new_model.predict(input_array)
self.assertAllEqual(expected_output, new_output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationE2ETest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_keras_vocab_trimming_example(self):
vocab_data = np.array([
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
])
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[1, 2, 1],
[3, 1, 0]]
# pyformat: enable
max_tokens = 3
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(input_data, int_data)
output = model.predict(input_array)
self.assertAllEqual(expected_output, output)
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,295,234,925,009,800,400 | 39.08957 | 81 | 0.594367 | false | 3.510604 | true | false | false |
Sapphirine/Predicting-The-United-States-Presidential-Election-Results-Using-TwitterSentiment | src/calculate_state_prob.py | 1 | 1641 | import pandas as pd
import csv
#Open file to save Trump tweets sentiment used to estimate probability
csvfile=open("/home/ubuntu/project/output_data/trump_pos_sentiment.csv", "w")
csvwriter=csv.writer(csvfile, delimiter=",")
#Assign header row
csvwriter.writerow(["Index"]+["State"]+["Sentiment"])
#Initialize counter for tweets
index=0
#Open Trump results and load in file
with open("/home/ubuntu/project/output_data/trump_tweets_results.csv","r") as infile:
for line in infile:
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[str(line).split(",")[1].strip()])
index+=1
#Open Clinton results, flip sentiment and load in file
with open("/home/ubuntu/project/output_data/clinton_tweets_results.csv","r") as infile:
for line in infile:
if str(line).split(",")[1].rstrip()=="1.0":
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[0.0])
index+=1
else:
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[1.0])
index+=1
#Close csv file
csvfile.close()
#Load data into data frame
data=pd.DataFrame.from_csv("/home/ubuntu/project/output_data/trump_pos_sentiment.csv")
#print data
#Group sentiment by state
grouped_data=data.groupby("State")["Sentiment"].mean()
#aggregations = {
# "Sentiment":'mean'
#}
#grouped_data=data.groupby("State").agg(aggregations)
#grouped_data=data.groupby(["State", "Sentiment"]).mean()
print grouped_data
#Load into data frame
prob = pd.DataFrame(grouped_data)
#load into csv file
prob.to_csv("/home/ubuntu/project/output_data/trump_win_prob.csv", sep=",", encoding="utf-8")
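#Hypothetical extension (not part of the original pipeline): call each state
#for the candidate holding the majority sentiment share, e.g.:
#import numpy as np
#prob["Winner"]=np.where(prob["Sentiment"]>0.5, "Trump", "Clinton")
#print prob[["Winner"]]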
| apache-2.0 | -6,014,421,773,318,569,000 | 33.1875 | 103 | 0.678245 | false | 3.125714 | false | false | false |
jaantollander/CrowdDynamics | crowddynamics/core/motion/tests/test_motion.py | 1 | 2448 | import numpy as np
from hypothesis import given
from crowddynamics.core.motion.adjusting import force_adjust, torque_adjust
from crowddynamics.core.motion.contact import force_contact
from crowddynamics.core.motion.fluctuation import force_fluctuation, \
torque_fluctuation
from crowddynamics.core.motion.helbing import \
force_social_helbing
from crowddynamics.testing import reals
SIZE = 10
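# The tests below are property-based: hypothesis draws random inputs from the
# `reals` strategy, and each test asserts only the output dtype and shape,
# not the physical correctness of the computed forces and torques.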
@given(mass=reals(min_value=0, shape=SIZE),
scale=reals(min_value=0, shape=SIZE))
def test_force_fluctuation(mass, scale):
ans = force_fluctuation(mass, scale)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (SIZE, 2)
@given(mass=reals(min_value=0),
tau_adj=reals(min_value=0, exclude_zero='near'),
v0=reals(min_value=0),
e0=reals(shape=2),
v=reals(shape=2))
def test_force_adjust(mass, tau_adj, v0, e0, v):
ans = force_adjust(mass, tau_adj, v0, e0, v)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(h=reals(),
n=reals(shape=2),
a=reals(min_value=0),
b=reals(min_value=0, exclude_zero='near'))
def test_force_social_helbing(h, n, a, b):
ans = force_social_helbing(h, n, a, b)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(h=reals(),
n=reals(shape=2),
v=reals(shape=2),
t=reals(shape=2),
mu=reals(min_value=0),
kappa=reals(min_value=0),
damping=reals(min_value=0))
def test_force_contact(h, n, v, t, mu, kappa, damping):
ans = force_contact(h, n, v, t, mu, kappa, damping)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(inertia_rot=reals(0, shape=SIZE), scale=reals(0, shape=SIZE))
def test_torque_fluctuation(inertia_rot, scale):
ans = torque_fluctuation(inertia_rot, scale)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (SIZE,)
@given(inertia_rot=reals(0),
tau_rot=reals(0, exclude_zero='near'),
phi_0=reals(),
phi=reals(),
omega_0=reals(),
omega=reals())
def test_torque_adjust(inertia_rot, tau_rot, phi_0, phi, omega_0, omega):
ans = torque_adjust(inertia_rot, tau_rot, phi_0, phi, omega_0,
omega)
assert isinstance(ans, float)
| gpl-3.0 | 1,157,326,579,588,317,200 | 30.384615 | 75 | 0.658497 | false | 2.843206 | true | false | false |
stoq/kiwi | examples/validation/datatypes.py | 1 | 1096 | import datetime
from gi.repository import Gtk
from kiwi.currency import currency
from kiwi.ui.widgets.entry import ProxyEntry
from kiwi.ui.widgets.label import ProxyLabel
window = Gtk.Window()
window.connect('delete-event', Gtk.main_quit)
window.set_border_width(6)
vbox = Gtk.VBox()
window.add(vbox)
data_types = [
(True, bool),
(42, int),
(22.0 / 7.0, float),
(3000, int),
('THX', str),
(datetime.datetime.now(), datetime.datetime),
(datetime.date.today(), datetime.date),
(datetime.time(11, 38, 00), datetime.time),
(currency('50.1'), currency),
]
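# Each (value, type) pair above becomes one row below: a bold label naming
# the type, a read-only ProxyLabel rendering the value, and a ProxyEntry
# that validates edits against the data type.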
for data, data_type in data_types:
hbox = Gtk.HBox(True)
vbox.pack_start(hbox, False, False, 6)
label = ProxyLabel(data_type.__name__.capitalize())
label.set_bold(True)
hbox.pack_start(label, True, True, 0)
label = ProxyLabel(data_type=data_type)
label.update(data)
hbox.pack_start(label, False, False, 6)
entry = ProxyEntry(data_type=data_type)
entry.update(data)
entry.validate()
hbox.pack_start(entry, False, False, 6)
window.show_all()
Gtk.main()
| lgpl-2.1 | -7,734,861,046,727,665,000 | 22.826087 | 55 | 0.666058 | false | 3.036011 | false | false | false |
h2020-endeavour/iSDX | xctrl/flowmodmsg.py | 1 | 2846 | #!/usr/bin/env python
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
class FlowModMsgBuilder(object):
def __init__(self, participant, key):
self.participant = participant
self.key = key
self.flow_mods = []
def add_flow_mod(self, mod_type, rule_type, priority, match, action, datapath = None, cookie = None):
if cookie is None:
cookie = (len(self.flow_mods)+1, 65535)
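            # default cookie: (sequence number, full 16-bit mask), giving
            # each rule a unique, exactly-matchable id in the format
            # documented at the end of this file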
fm = {
"cookie": cookie,
"datapath": datapath,
"mod_type": mod_type,
"rule_type": rule_type,
"priority": priority,
"match": match,
"action": action
}
self.flow_mods.append(fm)
return cookie
def delete_flow_mod(self, mod_type, rule_type, cookie, cookie_mask):
fm = {
"cookie": (cookie, cookie_mask),
"mod_type": mod_type,
"rule_type": rule_type,
}
self.flow_mods.append(fm)
def get_msg(self):
msg = {
"auth_info": {
"participant" : self.participant,
"key" : self.key
},
"flow_mods": self.flow_mods
}
return msg
def reset_flow_mod(self):
self.flow_mods = []
# request body format:
# {"auth_info": {
# "participant": 1,
# "key": "xyz"
# }
# "flow_mods": [
# { "cookie": (1, 2**16-1),
# "mod_type": "insert/remove",
# "rule_type": "inbound/outbound/main",
# "priority": 1,
# "match" : {
# "eth_type" : 0x0806,
# "arp_tpa" : ("172.1.0.0", "255.255.255.0"),
# "in_port" : 5,
# "eth_dst" : "ff:ff:ff:ff:ff:ff",
# "eth_src" : "80:23:ff:98:10:01",
# "ipv4_src" : "192.168.1.1",
# "ipv4_dst" : "192.168.1.2",
# "tcp_src" : 80,
# "tcp_dst" : 179,
# "udp_src" : 23,
# "udp_dst" : 22,
# },
# "action" : {
# "fwd": ["inbound"/"outbound"/"main-in"/main-out"],
# "set_eth_src": "80:23:ff:98:10:01",
# "set_eth_dst": ("00:00:00:00:00:01","00:00:00:00:03:ff")
# }
# },
# { "cookie": (2, 2**16-1),
# "mod_type": "insert/remove",
# "rule_type": "inbound/outbound/main",
# "match" : {"tcp_dst" : 80},
# "action" : {"fwd": [3]}
# }
# ...]
# }
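# Minimal usage sketch (not part of the original module; the participant
# id and key below are placeholder values):
if __name__ == '__main__':
    builder = FlowModMsgBuilder(participant=1, key='xyz')
    builder.add_flow_mod(mod_type='insert',
                         rule_type='outbound',
                         priority=1,
                         match={'tcp_dst': 80},
                         action={'fwd': [3]})
    # serializes into the request body format documented above
    print(builder.get_msg())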
| apache-2.0 | 162,722,739,256,632,800 | 30.977528 | 105 | 0.382642 | false | 3.487745 | false | false | false |
ben-jones/centinel | centinel/vpn/openvpn.py | 1 | 2675 | #!/usr/bin/python
# openvpn.py: library to handle starting and stopping openvpn instances
import subprocess
import threading
import time
class OpenVPN():
def __init__(self, config_file=None, auth_file=None, timeout=10):
self.started = False
self.stopped = False
self.error = False
self.notifications = ""
self.auth_file = auth_file
self.config_file = config_file
self.thread = threading.Thread(target=self._invoke_openvpn)
self.thread.setDaemon(1)
self.timeout = timeout
def _invoke_openvpn(self):
if self.auth_file is None:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file]
else:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file,
'--auth-user-pass', self.auth_file]
self.process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.kill_switch = self.process.terminate
self.starting = True
        while True:
            # readline() returns '' only at EOF; stripping before the
            # emptiness check would end the loop on any blank output line.
            line = self.process.stdout.readline()
            if not line:
                break
            self.output_callback(line.strip(), self.process.terminate)
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True
def start(self, timeout=None):
"""Start openvpn and block until the connection is opened or there is
an error
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
print "openvpn started"
else:
print "openvpn not started"
print self.notifications
def stop(self, timeout=None):
"""Stop openvpn"""
if not timeout:
timeout = self.timeout
self.kill_switch()
self.thread.join(timeout)
if self.stopped:
print "stopped"
else:
print "not stopped"
print self.notifications
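# Minimal usage sketch (not part of the original module; the config and
# auth file paths are hypothetical, and openvpn plus sudo must be
# available for this to work):
if __name__ == '__main__':
    vpn = OpenVPN(config_file='client.conf', auth_file='auth.txt',
                  timeout=30)
    vpn.start()
    if vpn.started:
        # tunnel is up; run traffic through it here, then tear it down
        vpn.stop()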
| mit | -5,255,765,130,357,951,000 | 31.621951 | 77 | 0.541308 | false | 4.503367 | true | false | false |
DTOcean/dtocean-core | dtocean_core/utils/moorings.py | 1 | 21601 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
def get_component_dict(component_type,
data_table,
rope_data=None,
sand_data=None,
soft_data=None,
check_keys=None):
valid_components = ["cable",
"chain",
"drag anchor",
"forerunner assembly",
"pile",
"rope",
"shackle",
"swivel"]
if component_type not in valid_components:
valid_str = ", ".join(valid_components)
errStr = ("Argument system_type must be one of '{}' not "
"'{}'").format(valid_str, component_type)
raise ValueError(errStr)
if component_type in ["drag anchor", "pile"]:
system_type = "foundation system"
else:
system_type = "mooring system"
compdict = {}
if check_keys is None: check_keys = []
key_ids = data_table["Key Identifier"]
for key_id in key_ids:
# Check for duplicates
if key_id in check_keys:
errStr = "Key identifier {} has been duplicated".format(key_id)
raise KeyError(errStr)
# Start building the value dictionary
data_dict = {"item1": system_type,
"item2": component_type}
record = data_table.loc[data_table['Key Identifier'] == key_id]
# Build shared items
data_dict["item3"] = record.iloc[0]["Name"]
# Build component specific items
if component_type in ["chain", "forerunner assembly"]:
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Diameter"],
record.iloc[0]["Connecting Length"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type in ["shackle", "swivel"]:
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Nominal Diameter"],
record.iloc[0]["Connecting Length"]]
data_dict["item7"] = [record.iloc[0]["Dry Unit Mass"],
record.iloc[0]["Wet Unit Mass"]]
data_dict["item11"] = record.iloc[0]["Cost"]
elif component_type == "pile":
data_dict["item5"] = [record.iloc[0]["Yield Stress"],
record.iloc[0]["Youngs Modulus"]]
data_dict["item6"] = [record.iloc[0]["Diameter"],
record.iloc[0]["Wall Thickness"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type == "drag anchor":
if sand_data is None or soft_data is None:
errStr = ("Arguments 'sand_data' and 'soft_data' must be "
"supplied if component_type is 'drag anchor'")
raise ValueError(errStr)
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Width"],
record.iloc[0]["Depth"],
record.iloc[0]["Height"],
record.iloc[0]["Connecting Size"]]
data_dict["item7"] = [record.iloc[0]["Dry Unit Mass"],
record.iloc[0]["Wet Unit Mass"]]
# Add anchor coefficients
sand_coeffs = sand_data.loc[sand_data['Key Identifier'] == key_id]
            soft_coeffs = soft_data.loc[soft_data['Key Identifier'] == key_id]
sand_df = sand_coeffs[['Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']]
soft_df = soft_coeffs[['Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']]
data_dict["item9"] = {'sand': sand_df.values.tolist()[0],
'soft': soft_df.values.tolist()[0]}
data_dict["item11"] = record.iloc[0]["Cost"]
elif component_type == "rope":
# Build rope axial stiffness list
if rope_data is None:
errStr = ("Argument 'rope_data' must be supplied if "
"component_type is 'rope'")
raise ValueError(errStr)
rope_array = rope_data[key_id]
data_dict["item4"] = [record.iloc[0]["Material"]]
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
rope_array.tolist()]
data_dict["item6"] = [record.iloc[0]["Diameter"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type == "cable":
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Min Bend Radius"]]
data_dict["item6"] = [record.iloc[0]["Diameter"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
else:
errStr = "RUN FOR THE HILLS!!!!1!!"
raise RuntimeError(errStr)
compdict[key_id] = data_dict
check_keys.append(key_id)
return compdict
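# Minimal usage sketch (column values below are illustrative assumptions,
# not real component data):
#
#     chain_table = pd.DataFrame({
#         'Key Identifier': ['chain001'],
#         'Name': ['studless chain'],
#         'Min Break Load': [1e6],
#         'Axial Stiffness': [1e8],
#         'Diameter': [0.05],
#         'Connecting Length': [0.2],
#         'Dry Mass per Unit Length': [50.],
#         'Wet Mass per Unit Length': [43.],
#         'Cost per Unit Length': [100.]})
#
#     compdict = get_component_dict("chain", chain_table)
#     # compdict['chain001']['item5'] -> [1000000.0, 100000000.0]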
def get_moorings_tables(compdict):
cable_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Min Bend Radius',
'Diameter',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
chain_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Diameter',
'Connecting Length',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
forerunner_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Diameter',
'Connecting Length',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
shackle_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Nominal Diameter',
'Connecting Length',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
swivel_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Nominal Diameter',
'Connecting Length',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
pile_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Yield Stress',
'Youngs Modulus',
'Diameter',
'Wall Thickness',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
anchor_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Connecting Size',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
anchor_sand_df = pd.DataFrame(columns=[
'Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2'])
anchor_soft_df = pd.DataFrame(columns=[
'Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2'])
rope_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Material',
'Min Break Load',
'Diameter',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
rope_dict = {}
for key_id, data_dict in compdict.iteritems():
values = []
columns = []
# Get component type
component_type = data_dict["item2"]
# Build shared items
columns.append("Key Identifier")
values.append(key_id)
columns.append("Name")
values.append(data_dict["item3"])
# Build component specific items
if component_type in ["chain", "forerunner assembly"]:
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Connecting Length")
values.append(data_dict["item6"][1])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
if component_type == "chain":
chain_df = chain_df.append(record, ignore_index=True)
else:
forerunner_df = forerunner_df.append(record, ignore_index=True)
elif component_type in ["shackle", "swivel"]:
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Width")
values.append(data_dict["item6"][0])
columns.append("Depth")
values.append(data_dict["item6"][0])
columns.append("Height")
values.append(data_dict["item6"][0])
columns.append("Nominal Diameter")
values.append(data_dict["item6"][0])
columns.append("Connecting Length")
values.append(data_dict["item6"][1])
columns.append("Dry Unit Mass")
values.append(data_dict["item7"][0])
columns.append("Wet Unit Mass")
values.append(data_dict["item7"][1])
columns.append("Cost")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
if component_type == "shackle":
shackle_df = shackle_df.append(record, ignore_index=True)
else:
swivel_df = swivel_df.append(record, ignore_index=True)
elif component_type == "pile":
columns.append("Yield Stress")
values.append(data_dict["item5"][0])
columns.append("Youngs Modulus")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Wall Thickness")
values.append(data_dict["item6"][1])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
pile_df = pile_df.append(record, ignore_index=True)
elif component_type == "drag anchor":
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Width")
values.append(data_dict["item6"][0])
columns.append("Depth")
values.append(data_dict["item6"][1])
columns.append("Height")
values.append(data_dict["item6"][2])
columns.append("Connecting Size")
values.append(data_dict["item6"][3])
columns.append("Dry Unit Mass")
values.append(data_dict["item7"][0])
columns.append("Wet Unit Mass")
values.append(data_dict["item7"][1])
columns.append("Cost")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
anchor_df = anchor_df.append(record, ignore_index=True)
# Anchor coefficients
coef_cols = ['Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']
sand_list = [key_id]
soft_list = [key_id]
sand_list.extend(data_dict["item9"]["sand"])
soft_list.extend(data_dict["item9"]["soft"])
# Fix error in data
if len(sand_list) == 4: sand_list.append(0.)
if len(soft_list) == 4: soft_list.append(0.)
sand_record = pd.Series(sand_list, index=coef_cols)
soft_record = pd.Series(soft_list, index=coef_cols)
anchor_sand_df = anchor_sand_df.append(sand_record,
ignore_index=True)
            anchor_soft_df = anchor_soft_df.append(soft_record,
                                                   ignore_index=True)
elif component_type == "rope":
columns.append("Material")
values.append(data_dict["item4"][0])
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
rope_df = rope_df.append(record, ignore_index=True)
# Collect the rope axial stress data
rope_dict[key_id] = data_dict["item5"][1]
elif component_type == "cable":
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Min Bend Radius")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
cable_df = cable_df.append(record, ignore_index=True)
else:
errStr = ("The blue meanies are coming! Or, there was an unknown "
"component type: {}").format(component_type)
raise RuntimeError(errStr)
tables = {"cable": cable_df,
"chain": chain_df,
"forerunner assembly": forerunner_df,
"shackle": shackle_df,
"swivel": swivel_df,
"pile": pile_df,
"drag anchor": anchor_df,
"drag anchor sand": anchor_sand_df,
"drag anchor soft": anchor_soft_df,
"rope": rope_df,
"rope axial stiffness": rope_dict}
return tables
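# Round-trip sketch (assuming compdict was built by get_component_dict
# above): get_moorings_tables maps the dictionary back into DataFrames.
#
#     tables = get_moorings_tables(compdict)
#     print(tables["chain"].head())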
| gpl-3.0 | -1,758,551,616,762,855,000 | 39.679849 | 81 | 0.432711 | false | 4.878275 | false | false | false |
Letractively/portable-movie-organizer | movie-organizer/MovieDataEditor.py | 1 | 9673 | #
# portable-movie-organizer
#
# Copyright (c) 2010 Ali Aafee
#
# This file is part of portable-movie-organizer.
#
# portable-movie-organizer is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# portable-movie-organizer is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with portable-movie-organizer.
# If not, see <http://www.gnu.org/licenses/>.
import wx
import FieldDataList
import ImdbAPI
import os.path
import thread
dirName = os.path.dirname(os.path.abspath(__file__))
dirName, fileName = os.path.split(dirName)
resDir = os.path.join(dirName, 'res')
class MovieDataEditor(wx.Dialog):
def __init__(self, parent, postersPath, catchPath, title='Edit Movie Metadata'):
self.title = title
self.postersPath = postersPath
self.catchPath = catchPath
self._init_ctrls(parent)
def _init_ctrls(self, parent):
wx.Dialog.__init__(self, name='MovieEditor', parent=parent,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
title=self.title, size=wx.Size(640,480))
self.fieldWindow = wx.ScrolledWindow(self, size=wx.Size(200,200), style=wx.HSCROLL)
self.fieldWindow.SetScrollbars(0,10,0,65)
gridSizer = wx.FlexGridSizer(7,4,10,10)
gridSizer.AddGrowableCol(1,1)
gridSizer.AddGrowableCol(3,1)
labelWidth = -1
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
self.lblTitle = wx.StaticText(self.fieldWindow, label='Title', size=wx.Size(labelWidth,-1))
self.txtTitle = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblTitle, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtTitle, 1, wx.EXPAND)
self.lblSort = wx.StaticText(self.fieldWindow, label='Sort', size=wx.Size(labelWidth,-1))
self.txtSort = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblSort, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtSort, 1, wx.EXPAND)
self.lblImage = wx.StaticText(self.fieldWindow, label='Poster', size=wx.Size(labelWidth,-1))
self.txtImage = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblImage, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtImage, 1, wx.EXPAND)
self.lblReleased = wx.StaticText(self.fieldWindow, label='Released', size=wx.Size(labelWidth,-1))
self.txtReleased = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblReleased, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtReleased, 1, wx.EXPAND)
self.lblRuntime = wx.StaticText(self.fieldWindow, label='Runtime', size=wx.Size(labelWidth,-1))
self.txtRuntime = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblRuntime, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtRuntime, 1, wx.EXPAND)
self.lblRated = wx.StaticText(self.fieldWindow, label='Rated', size=wx.Size(labelWidth,-1))
self.txtRated = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblRated, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtRated, 1, wx.EXPAND)
self.lblSummary = wx.StaticText(self.fieldWindow, label='Summary', size=wx.Size(labelWidth,-1))
self.txtSummary = wx.TextCtrl(self.fieldWindow, style=wx.TE_MULTILINE, size=wx.Size(-1,80))
gridSizer.Add(self.lblSummary, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtSummary, 1, wx.EXPAND)
self.lblGenres = wx.StaticText(self.fieldWindow, label='Genres', size=wx.Size(labelWidth,-1))
self.lstGenres = FieldDataList.FieldDataList(self.fieldWindow, size=wx.Size(-1,100))
gridSizer.Add(self.lblGenres, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstGenres, 1, wx.EXPAND)
self.lblActors = wx.StaticText(self.fieldWindow, label='Actors', size=wx.Size(labelWidth,-1))
self.lstActors = FieldDataList.FieldDataList(self.fieldWindow, size=wx.Size(-1,100))
gridSizer.Add(self.lblActors, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstActors, 1, wx.EXPAND)
self.lblDirectors = wx.StaticText(self.fieldWindow, label='Directors', size=wx.Size(labelWidth,-1))
self.lstDirectors = FieldDataList.FieldDataList(self.fieldWindow)
gridSizer.Add(self.lblDirectors, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstDirectors, 1, wx.EXPAND)
self.lblFiles = wx.StaticText(self.fieldWindow, label='Files', size=wx.Size(labelWidth,-1))
self.lstFiles = FieldDataList.FieldDataList(self.fieldWindow)
gridSizer.Add(self.lblFiles, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstFiles, 1, wx.EXPAND)
gridSizer.Add(wx.StaticText(self.fieldWindow, label=''))
self.fieldWindow.SetSizer(gridSizer)
self.fieldWindow.Layout()
self.btnSizer = self.CreateButtonSizer(wx.CANCEL)
self.btnSave = wx.Button(self, label="Save")
self.btnSave.Bind(wx.EVT_BUTTON, self.OnSave)
self.btnSizer.Add(self.btnSave)
self.mainTb = self._create_main_tb(self)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.mainTb, 0, wx.ALL | wx.ALIGN_LEFT | wx.EXPAND, 0 )
vbox.Add(self.fieldWindow, 1, wx.EXPAND)
vbox.Add(wx.StaticText(self,label=""))
vbox.Add(self.btnSizer, 0, wx.ALIGN_CENTER)
self.SetSizer(vbox)
self.Layout()
def _create_main_tb(self, parent):
tb = wx.ToolBar(parent, style=wx.TB_TEXT|wx.TB_NODIVIDER|wx.TB_HORIZONTAL|wx.TB_FLAT)
tb.SetToolBitmapSize((21, 21))
self.tb_search = wx.NewId()
tb.DoAddTool(
bitmap=wx.Bitmap(os.path.join(resDir,'web.png'), wx.BITMAP_TYPE_PNG),
#bitmap=wx.ArtProvider.GetBitmap(wx.ART_FIND),
bmpDisabled=wx.NullBitmap,
id=self.tb_search,
kind=wx.ITEM_NORMAL,
label='',
longHelp='',
shortHelp='Get Metadata from IMDB')
self.Bind(wx.EVT_TOOL, self.OnGetMetadata,
id=self.tb_search)
self.statusText = wx.StaticText(tb, label="")
tb.AddControl(self.statusText)
tb.Realize()
return tb
def SetData(self, data):
self.txtTitle.SetValue(data['title'])
self.txtSort.SetValue(data['sort'])
self.txtImage.SetValue(data['image'])
self.txtReleased.SetValue(data['released'])
self.txtRuntime.SetValue(data['runtime'])
self.txtRated.SetValue(data['rated'])
self.txtSummary.SetValue(data['summary'])
self.lstGenres.DeleteAllItems()
self.lstGenres.AddValues(data['genres'])
self.lstActors.DeleteAllItems()
self.lstActors.AddValues(data['actors'])
self.lstDirectors.DeleteAllItems()
self.lstDirectors.AddValues(data['directors'])
self.lstFiles.DeleteAllItems()
self.lstFiles.AddValues(data['files'])
def GetData(self):
data = {}
data['title'] = self.txtTitle.GetValue()
data['sort'] = self.txtSort.GetValue()
data['image'] = self.txtImage.GetValue()
data['released'] = self.txtReleased.GetValue()
data['runtime'] = self.txtRuntime.GetValue()
data['rated'] = self.txtRated.GetValue()
data['summary'] = self.txtSummary.GetValue()
data['genres'] = self.lstGenres.GetValues()
data['actors'] = self.lstActors.GetValues()
data['directors'] = self.lstDirectors.GetValues()
data['files'] = self.lstFiles.GetValues()
return data
def OnSave(self, event):
if self.txtTitle.GetValue() == '':
msg = wx.MessageDialog(self,
'Movie metadata cannot be saved without a Title. Cannot continue',
'Movie Title Missing', wx.OK|wx.ICON_INFORMATION)
msg.ShowModal()
msg.Destroy()
else:
self.EndModal(wx.ID_OK)
def OnGetMetadata(self, event):
title = self.txtTitle.GetValue()
year = self.txtReleased.GetValue()
if title=='':
dlg = wx.MessageDialog(self,
"Enter the title of the movie. Optionally enter the year(approximate).",
"Get metadata from IMDB",
wx.OK|wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
return
self.mainTb.EnableTool(self.tb_search, False)
self.statusText.SetLabel("Getting metadata from IMDB...")
thread.start_new_thread(self._get_metadata, (title, year, self.postersPath, self.catchPath))
def _get_metadata(self, title, year, postersPath, catchPath):
try:
metadata = ImdbAPI.GetMetadata(title, year, postersPath, catchPath)
wx.CallAfter(self._done_get_metadata, metadata)
except wx._core.PyDeadObjectError, e:
print "dialog closed before thread could complete"
def _done_get_metadata(self, metadata):
self.statusText.SetLabel("")
if metadata != None:
print "Success"
self.txtTitle.SetValue(metadata['title'])
self.txtImage.SetValue(metadata['image'])
self.txtReleased.SetValue(metadata['released'])
self.txtRuntime.SetValue(metadata['runtime'])
self.txtRated.SetValue(metadata['rated'])
self.txtSummary.SetValue(metadata['summary'])
print "Genres"
self.lstGenres.DeleteAllItems()
self.lstGenres.AddValuesSimple(metadata['genres'])
print "Actors"
self.lstActors.DeleteAllItems()
self.lstActors.AddValuesSimple(metadata['actors'])
print "Directors"
self.lstDirectors.DeleteAllItems()
self.lstDirectors.AddValuesSimple(metadata['directors'])
else:
dlg = wx.MessageDialog(self,
"No results were found for the given title and year. (this may be due to a network error)",
"Get metadata from IMDB",
wx.OK|wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
self.mainTb.EnableTool(self.tb_search, True)
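# Minimal usage sketch (not part of the original module; the directory
# names are placeholders):
#
#     app = wx.App(False)
#     dlg = MovieDataEditor(None, postersPath='posters', catchPath='catch')
#     if dlg.ShowModal() == wx.ID_OK:
#         movie_data = dlg.GetData()
#     dlg.Destroy()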
| gpl-3.0 | -6,006,860,567,625,273,000 | 33.546429 | 101 | 0.721493 | false | 2.933879 | false | false | false |
IsCoolEntertainment/debpkg_libcloud | libcloud/compute/drivers/openstack.py | 1 | 57541 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack driver
"""
try:
import simplejson as json
except ImportError:
import json
import warnings
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import next
from libcloud.utils.py3 import urlparse
import base64
from xml.etree import ElementTree as ET
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack import OpenStackDriverMixin
from libcloud.common.types import MalformedResponseError
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeSize, NodeImage
from libcloud.compute.base import NodeDriver, Node, NodeLocation
from libcloud.pricing import get_size_price
from libcloud.common.base import Response
from libcloud.utils.xml import findall
__all__ = [
'OpenStack_1_0_Response',
'OpenStack_1_0_Connection',
'OpenStack_1_0_NodeDriver',
'OpenStack_1_0_SharedIpGroup',
'OpenStack_1_0_NodeIpAddresses',
'OpenStack_1_1_Response',
'OpenStack_1_1_Connection',
'OpenStack_1_1_NodeDriver',
'OpenStackNodeDriver'
]
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"
DEFAULT_API_VERSION = '1.1'
class OpenStackResponse(Response):
node_driver = None
def success(self):
i = int(self.status)
return i >= 200 and i <= 299
def has_content_type(self, content_type):
content_type_value = self.headers.get('content-type') or ''
content_type_value = content_type_value.lower()
return content_type_value.find(content_type.lower()) > -1
def parse_body(self):
if self.status == httplib.NO_CONTENT or not self.body:
return None
if self.has_content_type('application/xml'):
try:
return ET.XML(self.body)
except:
raise MalformedResponseError(
'Failed to parse XML',
body=self.body,
driver=self.node_driver)
elif self.has_content_type('application/json'):
try:
return json.loads(self.body)
except:
raise MalformedResponseError(
'Failed to parse JSON',
body=self.body,
driver=self.node_driver)
else:
return self.body
def parse_error(self):
text = None
body = self.parse_body()
if self.has_content_type('application/xml'):
text = "; ".join([err.text or '' for err in body.getiterator()
if err.text])
elif self.has_content_type('application/json'):
values = body.values()
if len(values) > 0 and 'message' in values[0]:
text = ';'.join([fault_data['message'] for fault_data
in values])
else:
text = body
else:
# while we hope a response is always one of xml or json, we have
            # seen html or text in the past, it's not clear we can really do
# something to make it more readable here, so we will just pass
# it along as the whole response body in the text variable.
text = body
return '%s %s %s' % (self.status, self.error, text)
class OpenStackComputeConnection(OpenStackBaseConnection):
# default config for http://devstack.org/
service_type = 'compute'
service_name = 'nova'
service_region = 'RegionOne'
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
if method in ("POST", "PUT"):
headers = {'Content-Type': self.default_content_type}
if method == "GET":
self._add_cache_busting_to_params(params)
return super(OpenStackComputeConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
"""
Base OpenStack node driver. Should not be used directly.
"""
api_name = 'openstack'
name = 'OpenStack'
website = 'http://openstack.org/'
NODE_STATE_MAP = {
'BUILD': NodeState.PENDING,
'REBUILD': NodeState.PENDING,
'ACTIVE': NodeState.RUNNING,
'SUSPENDED': NodeState.TERMINATED,
'DELETED': NodeState.TERMINATED,
'QUEUE_RESIZE': NodeState.PENDING,
'PREP_RESIZE': NodeState.PENDING,
'VERIFY_RESIZE': NodeState.RUNNING,
'PASSWORD': NodeState.PENDING,
'RESCUE': NodeState.PENDING,
'REBOOT': NodeState.REBOOTING,
'HARD_REBOOT': NodeState.REBOOTING,
'SHARE_IP': NodeState.PENDING,
'SHARE_IP_NO_CONFIG': NodeState.PENDING,
'DELETE_IP': NodeState.PENDING,
'UNKNOWN': NodeState.UNKNOWN
}
def __new__(cls, key, secret=None, secure=True, host=None, port=None,
api_version=DEFAULT_API_VERSION, **kwargs):
if cls is OpenStackNodeDriver:
if api_version == '1.0':
cls = OpenStack_1_0_NodeDriver
elif api_version == '1.1':
cls = OpenStack_1_1_NodeDriver
else:
raise NotImplementedError(
"No OpenStackNodeDriver found for API version %s" %
(api_version))
return super(OpenStackNodeDriver, cls).__new__(cls)
def __init__(self, *args, **kwargs):
OpenStackDriverMixin.__init__(self, **kwargs)
super(OpenStackNodeDriver, self).__init__(*args, **kwargs)
def destroy_node(self, node):
uri = '/servers/%s' % (node.id)
resp = self.connection.request(uri, method='DELETE')
# The OpenStack and Rackspace documentation both say this API will
# return a 204, but in-fact, everyone everywhere agrees it actually
# returns a 202, so we are going to accept either, and someday,
# someone will fix either the implementation or the documentation to
# agree.
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def reboot_node(self, node):
return self._reboot_node(node, reboot_type='HARD')
def list_nodes(self):
return self._to_nodes(
self.connection.request('/servers/detail').object)
def list_images(self, location=None, ex_only_active=True):
"""
@inherits: L{NodeDriver.list_images}
@param ex_only_active: True if list only active
@type ex_only_active: C{bool}
"""
return self._to_images(
self.connection.request('/images/detail').object, ex_only_active)
def list_sizes(self, location=None):
return self._to_sizes(
self.connection.request('/flavors/detail').object)
def list_locations(self):
return [NodeLocation(0, '', '', self)]
def _ex_connection_class_kwargs(self):
return self.openstack_connection_kwargs()
def ex_get_node_details(self, node_id):
"""
Lists details of the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@rtype: L{Node}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s' % (node_id)
resp = self.connection.request(uri, method='GET')
if resp.status == httplib.NOT_FOUND:
return None
return self._to_node_from_obj(resp.object)
def ex_soft_reboot_node(self, node):
"""
Soft reboots the specified server
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
return self._reboot_node(node, reboot_type='SOFT')
def ex_hard_reboot_node(self, node):
"""
Hard reboots the specified server
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
return self._reboot_node(node, reboot_type='HARD')
class OpenStackNodeSize(NodeSize):
"""
NodeSize class for the OpenStack.org driver.
Following the example of OpenNebula.org driver
and following guidelines:
https://issues.apache.org/jira/browse/LIBCLOUD-119
"""
def __init__(self, id, name, ram, disk, bandwidth, price, driver,
vcpus=None):
super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram,
disk=disk,
bandwidth=bandwidth,
price=price, driver=driver)
self.vcpus = vcpus
def __repr__(self):
return (('<OpenStackNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
'bandwidth=%s, price=%s, driver=%s, vcpus=%s, ...>')
% (self.id, self.name, self.ram, self.disk, self.bandwidth,
self.price, self.driver.name, self.vcpus))
class OpenStack_1_0_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_0_NodeDriver
super(OpenStack_1_0_Response, self).__init__(*args, **kwargs)
class OpenStack_1_0_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_0_Response
default_content_type = 'application/xml; charset=UTF-8'
accept_format = 'application/xml'
XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
class OpenStack_1_0_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
Extra node attributes:
- password: root password, available after create.
- hostId: represents the host your cloud server runs on
- imageId: id of image
- flavorId: id of flavor
"""
connectionCls = OpenStack_1_0_Connection
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE
super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs)
def _to_images(self, object, ex_only_active):
images = []
for image in findall(object, 'image', self.XML_NAMESPACE):
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, element):
return NodeImage(id=element.get('id'),
name=element.get('name'),
driver=self.connection.driver,
extra={'updated': element.get('updated'),
'created': element.get('created'),
'status': element.get('status'),
'serverId': element.get('serverId'),
'progress': element.get('progress'),
'minDisk': element.get('minDisk'),
'minRam': element.get('minRam')
}
)
def _change_password_or_name(self, node, name=None, password=None):
uri = '/servers/%s' % (node.id)
if not name:
name = node.name
body = {'xmlns': self.XML_NAMESPACE,
'name': name}
if password is not None:
body['adminPass'] = password
server_elm = ET.Element('server', body)
resp = self.connection.request(
uri, method='PUT', data=ET.tostring(server_elm))
if resp.status == httplib.NO_CONTENT and password is not None:
node.extra['password'] = password
return resp.status == httplib.NO_CONTENT
def create_node(self, **kwargs):
"""
Create a new node
@inherits: L{NodeDriver.create_node}
@keyword ex_metadata: Key/Value metadata to associate with a node
@type ex_metadata: C{dict}
@keyword ex_files: File Path => File contents to create on
the node
@type ex_files: C{dict}
@keyword ex_shared_ip_group_id: The server is launched into
that shared IP group
@type ex_shared_ip_group_id: C{str}
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
attributes = {'xmlns': self.XML_NAMESPACE,
'name': name,
'imageId': str(image.id),
'flavorId': str(size.id)}
if 'ex_shared_ip_group' in kwargs:
# Deprecate this. Be explicit and call the variable
# ex_shared_ip_group_id since user needs to pass in the id, not the
# name.
warnings.warn('ex_shared_ip_group argument is deprecated.'
' Please use ex_shared_ip_group_id')
if 'ex_shared_ip_group_id' in kwargs:
shared_ip_group_id = kwargs['ex_shared_ip_group_id']
attributes['sharedIpGroupId'] = shared_ip_group_id
server_elm = ET.Element('server', attributes)
metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
if metadata_elm:
server_elm.append(metadata_elm)
files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
if files_elm:
server_elm.append(files_elm)
resp = self.connection.request("/servers",
method='POST',
data=ET.tostring(server_elm))
return self._to_node(resp.object)
def ex_set_password(self, node, password):
"""
Sets the Node's root password.
This will reboot the instance to complete the operation.
L{Node.extra['password']} will be set to the new value if the
operation was successful.
@param node: node to set password
@type node: L{Node}
@param password: new password.
@type password: C{str}
@rtype: C{bool}
"""
return self._change_password_or_name(node, password=password)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
This will reboot the instance to complete the operation.
@param node: node to set name
@type node: L{Node}
@param name: new name
@type name: C{str}
@rtype: C{bool}
"""
return self._change_password_or_name(node, name=name)
def ex_resize(self, node, size):
"""
Change an existing server flavor / scale the server up or down.
@param node: node to resize.
@type node: L{Node}
@param size: new size.
@type size: L{NodeSize}
@rtype: C{bool}
"""
elm = ET.Element(
'resize',
{'xmlns': self.XML_NAMESPACE,
'flavorId': str(size.id)}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_confirm_resize(self, node):
"""
Confirm a resize request which is currently in progress. If a resize
request is not explicitly confirmed or reverted it's automatically
confirmed after 24 hours.
For more info refer to the API documentation: http://goo.gl/zjFI1
@param node: node for which the resize request will be confirmed.
@type node: L{Node}
@rtype: C{bool}
"""
elm = ET.Element(
'confirmResize',
{'xmlns': self.XML_NAMESPACE},
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Revert a resize request which is currently in progress.
All resizes are automatically confirmed after 24 hours if they have
not already been confirmed explicitly or reverted.
For more info refer to the API documentation: http://goo.gl/AizBu
@param node: node for which the resize request will be reverted.
@type node: L{Node}
@rtype: C{bool}
"""
elm = ET.Element(
'revertResize',
{'xmlns': self.XML_NAMESPACE}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_rebuild(self, node_id, image_id):
"""
Rebuilds the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param image_id: ID of the image which should be used
@type image_id: C{str}
@rtype: C{bool}
"""
# @TODO: Remove those ifs in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if isinstance(image_id, NodeImage):
image_id = image_id.id
elm = ET.Element(
'rebuild',
{'xmlns': self.XML_NAMESPACE,
'imageId': image_id}
)
resp = self.connection.request("/servers/%s/action" % node_id,
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_create_ip_group(self, group_name, node_id=None):
"""
Creates a shared IP group.
@param group_name: group name which should be used
@type group_name: C{str}
@param node_id: ID of the node which should be used
@type node_id: C{str}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
group_elm = ET.Element(
'sharedIpGroup',
{'xmlns': self.XML_NAMESPACE,
'name': group_name}
)
if node_id:
ET.SubElement(
group_elm,
'server',
{'id': node_id}
)
resp = self.connection.request('/shared_ip_groups',
method='POST',
data=ET.tostring(group_elm))
return self._to_shared_ip_group(resp.object)
def ex_list_ip_groups(self, details=False):
"""
Lists IDs and names for shared IP groups.
If details lists all details for shared IP groups.
@param details: True if details is required
@type details: C{bool}
@rtype: C{list} of L{OpenStack_1_0_SharedIpGroup}
"""
uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
resp = self.connection.request(uri,
method='GET')
groups = findall(resp.object, 'sharedIpGroup',
self.XML_NAMESPACE)
return [self._to_shared_ip_group(el) for el in groups]
def ex_delete_ip_group(self, group_id):
"""
Deletes the specified shared IP group.
@param group_id: group id which should be used
@type group_id: C{str}
@rtype: C{bool}
"""
uri = '/shared_ip_groups/%s' % group_id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
"""
Shares an IP address to the specified server.
@param group_id: group id which should be used
@type group_id: C{str}
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param ip: ip which should be used
@type ip: C{str}
@param configure_node: configure node
@type configure_node: C{bool}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if configure_node:
str_configure = 'true'
else:
str_configure = 'false'
elm = ET.Element(
'shareIp',
{'xmlns': self.XML_NAMESPACE,
'sharedIpGroupId': group_id,
'configureServer': str_configure},
)
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='PUT',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_unshare_ip(self, node_id, ip):
"""
Removes a shared IP address from the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param ip: ip which should be used
@type ip: C{str}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_list_ip_addresses(self, node_id):
"""
List all server addresses.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips' % node_id
resp = self.connection.request(uri,
method='GET')
return self._to_ip_addresses(resp.object)
def _metadata_to_xml(self, metadata):
if len(metadata) == 0:
return None
metadata_elm = ET.Element('metadata')
for k, v in list(metadata.items()):
meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)})
meta_elm.text = str(v)
return metadata_elm
def _files_to_xml(self, files):
if len(files) == 0:
return None
personality_elm = ET.Element('personality')
for k, v in list(files.items()):
file_elm = ET.SubElement(personality_elm,
'file',
{'path': str(k)})
file_elm.text = base64.b64encode(b(v))
return personality_elm
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, ['reboot', ('type', reboot_type)])
return resp.status == httplib.ACCEPTED
def _node_action(self, node, body):
if isinstance(body, list):
attr = ' '.join(['%s="%s"' % (item[0], item[1])
for item in body[1:]])
body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr)
uri = '/servers/%s/action' % (node.id)
resp = self.connection.request(uri, method='POST', data=body)
return resp
def _to_nodes(self, object):
node_elements = findall(object, 'server', self.XML_NAMESPACE)
return [self._to_node(el) for el in node_elements]
def _to_node_from_obj(self, obj):
return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0])
def _to_node(self, el):
def get_ips(el):
return [ip.get('addr') for ip in el]
def get_meta_dict(el):
d = {}
for meta in el:
d[meta.get('key')] = meta.text
return d
public_ip = get_ips(findall(el, 'addresses/public/ip',
self.XML_NAMESPACE))
private_ip = get_ips(findall(el, 'addresses/private/ip',
self.XML_NAMESPACE))
metadata = get_meta_dict(findall(el, 'metadata/meta',
self.XML_NAMESPACE))
n = Node(id=el.get('id'),
name=el.get('name'),
state=self.NODE_STATE_MAP.get(
el.get('status'), NodeState.UNKNOWN),
public_ips=public_ip,
private_ips=private_ip,
driver=self.connection.driver,
extra={
'password': el.get('adminPass'),
'hostId': el.get('hostId'),
'imageId': el.get('imageId'),
'flavorId': el.get('flavorId'),
'uri': "https://%s%s/servers/%s" % (
self.connection.host,
self.connection.request_path, el.get('id')),
'metadata': metadata,
})
return n
def _to_sizes(self, object):
elements = findall(object, 'flavor', self.XML_NAMESPACE)
return [self._to_size(el) for el in elements]
def _to_size(self, el):
vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None
return OpenStackNodeSize(id=el.get('id'),
name=el.get('name'),
ram=int(el.get('ram')),
disk=int(el.get('disk')),
# XXX: needs hardcode
vcpus=vcpus,
bandwidth=None,
# Hardcoded
price=self._get_size_price(el.get('id')),
driver=self.connection.driver)
def ex_limits(self):
"""
Extra call to get account's limits, such as
rates (for example amount of POST requests per day)
and absolute limits like total amount of available
RAM to be used by servers.
@return: dict with keys 'rate' and 'absolute'
@rtype: C{dict}
"""
def _to_rate(el):
rate = {}
for item in list(el.items()):
rate[item[0]] = item[1]
return rate
def _to_absolute(el):
return {el.get('name'): el.get('value')}
limits = self.connection.request("/limits").object
rate = [_to_rate(el) for el in findall(limits, 'rate/limit',
self.XML_NAMESPACE)]
absolute = {}
for item in findall(limits, 'absolute/limit',
self.XML_NAMESPACE):
absolute.update(_to_absolute(item))
return {"rate": rate, "absolute": absolute}
def ex_save_image(self, node, name):
"""Create an image for node.
@param node: node to use as a base for image
@type node: L{Node}
@param name: name for new image
@type name: C{str}
@rtype: L{NodeImage}
"""
image_elm = ET.Element(
'image',
{'xmlns': self.XML_NAMESPACE,
'name': name,
'serverId': node.id}
)
return self._to_image(
self.connection.request("/images", method="POST",
data=ET.tostring(image_elm)).object)
def ex_delete_image(self, image):
"""Delete an image for node.
@param image: the image to be deleted
@type image: L{NodeImage}
@rtype: C{bool}
"""
uri = '/images/%s' % image.id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def _to_shared_ip_group(self, el):
servers_el = findall(el, 'servers', self.XML_NAMESPACE)
if servers_el:
servers = [s.get('id')
for s in findall(servers_el[0], 'server',
self.XML_NAMESPACE)]
else:
servers = None
return OpenStack_1_0_SharedIpGroup(id=el.get('id'),
name=el.get('name'),
servers=servers)
def _to_ip_addresses(self, el):
public_ips = [ip.get('addr') for ip in findall(
findall(el, 'public', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
private_ips = [ip.get('addr') for ip in findall(
findall(el, 'private', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips)
def _get_size_price(self, size_id):
try:
return get_size_price(driver_type='compute',
driver_name=self.api_name,
size_id=size_id)
except KeyError:
return 0.0
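# Minimal usage sketch for the driver above (assumption: a reachable
# identity endpoint; the credentials and URL below are placeholders):
#
#     driver = OpenStack_1_0_NodeDriver(
#         'username', 'api_key',
#         ex_force_auth_url='https://identity.example.com/v2.0',
#         ex_force_auth_version='2.0_password')
#     sizes = driver.list_sizes()
#     images = driver.list_images()
#     node = driver.create_node(name='demo', image=images[0], size=sizes[0])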
class OpenStack_1_0_SharedIpGroup(object):
"""
Shared IP group info.
"""
def __init__(self, id, name, servers=None):
self.id = str(id)
self.name = name
self.servers = servers
class OpenStack_1_0_NodeIpAddresses(object):
"""
List of public and private IP addresses of a Node.
"""
def __init__(self, public_addresses, private_addresses):
self.public_addresses = public_addresses
self.private_addresses = private_addresses
class OpenStack_1_1_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_1_NodeDriver
super(OpenStack_1_1_Response, self).__init__(*args, **kwargs)
class OpenStackNetwork(object):
"""
A Virtual Network.
"""
def __init__(self, id, name, cidr, driver, extra=None):
self.id = str(id)
self.name = name
self.cidr = cidr
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return '<OpenStackNetwork id="%s" name="%s" cidr="%s">' % (self.id,
self.name, self.cidr,)
class OpenStackSecurityGroup(object):
"""
A Security Group.
"""
def __init__(self, id, tenant_id, name, description, driver, rules=None,
extra=None):
"""
Constructor.
@keyword id: Group id.
@type id: C{str}
@keyword tenant_id: Owner of the security group.
@type tenant_id: C{str}
@keyword name: Human-readable name for the security group. Might
not be unique.
@type name: C{str}
@keyword description: Human-readable description of a security
group.
@type description: C{str}
@keyword rules: Rules associated with this group.
        @type       rules: C{list} of L{OpenStackSecurityGroupRule}
@keyword extra: Extra attributes associated with this group.
@type extra: C{dict}
"""
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.driver = driver
self.rules = rules or []
self.extra = extra or {}
def __repr__(self):
return ('<OpenStackSecurityGroup id=%s tenant_id=%s name=%s \
description=%s>' % (self.id, self.tenant_id, self.name,
self.description))
class OpenStackSecurityGroupRule(object):
"""
A Rule of a Security Group.
"""
def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port,
driver, ip_range=None, group=None, tenant_id=None,
extra=None):
"""
Constructor.
@keyword id: Rule id.
@type id: C{str}
@keyword parent_group_id: ID of the parent security group.
@type parent_group_id: C{str}
@keyword ip_protocol: IP Protocol (icmp, tcp, udp, etc).
@type ip_protocol: C{str}
@keyword from_port: Port at start of range.
@type from_port: C{int}
@keyword to_port: Port at end of range.
@type to_port: C{int}
@keyword ip_range: CIDR for address range.
@type ip_range: C{str}
@keyword group: Name of a source security group to apply to rule.
@type group: C{str}
@keyword tenant_id: Owner of the security group.
@type tenant_id: C{str}
@keyword extra: Extra attributes associated with this rule.
@type extra: C{dict}
"""
self.id = id
self.parent_group_id = parent_group_id
self.ip_protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.driver = driver
self.ip_range = ''
self.group = {}
if group is None:
self.ip_range = ip_range
else:
self.group = {'name': group, 'tenant_id': tenant_id}
self.tenant_id = tenant_id
self.extra = extra or {}
def __repr__(self):
return ('<OpenStackSecurityGroupRule id=%s parent_group_id=%s \
ip_protocol=%s from_port=%s to_port=%s>' % (self.id,
self.parent_group_id, self.ip_protocol, self.from_port,
self.to_port))
class OpenStack_1_1_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_1_1_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
"""
connectionCls = OpenStack_1_1_Connection
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)
def create_node(self, **kwargs):
"""Create a new node
@inherits: L{NodeDriver.create_node}
@keyword ex_metadata: Key/Value metadata to associate with a node
@type ex_metadata: C{dict}
@keyword ex_files: File Path => File contents to create on
                                the node
@type ex_files: C{dict}
@keyword ex_keyname: Name of existing public key to inject into
instance
@type ex_keyname: C{str}
@keyword ex_userdata: String containing user data
see
https://help.ubuntu.com/community/CloudInit
@type ex_userdata: C{str}
@keyword networks: The server is launched into a set of Networks.
@type networks: L{OpenStackNetwork}
@keyword ex_security_groups: List of security groups to assign to
the node
@type ex_security_groups: C{list} of L{OpenStackSecurityGroup}
"""
server_params = self._create_args_to_params(None, **kwargs)
resp = self.connection.request("/servers",
method='POST',
data={'server': server_params})
create_response = resp.object['server']
server_resp = self.connection.request(
'/servers/%s' % create_response['id'])
server_object = server_resp.object['server']
# adminPass is not always present
# http://docs.openstack.org/essex/openstack-compute/admin/
# content/configuring-compute-API.html#d6e1833
server_object['adminPass'] = create_response.get('adminPass', None)
return self._to_node(server_object)
def _to_images(self, obj, ex_only_active):
images = []
for image in obj['images']:
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, api_image):
server = api_image.get('server', {})
return NodeImage(
id=api_image['id'],
name=api_image['name'],
driver=self,
extra=dict(
updated=api_image['updated'],
created=api_image['created'],
status=api_image['status'],
progress=api_image.get('progress'),
metadata=api_image.get('metadata'),
serverId=server.get('id'),
minDisk=api_image.get('minDisk'),
minRam=api_image.get('minRam'),
)
)
def _to_nodes(self, obj):
servers = obj['servers']
return [self._to_node(server) for server in servers]
def _to_sizes(self, obj):
flavors = obj['flavors']
return [self._to_size(flavor) for flavor in flavors]
def _create_args_to_params(self, node, **kwargs):
server_params = {
'name': kwargs.get('name'),
'metadata': kwargs.get('ex_metadata', {}),
'personality': self._files_to_personality(kwargs.get("ex_files",
{}))
}
if 'ex_keyname' in kwargs:
server_params['key_name'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
server_params['user_data'] = base64.b64encode(
b(kwargs['ex_userdata'])).decode('ascii')
if 'networks' in kwargs:
networks = kwargs['networks']
networks = [{'uuid': network.id} for network in networks]
server_params['networks'] = networks
if 'ex_security_groups' in kwargs:
server_params['security_groups'] = []
for security_group in kwargs['ex_security_groups']:
name = security_group.name
server_params['security_groups'].append({'name': name})
if 'name' in kwargs:
server_params['name'] = kwargs.get('name')
else:
server_params['name'] = node.name
if 'image' in kwargs:
server_params['imageRef'] = kwargs.get('image').id
else:
server_params['imageRef'] = node.extra.get('imageId')
if 'size' in kwargs:
server_params['flavorRef'] = kwargs.get('size').id
else:
server_params['flavorRef'] = node.extra.get('flavorId')
return server_params
def _files_to_personality(self, files):
rv = []
for k, v in list(files.items()):
rv.append({'path': k, 'contents': base64.b64encode(b(v))})
return rv
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, 'reboot', type=reboot_type)
return resp.status == httplib.ACCEPTED
def ex_set_password(self, node, password):
"""
Changes the administrator password for a specified server.
@param node: Node to rebuild.
@type node: L{Node}
@param password: The administrator password.
@type password: C{str}
@rtype: C{bool}
"""
resp = self._node_action(node, 'changePassword', adminPass=password)
node.extra['password'] = password
return resp.status == httplib.ACCEPTED
def ex_rebuild(self, node, image):
"""
Rebuild a Node.
@param node: Node to rebuild.
@type node: L{Node}
@param image: New image to use.
@type image: L{NodeImage}
@rtype: C{bool}
"""
server_params = self._create_args_to_params(node, image=image)
resp = self._node_action(node, 'rebuild', **server_params)
return resp.status == httplib.ACCEPTED
def ex_resize(self, node, size):
"""
Change a node size.
@param node: Node to resize.
@type node: L{Node}
@type size: L{NodeSize}
@param size: New size to use.
@rtype: C{bool}
"""
server_params = self._create_args_to_params(node, size=size)
resp = self._node_action(node, 'resize', **server_params)
return resp.status == httplib.ACCEPTED
def ex_confirm_resize(self, node):
"""
Confirms a pending resize action.
@param node: Node to resize.
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'confirmResize')
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Cancels and reverts a pending resize action.
@param node: Node to resize.
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'revertResize')
return resp.status == httplib.ACCEPTED
def ex_save_image(self, node, name, metadata=None):
"""
Creates a new image.
@param node: Node
@type node: L{Node}
@param name: The name for the new image.
@type name: C{str}
@param metadata: Key and value pairs for metadata.
@type metadata: C{dict}
@rtype: L{NodeImage}
"""
optional_params = {}
if metadata:
optional_params['metadata'] = metadata
resp = self._node_action(node, 'createImage', name=name,
**optional_params)
image_id = self._extract_image_id_from_url(resp.headers['location'])
return self.ex_get_image(image_id=image_id)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
@param node: Node
@type node: L{Node}
@param name: The name of the server.
@type name: C{str}
@rtype: L{Node}
"""
return self._update_node(node, name=name)
def ex_get_metadata(self, node):
"""
Get a Node's metadata.
@param node: Node
@type node: L{Node}
@return: Key/Value metadata associated with node.
@rtype: C{dict}
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,),
method='GET',).object['metadata']
def ex_set_metadata(self, node, metadata):
"""
Sets the Node's metadata.
@param node: Node
@type node: L{Node}
@param metadata: Key/Value metadata to associate with a node
@type metadata: C{dict}
@rtype: C{dict}
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,), method='PUT',
data={'metadata': metadata}
).object['metadata']
def ex_update_node(self, node, **node_updates):
"""
Update the Node's editable attributes. The OpenStack API currently
supports editing name and IPv4/IPv6 access addresses.
The driver currently only supports updating the node name.
@param node: Node
@type node: L{Node}
@keyword name: New name for the server
@type name: C{str}
@rtype: L{Node}
"""
potential_data = self._create_args_to_params(node, **node_updates)
updates = {'name': potential_data['name']}
return self._update_node(node, **updates)
def _to_networks(self, obj):
networks = obj['networks']
return [self._to_network(network) for network in networks]
def _to_network(self, obj):
return OpenStackNetwork(id=obj['id'],
name=obj['label'],
cidr=obj.get('cidr', None),
driver=self)
def ex_list_networks(self):
"""
Get a list of Networks that are available.
@rtype: C{list} of L{OpenStackNetwork}
"""
return self._to_networks(
self.connection.request('/os-networksv2').object)
def ex_create_network(self, name, cidr):
"""
Create a new Network
@param name: Name of network which should be used
@type name: C{str}
@param cidr: cidr of network which should be used
@type cidr: C{str}
@rtype: L{OpenStackNetwork}
"""
return self._to_network(self.connection.request(
'/os-networksv2', method='POST',
data={'network': {'cidr': cidr, 'label': name}}
).object['network'])
def ex_delete_network(self, network):
"""
        Delete a Network.
@param network: Network which should be used
@type network: L{OpenStackNetwork}
@rtype: C{bool}
"""
resp = self.connection.request('/os-networksv2/%s' % (network.id),
method='DELETE')
return resp.status == httplib.ACCEPTED
def _to_security_group_rules(self, obj):
return [self._to_security_group_rule(security_group_rule) for
security_group_rule in obj]
def _to_security_group_rule(self, obj):
ip_range = group = tenant_id = None
if obj['group'] == {}:
ip_range = obj['ip_range'].get('cidr', None)
else:
group = obj['group'].get('name', None)
tenant_id = obj['group'].get('tenant_id', None)
return OpenStackSecurityGroupRule(id=obj['id'],
                                          parent_group_id=obj['parent_group_id'],
ip_protocol=obj['ip_protocol'],
from_port=obj['from_port'],
to_port=obj['to_port'],
driver=self,
ip_range=ip_range,
group=group,
tenant_id=tenant_id)
def _to_security_groups(self, obj):
security_groups = obj['security_groups']
return [self._to_security_group(security_group) for security_group in
security_groups]
def _to_security_group(self, obj):
return OpenStackSecurityGroup(id=obj['id'],
tenant_id=obj['tenant_id'],
name=obj['name'],
description=obj.get('description', ''),
rules=self._to_security_group_rules(
obj.get('rules', [])),
driver=self)
def ex_list_security_groups(self):
"""
Get a list of Security Groups that are available.
@rtype: C{list} of L{OpenStackSecurityGroup}
"""
return self._to_security_groups(
self.connection.request('/os-security-groups').object)
def ex_get_node_security_groups(self, node):
"""
Get Security Groups of the specified server.
@rtype: C{list} of L{OpenStackSecurityGroup}
"""
return self._to_security_groups(
self.connection.request('/servers/%s/os-security-groups' %
(node.id)).object)
def ex_create_security_group(self, name, description):
"""
Create a new Security Group
@param name: Name of the new Security Group
@type name: C{str}
@param description: Description of the new Security Group
@type description: C{str}
@rtype: L{OpenStackSecurityGroup}
"""
return self._to_security_group(self.connection.request(
'/os-security-groups', method='POST',
data={'security_group': {'name': name, 'description': description}}
).object['security_group'])
def ex_delete_security_group(self, security_group):
"""
Delete a Security Group.
        @param security_group: Security Group to be deleted
@type security_group: L{OpenStackSecurityGroup}
@rtype: C{bool}
"""
resp = self.connection.request('/os-security-groups/%s' %
(security_group.id),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_create_security_group_rule(self, security_group, ip_protocol,
from_port, to_port, cidr=None,
source_security_group=None):
"""
Create a new Rule in a Security Group
@param security_group: Security Group in which to add the rule
@type security_group: L{OpenStackSecurityGroup}
@param ip_protocol: Protocol to which this rule applies
Examples: tcp, udp, ...
@type ip_protocol: C{str}
@param from_port: First port of the port range
@type from_port: C{int}
@param to_port: Last port of the port range
@type to_port: C{int}
@param cidr: CIDR notation of the source IP range for this rule
@type cidr: C{str}
@param source_security_group: Existing Security Group to use as the
source (instead of CIDR)
        @type source_security_group: L{OpenStackSecurityGroup}
@rtype: L{OpenStackSecurityGroupRule}
"""
source_security_group_id = None
if type(source_security_group) == OpenStackSecurityGroup:
source_security_group_id = source_security_group.id
return self._to_security_group_rule(self.connection.request(
'/os-security-group-rules', method='POST',
data={'security_group_rule': {
'ip_protocol': ip_protocol,
'from_port': from_port,
'to_port': to_port,
'cidr': cidr,
'group_id': source_security_group_id,
'parent_group_id': security_group.id}}
).object['security_group_rule'])
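    # Usage sketch (group and CIDR hypothetical): allow inbound SSH from a
    # single subnet into an existing security group.
    #   driver.ex_create_security_group_rule(group, 'tcp', 22, 22,
    #                                        cidr='10.0.0.0/24')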
def ex_delete_security_group_rule(self, rule):
"""
Delete a Rule from a Security Group.
        @param rule: Rule to be deleted
@type rule: L{OpenStackSecurityGroupRule}
@rtype: C{bool}
"""
resp = self.connection.request('/os-security-group-rules/%s' %
(rule.id), method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_get_size(self, size_id):
"""
Get a NodeSize
@param size_id: ID of the size which should be used
@type size_id: C{str}
@rtype: L{NodeSize}
"""
return self._to_size(self.connection.request(
            '/flavors/%s' % (size_id,)).object['flavor'])
def ex_get_image(self, image_id):
"""
Get a NodeImage
@param image_id: ID of the image which should be used
@type image_id: C{str}
@rtype: L{NodeImage}
"""
return self._to_image(self.connection.request(
'/images/%s' % (image_id,)).object['image'])
def ex_delete_image(self, image):
"""
Delete a NodeImage
        @param image: image which should be deleted
@type image: L{NodeImage}
@rtype: C{bool}
"""
resp = self.connection.request('/images/%s' % (image.id,),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def _node_action(self, node, action, **params):
params = params or None
return self.connection.request('/servers/%s/action' % (node.id,),
method='POST', data={action: params})
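    # For example (a sketch, not part of the original driver),
    # self._node_action(node, 'reboot', type='SOFT') issues
    # POST /servers/<id>/action with the JSON body {"reboot": {"type": "SOFT"}}.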
def _update_node(self, node, **node_updates):
"""
Updates the editable attributes of a server, which currently include
its name and IPv4/IPv6 access addresses.
"""
return self._to_node(
self.connection.request(
'/servers/%s' % (node.id,), method='PUT',
data={'server': node_updates}
).object['server']
)
def _to_node_from_obj(self, obj):
return self._to_node(obj['server'])
def _to_node(self, api_node):
public_networks_labels = ['public', 'internet']
public_ips, private_ips = [], []
for label, values in api_node['addresses'].items():
ips = [v['addr'] for v in values]
if label in public_networks_labels:
public_ips.extend(ips)
else:
private_ips.extend(ips)
return Node(
id=api_node['id'],
name=api_node['name'],
state=self.NODE_STATE_MAP.get(api_node['status'],
NodeState.UNKNOWN),
public_ips=public_ips,
private_ips=private_ips,
driver=self,
extra=dict(
hostId=api_node['hostId'],
# Docs says "tenantId", but actual is "tenant_id". *sigh*
# Best handle both.
tenantId=api_node.get('tenant_id') or api_node['tenantId'],
imageId=api_node['image']['id'],
flavorId=api_node['flavor']['id'],
uri=next(link['href'] for link in api_node['links'] if
link['rel'] == 'self'),
metadata=api_node['metadata'],
password=api_node.get('adminPass', None),
created=api_node['created'],
updated=api_node['updated'],
key_name=api_node.get('key_name', None),
),
)
def _to_size(self, api_flavor, price=None, bandwidth=None):
# if provider-specific subclasses can get better values for
# price/bandwidth, then can pass them in when they super().
if not price:
price = self._get_size_price(str(api_flavor['id']))
return OpenStackNodeSize(
id=api_flavor['id'],
name=api_flavor['name'],
ram=api_flavor['ram'],
disk=api_flavor['disk'],
vcpus=api_flavor['vcpus'],
bandwidth=bandwidth,
price=price,
driver=self,
)
def _get_size_price(self, size_id):
try:
return get_size_price(
driver_type='compute',
driver_name=self.api_name,
size_id=size_id,
)
except KeyError:
            return 0.0
def _extract_image_id_from_url(self, location_header):
path = urlparse.urlparse(location_header).path
image_id = path.split('/')[-1]
return image_id
def ex_rescue(self, node, password=None):
# Requires Rescue Mode extension
"""
Rescue a node
@param node: node
@type node: L{Node}
@param password: password
@type password: C{str}
@rtype: L{Node}
"""
if password:
resp = self._node_action(node, 'rescue', adminPass=password)
else:
resp = self._node_action(node, 'rescue')
password = json.loads(resp.body)['adminPass']
node.extra['password'] = password
return node
def ex_unrescue(self, node):
"""
Unrescue a node
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'unrescue')
return resp.status == httplib.ACCEPTED
| apache-2.0 | -4,493,782,231,637,883,000 | 32.37645 | 79 | 0.53063 | false | 4.163302 | false | false | false |
Cinntax/home-assistant | homeassistant/components/zha/core/channels/security.py | 1 | 6806 | """
Security channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.security as security
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ZigbeeChannel
from .. import registries
from ..const import (
CLUSTER_COMMAND_SERVER,
SIGNAL_ATTR_UPDATED,
WARNING_DEVICE_MODE_EMERGENCY,
WARNING_DEVICE_SOUND_HIGH,
WARNING_DEVICE_SQUAWK_MODE_ARMED,
WARNING_DEVICE_STROBE_HIGH,
WARNING_DEVICE_STROBE_YES,
)
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasAce.cluster_id)
class IasAce(ZigbeeChannel):
"""IAS Ancillary Control Equipment channel."""
pass
@registries.CHANNEL_ONLY_CLUSTERS.register(security.IasWd.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasWd.cluster_id)
class IasWd(ZigbeeChannel):
"""IAS Warning Device channel."""
@staticmethod
def set_bit(destination_value, destination_bit, source_value, source_bit):
"""Set the specified bit in the value."""
if IasWd.get_bit(source_value, source_bit):
return destination_value | (1 << destination_bit)
return destination_value
@staticmethod
def get_bit(value, bit):
"""Get the specified bit from the value."""
return (value & (1 << bit)) != 0
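    # Minimal sketch of the bit packing these helpers implement (values
    # hypothetical): copying bit 0 of source value 0b1 into bit 3 of a
    # zeroed word yields 8, and reading it back confirms the bit is set.
    #   IasWd.set_bit(0, 3, 0b1, 0)  # -> 8
    #   IasWd.get_bit(8, 3)          # -> True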
async def squawk(
self,
mode=WARNING_DEVICE_SQUAWK_MODE_ARMED,
strobe=WARNING_DEVICE_STROBE_YES,
squawk_level=WARNING_DEVICE_SOUND_HIGH,
):
"""Issue a squawk command.
This command uses the WD capabilities to emit a quick audible/visible pulse called a
"squawk". The squawk command has no effect if the WD is currently active
(warning in progress).
"""
value = 0
value = IasWd.set_bit(value, 0, squawk_level, 0)
value = IasWd.set_bit(value, 1, squawk_level, 1)
value = IasWd.set_bit(value, 3, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0001,
CLUSTER_COMMAND_SERVER,
[value],
)
async def start_warning(
self,
mode=WARNING_DEVICE_MODE_EMERGENCY,
strobe=WARNING_DEVICE_STROBE_YES,
siren_level=WARNING_DEVICE_SOUND_HIGH,
warning_duration=5, # seconds
strobe_duty_cycle=0x00,
strobe_intensity=WARNING_DEVICE_STROBE_HIGH,
):
"""Issue a start warning command.
This command starts the WD operation. The WD alerts the surrounding area by audible
(siren) and visual (strobe) signals.
strobe_duty_cycle indicates the length of the flash cycle. This provides a means
of varying the flash duration for different alarm types (e.g., fire, police, burglar).
Valid range is 0-100 in increments of 10. All other values SHALL be rounded to the
nearest valid value. Strobe SHALL calculate duty cycle over a duration of one second.
The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle Field specifies
“40,” then the strobe SHALL flash ON for 4/10ths of a second and then turn OFF for
6/10ths of a second.
"""
value = 0
value = IasWd.set_bit(value, 0, siren_level, 0)
value = IasWd.set_bit(value, 1, siren_level, 1)
value = IasWd.set_bit(value, 2, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0000,
CLUSTER_COMMAND_SERVER,
[value, warning_duration, strobe_duty_cycle, strobe_intensity],
)
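    # Usage sketch (channel and parameters hypothetical): sound the siren
    # for 30 s with a 50% strobe duty cycle, i.e. flash ON for 0.5 s then
    # OFF for 0.5 s in each one-second cycle.
    #   await channel.start_warning(warning_duration=30, strobe_duty_cycle=50)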
@registries.BINARY_SENSOR_CLUSTERS.register(security.IasZone.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasZone.cluster_id)
class IASZoneChannel(ZigbeeChannel):
"""Channel for the IASZone Zigbee cluster."""
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
if command_id == 0:
state = args[0] & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", state
)
self.debug("Updated alarm state: %s", state)
elif command_id == 1:
self.debug("Enroll requested")
res = self._cluster.enroll_response(0, 0)
self._zha_device.hass.async_create_task(res)
async def async_configure(self):
"""Configure IAS device."""
# Xiaomi devices don't need this and it disrupts pairing
if self._zha_device.manufacturer == "LUMI":
self.debug("finished IASZoneChannel configuration")
return
from zigpy.exceptions import DeliveryError
self.debug("started IASZoneChannel configuration")
await self.bind()
ieee = self.cluster.endpoint.device.application.ieee
try:
res = await self._cluster.write_attributes({"cie_addr": ieee})
self.debug(
"wrote cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
res[0],
)
except DeliveryError as ex:
self.debug(
"Failed to write cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
str(ex),
)
self.debug("finished IASZoneChannel configuration")
await self.get_attribute_value("zone_type", from_cache=False)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == 2:
value = value & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", value
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.get_attribute_value("zone_status", from_cache=from_cache)
await self.get_attribute_value("zone_state", from_cache=from_cache)
await super().async_initialize(from_cache)
| apache-2.0 | 7,192,171,406,314,994,000 | 34.612565 | 99 | 0.626286 | false | 3.56499 | false | false | false |
SRabbelier/Melange | scripts/gci_statistic_seeder.py | 1 | 7157 | #!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts an interactive shell which allows you to create statistic entities.
Usage is simple:
In order to seed all available statistics, just type:
>>> seed_all()
In order to seed one statistic:
>>> seed_one(link_id)
where link_id identifies the desired statistic
In order to change program in scope:
>>> set_program(key_name)
where key_name represents a new program
In order to terminate the script:
>>> exit()
"""
__authors__ = [
'"Daniel Hans" <[email protected]>',
]
import sys
import interactive
interactive.setup()
from django.utils import simplejson
from soc.logic import dicts
from soc.modules.gci.logic.models.program import logic as program_logic
from soc.modules.statistic.logic.models.statistic import logic as \
statistic_logic
from soc.modules.statistic.models.statistic import Statistic
SUCCESS_MSG_FMT = 'Statistic %s has been successfully added.'
FAILURE_MSG_FMT = 'An error occurred while adding %s statistic.'
DOES_NOT_EXISTS_MSG_FMT = 'Statistic %s does not exist.'
VISUALIZATION_SETS = {
"cumulative_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
],
"cumulative_countries": [
"Table"
],
"single_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
"ImageChartP",
"ImageChartP3",
"PieChart",
"ScatterChart"
],
"single_countries": [
"Table",
"GeoMap"
]
}
STATISTIC_PROPERTIES = {
"mentors_per_continent": (
"Mentors Per Continent",
{
"type": "per_field",
"field": "continent",
"model": "gci_mentor",
"subsets": [("all", {}), ("referenced", {}), ("no-referenced", {})],
"filter": "property_filter",
"params": {
"ref_logic": "gci_task",
"ref_field": "mentors",
"program_field": "program",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("continent", "string", "Continent"),
("all_mentors", "number", "Mentors"),
("pro_mentors", "number", "Mentors with tasks"),
("nop_mentors", "number", "Mentors without tasks")],
"options": {
'Mentors Per Continent (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Mentors Per Continent (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Mentors Per Continent (with tasks)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Mentors Per Continent (without tasks)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"org_admin"),
"students_per_age": (
"Students Per Age",
{
"type": "per_field",
"field": "age",
"model": "gci_student",
"transformer": "remove-out-of-range",
"filter": "property_filter",
"params": {
"program_field": "scope",
"property_conditions": {
"status": ['active', 'inactive']
},
}
},
{
"description": [("age", "number", "Age"),
("number", "number", "Number")],
"options": {
'Organization Admins Per Age': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
}
STATISTICS_LIST = [k for k in STATISTIC_PROPERTIES]
NAMES_DICT = dict((k, v) for k, (v, _, _, _)
in STATISTIC_PROPERTIES.iteritems())
INSTRUCTIONS_DICT = dict((k, v) for k, (_, v, _, _)
in STATISTIC_PROPERTIES.iteritems())
CHARTS_DICT = dict((k, v) for k, (_, _, v, _)
in STATISTIC_PROPERTIES.iteritems())
ACCESS_DICT = dict((k, v) for k, (_, _, _, v)
in STATISTIC_PROPERTIES.iteritems())
def _getCommonProperties():
"""Returns properties that are common for all statistic entities.
"""
program = program_logic.getFromKeyName(program_keyname)
properties = {
'access_for_other_programs': 'invisible',
'scope': program,
'scope_path': program_keyname,
}
return properties
def _getSpecificProperties(link_id):
"""Returns properties that are specific to a particular statistic.
"""
properties = {
'link_id': link_id,
'name': NAMES_DICT[link_id],
'chart_json': simplejson.dumps(CHARTS_DICT[link_id]),
'instructions_json': simplejson.dumps(INSTRUCTIONS_DICT[link_id]),
'read_access': ACCESS_DICT[link_id]
}
return properties
def _seedStatistic(properties):
"""Saves a new statistic entity, described by properties, in data store.
"""
entity = statistic_logic.updateOrCreateFromFields(properties, silent=True)
if entity:
print SUCCESS_MSG_FMT % properties['link_id']
else:
    print FAILURE_MSG_FMT % properties['link_id']
def exit():
"""Terminates the script.
"""
sys.exit(0)
def seedOne(link_id):
"""Seeds a single statistic to the data store.
Args:
link_id: link_id of the statistic that should be added.
"""
if link_id not in STATISTICS_LIST:
print DOES_NOT_EXISTS_MSG_FMT % link_id
else:
properties = _getCommonProperties()
new_properties = _getSpecificProperties(link_id)
properties.update(new_properties)
_seedStatistic(properties)
def seedAll():
"""Seeds all available statistics to the data store.
"""
properties = _getCommonProperties()
for statistic in STATISTICS_LIST:
new_properties = _getSpecificProperties(statistic)
properties.update(new_properties)
_seedStatistic(properties)
def setProgram(keyname):
"""Sets program key name.
"""
  global program_keyname
  program_keyname = keyname
def main(args):
context = {
'exit': exit,
'seed_all': seedAll,
'seed_one': seedOne,
'statistics_list': STATISTICS_LIST,
'set_program': setProgram,
}
interactive.remote(args, context)
program_keyname = 'melange/gcirunthrough'
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: %s app_id [host]" % (sys.argv[0],)
sys.exit(1)
main(sys.argv[1:])
| apache-2.0 | -4,528,827,782,894,037,000 | 25.3125 | 76 | 0.586139 | false | 3.723725 | false | false | false |
shuque/pydig | pydiglib/rr_svcb.py | 1 | 3490 | """
SVCB and HTTPS RR Types class.
"""
import socket
import struct
from .name import name_from_wire_message
# SVCB (Service Binding RR) Parameter Types
SVCB_PARAM = {
0: "mandatory",
1: "alpn",
2: "no-default-alpn",
3: "port",
4: "ipv4hint",
5: "echconfig",
6: "ipv6hint",
}
class RdataSVCB:
"""SVCB RR RDATA Class"""
def __init__(self, pkt, offset, rdlen):
self.pkt = pkt
self.offset = offset
self.end_rdata = offset + rdlen
self.rdata = pkt[offset:self.end_rdata]
self.priority = None
self.targetname = None
self.params = [] # list(key=value strings)
self.decode()
def decode(self):
self.priority, = struct.unpack("!H", self.rdata[:2])
d, self.offset = name_from_wire_message(self.pkt, self.offset+2)
self.targetname = d.text()
self.decode_params(self.pkt[self.offset:self.end_rdata])
def decode_params(self, params_wire):
lastkey = None
while params_wire:
pkey, plen = struct.unpack('!HH', params_wire[:4])
pdata = params_wire[4:4+plen]
pdata_text = None
if lastkey is not None:
if not pkey > lastkey:
print("ERROR: HTTPS RR keys are not in ascending order")
else:
lastkey = pkey
if pkey in SVCB_PARAM:
pkey_text = SVCB_PARAM[pkey]
else:
pkey_text = "key{:d}".format(pkey)
if pkey == 0: ## mandatory
keylist = []
while pdata:
                    key, = struct.unpack("!H", pdata[:2])
keylist.append(str(key))
pdata = pdata[2:]
pdata_text = ','.join(keylist)
elif pkey == 1: ## alpn
alpn_list = []
while pdata:
alpn_len = pdata[0]
alpn = pdata[1:1+alpn_len].decode()
alpn_list.append(alpn)
pdata = pdata[1+alpn_len:]
pdata_text = ','.join(alpn_list)
elif pkey == 3: ## port
                port, = struct.unpack("!H", pdata[:2])
pdata_text = str(port)
elif pkey == 4: ## ipv4hint
ip4list = []
while pdata:
ip4 = socket.inet_ntop(socket.AF_INET, pdata[:4])
ip4list.append(ip4)
pdata = pdata[4:]
pdata_text = ','.join(ip4list)
elif pkey == 6: ## ipv6hint
ip6list = []
while pdata:
ip6 = socket.inet_ntop(socket.AF_INET6, pdata[:16])
ip6list.append(ip6)
pdata = pdata[16:]
pdata_text = ','.join(ip6list)
else:
pdata_text = pdata.hex()
if not pdata_text:
self.params.append(pkey_text)
else:
self.params.append(("{}={}".format(pkey_text, pdata_text)))
params_wire = params_wire[4+plen:]
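    # Hedged decoding sketch (wire bytes hypothetical, not from the
    # original module): the SvcParam alpn (key=1) carrying the single
    # ALPN id "h2" arrives as b'\x00\x01\x00\x03\x02h2' (key, length,
    # then a length-prefixed id), which decode_params() renders as the
    # string "alpn=h2".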
def __str__(self):
return "%s %s %s" % (self.priority,
self.targetname,
" ".join(self.params))
| gpl-2.0 | -2,864,752,358,938,569,000 | 32.883495 | 77 | 0.439542 | false | 3.961407 | false | false | false |
hbs/python-oauth2 | oauth2/__init__.py | 1 | 24945 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
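# Minimal usage sketch (key/secret values hypothetical): a registered
# consumer plugs straight into the Client class defined below to make
# signed requests.
#
#   consumer = Consumer(key='my-app-key', secret='my-app-secret')
#   client = Client(consumer)
#   resp, content = client.request('https://api.example.com/resource')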
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
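# Usage sketch (values hypothetical): tokens round-trip through their
# urlencoded string form, which is convenient for session storage.
#
#   token = Token('request-key', 'request-secret')
#   restored = Token.from_string(token.to_string())
#   assert restored.key == 'request-key'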
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
self.method = method
self.url = url
if parameters is not None:
self.update(parameters)
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(self, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
query = parse_qs(base_url.query)
for k, v in self.items():
query.setdefault(k, []).append(v)
url = (base_url.scheme, base_url.netloc, base_url.path, base_url.params,
urllib.urlencode(query, True), base_url.fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if hasattr(value, '__iter__'):
items.extend((key, item) for item in value)
else:
items.append((key, value))
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
items.extend(self._split_url_string(query).items())
encoded_str = urllib.urlencode(sorted(items))
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
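# Usage sketch (endpoint hypothetical): build and sign a request by hand,
# then serialize it for a GET call; the Client class below automates
# these steps.
#
#   req = Request.from_consumer_and_token(consumer, token, http_method='GET',
#                                         http_url='https://api.example.com/r')
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#   signed_url = req.to_url()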
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout,
proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
is_multipart = method == 'POST' and headers.get('Content-Type',
DEFAULT_CONTENT_TYPE) != DEFAULT_CONTENT_TYPE
if body and method == "POST" and not is_multipart:
parameters = dict(parse_qsl(body))
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters)
req.sign_request(self.method, self.consumer, self.token)
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_CONTENT_TYPE)
if is_multipart:
headers.update(req.to_header())
else:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header())
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
version = self._get_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, request):
"""Verify the correct version request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.sign(request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
# HMAC object.
try:
from hashlib import sha1 as sha
except ImportError:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
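# Shape of the signature base string produced above (values hypothetical),
# per OAuth Core 1.0 section 9.1.3:
#
#   GET&https%3A%2F%2Fapi.example.com%2Fr&oauth_consumer_key%3Dkey%26...
#
# i.e. METHOD & percent-encoded URL & percent-encoded sorted parameters,
# HMAC-SHA1 signed with the key "consumer_secret&token_secret".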
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
| mit | -6,773,699,701,043,492,000 | 32.938776 | 134 | 0.616837 | false | 4.37862 | false | false | false |
GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/rules/races/race011_troll.py | 1 | 1219 | from toee import *
import race_defs
###################################################
def GetCategory():
return "Core 3.5 Ed Classes"
print "Registering race: Troll"
raceEnum = race_troll
raceSpec = race_defs.RaceSpec()
raceSpec.modifier_name = "Troll" # Python modifier to be applied
raceSpec.flags = 2
raceSpec.hit_dice = dice_new("6d8")
raceSpec.level_modifier = 5 # basic level modifier
raceSpec.stat_modifiers = [12, 4, 12, -4, -2, -4] # str, dex, con, int, wis, cha
raceSpec.natural_armor = 5
raceSpec.proto_id = 13016
raceSpec.help_topic = "TAG_TROLL"
raceSpec.height_male = [100, 120]
raceSpec.height_female = [100, 120]
raceSpec.weight_male = [870, 1210]
raceSpec.weight_female = [800, 1200]
raceSpec.feats = [feat_simple_weapon_proficiency, feat_martial_weapon_proficiency_all]
raceSpec.material_offset = 0 # offset into rules/material_ext.mes file
###################################################
def RegisterRace():
raceSpec.register(raceEnum)
def GetFavoredClass(obj = OBJ_HANDLE_NULL):
return stat_level_fighter
def GetLevelModifier(obj = OBJ_HANDLE_NULL):
return 5 | mit | -6,833,448,202,129,764,000 | 32.888889 | 96 | 0.604594 | false | 3.207895 | false | false | false |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/util/modules.py | 1 | 1377 | #!/usr/bin/env python
"""Compiled modules may be out of date or missing"""
import os, sys
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Peter Maxwell"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Peter Maxwell"
__email__ = "[email protected]"
__status__ = "Production"
class ExpectedImportError(ImportError):
pass
def fail(msg):
print >>sys.stderr, msg
raise ExpectedImportError
def importVersionedModule(name, globals, min_version, alt_desc):
if os.environ.has_key('COGENT_PURE_PYTHON'):
fail('Not using compiled module "%s". Will use %s.' %
(name, alt_desc))
try:
m = __import__(name, globals)
except ImportError:
fail('Compiled module "%s" not found. Will use %s.' %
(name, alt_desc))
version = getattr(m, 'version_info', (0, 0))
desc = '.'.join(str(n) for n in version)
min_desc = '.'.join(str(n) for n in min_version)
max_desc = str(min_version[0])+'.x'
if version < min_version:
fail('Compiled module "%s" is too old as %s < %s. '
'Will use %s.' % (name, desc, min_desc, alt_desc))
if version[0] > min_version[0]:
fail('Compiled module "%s" is too new as %s > %s. '
'Will use %s.' % (name, desc, max_desc, alt_desc))
return m
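# Usage sketch (module name and versions hypothetical): require a compiled
# extension at major version 2, falling back to pure Python otherwise.
#
#   _fast = importVersionedModule('_pairwise', globals(), (2, 0),
#                                 'the slower pure-Python implementation')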
| mit | 7,593,111,555,751,964,000 | 32.585366 | 66 | 0.580973 | false | 3.232394 | false | false | false |