repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Venturi/oldcms | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 1004 | 9544 | # The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect() and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
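# Editor's sketch (not part of the vendored file): how the pieces above interact
# when a total timeout caps the per-phase timeouts. Values are illustrative.
#
#     t = Timeout(connect=2.0, read=7.0, total=5.0)
#     t.start_connect()        # clock starts just before the socket connect()
#     # ... connect finishes after roughly 1 second ...
#     t.connect_timeout        # min(2.0, 5.0) -> 2.0
#     t.read_timeout           # max(0, min(5.0 - elapsed, 7.0)) -> roughly 4.0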
| apache-2.0 |
rajmarndi/python-beaver | docs/conf.py | 1 | 8186 | # -*- coding: utf-8 -*-
#
# beaver documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 21 11:21:22 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'beaver'
copyright = u'2013, Jose Diaz-Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '36.1.0'
# The full version, including alpha/beta/rc tags.
release = '36.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'beaverdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'beaver.tex', u'beaver Documentation',
u'Jose Diaz-Gonzalez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'beaver', u'beaver Documentation',
[u'Jose Diaz-Gonzalez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'beaver', u'beaver Documentation',
u'Jose Diaz-Gonzalez', 'beaver', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
phaller0513/aima-python | envgui.py | 22 | 11683 | # ______________________________________________________________________________
# GUI - Graphical User Interface for Environments
# If you do not have Tkinter installed, either get a new installation of Python
# (Tkinter is standard in all new releases), or delete the rest of this file
# and muddle through without a GUI.
#
# Excerpted from:
# http://web.media.mit.edu/~havasi/MAS.S60/code/lemmatizer_learning/aima/agents.py
# Catherine Havasi
# 2012-04-10
#
# Revised:
# William H. Hooper
# 2016-07-13
# Python 2 -> 3
import tkinter as tk # part of the standard library (Linux distros may need the python3-tk package)
from tkinter import ttk
from PIL import ImageTk, Image # pip install pillow
import os
class EnvGUI(tk.Tk, object):
def __init__(self, env, title='AIMA GUI', cellsize=200, n=10):
# Initialize window
super(EnvGUI, self).__init__()
self.title(title)
# Create components
w = env.width
h = env.height
self.canvas = EnvCanvas(self, env, cellsize, w, h)
toolbar = EnvToolbar(self, env, self.canvas)
for w in [self.canvas, toolbar]:
w.pack(side="bottom", fill="x", padx="3", pady="3")
def getCanvas(self):
return self.canvas
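# Editor's sketch of typical wiring (the environment and thing classes are
# hypothetical; they come from the accompanying agents module, not this file):
#
#     env = SomeXYEnvironment(width=10, height=10)
#     gui = EnvGUI(env, title='Vacuum World', cellsize=50)
#     gui.getCanvas().mapImageNames({Dirt: 'images/dirt.png'})
#     gui.mainloop()           # Tk event loop; the toolbar drives env.step()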
class EnvToolbar(tk.Frame, object):
def __init__(self, parent, env, canvas):
super(EnvToolbar, self).__init__(parent, relief='raised', bd=2)
# Initialize instance variables
self.env = env
self.canvas = canvas
self.running = False
self.speed = 1.0
# Create buttons and other controls
for txt, cmd in [#('Step >', self.env.step),
('Step >', self.step),
('Run >>', self.run),
('Stop [ ]', self.stop),
('List things', self.list_things),
('List agents', self.list_agents)]:
ttk.Button(self, text=txt, command=cmd).pack(side='left')
tk.Label(self, text='Speed').pack(side='left')
scale = tk.Scale(self, orient='h',
from_=(1.0), to=100.0, resolution=1.0,
command=self.set_speed)
scale.set(self.speed)
scale.pack(side='left')
def step(self):
self.env.step()
self.canvas.update()
def run(self):
print('run')
self.running = True
self.background_run()
def stop(self):
print('stop')
self.running = False
def background_run(self):
if self.running:
self.step()
# ms = int(1000 * max(float(self.speed), 0.5))
# ms = max(int(1000 * float(self.delay)), 1)
delay_sec = 10.0 / max(self.speed, 1.0) # avoid division by zero
ms = int(1000.0 * delay_sec) # seconds to milliseconds
self.after(ms, self.background_run)
def list_things(self):
print("Things in the environment:")
for obj in self.env.things:
print("%s at %s" % (obj, obj.location))
def list_agents(self):
print("Agents in the environment:")
for agt in self.env.agents:
print("%s at %s" % (agt, agt.location))
def set_speed(self, speed):
self.speed = float(speed)
class Empty:
pass
class EnvCanvas(tk.Canvas, object):
def __init__(self, parent, env, cellwidth, w, h):
self.env = env
cellheight = cellwidth
canvwidth = cellwidth * w # (cellwidth + 1 ) * n
canvheight = cellheight * h # (cellwidth + 1) * n
super(EnvCanvas, self).__init__(parent, background="white",)
# Initialize instance variables
self.env = env
self.cellwidth = cellwidth
self.cellheight = cellheight
self.w = w
self.h = h
# print(
# "cellwidth, canvwidth, camvheight = %d, %d, %d" % \
# (self.cellwidth, canvwidth, canvheight))
# Set up image dictionary.
# Ugly hack: we need to keep a reference to each ImageTk.PhotoImage,
# or it will be garbage collected. This dictionary maps image files
# that have been opened to their PhotoImage objects
self.fnMap = { Empty: 'images/default.png'}
self.images = {}
cwd = os.getcwd()
default = self.get_image(self.fnMap[Empty])
self.cells = [[0 for x in range(w)] for y in range(h)]
for x in range(w):
for y in range(h):
cell = ttk.Frame(self)
contents = ttk.Label(cell, image=default)
contents.pack(side="bottom", fill="both", expand="yes")
cell.grid(row=y, column=x)
self.cells[y][x] = cell
# Bind canvas events.
# self.bind('<Button-1>', self.user_left) ## What should this do?
# self.bind('<Button-2>', self.user_edit_objects)
# self.bind('<Button-3>', self.user_add_object)
self.pack()
def mapImageNames(self, fnMap):
self.fnMap.update(fnMap)
def get_image(self, concat):
"""concat = 'filename1[+filename2]'
Try to find the image in the images dictionary.
If it's not there: open each file, create it,
and paste it over the composite.
When all names are processed, stick the composite
image in the dictionary, and return the image in
a form usable by the canvas."""
if concat in self.images:
tk_image = self.images[concat]
else:
filenames = concat.split('+')
fn0 = filenames[0]
pil_image = Image.open(fn0)
for fni in filenames[1:]:
pi = Image.open(fni)
#tki = ImageTk.PhotoImage(pi)
pil_image.paste(pi, mask=pi)
pil_image = pil_image.resize((self.cellwidth, self.cellheight),
Image.ANTIALIAS)
tk_image = ImageTk.PhotoImage(pil_image)
self.images[concat] = tk_image
return tk_image
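# Editor's note: a composite key such as 'images/floor.png+images/agent.png'
# (file names hypothetical) opens the first file, pastes each following image
# on top of it, resizes the result to the cell size and caches the PhotoImage,
# so every later cell with the same combination reuses the cached image.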
def update(self):
'''Create a tiled image of the XY Environment,
based on the things in each cell.'''
env = self.env
for x in range(self.w):
for y in range(self.h):
cell = self.cells[y][x]
filenames = ''
tList = env.list_things_at((x, y))
for thing in tList:
tclass = thing.__class__
tname = self.fnMap[tclass]
if filenames == '':
filenames = tname
elif not tname in filenames:
filenames += '+' + tname
if filenames == '':
filenames = self.fnMap[Empty]
bg = self.get_image(filenames)
# contents = ttk.Label(cell, image=bg)
contents = cell.winfo_children()[0]
contents.config(image=bg)
contents.pack(side="bottom", fill="both", expand="yes")
def user_left(self, event):
print('left at %d, %d' % self.event_cell(event))
def user_edit_objects(self, event):
"""Choose an object within radius and edit its fields."""
pass
def user_add_object(self, event):
"""Pops up a menu of Object classes; you choose the
one you want to put in this square."""
cell = self.event_cell(event)
xy = self.cell_topleft_xy(*cell) # unpack the cell tuple; passing it as one argument would raise TypeError
menu = tk.Menu(self, title='Edit (%d, %d)' % cell)
# Generalize object classes available,
# and why is self.run the command?
# for (txt, cmd) in [('Wumpus', self.run), ('Pit', self.run)]:
# menu.add_command(label=txt, command=cmd)
obj_classes = self.env.object_classes()
def class_cmd(oclass):
def cmd():
obj = oclass()
self.env.add_object(obj, cell)
# what about drawing it on the canvas?
print(
"Drawing object %s at %s %s" % (obj, cell, xy))
tk_image = self.get_image(oclass.image_file)
self.canvas.create_image(xy, anchor="nw", image=tk_image)
return cmd
for oclass in obj_classes:
menu.add_command(label=oclass.__name__, command=class_cmd(oclass))
menu.tk_popup(event.x + self.winfo_rootx(),
event.y + self.winfo_rooty())
def event_cell(self, event):
return self.xy_cell(event.x, event.y)
def xy_cell(self, x, y):
"""Given an (x, y) on the canvas, return the row and column
of the cell containing it."""
w = self.cellwidth
return x // w, y // w # integer cell indices (Python 3 '/' would return floats)
def cell_topleft_xy(self, row, column):
"""Given a (row, column) tuple, return the (x, y) coordinates
of the cell(row, column)'s top left corner."""
w = self.cellwidth
return w * row, w * column
# a Text User Interface for the Agent program
class EnvTUI(object):
def __init__(self, env, title='AIMA GUI', cellsize=200, n=10):
# Initialize window
# Create components
w = env.width
h = env.height
self.env = env
self.grid = [['.' for x in range(w)] for y in range(h)]
self.fnMap = { Empty: '.'}
def mapImageNames(self, fnMap):
self.fnMap.update(fnMap)
def displayString(self):
display = ''
first = True
for y in range(len(self.grid)):
if not first:
display += '\n'
first = False
for x in range(len(self.grid[y])):
tList = self.env.list_things_at((x, y))
tname = self.fnMap[Empty]
for thing in tList:
tclass = thing.__class__
tname = self.fnMap[tclass]
display += tname
return display
def help(self):
for line in [
'Commands are:',
' h: print this help message',
' s n: advance n steps',
' t: list things',
' a: list agents',
' an empty string advances n steps',
]:
print(line)
def list_things(self, MyClass=object):
print(MyClass.__name__ + 's in the environment:')
for obj in self.env.things:
if isinstance(obj, MyClass):
print("%s at %s" % (obj, obj.location))
def list_agents(self):
print("Agents in the environment:")
for agt in self.env.agents:
print("%s at %s" % (agt, agt.location))
def step(self, n=1):
for s in range(n):
self.env.step()
print(str(n) + ' step(s) later:')
print(self.displayString())
def mainloop(self):
print(self.displayString())
reply = ''
n = 1
while reply != 'q':
reply = input('Command (h for help): ').strip()
if reply == '':
self.step(n)
continue
if reply == 'h':
self.help()
continue
if reply == 'q':
continue
if reply == 'a':
self.list_agents()
continue
if reply == 't':
self.list_things()
continue
if reply[0] == 's':
command = reply.split()
try:
arg = command[1]
except IndexError: # no step count given; reuse the previous n
arg = str(n)
try:
n = int(arg)
self.step(n)
except ValueError:
print('"' + arg + '" is not an integer')
continue
| mit |
nathanielvarona/airflow | airflow/migrations/versions/338e90f54d61_more_logging_into_task_isntance.py | 8 | 1453 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""More logging into task_instance
Revision ID: 338e90f54d61
Revises: 13eb55f81627
Create Date: 2015-08-25 06:09:20.460147
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '338e90f54d61'
down_revision = '13eb55f81627'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
op.add_column('task_instance', sa.Column('operator', sa.String(length=1000), nullable=True))
op.add_column('task_instance', sa.Column('queued_dttm', sa.DateTime(), nullable=True))
def downgrade(): # noqa: D103
op.drop_column('task_instance', 'queued_dttm')
op.drop_column('task_instance', 'operator')
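# Editor's note (assumption): like other Airflow schema migrations, this module
# is normally applied through Alembic rather than imported directly, e.g.
# `alembic upgrade 338e90f54d61` to apply and `alembic downgrade 13eb55f81627`
# to revert; Airflow's own database-upgrade command wraps the same machinery.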
| apache-2.0 |
georgtroska/root | interpreter/llvm/src/utils/lit/lit/run.py | 23 | 9442 | import os
import threading
import time
import traceback
try:
import Queue as queue
except ImportError:
import queue
try:
import win32api
except ImportError:
win32api = None
try:
import multiprocessing
except ImportError:
multiprocessing = None
import lit.Test
###
# Test Execution Implementation
class LockedValue(object):
def __init__(self, value):
self.lock = threading.Lock()
self._value = value
def _get_value(self):
self.lock.acquire()
try:
return self._value
finally:
self.lock.release()
def _set_value(self, value):
self.lock.acquire()
try:
self._value = value
finally:
self.lock.release()
value = property(_get_value, _set_value)
class TestProvider(object):
def __init__(self, queue_impl, canceled_flag):
self.canceled_flag = canceled_flag
# Create a shared queue to provide the test indices.
self.queue = queue_impl()
def queue_tests(self, tests, num_jobs):
for i in range(len(tests)):
self.queue.put(i)
for i in range(num_jobs):
self.queue.put(None)
def cancel(self):
self.canceled_flag.value = 1
def get(self):
# Check if we are canceled.
if self.canceled_flag.value:
return None
# Otherwise take the next test.
return self.queue.get()
class Tester(object):
def __init__(self, run_instance, provider, consumer):
self.run_instance = run_instance
self.provider = provider
self.consumer = consumer
def run(self):
while True:
item = self.provider.get()
if item is None:
break
self.run_test(item)
self.consumer.task_finished()
def run_test(self, test_index):
test = self.run_instance.tests[test_index]
try:
self.run_instance.execute_test(test)
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print('\nCtrl-C detected, goodbye.')
os.kill(0,9)
self.consumer.update(test_index, test)
class ThreadResultsConsumer(object):
def __init__(self, display):
self.display = display
self.lock = threading.Lock()
def update(self, test_index, test):
self.lock.acquire()
try:
self.display.update(test)
finally:
self.lock.release()
def task_finished(self):
pass
def handle_results(self):
pass
class MultiprocessResultsConsumer(object):
def __init__(self, run, display, num_jobs):
self.run = run
self.display = display
self.num_jobs = num_jobs
self.queue = multiprocessing.Queue()
def update(self, test_index, test):
# This method is called in the child processes, and communicates the
# results to the actual display implementation via an output queue.
self.queue.put((test_index, test.result))
def task_finished(self):
# This method is called in the child processes, and communicates that
# individual tasks are complete.
self.queue.put(None)
def handle_results(self):
# This method is called in the parent, and consumes the results from the
# output queue and dispatches to the actual display. The method will
# complete after each of num_jobs tasks has signalled completion.
completed = 0
while completed != self.num_jobs:
# Wait for a result item.
item = self.queue.get()
if item is None:
completed += 1
continue
# Update the test result in the parent process.
index,result = item
test = self.run.tests[index]
test.result = result
self.display.update(test)
def run_one_tester(run, provider, display):
tester = Tester(run, provider, display)
tester.run()
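# Editor's sketch of the single-threaded path (simplified; `run` and `display`
# are assumed to exist already):
#
#     provider = TestProvider(queue.Queue, LockedValue(0))
#     provider.queue_tests(run.tests, num_jobs=1)
#     run_one_tester(run, provider, ThreadResultsConsumer(display))
#
# Run.execute_tests() below performs this wiring for real and adds the
# multiprocessing, timeout and Ctrl-C handling around it.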
###
class Run(object):
"""
This class represents a concrete, configured testing run.
"""
def __init__(self, lit_config, tests):
self.lit_config = lit_config
self.tests = tests
def execute_test(self, test):
result = None
start_time = time.time()
try:
result = test.config.test_format.execute(test, self.lit_config)
# Support deprecated result from execute() which returned the result
# code and additional output as a tuple.
if isinstance(result, tuple):
code, output = result
result = lit.Test.Result(code, output)
elif not isinstance(result, lit.Test.Result):
raise ValueError("unexpected result from test execution")
except KeyboardInterrupt:
raise
except:
if self.lit_config.debug:
raise
output = 'Exception during script execution:\n'
output += traceback.format_exc()
output += '\n'
result = lit.Test.Result(lit.Test.UNRESOLVED, output)
result.elapsed = time.time() - start_time
test.setResult(result)
def execute_tests(self, display, jobs, max_time=None,
use_processes=False):
"""
execute_tests(display, jobs, [max_time])
Execute each of the tests in the run, using up to jobs number of
parallel tasks, and inform the display of each individual result. The
provided tests should be a subset of the tests available in this run
object.
If max_time is non-None, it should be a time in seconds after which to
stop executing tests.
The display object will have its update method called with each test as
it is completed. The calls are guaranteed to be locked with respect to
one another, but are *not* guaranteed to be called on the same thread as
this method was invoked on.
Upon completion, each test in the run will have its result
computed. Tests which were not actually executed (for any reason) will
be given an UNRESOLVED result.
"""
# Choose the appropriate parallel execution implementation.
consumer = None
if jobs != 1 and use_processes and multiprocessing:
try:
task_impl = multiprocessing.Process
queue_impl = multiprocessing.Queue
canceled_flag = multiprocessing.Value('i', 0)
consumer = MultiprocessResultsConsumer(self, display, jobs)
except:
# multiprocessing fails to initialize with certain OpenBSD and
# FreeBSD Python versions: http://bugs.python.org/issue3770
# Unfortunately the error raised also varies by platform.
self.lit_config.note('failed to initialize multiprocessing')
consumer = None
if not consumer:
task_impl = threading.Thread
queue_impl = queue.Queue
canceled_flag = LockedValue(0)
consumer = ThreadResultsConsumer(display)
# Create the test provider.
provider = TestProvider(queue_impl, canceled_flag)
# Queue the tests outside the main thread because we can't guarantee
# that we can put() all the tests without blocking:
# https://docs.python.org/2/library/multiprocessing.html
# e.g: On Mac OS X, we will hang if we put 2^15 elements in the queue
# without taking any out.
queuer = task_impl(target=provider.queue_tests, args=(self.tests, jobs))
queuer.start()
# Install a console-control signal handler on Windows.
if win32api is not None:
def console_ctrl_handler(type):
provider.cancel()
return True
win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
# Install a timeout handler, if requested.
if max_time is not None:
def timeout_handler():
provider.cancel()
timeout_timer = threading.Timer(max_time, timeout_handler)
timeout_timer.start()
# If not using multiple tasks, just run the tests directly.
if jobs == 1:
run_one_tester(self, provider, consumer)
else:
# Otherwise, execute the tests in parallel
self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
queuer.join()
# Cancel the timeout handler.
if max_time is not None:
timeout_timer.cancel()
# Update results for any tests which weren't run.
for test in self.tests:
if test.result is None:
test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):
# Start all of the tasks.
tasks = [task_impl(target=run_one_tester,
args=(self, provider, consumer))
for i in range(jobs)]
for t in tasks:
t.start()
# Allow the consumer to handle results, if necessary.
consumer.handle_results()
# Wait for all the tasks to complete.
for t in tasks:
t.join()
| lgpl-2.1 |
keiserlab/e3fp | e3fp/fingerprint/array_ops.py | 1 | 9285 | """Various array operations.
Author: Seth Axen
E-mail: [email protected]
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
QUATERNION_DTYPE = np.float64
X_AXIS, Y_AXIS, Z_AXIS = np.identity(3, dtype=np.float64)
EPS = 1e-12 # epsilon, a number close to 0
# Vector Algebra Methods
def as_unit(v, axis=1):
"""Return array of unit vectors parallel to vectors in `v`.
Parameters
----------
v : ndarray of float
axis : int, optional
Axis along which to normalize length.
Returns
-------
ndarray of float : Unit vector of `v`, i.e. `v` divided by its
magnitude along `axis`.
"""
u = np.array(v, dtype=np.float64, copy=True)
if u.ndim == 1:
sqmag = u.dot(u)
if sqmag >= EPS:
u /= sqmag ** 0.5
else:
if axis == 1:
sqmag = np.einsum("...ij,...ij->...i", u, u)
else:
sqmag = np.einsum("...ij,...ij->...j", u, u)
sqmag[sqmag < EPS] = 1.0
u /= np.expand_dims(np.sqrt(sqmag), axis)
return u
def make_distance_matrix(coords):
"""Build pairwise distance matrix from coordinates.
Parameters
----------
coords : ndarray of float
an Mx3 array of cartesian coordinates.
Returns
-------
ndarray of float : square symmetrical distance matrix
"""
return squareform(pdist(coords))
def make_transform_matrix(center, y=None, z=None):
"""Make 4x4 homogenous transformation matrix.
Given Nx4 array A where A[:, 4] = 1., the transform matrix M should be
used with dot(M, A.T).T. Order of operations is 1. translation, 2. align
`y` x `z` plane to yz-plane 3. align `y` to y-axis.
Parameters
----------
center : 1x3 array of float
Coordinate that should be centered after transformation.
y : None or 1x3 array of float
Vector that should lie on the y-axis after transformation
z : None or 1x3 array of float
Vector that after transformation should lie on yz-plane in direction
of z-axis.
Returns
-------
4x4 array of float
4x4 homogenous transformation matrix.
"""
translate = np.identity(4, dtype=np.float64)
translate[:3, 3] = -np.asarray(center, dtype=np.float64)
if y is not None:
y = np.atleast_2d(y)
if z is None:
rotate = np.identity(4, dtype=np.float64)
rotate[:3, :3] = make_rotation_matrix(y, Y_AXIS)
else:
z = np.atleast_2d(z)
rotate_norm = np.identity(4, dtype=np.float64)
x_unit = as_unit(np.cross(y, z))
rotate_norm[:3, :3] = make_rotation_matrix(x_unit, X_AXIS)
new_y = np.dot(rotate_norm[:3, :3], y.flatten())
rotate_y = np.identity(4, dtype=np.float64)
rotate_y[:3, :3] = make_rotation_matrix(new_y.flatten(), Y_AXIS)
rotate = np.dot(rotate_y, rotate_norm)
transform = np.dot(rotate, translate)
else:
transform = translate
return transform
def make_rotation_matrix(v0, v1):
"""Create 3x3 matrix of rotation from `v0` onto `v1`.
Should be used by dot(R, v0.T).T.
Parameters
----------
v0 : 1x3 array of float
Initial vector before alignment.
v1 : 1x3 array of float
Vector to which to align `v0`.
"""
v0 = as_unit(v0)
v1 = as_unit(v1)
u = np.cross(v0.ravel(), v1.ravel())
if np.all(u == 0.0):
return np.identity(3, dtype=np.float64)
sin_ang = u.dot(u) ** 0.5
u /= sin_ang
cos_ang = np.dot(v0, v1.T)
# fmt: off
ux = np.array([[ 0., -u[2], u[1]],
[ u[2], 0., -u[0]],
[-u[1], u[0], 0.]], dtype=np.float64)
# fmt: on
rot = (
cos_ang * np.identity(3, dtype=np.float64)
+ sin_ang * ux
+ (1 - cos_ang) * np.outer(u, u)
)
return rot
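# Editor's note: the expression above is Rodrigues' rotation formula,
# R = cos(a)*I + sin(a)*[u]_x + (1 - cos(a))*u*u^T, with u the unit rotation
# axis and a the angle from v0 to v1. A quick illustrative check:
#
#     R = make_rotation_matrix([1., 0., 0.], [0., 1., 0.])
#     np.allclose(R.dot([1., 0., 0.]), [0., 1., 0.])   # -> True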
def transform_array(transform_matrix, a):
"""Pad an array with 1s, transform, and return with original dimensions.
Parameters
----------
transform_matrix : 4x4 array of float
4x4 homogenous transformation matrix
a : Nx3 array of float
Array of 3-D coordinates.
Returns
-------
Nx3 array of float : Transformed array
"""
return unpad_array(np.dot(transform_matrix, pad_array(a).T).T)
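# Editor's sketch of the intended pipeline (coordinates are made up):
#
#     coords = np.array([[1., 1., 1.], [1., 2., 1.], [2., 1., 1.]])
#     M = make_transform_matrix(center=coords[0], y=coords[1] - coords[0])
#     moved = transform_array(M, coords)
#     # coords[0] maps to the origin and coords[1] ends up on the +y axis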
def pad_array(a, n=1.0, axis=1):
"""Return `a` with row of `n` appended to `axis`.
Parameters
----------
a : ndarray
Array to pad
n : float or int, optional
Value to pad `a` with
axis : int, optional
Axis of `a` to pad with `n`.
Returns
-------
ndarray
Padded array.
"""
if a.ndim == 1:
pad = np.ones(a.shape[0] + 1, dtype=a.dtype) * n
pad[: a.shape[0]] = a
else:
shape = list(a.shape)
shape[axis] += 1
pad = np.ones(shape, dtype=a.dtype)
pad[: a.shape[0], : a.shape[1]] = a
return pad
def unpad_array(a, axis=1):
"""Return `a` with row removed along `axis`.
Parameters
----------
a : ndarray
Array from which to remove row
axis : int, optional
Axis from which to remove row
Returns
-------
ndarray
Unpadded array.
"""
if a.ndim == 1:
return a[:-1]
else:
shape = list(a.shape)
shape[axis] -= 1
return a[: shape[0], : shape[1]]
def project_to_plane(vec_arr, norm):
"""Project array of vectors to plane with normal `norm`.
Parameters
----------
vec_arr : Nx3 array
Array of N 3D vectors.
norm : 1x3 array
Normal vector to plane.
Returns
-------
Nx3 array
Array of vectors projected onto plane.
"""
unit_norm = as_unit(norm).flatten()
mag_on_norm = np.dot(vec_arr, unit_norm)
if vec_arr.ndim == 1:
vec_on_norm = np.array(unit_norm, copy=True)
vec_on_norm *= mag_on_norm
else:
vec_on_norm = np.tile(unit_norm, (vec_arr.shape[0], 1))
vec_on_norm *= mag_on_norm[:, None]
return vec_arr - vec_on_norm
def calculate_angles(vec_arr, ref, ref_norm=None):
"""Calculate angles between vectors in `vec_arr` and `ref` vector.
If `ref_norm` is not provided, angle ranges between 0 and pi. If it is
provided, angle ranges between 0 and 2pi. Note that if `ref_norm` is
orthogonal to `vec_arr` and `ref`, then the angle is the rotation around that
axis, but if a non-orthogonal axis is provided, this may not be the case.
Parameters
----------
vec_arr : Nx3 array of float
Array of N 3D vectors.
ref : 1x3 array of float
Reference vector
ref_norm : 1x3 array of float
Normal vector.
Returns
-------
1-D array
Array of N angles
"""
unit_vec_arr = as_unit(vec_arr)
unit_ref = as_unit(ref).flatten()
ang = np.arccos(np.clip(np.dot(unit_vec_arr, unit_ref), -1.0, 1.0))
# handle cases where a vector is the origin
ang[np.all(unit_vec_arr == np.zeros(3), axis=1)] = 0.0
if ref_norm is not None:
sign = np.sign(
np.dot(ref_norm, np.cross(unit_vec_arr, unit_ref).T)
).flatten()
sign[sign == 0] = 1
ang = rotate_angles(sign * ang, 2 * np.pi)
return ang
def rotate_angles(angles, amount):
"""Rotate angles by `amount`, keeping in 0 to 2pi range.
Parameters
----------
angles : 1-D array of float
Angles in radians
amount : float
Amount to rotate angles by
Returns
-------
1-D array of float : Rotated angles
"""
return (angles + amount) % (2 * np.pi)
def quaternion_to_transform_matrix(quaternion, translation=np.zeros(3)):
"""Convert quaternion to homogenous 4x4 transform matrix.
Parameters
----------
quaternion : 4x1 array of float
Quaternion describing rotation after translation.
translation : 3x1 array of float, optional
Translation to be performed before rotation.
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.linalg.norm(q)
if n < 1e-12:
return np.identity(4, dtype=np.float64)
q /= n
q = 2 * np.outer(q, q)
# fmt: off
transform_mat = np.array(
[[1.-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.],
[ q[1, 2]+q[3, 0], 1.-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.-q[1, 1]-q[2, 2], 0.],
[ 0., 0., 0., 1.]],
dtype=np.float64
)
# fmt: on
transform_mat[:3, 3] = translation
return transform_mat
def transform_matrix_to_quaternion(transform_matrix, dtype=QUATERNION_DTYPE):
"""Convert homogenous 4x4 transform matrix to quaternion.
Parameters
----------
transform_matrix : 4x4 array of float
Homogenous transformation matrix.
dtype : numpy dtype, optional
Datatype for returned quaternion.
"""
T = np.array(transform_matrix, dtype=np.float64)
R = T[:3, :3]
q = np.zeros(4, dtype=dtype)
q[0] = np.sqrt(1.0 + R.trace()) / 2.0
q[1] = R[2, 1] - R[1, 2]
q[2] = R[0, 2] - R[2, 0]
q[3] = R[1, 0] - R[0, 1]
q[1:4] /= 4.0 * q[0]
return q
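# Editor's sketch: identity round trip between the two conversions above
# (values are illustrative):
#
#     q = np.array([1., 0., 0., 0.])                      # no rotation
#     T = quaternion_to_transform_matrix(q, translation=np.array([1., 2., 3.]))
#     np.allclose(transform_matrix_to_quaternion(T), q)   # -> True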
| lgpl-3.0 |
viru/ansible-modules-core | cloud/openstack/_quantum_network.py | 127 | 10265 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
deprecated: Deprecated in 2.0. Use os_network instead
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
tenant_name:
description:
- The name of the tenant for whom the network is created
required: false
default: None
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be assigned to the nework
required: true
default: None
provider_network_type:
description:
- The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified.
required: false
default: None
provider_physical_network:
description:
- The physical network which would realize the virtual network for flat and vlan networks.
required: false
default: None
provider_segmentation_id:
description:
- The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id
required: false
default: None
router_external:
description:
- If 'yes', specifies that the virtual network is a external network (public).
required: false
default: false
shared:
description:
- Whether this network is shared or not
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Create a GRE backed Quantum network with tunnel id 1 for tenant1
- quantum_network: name=t1network tenant_name=tenant1 state=present
provider_network_type=gre provider_segmentation_id=1
login_username=admin login_password=admin login_tenant_name=admin
# Create an external network
- quantum_network: name=external_network state=present
provider_network_type=local router_external=yes
login_username=admin login_password=admin login_tenant_name=admin
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s " %e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = " Error in connecting to neutron: %s " %e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _create_network(module, neutron):
neutron.format = 'json'
network = {
'name': module.params.get('name'),
'tenant_id': _os_tenant_id,
'provider:network_type': module.params.get('provider_network_type'),
'provider:physical_network': module.params.get('provider_physical_network'),
'provider:segmentation_id': module.params.get('provider_segmentation_id'),
'router:external': module.params.get('router_external'),
'shared': module.params.get('shared'),
'admin_state_up': module.params.get('admin_state_up'),
}
if module.params['provider_network_type'] == 'local':
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'flat':
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'gre':
network.pop('provider:physical_network', None)
if module.params['provider_network_type'] is None:
network.pop('provider:network_type', None)
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
try:
net = neutron.create_network({'network':network})
except Exception, e:
module.fail_json(msg = "Error in creating network: %s" % e.message)
return net['network']['id']
def _delete_network(module, net_id, neutron):
try:
id = neutron.delete_network(net_id)
except Exception, e:
module.fail_json(msg = "Error in deleting the network: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']),
provider_physical_network = dict(default=None),
provider_segmentation_id = dict(default=None),
router_external = dict(default=False, type='bool'),
shared = dict(default=False, type='bool'),
admin_state_up = dict(default=True, type='bool'),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
if module.params['provider_network_type'] in ['vlan', 'gre']:
if not module.params['provider_segmentation_id']:
module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.")
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
network_id = _get_net_id(neutron, module)
if not network_id:
network_id = _create_network(module, neutron)
module.exit_json(changed = True, result = "Created", id = network_id)
else:
module.exit_json(changed = False, result = "Success", id = network_id)
if module.params['state'] == 'absent':
network_id = _get_net_id(neutron, module)
if not network_id:
module.exit_json(changed = False, result = "Success")
else:
_delete_network(module, network_id, neutron)
module.exit_json(changed = True, result = "Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sawenzel/root | main/python/cmdLineUtils.py | 4 | 50993 | #!/usr/bin/env @python@
# ROOT command line tools module: cmdLineUtils
# Author: Julien Ripoche
# Mail: [email protected]
# Date: 20/08/15
"""Contain utils for ROOT command line tools"""
##########
# Stream redirect functions
# The original code of the these functions can be found here :
# http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
# Thanks J.F. Sebastian !!
from contextlib import contextmanager
import os
import sys
def fileno(file_or_fd):
"""
Look for 'fileno' attribute.
"""
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def streamRedirected(source=sys.stdout, destination=os.devnull):
"""
Redirect the output from source to destination.
"""
stdout_fd = fileno(source)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
source.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(destination), stdout_fd) # $ exec >&destination
except ValueError: # filename
with open(destination, 'wb') as destination_file:
os.dup2(destination_file.fileno(), stdout_fd) # $ exec > destination
try:
yield source # allow code to be run with the redirected stream
finally:
# restore source to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
source.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
def stdoutRedirected():
"""
Redirect the output from sys.stdout to os.devnull.
"""
return streamRedirected(sys.stdout, os.devnull)
def stderrRedirected():
"""
Redirect the output from sys.stderr to os.devnull.
"""
return streamRedirected(sys.stderr, os.devnull)
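# Editor's sketch: the same helper also accepts a file name as the destination,
# so noisy output can be captured instead of discarded (names are hypothetical):
#
#     with streamRedirected(sys.stderr, "errors.log"):
#         noisyCall()          # anything written to stderr lands in errors.log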
# The end of streamRedirected functions
##########
##########
# Imports
##
# redirect output (escape characters during ROOT importation...)
# The gymnastic with sys argv is necessary to workaround for ROOT-7577
argvTmp = sys.argv[:]
sys.argv = []
with stdoutRedirected():
import ROOT
ROOT.gROOT.GetVersion()
sys.argv = argvTmp
import argparse
import glob
import fnmatch
import logging
LOG_FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT)
# The end of imports
##########
##########
# Different functions to get a parser of arguments and options
def _getParser(theHelp, theEpilog):
"""
Get a commandline parser with the defaults of the commandline utils.
"""
return argparse.ArgumentParser(description=theHelp,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = theEpilog)
def getParserSingleFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
source file or not.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='?', help="Input file")
return parser
def getParserFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
list of source files.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='+', help="Input file")
return parser
def getParserSourceDest(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils,
a list of source files and a destination file.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("SOURCE", nargs='+', help="Source file")
parser.add_argument("DEST", help="Destination file")
return parser
# The end of get parser functions
##########
##########
# Several utils
@contextmanager
def _setIgnoreLevel(level):
originalLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = level
yield
ROOT.gErrorIgnoreLevel = originalLevel
def changeDirectory(rootFile,pathSplit):
"""
Change the current directory (ROOT.gDirectory) by the corresponding (rootFile,pathSplit)
"""
rootFile.cd()
for directoryName in pathSplit:
theDir = ROOT.gDirectory.Get(directoryName)
if not theDir:
logging.warning("Directory %s does not exist." %directoryName)
return 1
else:
theDir.cd()
return 0
def createDirectory(rootFile,pathSplit):
"""
Add a directory named 'pathSplit[-1]' in (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0: ROOT.gDirectory.mkdir(pathSplit[-1])
return retcode
def getFromDirectory(objName):
"""
Get the object objName from the current directory
"""
return ROOT.gDirectory.Get(objName)
def isExisting(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), exits
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetListOfKeys().Contains(pathSplit[-1])
def isDirectoryKey(key):
"""
Return True if the object, corresponding to the key, inherits from TDirectory
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TDirectory.Class())
def isTreeKey(key):
"""
Return True if the object, corresponding to the key, inherits from TTree
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TTree.Class())
def getKey(rootFile,pathSplit):
"""
Get the key of the corresponding object (rootFile,pathSplit)
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetKey(pathSplit[-1])
def isDirectory(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TDirectory
"""
if pathSplit == []: return True # the object is the rootFile itself
else: return isDirectoryKey(getKey(rootFile,pathSplit))
def isTree(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TTree
"""
if pathSplit == []: return False # the object is the rootFile itself
else: return isTreeKey(getKey(rootFile,pathSplit))
def getKeyList(rootFile,pathSplit):
"""
Get the list of keys of the directory (rootFile,pathSplit),
if (rootFile,pathSplit) is not a directory then get the key in a list
"""
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
return ROOT.gDirectory.GetListOfKeys()
else: return [getKey(rootFile,pathSplit)]
def keyListSort(keyList):
"""
Sort list of keys by their names ignoring the case
"""
keyList.sort(key=lambda x: x.GetName().lower())
def tupleListSort(tupleList):
"""
Sort list of tuples by their first elements ignoring the case
"""
tupleList.sort(key=lambda x: x[0].lower())
def dirListSort(dirList):
"""
Sort list of directories by their names ignoring the case
"""
dirList.sort(key=lambda x: [n.lower() for n in x])
def keyClassSpliter(rootFile,pathSplitList):
"""
    Return a list of directories and a list of keys corresponding
    to the other objects, for rootLs and rootPrint use
"""
keyList = []
dirList = []
for pathSplit in pathSplitList:
if pathSplit == []: dirList.append(pathSplit)
elif isDirectory(rootFile,pathSplit): dirList.append(pathSplit)
else: keyList.append(getKey(rootFile,pathSplit))
keyListSort(keyList)
dirListSort(dirList)
return keyList,dirList
def openROOTFile(fileName, mode="read"):
"""
    Open the ROOT file corresponding to fileName in the given mode,
    redirecting the error output so that messages about missing dictionaries are hidden
Returns:
theFile (TFile)
"""
#with stderrRedirected():
with _setIgnoreLevel(ROOT.kError):
theFile = ROOT.TFile.Open(fileName, mode)
if not theFile:
logging.warning("File %s does not exist", fileName)
return theFile
def openROOTFileCompress(fileName, compress, recreate):
"""
Open a ROOT file (like openROOTFile) with the possibility
to change compression settings
"""
if compress != None and os.path.isfile(fileName):
logging.warning("can't change compression settings on existing file")
return None
mode = "recreate" if recreate else "update"
theFile = openROOTFile(fileName, mode)
if compress != None: theFile.SetCompressionSettings(compress)
return theFile
def joinPathSplit(pathSplit):
"""
Join the pathSplit with '/'
"""
return "/".join(pathSplit)
MANY_OCCURENCE_WARNING = "Objects with the same name are not supported: '{0}' of '{1}' won't be processed"
def manyOccurenceRemove(pathSplitList,fileName):
"""
    Search for multiple occurrences of the same pathSplit and remove them
"""
if len(pathSplitList) > 1:
for n in pathSplitList:
if pathSplitList.count(n) != 1:
logging.warning(MANY_OCCURENCE_WARNING.format(joinPathSplit(n),fileName))
while n in pathSplitList: pathSplitList.remove(n)
def patternToPathSplitList(fileName,pattern):
"""
Get the list of pathSplit of objects in the ROOT file
corresponding to fileName that match with the pattern
"""
# Open ROOT file
rootFile = openROOTFile(fileName)
if not rootFile: return []
# Split pattern avoiding multiple slash problem
patternSplit = [n for n in pattern.split("/") if n != ""]
# Main loop
pathSplitList = [[]]
for patternPiece in patternSplit:
newPathSplitList = []
for pathSplit in pathSplitList:
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
newPathSplitList.extend( \
[pathSplit + [key.GetName()] \
for key in ROOT.gDirectory.GetListOfKeys() \
if fnmatch.fnmatch(key.GetName(),patternPiece)])
pathSplitList = newPathSplitList
# No match
if pathSplitList == []:
logging.warning("can't find {0} in {1}".format(pattern,fileName))
    # Same match (remove duplicate occurrences from the list)
manyOccurenceRemove(pathSplitList,fileName)
return pathSplitList
def fileNameListMatch(filePattern,wildcards):
"""
Get the list of fileName that match with objPattern
"""
if wildcards: return [os.path.expandvars(os.path.expanduser(i)) for i in glob.iglob(filePattern)]
else: return [os.path.expandvars(os.path.expanduser(filePattern))]
def pathSplitListMatch(fileName,objPattern,wildcards):
"""
Get the list of pathSplit that match with objPattern
"""
if wildcards: return patternToPathSplitList(fileName,objPattern)
else: return [[n for n in objPattern.split("/") if n != ""]]
def patternToFileNameAndPathSplitList(pattern,wildcards = True):
"""
    Get the list of tuples containing both :
    - the ROOT file name
    - the list of split paths (in the corresponding file) of the objects that match
    Unix wildcards are used by default
"""
rootFilePattern = "*.root"
rootObjPattern = rootFilePattern+":*"
httpRootFilePattern = "htt*://*.root"
httpRootObjPattern = httpRootFilePattern+":*"
xrootdRootFilePattern = "root://*.root"
xrootdRootObjPattern = xrootdRootFilePattern+":*"
s3RootFilePattern = "s3://*.root"
s3RootObjPattern = s3RootFilePattern+":*"
gsRootFilePattern = "gs://*.root"
gsRootObjPattern = gsRootFilePattern+":*"
rfioRootFilePattern = "rfio://*.root"
rfioRootObjPattern = rfioRootFilePattern+":*"
pcmFilePattern = "*.pcm"
pcmObjPattern = pcmFilePattern+":*"
if fnmatch.fnmatch(pattern,httpRootObjPattern) or \
fnmatch.fnmatch(pattern,xrootdRootObjPattern) or \
fnmatch.fnmatch(pattern,s3RootObjPattern) or \
fnmatch.fnmatch(pattern,gsRootObjPattern) or \
fnmatch.fnmatch(pattern,rfioRootObjPattern):
patternSplit = pattern.rsplit(":", 1)
fileName = patternSplit[0]
objPattern = patternSplit[1]
pathSplitList = pathSplitListMatch(fileName,objPattern,wildcards)
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,httpRootFilePattern) or \
fnmatch.fnmatch(pattern,xrootdRootFilePattern) or \
fnmatch.fnmatch(pattern,s3RootFilePattern) or \
fnmatch.fnmatch(pattern,gsRootFilePattern) or \
fnmatch.fnmatch(pattern,rfioRootFilePattern):
fileName = pattern
pathSplitList = [[]]
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,rootObjPattern) or \
fnmatch.fnmatch(pattern,pcmObjPattern):
patternSplit = pattern.split(":")
filePattern = patternSplit[0]
objPattern = patternSplit[1]
fileNameList = fileNameListMatch(filePattern,wildcards)
return [(fileName,pathSplitListMatch(fileName,objPattern,wildcards)) for fileName in fileNameList]
if fnmatch.fnmatch(pattern,rootFilePattern) or \
fnmatch.fnmatch(pattern,pcmFilePattern):
filePattern = pattern
fileNameList = fileNameListMatch(filePattern,wildcards)
pathSplitList = [[]]
return [(fileName,pathSplitList) for fileName in fileNameList]
logging.warning("{0}: No such file (or extension not supported)".format(pattern))
return []
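# Illustrative sketch of the expected output (hypothetical file and object names):
# with wildcards enabled, a pattern like "hsimple.root:dir1/h*" could expand to
#   [("hsimple.root", [["dir1", "hpx"], ["dir1", "hpxpy"]])]
# i.e. one tuple per matched file, each carrying the pathSplits of the matched objects.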
# End of utils
##########
##########
# Set of functions to put the arguments in shape
def getArgs(parser):
"""
Get arguments corresponding to parser.
"""
return parser.parse_args()
def getSourceListArgs(parser, wildcards = True):
"""
    Create a list of tuples that contain source ROOT file names
    and lists of paths in these files, as well as the original arguments
"""
args = getArgs(parser)
inputFiles = []
try:
inputFiles = args.FILE
except:
inputFiles = args.SOURCE
sourceList = \
[tup for pattern in inputFiles \
for tup in patternToFileNameAndPathSplitList(pattern,wildcards)]
return sourceList, args
def getSourceListOptDict(parser, wildcards = True):
"""
Get the list of tuples and the dictionary with options
returns:
sourceList: a list of tuples with one list element per file
the first tuple entry being the root file,
the second a list of subdirectories,
each being represented as a list itself with a string per level
e.g.
rootls tutorial/tmva/TMVA.root:Method_BDT/BDT turns into
[('tutorials/tmva/TMVA.root', [['Method_BDT','BDT']])]
vars(args): a dictionary of matched options, e.g.
{'longListing': False,
'oneColumn': False,
'treeListing': False,
'FILE': ['tutorials/tmva/TMVA.root:Method_BDT/BDT']
}
"""
sourceList, args = getSourceListArgs(parser, wildcards)
if sourceList == []:
logging.error("Input file(s) not found!")
return sourceList, vars(args)
def getSourceDestListOptDict(parser, wildcards = True):
"""
Get the list of tuples of sources, create destination name, destination pathSplit
and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
destList = \
patternToFileNameAndPathSplitList( \
args.DEST,wildcards=False)
if destList != []:
destFileName,destPathSplitList = destList[0]
destPathSplit = destPathSplitList[0]
else:
destFileName = ""
destPathSplit = []
return sourceList, destFileName, destPathSplit, vars(args)
# The end of the set of functions to put the arguments in shape
##########
##########
# Several functions shared by roocp, roomv and roorm
TARGET_ERROR = "target '{0}' is not a directory"
OMITTING_FILE_ERROR = "omitting file '{0}'"
OMITTING_DIRECTORY_ERROR = "omitting directory '{0}'"
OVERWRITE_ERROR = "cannot overwrite non-directory '{0}' with directory '{1}'"
def copyRootObject(sourceFile,sourcePathSplit,destFile,destPathSplit,oneSource,recursive,replace):
"""
Initialize the recursive function 'copyRootObjectRecursive', written to be as unix-like as possible
"""
retcode = 0
isMultipleInput = not (oneSource and sourcePathSplit != [])
recursiveOption = recursive
    # Multiple inputs and nonexistent or non-directory destination
# TARGET_ERROR
if isMultipleInput and destPathSplit != [] \
and not (isExisting(destFile,destPathSplit) \
and isDirectory(destFile,destPathSplit)):
logging.warning(TARGET_ERROR.format(destPathSplit[-1]))
retcode += 1
# Entire ROOT file or directory in input omitting "-r" option
# OMITTING_FILE_ERROR or OMITTING_DIRECTORY_ERROR
if not recursiveOption:
if sourcePathSplit == []:
logging.warning(OMITTING_FILE_ERROR.format( \
sourceFile.GetName()))
retcode += 1
elif isDirectory(sourceFile,sourcePathSplit):
logging.warning(OMITTING_DIRECTORY_ERROR.format( \
sourcePathSplit[-1]))
retcode += 1
# Run copyRootObjectRecursive function with the wish
# to follow the unix copy behaviour
if sourcePathSplit == []:
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = ""
if not isMultipleInput and (destPathSplit != [] \
and not isExisting(destFile,destPathSplit)):
setName = destPathSplit[-1]
objectName = sourcePathSplit[-1]
if isDirectory(sourceFile,sourcePathSplit):
if setName != "":
createDirectory(destFile,destPathSplit[:-1]+[setName])
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1]+[setName],replace)
elif isDirectory(destFile,destPathSplit):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
else:
logging.warning(OVERWRITE_ERROR.format( \
destPathSplit[-1],objectName))
retcode += 1
else:
if setName != "":
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
elif isDirectory(destFile,destPathSplit):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = destPathSplit[-1]
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
return retcode
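# Illustrative sketch (hypothetical names): copying the histogram "hpx" from an
# opened "source.root" into the directory "plots" of an opened "dest.root"
# (assuming "plots" already exists), in the spirit of 'cp source.root:hpx dest.root:plots':
#   copyRootObject(sourceFile, ["hpx"], destFile, ["plots"],
#                  oneSource=True, recursive=False, replace=False)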
DELETE_ERROR = "object {0} does not exist, so it cannot be deleted"
def deleteObject(rootFile,pathSplit):
"""
Delete the object 'pathSplit[-1]' from (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0:
fileName = pathSplit[-1]
if isExisting(rootFile,pathSplit):
ROOT.gDirectory.Delete(fileName+";*")
else:
logging.warning(DELETE_ERROR.format(fileName))
retcode += 1
return retcode
def copyRootObjectRecursive(sourceFile,sourcePathSplit,destFile,destPathSplit,replace,setName=""):
"""
Copy objects from a file or directory (sourceFile,sourcePathSplit)
    to another file or directory (destFile,destPathSplit)
    - aims to follow the unix 'cp' behaviour
    - it is a recursive function
    - Python adaptation of a ROOT input/output tutorial :
$ROOTSYS/tutorials/io/copyFiles.C
"""
retcode = 0
replaceOption = replace
for key in getKeyList(sourceFile,sourcePathSplit):
objectName = key.GetName()
if isDirectoryKey(key):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode +=copyRootObjectRecursive(sourceFile, \
sourcePathSplit+[objectName], \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
elif isTreeKey(key):
T = key.GetMotherDir().Get(objectName+";"+str(key.GetCycle()))
if replaceOption and isExisting(destFile,destPathSplit+[T.GetName()]):
retcodeTemp = deleteObject(destFile,destPathSplit+[T.GetName()])
if retcodeTemp:
retcode += retcodeTemp
continue
changeDirectory(destFile,destPathSplit)
newT = T.CloneTree(-1,"fast")
if setName != "":
newT.SetName(setName)
newT.Write()
else:
obj = key.ReadObj()
if replaceOption and isExisting(destFile,destPathSplit+[setName]):
changeDirectory(destFile,destPathSplit)
otherObj = getFromDirectory(setName)
if not otherObj == obj:
retcodeTemp = deleteObject(destFile,destPathSplit+[setName])
if retcodeTemp:
retcode += retcodeTemp
continue
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
elif issubclass(obj.__class__, ROOT.TCollection):
# probably the object was written with kSingleKey
changeDirectory(destFile,destPathSplit)
obj.Write(setName, ROOT.TObject.kSingleKey)
else:
if setName != "":
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
obj.Delete()
changeDirectory(destFile,destPathSplit)
ROOT.gDirectory.SaveSelf(ROOT.kTRUE)
return retcode
FILE_REMOVE_ERROR = "cannot remove '{0}': Is a ROOT file"
DIRECTORY_REMOVE_ERROR = "cannot remove '{0}': Is a directory"
ASK_FILE_REMOVE = "remove '{0}' ? (y/n) : "
ASK_OBJECT_REMOVE = "remove '{0}' from '{1}' ? (y/n) : "
def deleteRootObject(rootFile, pathSplit, interactive, recursive):
"""
Remove the object (rootFile,pathSplit)
-interactive : prompt before every removal
-recursive : allow directory, and ROOT file, removal
"""
retcode = 0
if not recursive and isDirectory(rootFile,pathSplit):
if pathSplit == []:
logging.warning(FILE_REMOVE_ERROR.format(rootFile.GetName()))
retcode += 1
else:
logging.warning(DIRECTORY_REMOVE_ERROR.format(pathSplit[-1]))
retcode += 1
else:
if interactive:
if pathSplit != []:
answer = raw_input(ASK_OBJECT_REMOVE \
.format("/".join(pathSplit),rootFile.GetName()))
else:
answer = raw_input(ASK_FILE_REMOVE \
.format(rootFile.GetName()))
remove = answer.lower() == 'y'
else:
remove = True
if remove:
if pathSplit != []:
retcode += deleteObject(rootFile,pathSplit)
else:
rootFile.Close()
os.remove(rootFile.GetName())
return retcode
# End of functions shared by roocp, roomv and roorm
##########
##########
# Help strings for ROOT command line tools
# Arguments
SOURCE_HELP = "path of the source."
SOURCES_HELP = "path of the source(s)."
DEST_HELP = "path of the destination."
# Options
COMPRESS_HELP = \
"""change the compression settings of the
destination file (if not already existing)."""
INTERACTIVE_HELP = "prompt before every removal."
RECREATE_HELP = "recreate the destination file."
RECURSIVE_HELP = "recurse inside directories"
REPLACE_HELP = "replace object if already existing"
# End of help strings
##########
##########
# ROOTBROWSE
def _openBrowser(rootFile=None):
browser = ROOT.TBrowser()
if rootFile: rootFile.Browse(browser)
ROOT.PyROOT.TPyROOTApplication.Run(ROOT.gApplication)
def rootBrowse(fileName=None):
if fileName:
rootFile = openROOTFile(fileName)
if not rootFile: return 1
_openBrowser(rootFile)
rootFile.Close()
else:
_openBrowser()
return 0
# End of ROOTBROWSE
##########
##########
# ROOTCP
def _copyObjects(fileName, pathSplitList, destFile, destPathSplit, oneFile, \
recursive, replace):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcode += copyRootObject(rootFile, pathSplit, destFile, destPathSplit, \
oneSource, recursive, replace)
if fileName != destFileName: rootFile.Close()
return retcode
def rootCp(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, recursive=False, replace=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, recursive, replace)
destFile.Close()
return retcode
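# Illustrative sketch of the typical call chain (the option names are assumptions,
# they depend on the optional arguments registered on the parser):
#   sourceList, destFileName, destPathSplit, options = \
#       getSourceDestListOptDict(getParserSourceDest("rootcp help"))
#   rootCp(sourceList, destFileName, destPathSplit,
#          compress=options.get("compress"), recreate=options.get("recreate", False),
#          recursive=options.get("recursive", False), replace=options.get("replace", False))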
# End of ROOTCP
##########
##########
# ROOTEVENTSELECTOR
def _copyTreeSubset(sourceFile,sourcePathSplit,destFile,destPathSplit,firstEvent,lastEvent,selectionString):
"""Copy a subset of the tree from (sourceFile,sourcePathSplit)
to (destFile,destPathSplit) according to options in optDict"""
retcode = changeDirectory(sourceFile,sourcePathSplit[:-1])
if retcode != 0: return retcode
bigTree = getFromDirectory(sourcePathSplit[-1])
nbrEntries = bigTree.GetEntries()
# changeDirectory for the small tree not to be memory-resident
retcode = changeDirectory(destFile,destPathSplit)
if retcode != 0: return retcode
smallTree = bigTree.CloneTree(0)
if lastEvent == -1:
lastEvent = nbrEntries-1
isNtuple = bigTree.InheritsFrom(ROOT.TNtuple.Class())
for i in range(firstEvent, lastEvent+1):
bigTree.GetEntry(i)
if isNtuple:
super(ROOT.TNtuple,smallTree).Fill()
else:
smallTree.Fill()
if selectionString:
if isNtuple:
smallSkimmedTree = super(ROOT.TNtuple,smallTree).CopyTree(selectionString)
else:
smallSkimmedTree = smallTree.CopyTree(selectionString)
smallSkimmedTree.Write()
else:
smallTree.Write()
return retcode
def _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, first, last, selectionString):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
for pathSplit in pathSplitList:
if isTree(rootFile,pathSplit):
retcode += _copyTreeSubset(rootFile,pathSplit, \
destFile,destPathSplit,first,last,selectionString)
if fileName != destFileName: rootFile.Close()
return retcode
def rootEventselector(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, first=0, last=-1, selectionString=""):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in sourceList:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
# Loop on the root file
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, \
first, last, selectionString)
destFile.Close()
return retcode
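# Illustrative sketch (hypothetical file names and selection): skimming the first
# 1000 events of every tree of "input.root" into "skim.root" could be written as
#   sourceList = patternToFileNameAndPathSplitList("input.root:*")
#   rootEventselector(sourceList, "skim.root", [], first=0, last=999,
#                     selectionString="pt > 10")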
# End of ROOTEVENTSELECTOR
##########
##########
# ROOTLS
# Ansi characters
ANSI_BOLD = "\x1B[1m"
ANSI_BLUE = "\x1B[34m"
ANSI_GREEN = "\x1B[32m"
ANSI_END = "\x1B[0m"
# Needed for column width calculation
ANSI_BOLD_LENGTH = len(ANSI_BOLD+ANSI_END)
ANSI_BLUE_LENGTH = len(ANSI_BLUE+ANSI_END)
ANSI_GREEN_LENGTH = len(ANSI_GREEN+ANSI_END)
# Terminal and platform booleans
IS_TERMINAL = sys.stdout.isatty()
IS_WIN32 = sys.platform == 'win32'
def isSpecial(ansiCode,string):
"""Use ansi code on 'string' if the output is the
terminal of a not Windows platform"""
if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END
else: return string
def write(string,indent=0,end=""):
"""Use sys.stdout.write to write the string with an indentation
equal to indent and specifying the end character"""
sys.stdout.write(" "*indent+string+end)
TREE_TEMPLATE = "{0:{nameWidth}}"+"{1:{titleWidth}}{2:{memoryWidth}}"
def _recursifTreePrinter(tree,indent):
"""Print recursively tree informations"""
listOfBranches = tree.GetListOfBranches()
if len(listOfBranches) > 0: # Width informations
maxCharName = max([len(branch.GetName()) \
for branch in listOfBranches])
maxCharTitle = max([len(branch.GetTitle()) \
for branch in listOfBranches])
dic = { \
"nameWidth":maxCharName+2, \
"titleWidth":maxCharTitle+4, \
"memoryWidth":1}
for branch in listOfBranches: # Print loop
rec = \
[branch.GetName(), \
"\""+branch.GetTitle()+"\"", \
str(branch.GetTotBytes())]
write(TREE_TEMPLATE.format(*rec,**dic),indent,end="\n")
_recursifTreePrinter(branch,indent+2)
def _prepareTime(time):
"""Get time in the proper shape
ex : 174512 for 17h 45m 12s
ex : 094023 for 09h 40m 23s"""
time = str(time)
time = '000000'+time
time = time[len(time)-6:]
return time
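# For instance, _prepareTime(94023) returns "094023" (09h 40m 23s) and
# _prepareTime(174512) returns "174512" unchanged.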
MONTH = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun', \
7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}
LONG_TEMPLATE = \
isSpecial(ANSI_BOLD,"{0:{classWidth}}")+"{1:{timeWidth}}" + \
"{2:{nameWidth}}{3:{titleWidth}}"
def _rootLsPrintLongLs(keyList,indent,treeListing):
"""Print a list of Tkey in columns
pattern : classname, datetime, name and title"""
if len(keyList) > 0: # Width informations
maxCharClass = max([len(key.GetClassName()) for key in keyList])
maxCharTime = 12
maxCharName = max([len(key.GetName()) for key in keyList])
dic = { \
"classWidth":maxCharClass+2, \
"timeWidth":maxCharTime+2, \
"nameWidth":maxCharName+2, \
"titleWidth":1}
date = ROOT.Long(0)
for key in keyList:
datime = key.GetDatime()
time = datime.GetTime()
date = datime.GetDate()
time = _prepareTime(time)
rec = \
[key.GetClassName(), \
MONTH[int(str(date)[4:6])]+" " +str(date)[6:]+ \
" "+time[:2]+":"+time[2:4], \
key.GetName(), \
"\""+key.GetTitle()+"\""]
write(LONG_TEMPLATE.format(*rec,**dic),indent,end="\n")
if treeListing and isTreeKey(key):
tree = key.ReadObj()
_recursifTreePrinter(tree,indent+2)
##
# The code of the getTerminalSize function can be found here :
# https://gist.github.com/jtriley/1108174
# Thanks jtriley !!
import os
import shlex
import struct
import platform
import subprocess
def getTerminalSize():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
#print "default"
#_get_terminal_size_windows() or _get_terminal_size_tput don't work
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_output returns the command's output; check_call would only return the exit status
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
# End of getTerminalSize code
##
def _rootLsPrintSimpleLs(keyList,indent,oneColumn):
"""Print list of strings in columns
- blue for directories
- green for trees"""
    # This code is adapted from the pprint_list function here :
# http://stackoverflow.com/questions/25026556/output-list-like-ls
# Thanks hawkjo !!
if len(keyList) == 0: return
(term_width, term_height) = getTerminalSize()
term_width = term_width - indent
min_chars_between = 2
min_element_width = min( len(key.GetName()) for key in keyList ) \
+ min_chars_between
max_element_width = max( len(key.GetName()) for key in keyList ) \
+ min_chars_between
if max_element_width >= term_width: ncol,col_widths = 1,[1]
else:
# Start with max possible number of columns and reduce until it fits
ncol = 1 if oneColumn else min( len(keyList), term_width / min_element_width )
while True:
col_widths = \
[ max( len(key.GetName()) + min_chars_between \
for j, key in enumerate(keyList) if j % ncol == i ) \
for i in range(ncol) ]
if sum( col_widths ) <= term_width: break
else: ncol -= 1
for i, key in enumerate(keyList):
if i%ncol == 0: write("",indent) # indentation
# Don't add spaces after the last element of the line or of the list
if (i+1)%ncol != 0 and i != len(keyList)-1:
if not IS_TERMINAL: write( \
key.GetName().ljust(col_widths[i%ncol]))
elif isDirectoryKey(keyList[i]): write( \
isSpecial(ANSI_BLUE,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_BLUE_LENGTH))
elif isTreeKey(keyList[i]): write( \
isSpecial(ANSI_GREEN,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_GREEN_LENGTH))
else: write(key.GetName().ljust(col_widths[i%ncol]))
else: # No spaces after the last element of the line or of the list
if not IS_TERMINAL: write(key.GetName())
elif isDirectoryKey(keyList[i]):
write(isSpecial(ANSI_BLUE, key.GetName()))
elif isTreeKey(keyList[i]):
write(isSpecial(ANSI_GREEN, key.GetName()))
else: write(key.GetName())
write('\n')
def _rootLsPrint(keyList, indent, oneColumn, \
longListing, treeListing):
"""Print informations given by keyList with a rootLs
style choosen with the options"""
if longListing or treeListing: \
_rootLsPrintLongLs(keyList, indent, treeListing)
else:
_rootLsPrintSimpleLs(keyList, indent, oneColumn)
def _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing):
    '''rootls main routine for one file: loops over the paths in the file,
    sorts out directories and keys, and forwards the printing to
    _rootLsPrintLongLs or _rootLsPrintSimpleLs (the choice is made in _rootLsPrint)
args:
oneColumn (bool):
longListing (bool):
treeListing (bool):
indent (int): how many columns the printout should be indented globally
manySources (bool): if more than one file is printed
fileName (str): the root file name
pathSplitList: a list of subdirectories,
each being represented as a list itself with a string per level
e.g.
[['Method_BDT','BDT']]
Returns:
retcode (int): 0 in case of success, 1 if the file could not be opened
'''
retcode = 0
rootFile = openROOTFile(fileName)
if not rootFile: return 1
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
# keyList lists the TKey objects from pathSplitList
    # dirList holds the pathSplits that point to directories (including the file itself)
if manySources: write("{0} :".format(fileName)+"\n")
_rootLsPrint(keyList, indent, oneColumn, longListing, treeListing)
# Loop on the directories
manyPathSplits = len(pathSplitList) > 1
indentDir = 2 if manyPathSplits else 0
for pathSplit in dirList:
keyList = getKeyList(rootFile,pathSplit)
keyListSort(keyList)
if manyPathSplits: write("{0} :".format("/".join(pathSplit)),indent,end="\n")
_rootLsPrint(keyList, indent+indentDir, oneColumn, longListing, treeListing)
rootFile.Close()
return retcode
def rootLs(sourceList, oneColumn=False, longListing=False, treeListing=False):
'''rootls main routine for an arbitrary number of files
args:
oneColumn (bool):
longListing (bool):
treeListing (bool):
sourceList: a list of tuples with one list element per file
the first tuple entry being the root file,
the second a list of subdirectories,
each being represented as a list itself with a string per level
e.g.
rootls tutorial/tmva/TMVA.root:Method_BDT/BDT turns into
[('tutorials/tmva/TMVA.root', [['Method_BDT','BDT']])]
returns:
retcode (int): 0 in case of success
'''
# Check arguments
if sourceList == []: return 1
# sort sourceList according to filenames
tupleListSort(sourceList)
# Loop on the ROOT files
retcode = 0
manySources = len(sourceList) > 1
indent = 2 if manySources else 0
for fileName, pathSplitList in sourceList:
retcode += _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing)
return retcode
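# Illustrative sketch (hypothetical file name): listing a subdirectory with the
# long and tree listings enabled could be written as
#   sourceList = patternToFileNameAndPathSplitList("results.root:ana/*")
#   rootLs(sourceList, longListing=True, treeListing=True)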
# End of ROOTLS
##########
##########
# ROOTMKDIR
MKDIR_ERROR = "cannot create directory '{0}'"
def _createDirectories(rootFile,pathSplit,parents):
"""Same behaviour as createDirectory but allows the possibility
to build an whole path recursively with the option \"parents\" """
retcode = 0
lenPathSplit = len(pathSplit)
if lenPathSplit == 0:
pass
elif parents:
for i in range(lenPathSplit):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
retcode += createDirectory(rootFile,currentPathSplit)
else:
doMkdir = True
for i in range(lenPathSplit-1):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
doMkdir = False
break
if doMkdir:
retcode += createDirectory(rootFile,pathSplit)
else:
logging.warning(MKDIR_ERROR.format("/".join(pathSplit)))
retcode += 1
return retcode
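# Illustrative sketch: with parents=True the whole chain of missing directories is
# created (like 'mkdir -p'); for instance, on a freshly recreated file,
#   _createDirectories(rootFile, ["a", "b", "c"], True)
# builds "a", then "a/b", then "a/b/c", whereas with parents=False the same call
# is refused unless "a/b" already exists.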
def _rootMkdirProcessFile(fileName, pathSplitList, parents):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode+=_createDirectories(rootFile,pathSplit,parents)
rootFile.Close()
return retcode
def rootMkdir(sourceList, parents=False):
# Check arguments
if sourceList == []: return 1
# Loop on the ROOT files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _rootMkdirProcessFile(fileName, pathSplitList, parents)
return retcode
# End of ROOTMKDIR
##########
##########
# ROOTMV
MOVE_ERROR = "error during the copy of {0}; it is not removed from {1}"
def _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
oneFile, interactive):
retcode = 0
recursive = True
replace = True
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName,"update") \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcodeTemp = copyRootObject(rootFile,pathSplit, \
destFile,destPathSplit,oneSource,recursive,replace)
if not retcodeTemp:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
else:
logging.warning(MOVE_ERROR.format("/".join(pathSplit),rootFile.GetName()))
retcode += retcodeTemp
if fileName != destFileName: rootFile.Close()
return retcode
def rootMv(sourceList, destFileName, destPathSplit, compress=None, \
interactive=False, recreate=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in sourceList:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName,compress,recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, interactive)
destFile.Close()
return retcode
# End of ROOTMV
##########
##########
# ROOTPRINT
def _keyListExtended(rootFile,pathSplitList):
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
for pathSplit in dirList: keyList.extend(getKeyList(rootFile,pathSplit))
keyList = [key for key in keyList if not isDirectoryKey(key)]
keyListSort(keyList)
return keyList
def rootPrint(sourceList, directoryOption = None, divideOption = None, drawOption = "", formatOption = None, \
outputOption = None, sizeOption = None, styleOption = None, verboseOption = False):
# Check arguments
if sourceList == []: return 1
tupleListSort(sourceList)
# Don't open windows
ROOT.gROOT.SetBatch()
# (Style option)
if styleOption: ROOT.gInterpreter.ProcessLine(".x {0}".format(styleOption))
# (Verbose option)
if not verboseOption: ROOT.gErrorIgnoreLevel = 9999
# Initialize the canvas (Size option)
if sizeOption:
try:
width,height = sizeOption.split("x")
width = int(width)
height = int(height)
except ValueError:
logging.warning("canvas size is on a wrong format")
return 1
canvas = ROOT.TCanvas("canvas","canvas",width,height)
else:
canvas = ROOT.TCanvas("canvas")
# Divide the canvas (Divide option)
if divideOption:
try:
x,y = divideOption.split(",")
x = int(x)
y = int(y)
except ValueError:
logging.warning("divide is on a wrong format")
return 1
canvas.Divide(x,y)
caseNumber = x*y
# Take the format of the output file (formatOutput option)
if not formatOption and outputOption:
fileName = outputOption
fileFormat = fileName.split(".")[-1]
formatOption = fileFormat
# Use pdf as default format
if not formatOption: formatOption = "pdf"
# Create the output directory (directory option)
if directoryOption:
if not os.path.isdir(os.path.join(os.getcwd(),directoryOption)):
os.mkdir(directoryOption)
# Make the output name, begin to print (output option)
if outputOption:
if formatOption in ['ps','pdf']:
outputFileName = outputOption
if directoryOption: outputFileName = \
directoryOption + "/" + outputFileName
canvas.Print(outputFileName+"[",formatOption)
else:
logging.warning("can't merge pictures, only postscript or pdf files")
return 1
# Loop on the root files
retcode = 0
objDrawnNumber = 0
openRootFiles = []
for fileName, pathSplitList in sourceList:
rootFile = openROOTFile(fileName)
if not rootFile:
retcode += 1
continue
openRootFiles.append(rootFile)
        # Fill the key list (almost the same as in rootls)
keyList = _keyListExtended(rootFile,pathSplitList)
for key in keyList:
if isTreeKey(key):
pass
else:
if divideOption:
canvas.cd(objDrawnNumber%caseNumber + 1)
objDrawnNumber += 1
obj = key.ReadObj()
obj.Draw(drawOption)
if divideOption:
if objDrawnNumber%caseNumber == 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber)+"."+formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
canvas.Clear()
canvas.Divide(x,y)
else:
if not outputOption:
outputFileName = key.GetName() + "." +formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
if outputOption or formatOption == 'pdf':
objTitle = "Title:"+key.GetClassName()+" : "+key.GetTitle()
canvas.Print(outputFileName,objTitle)
else:
canvas.Print(outputFileName,formatOption)
# Last page (divideOption)
if divideOption:
if objDrawnNumber%caseNumber != 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber + 1)+"."+formatOption
if directoryOption:
outputFileName = os.path.join(directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
# End to print (output option)
if outputOption:
if not divideOption:
canvas.Print(outputFileName+"]",objTitle)
else:
canvas.Print(outputFileName+"]")
# Close ROOT files
    for rootFile in openRootFiles:
        rootFile.Close()
return retcode
# End of ROOTPRINT
##########
##########
# ROOTRM
def _removeObjects(fileName, pathSplitList, interactive=False, recursive=False):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
rootFile.Close()
return retcode
def rootRm(sourceList, interactive=False, recursive=False):
# Check arguments
if sourceList == []: return 1
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _removeObjects(fileName, pathSplitList, interactive, recursive)
return retcode
# End of ROOTRM
##########
| lgpl-2.1 |
Vishruit/DDP_models | tf1/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
UTF8Prober(),
SJISProber(),
EUCJPProber(),
GB2312Prober(),
EUCKRProber(),
CP949Prober(),
Big5Prober(),
EUCTWProber()
]
self.reset()
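# Illustrative sketch (relying on the prober API inherited from CharSetGroupProber):
# this group prober is normally driven by the library's UniversalDetector, but it
# can also be fed raw bytes directly, e.g.
#   prober = MBCSGroupProber()
#   prober.feed(b"\xe4\xbd\xa0\xe5\xa5\xbd")   # "ni hao" encoded in UTF-8
#   prober.get_charset_name(), prober.get_confidence()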
| gpl-3.0 |
ClimbsRocks/scikit-learn | sklearn/preprocessing/tests/test_data.py | 6 | 62084 |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
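    # Editor's sketch (assumes numpy's default linear interpolation for
    # percentiles, as in numpy >= 1.9): the third column is [0.5, -0.1, 1.1],
    # so np.percentile gives (q25, q75) = (0.2, 0.8), the IQR is 0.6, the
    # median is 0.5, and ([0.5, -0.1, 1.1] - 0.5) / 0.6 -> [0., -1., 1.],
    # which is exactly the third column of X_expected below.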
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
@ignore_warnings
def test_deprecation_minmax_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = MinMaxScaler().fit(X)
depr_message = ("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
data_range = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_range")
assert_array_equal(data_range, scaler.data_range)
depr_message = ("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
data_min = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_min")
assert_array_equal(data_min, scaler.data_min)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    # Test that partial_fit run over many batches of varying sizes
    # gives the same results as a single call to fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
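    # Editor's note (standard kernel-centering identity, for context only):
    # with 1_n the n x n matrix whose entries are all 1/n,
    #     K_centered = K - 1_n K - K 1_n + 1_n K 1_n
    # which is what KernelCenterer is expected to apply to the fit-time Gram
    # matrix computed below.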
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_deprecation_standard_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = StandardScaler().fit(X)
depr_message = ("Function std_ is deprecated; Attribute ``std_`` will be "
"removed in 0.19. Use ``scale_`` instead")
std_ = assert_warns_message(DeprecationWarning, depr_message, getattr,
scaler, "std_")
assert_array_equal(std_, scaler.scale_)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
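    # Editor's sketch of how this layout arises (assuming the auto-discovered
    # n_values_ are the per-column maxima + 1, i.e. [4, 3, 2]):
    #   feature_indices_ -> cumulative column offsets [0, 4, 7, 9]
    #   active_features_ -> candidate columns actually seen in X:
    #                       {0, 3} from feature 0, {5, 6} from feature 1,
    #                       and {8} from feature 2.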
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that transforming with a larger (but in-range) feature value works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
    # Raise error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
| bsd-3-clause |
bmazin/ARCONS-pipeline | examples/Pal2012-crab/assignPNtoGRP.py | 1 | 2372 | import numpy as np
import matplotlib.pyplot as plt
import sys
import os
sys.stdout.flush()
np.set_printoptions(precision=11)
path = '/Scratch/dataProcessing/crabData/'
path2 = '/Scratch/dataProcessing/crabData2/'
pulseLabel = 1
labels='BTOA Noise_Offset Noise_RMS Max Mean Index TestMax TestMean TestIndex'
labels = np.array(labels.split())
radioGiantData = np.loadtxt(path2+'radio/Giant_List_P{}_BTOA_Flux_Sorted'.format(pulseLabel),usecols=range(1,len(labels)+1))
giantDict = dict()
print np.shape(radioGiantData)
for iLabel,label in enumerate(labels):
labelIdx = np.argmax(label==labels)
giantDict[label] = radioGiantData[:,labelIdx]
radioGiantBTOA = giantDict['BTOA']#radioGiantData[:,0]
opticalData = np.load(path+'btoa20121211wave.npz')#has timeAdjustFile incorporated
opticalBTOA = opticalData['btoaList']
sortIdx = np.argsort(opticalBTOA)
opticalBTOA = opticalBTOA[sortIdx]
opticalCumulativePhase = opticalData['btoaPhase'][sortIdx]
opticalPulseNumbers = np.array(opticalCumulativePhase,dtype=np.int)
pulsarPeriod = 33e-3/(3600*24) #days, approximately
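# Editor's note: 33e-3 s / 86400 s-per-day ~= 3.8e-7 days, so the
# periodThreshold of 0.5 periods used below corresponds to roughly 16.5 ms
# of allowed offset between a radio GRP and the nearest optical photon.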
#pulseStartIdx = np.searchsorted(opticalPulseNumbers,pulseIdx+idxOffset)
print 'iRadioBTOA radioBTOA opticalPulseNumber radioMax radioMean'
radioPulseNumbers = np.zeros(len(radioGiantBTOA),dtype=np.uint64)
periodThreshold=.5
outFile = open(os.path.join(path2,'giantPulseList_P{}_3sigma_indices.txt'.format(pulseLabel)),mode='w')
outFile.write('iBTOA\tpulseNumber')
for label in labels:#everything after BTOA
outFile.write('\t'+label)
outFile.write('\n')
for iBTOA,radioBTOA in enumerate(radioGiantBTOA):
idx = np.searchsorted(opticalBTOA,radioBTOA)
pulseNumber = opticalPulseNumbers[idx]
#check that the nearest optical photon and the radioBTOA
# aren't more than half a pulse period apart
# indicating that we have less than half (probably none) of the optical GRP
periodsOff = (opticalBTOA[idx]-radioBTOA)/pulsarPeriod
if (np.abs(periodsOff) < periodThreshold):
print 'matched pulse',pulseNumber,'by',periodsOff
radioPulseNumbers[iBTOA]=pulseNumber
        outFile.write(str(iBTOA)+'\t'+str(pulseNumber))
remainingRow = ''.join(['\t'+str(cell) for cell in radioGiantData[iBTOA,:]])
outFile.write(remainingRow)
outFile.write('\n')
else:
print 'missed pulse',radioBTOA
outFile.close()
print 'done'
| gpl-2.0 |
supertuxkart/stk-stats | userreport/x86.py | 2 | 3849 | # CPUID feature bits, from LSB to MSB:
# (Names and descriptions gathered from various Intel and AMD sources)
def get_data():
cap_raw = (
# EAX=01H ECX:
"""SSE3
PCLMULQDQ
DTES64: 64-bit debug store
MONITOR: MONITOR/MWAIT
DS-CPL: CPL qualified debug store
VMX: virtual machine extensions
SMX: safer mode extensions
EST: enhanced SpeedStep
TM2: thermal monitor 2
SSSE3
CNXT-ID: L1 context ID
?(ecx11)
FMA: fused multiply add
CMPXCHG16B
xTPR: xTPR update control
PDCM: perfmon and debug capability
?(ecx16)
PCID: process context identifiers
DCA: direct cache access
SSE4_1
SSE4_2
x2APIC: extended xAPIC support
MOVBE
POPCNT
TSC-DEADLINE
AES
XSAVE: XSAVE instructions supported
OSXSAVE: XSAVE instructions enabled
AVX
F16C: half-precision convert
?(ecx30)
RAZ: used by hypervisor to indicate guest status
""" +
# EAX=01H EDX:
"""FPU
VME: virtual 8086 mode enhancements
DE: debugging extension
PSE: page size extension
TSC: time stamp counter
MSR: model specific registers
PAE: physical address extension
MCE: machine-check exception
CMPXCHG8
APIC
?(edx10)
SEP: fast system call
MTRR: memory type range registers
PGE: page global enable
MCA: machine-check architecture
CMOV
PAT: page attribute table
PSE-36: 36-bit page size extension
PSN: processor serial number
CLFSH: CLFLUSH
?(edx20)
DS: debug store
ACPI
MMX
FXSR: FXSAVE and FXSTOR
SSE
SSE2
SS: self-snoop
HTT: hyper-threading
TM: thermal monitor
?(edx30)
PBE: pending break enable
""" +
# EAX=80000001H ECX:
"""LAHF: LAHF/SAHF instructions
CMP: core multi-processing legacy mode
SVM: secure virtual machine
ExtApic
AltMovCr8
ABM: LZCNT instruction
SSE4A
MisAlignSse
3DNowPrefetch
OSVW: OS visible workaround
IBS: instruction based sampling
XOP: extended operation support
SKINIT
WDT: watchdog timer support
?(ext:ecx14)
LWP: lightweight profiling support
FMA4: 4-operand FMA
?(ext:ecx17)
?(ext:ecx18)
NodeId
?(ext:ecx20)
TBM: trailing bit manipulation extensions
TopologyExtensions
?(ext:ecx23)
?(ext:ecx24)
?(ext:ecx25)
?(ext:ecx26)
?(ext:ecx27)
?(ext:ecx28)
?(ext:ecx29)
?(ext:ecx30)
?(ext:ecx31)
""" +
# EAX=80000001H ECX:
"""FPU[2]
VME[2]
DE[2]
PSE[2]
TSC[2]
MSR[2]
PAE[2]
MCE[2]
CMPXCHG8[2]
APIC[2]
?(ext:edx10)
SYSCALL: SYSCALL/SYSRET instructions
MTRR[2]
PGE[2]
MCA[2]
CMOV[2]
PAT[2]
PSE36[2]
?(ext:edx18)
MP: MP-capable
NX: no execute bit
?(ext:edx21)
MmxExt
MMX[2]
FXSR[2]
FFXSR
1GB: 1GB pages
RDTSCP
?(ext:edx28)
x86-64
3DNowExt
3DNow
"""
)
cap_bits = []
cap_descs = {}
idx = 0
for c in cap_raw.strip().split('\n'):
s = c.strip().split(':')
if len(s) == 1:
cap_bits.append((s[0], None, idx))
else:
cap_bits.append((s[0], s[1], idx))
cap_descs[s[0]] = s[1]
idx += 1
return cap_bits, cap_descs, idx
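# Editor's sketch (not part of the original module): given a hypothetical raw
# CPUID capability bitfield `caps`, the structures returned above could be
# consumed roughly like this:
#     cap_bits, cap_descs, nbits = get_data()
#     present = [name for (name, desc, idx) in cap_bits if (caps >> idx) & 1]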
| mit |
davidharvey1986/pyRRG | unittests/bugFixPyRRG/lib/python3.7/site-packages/setuptools/depends.py | 23 | 5517 | import sys
import marshal
import contextlib
from distutils.version import StrictVersion
from .py33compat import Bytecode
from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import py27compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(
self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
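# Editor's sketch (hypothetical values, hedged): a requirement would typically
# be declared and checked roughly as
#     req = Require('Distutils', '1.0.3', 'distutils')
#     req.is_present() and req.is_current()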
def maybe_close(f):
@contextlib.contextmanager
def empty():
yield
return
if not f:
return empty()
return contextlib.closing(f)
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = info = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = py27compat.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
imported = py27compat.get_module(module, paths, info)
return getattr(imported, symbol, None)
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
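# Editor's sketch (assumes CPython bytecode semantics as handled above):
#     code = compile("__version__ = '1.0'", "<sketch>", "exec")
#     extract_constant(code, "__version__", default=-1)   # -> '1.0'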
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| mit |
DominoTree/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/recwarn.py | 32 | 8634 | """ recording warnings during test function execution. """
from __future__ import absolute_import, division, print_function
import inspect
import _pytest._code
import py
import sys
import warnings
import re
from _pytest.fixtures import yield_fixture
from _pytest.outcomes import fail
@yield_fixture
def recwarn():
"""Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
wrec = WarningsRecorder()
with wrec:
warnings.simplefilter("default")
yield wrec
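# Editor's sketch (not part of the original module): a test would consume the
# fixture above roughly like this::
#
#     def test_emits_user_warning(recwarn):
#         warnings.warn("deprecated", UserWarning)
#         assert len(recwarn) == 1
#         assert recwarn.pop(UserWarning) is not None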
def deprecated_call(func=None, *args, **kwargs):
"""context manager that can be used to ensure a block of code triggers a
``DeprecationWarning`` or ``PendingDeprecationWarning``::
>>> import warnings
>>> def api_call_v2():
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
>>> with deprecated_call():
... assert api_call_v2() == 200
    ``deprecated_call`` can also be used by passing a function and ``*args`` and ``**kwargs``,
    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warning
    types above.
"""
if not func:
return _DeprecatedCallContext()
else:
__tracebackhide__ = True
with _DeprecatedCallContext():
return func(*args, **kwargs)
class _DeprecatedCallContext(object):
"""Implements the logic to capture deprecation warnings as a context manager."""
def __enter__(self):
self._captured_categories = []
self._old_warn = warnings.warn
self._old_warn_explicit = warnings.warn_explicit
warnings.warn_explicit = self._warn_explicit
warnings.warn = self._warn
def _warn_explicit(self, message, category, *args, **kwargs):
self._captured_categories.append(category)
def _warn(self, message, category=None, *args, **kwargs):
if isinstance(message, Warning):
self._captured_categories.append(message.__class__)
else:
self._captured_categories.append(category)
def __exit__(self, exc_type, exc_val, exc_tb):
warnings.warn_explicit = self._old_warn_explicit
warnings.warn = self._old_warn
if exc_type is None:
deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
if not any(
issubclass(c, deprecation_categories) for c in self._captured_categories
):
__tracebackhide__ = True
msg = "Did not produce DeprecationWarning or PendingDeprecationWarning"
raise AssertionError(msg)
def warns(expected_warning, *args, **kwargs):
r"""Assert that code raises a particular class of warning.
Specifically, the parameter ``expected_warning`` can be a warning class or
    sequence of warning classes, and the code inside the ``with`` block must issue a warning of that class or
classes.
This helper produces a list of :class:`warnings.WarningMessage` objects,
one for each warning raised.
This function can be used as a context manager, or any of the other ways
``pytest.raises`` can be used::
>>> with warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
In the context manager form you may use the keyword argument ``match`` to assert
that the exception matches a text or regex::
>>> with warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...
"""
match_expr = None
if not args:
if "match" in kwargs:
match_expr = kwargs.pop("match")
return WarningsChecker(expected_warning, match_expr=match_expr)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
with WarningsChecker(expected_warning, match_expr=match_expr):
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
else:
func = args[0]
with WarningsChecker(expected_warning, match_expr=match_expr):
return func(*args[1:], **kwargs)
class WarningsRecorder(warnings.catch_warnings):
"""A context manager to record raised warnings.
Adapted from `warnings.catch_warnings`.
"""
def __init__(self):
super(WarningsRecorder, self).__init__(record=True)
self._entered = False
self._list = []
@property
def list(self):
"""The list of recorded warnings."""
return self._list
def __getitem__(self, i):
"""Get a recorded warning by index."""
return self._list[i]
def __iter__(self):
"""Iterate through the recorded warnings."""
return iter(self._list)
def __len__(self):
"""The number of recorded warnings."""
return len(self._list)
def pop(self, cls=Warning):
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
return self._list.pop(i)
__tracebackhide__ = True
raise AssertionError("%r not found in warning list" % cls)
def clear(self):
"""Clear the list of recorded warnings."""
self._list[:] = []
def __enter__(self):
if self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot enter %r twice" % self)
self._list = super(WarningsRecorder, self).__enter__()
warnings.simplefilter("always")
return self
def __exit__(self, *exc_info):
if not self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot exit %r without entering first" % self)
super(WarningsRecorder, self).__exit__(*exc_info)
class WarningsChecker(WarningsRecorder):
def __init__(self, expected_warning=None, match_expr=None):
super(WarningsChecker, self).__init__()
msg = (
"exceptions must be old-style classes or " "derived from Warning, not %s"
)
if isinstance(expected_warning, tuple):
for exc in expected_warning:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif inspect.isclass(expected_warning):
expected_warning = (expected_warning,)
elif expected_warning is not None:
raise TypeError(msg % type(expected_warning))
self.expected_warning = expected_warning
self.match_expr = match_expr
def __exit__(self, *exc_info):
super(WarningsChecker, self).__exit__(*exc_info)
# only check if we're not currently handling an exception
if all(a is None for a in exc_info):
if self.expected_warning is not None:
if not any(issubclass(r.category, self.expected_warning) for r in self):
__tracebackhide__ = True
fail(
"DID NOT WARN. No warnings of type {} was emitted. "
"The list of emitted warnings is: {}.".format(
self.expected_warning, [each.message for each in self]
)
)
elif self.match_expr is not None:
for r in self:
if issubclass(r.category, self.expected_warning):
if re.compile(self.match_expr).search(str(r.message)):
break
else:
fail(
"DID NOT WARN. No warnings of type {} matching"
" ('{}') was emitted. The list of emitted warnings"
" is: {}.".format(
self.expected_warning,
self.match_expr,
[each.message for each in self],
)
)
| mpl-2.0 |
crazy-canux/linux | Documentation/target/tcm_mod_builder.py | 337 | 24391 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
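# Editor's note: judging from the optparse import below, the script is driven
# from the command line; the historical kernel docs invoke it roughly as
#   ./tcm_mod_builder.py -p FC|SAS|iSCSI -m tcm_new_fabric_mod
# (flag names are an assumption, not verified against this copy of the script).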
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
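# Collect the function-pointer members of struct target_core_fabric_ops from
# include/target/target_core_fabric.h into the global fabric_ops list.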
def tcm_mod_scan_fabric_ops(tcm_dir):

    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())

    p.close()
    return
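# Emit <fabric_mod_name>_fabric.c and _fabric.h with stub implementations and
# prototypes for the callbacks collected by tcm_mod_scan_fabric_ops().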
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_common.h>\n"
buf += "#include <scsi/scsi_proto.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
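# Write the module Makefile listing the generated objects.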
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
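# Write the Kconfig entry for the generated fabric module.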
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
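# Append the new module directory to drivers/target/Makefile.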
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"

    f = open(kbuild, 'a')
    f.write(buf)
    f.close()
    return
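# Source the new module's Kconfig from drivers/target/Kconfig.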
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"

    f = open(kconfig, 'a')
    f.write(buf)
    f.close()
    return
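# Drive the whole generation flow: create the module directory, emit the headers
# and sources, then optionally hook the module into the kernel build files.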
def main(modname, proto_ident):
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

    return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
mlaitinen/odoo | addons/account/wizard/account_invoice_refund.py | 214 | 13008 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
class account_invoice_refund(osv.osv_memory):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
_columns = {
'date': fields.date('Date', help='This date will be used as the invoice date for credit note and period will be chosen accordingly!'),
'period': fields.many2one('account.period', 'Force period'),
'journal_id': fields.many2one('account.journal', 'Refund Journal', help='You can select here the journal to use for the credit note that will be created. If you leave that field empty, it will use the same journal as the current invoice.'),
'description': fields.char('Reason', required=True),
'filter_refund': fields.selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'),('modify', 'Modify: create refund, reconcile and create a new draft invoice')], "Refund Method", required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled'),
}
def _get_journal(self, cr, uid, context=None):
obj_journal = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
if context is None:
context = {}
inv_type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
type = (inv_type == 'out_invoice') and 'sale_refund' or \
(inv_type == 'out_refund') and 'sale' or \
(inv_type == 'in_invoice') and 'purchase_refund' or \
(inv_type == 'in_refund') and 'purchase'
journal = obj_journal.search(cr, uid, [('type', '=', type), ('company_id','=',company_id)], limit=1, context=context)
return journal and journal[0] or False
def _get_reason(self, cr, uid, context=None):
active_id = context and context.get('active_id', False)
if active_id:
inv = self.pool.get('account.invoice').browse(cr, uid, active_id, context=context)
return inv.name
else:
return ''
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
'journal_id': _get_journal,
'filter_refund': 'refund',
'description': _get_reason,
}
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
# remove the entry with key 'form_view_ref', otherwise fields_view_get crashes
context = dict(context or {})
context.pop('form_view_ref', None)
res = super(account_invoice_refund,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
journal_type = (type == 'out_invoice') and 'sale_refund' or \
(type == 'out_refund') and 'sale' or \
(type == 'in_invoice') and 'purchase_refund' or \
(type == 'in_refund') and 'purchase'
for field in res['fields']:
if field == 'journal_id':
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', journal_type), ('company_id','child_of',[company_id])], context=context)
res['fields'][field]['selection'] = journal_select
return res
def compute_refund(self, cr, uid, ids, mode='refund', context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the account invoice refund’s ID or list of IDs
"""
inv_obj = self.pool.get('account.invoice')
reconcile_obj = self.pool.get('account.move.reconcile')
account_m_line_obj = self.pool.get('account.move.line')
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
inv_tax_obj = self.pool.get('account.invoice.tax')
inv_line_obj = self.pool.get('account.invoice.line')
res_users_obj = self.pool.get('res.users')
if context is None:
context = {}
for form in self.browse(cr, uid, ids, context=context):
created_inv = []
date = False
period = False
description = False
company = res_users_obj.browse(cr, uid, uid, context=context).company_id
journal_id = form.journal_id.id
for inv in inv_obj.browse(cr, uid, context.get('active_ids'), context=context):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise osv.except_osv(_('Error!'), _('Cannot %s draft/proforma/cancel invoice.') % (mode))
if inv.reconciled and mode in ('cancel', 'modify'):
raise osv.except_osv(_('Error!'), _('Cannot %s invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.') % (mode))
if form.period.id:
period = form.period.id
else:
period = inv.period_id and inv.period_id.id or False
if not journal_id:
journal_id = inv.journal_id.id
if form.date:
date = form.date
if not form.period.id:
cr.execute("select name from ir_model_fields \
where model = 'account.period' \
and name = 'company_id'")
result_query = cr.fetchone()
if result_query:
cr.execute("""select p.id from account_fiscalyear y, account_period p where y.id=p.fiscalyear_id \
and date(%s) between p.date_start AND p.date_stop and y.company_id = %s limit 1""", (date, company.id,))
else:
cr.execute("""SELECT id
from account_period where date(%s)
between date_start AND date_stop \
limit 1 """, (date,))
res = cr.fetchone()
if res:
period = res[0]
else:
date = inv.date_invoice
if form.description:
description = form.description
else:
description = inv.name
if not period:
raise osv.except_osv(_('Insufficient Data!'), \
_('No period found on the invoice.'))
refund_id = inv_obj.refund(cr, uid, [inv.id], date, period, description, journal_id, context=context)
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
inv_obj.write(cr, uid, [refund.id], {'date_due': date,
'check_total': inv.check_total})
inv_obj.button_compute(cr, uid, refund_id)
created_inv.append(refund_id[0])
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_id
to_reconcile_ids = {}
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconcile_id:
line.reconcile_id.unlink()
refund.signal_workflow('invoice_open')
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
for tmpline in refund.move_id.line_id:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_ids[tmpline.account_id.id].append(tmpline.id)
for account in to_reconcile_ids:
account_m_line_obj.reconcile(cr, uid, to_reconcile_ids[account],
writeoff_period_id=period,
writeoff_journal_id = inv.journal_id.id,
writeoff_acc_id=inv.account_id.id
)
if mode == 'modify':
invoice = inv_obj.read(cr, uid, [inv.id],
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term', 'account_id',
'currency_id', 'invoice_line', 'tax_line',
'journal_id', 'period_id'], context=context)
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(cr, uid, invoice['invoice_line'], context=context)
invoice_lines = inv_obj._refund_cleanup_lines(cr, uid, invoice_lines, context=context)
tax_lines = inv_tax_obj.browse(cr, uid, invoice['tax_line'], context=context)
tax_lines = inv_obj._refund_cleanup_lines(cr, uid, tax_lines, context=context)
invoice.update({
'type': inv.type,
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'period_id': period,
'name': description
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
inv_id = inv_obj.create(cr, uid, invoice, {})
if inv.payment_term.id:
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv.payment_term.id, date)
if 'value' in data and data['value']:
inv_obj.write(cr, uid, [inv_id], data['value'])
created_inv.append(inv_id)
xml_id = (inv.type == 'out_refund') and 'action_invoice_tree1' or \
(inv.type == 'in_refund') and 'action_invoice_tree2' or \
(inv.type == 'out_invoice') and 'action_invoice_tree3' or \
(inv.type == 'in_invoice') and 'action_invoice_tree4'
result = mod_obj.get_object_reference(cr, uid, 'account', xml_id)
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
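    # Wizard button entry point: read the selected refund mode and delegate to compute_refund().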
def invoice_refund(self, cr, uid, ids, context=None):
data_refund = self.read(cr, uid, ids, ['filter_refund'],context=context)[0]['filter_refund']
return self.compute_refund(cr, uid, ids, data_refund, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
styx0x6/gremlins | lib/thirdparty/colorama/ansitowin32.py | 450 | 9668 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
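    # extract_params() below splits the CSI parameter string into integers, applying the
    # ANSI defaults for cursor position (H/f), erase (J/K), SGR (m) and cursor moves (A-D).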
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
| agpl-3.0 |
carlcarl/grabflickr | grabflickr/grabflickr.py | 1 | 10748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import gevent
import grequests
except ImportError:
pass
else:
from gevent import monkey
import sys
import os
import hashlib
import json
import logging
import argparse
import multiprocessing
from ConfigParser import SafeConfigParser
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
API_URL = 'https://flickr.com/services/rest/'
directory = ''
image_size_mode = 1
counter = 0
CONFIG_PATH = os.path.expanduser('~/.grabflickr.conf')
api_key = ''
api_secret = ''
SINGLE_PROCESS = 0
MULTITHREAD = 1
GEVENT = 2
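# Download-mode constants selected via the -O command line option.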
def read_config():
"""Read the config from CONFIG_PATH(Default: ~/.grabflickr.conf)
    This will prompt for the API key and secret if the config does not exist.
    Afterwards, it sets the global variables `api_key` and `api_secret`.
"""
parser = SafeConfigParser()
parser.read(CONFIG_PATH)
if not parser.has_section('flickr'):
        logger.info('It seems you have not set the API key; please enter the following information: ')
enter_api_key(parser)
global api_key, api_secret
api_key = parser.get('flickr', 'API_KEY')
api_secret = parser.get('flickr', 'API_SECRET')
def enter_api_key(parser=None):
"""Prompt for API key and secret
Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)
:param parser: Config parser
:type parser: SafeConfigParser
"""
if parser is None:
parser = SafeConfigParser()
parser.add_section('flickr')
global api_key, api_secret
api_key = raw_input('Enter your API key: ')
api_secret = raw_input('Enter your API secret: ')
parser.set('flickr', 'API_KEY', api_key)
parser.set('flickr', 'API_SECRET', api_secret)
with open(CONFIG_PATH, 'wb') as f:
parser.write(f)
def _get_request_args(method, **kwargs):
"""Use `method` and other settings to produce a flickr API arguments.
Here also use json as the return type.
:param method: The method provided by flickr,
ex: flickr.photosets.getPhotos
:type method: str
:param kwargs: Other settings
:type kwargs: dict
:return: An argument list used for post request
:rtype: list of sets
"""
args = [
('api_key', api_key),
('format', 'json'),
('method', method),
('nojsoncallback', '1'),
]
if kwargs:
for key, value in kwargs.iteritems():
args.append((key, value))
args.sort(key=lambda tup: tup[0])
api_sig = _get_api_sig(args)
args.append(api_sig)
return args
def _get_api_sig(args):
    """The Flickr API requires a signature hash computed from the post arguments

    :param args: Arguments of the flickr request
    :type args: list of tuples
    :return: api_sig, ex: ('api_sig', 'abcdefg')
    :rtype: tuple
    """
tmp_sig = api_secret
for i in args:
tmp_sig = tmp_sig + i[0] + i[1]
api_sig = hashlib.md5(tmp_sig.encode('utf-8')).hexdigest()
return 'api_sig', api_sig
def create_dir(path):
"""Create dir with the path
:param path: The path to be created
:type path: str
"""
if os.path.exists(path):
if not os.path.isdir(path):
logger.error('%s is not a directory', path)
sys.exit(1)
else: # ignore
pass
else:
os.makedirs(path)
logger.info('Create dir: %s', path)
def get_photos_info(photoset_id):
"""Request the photos information with the photoset id
:param photoset_id: The photoset id of flickr
:type photoset_id: str
:return: photos information
:rtype: list
"""
args = _get_request_args(
'flickr.photosets.getPhotos',
photoset_id=photoset_id
)
resp = requests.post(API_URL, data=args)
resp_json = json.loads(resp.text.encode('utf-8'))
logger.debug(resp_json)
photos = resp_json['photoset']['photo']
return photos
def get_photo_url(photo_id):
"""Request the photo download url with the photo id
:param photo_id: The photo id of flickr
:type photo_id: str
:return: Photo download url
:rtype: str
"""
args = _get_request_args(
'flickr.photos.getSizes',
photo_id=photo_id
)
resp = requests.post(API_URL, data=args)
resp_json = json.loads(resp.text.encode('utf-8'))
logger.debug(json.dumps(resp_json, indent=2))
size_list = resp_json['sizes']['size']
size_list_len = len(size_list)
global image_size_mode
image_size_mode = size_list_len if size_list_len < image_size_mode \
else image_size_mode
download_url = resp_json['sizes']['size'][-image_size_mode]['source']
return download_url
def download_photo_async(photo):
    """Download a photo to the path (global variable `directory`)

    :param photo: The photo information, including id and title
    :type photo: dict
    """
photo_id = photo['id']
photo_title = photo['title']
download_url = get_photo_url(photo_id)
photo_format = download_url.split('.')[-1]
photo_title = photo_title + '.' + photo_format
file_path = directory + os.sep + photo_title
logger.info('Download %s...', photo_title.encode('utf-8'))
req = [grequests.get(download_url)]
counter_lock = multiprocessing.Lock()
for resp in grequests.map(req):
with open(file_path, 'w') as f:
f.write(resp.content)
with counter_lock:
global counter
counter -= 1
logger.info(
'The number of pictures remaining: %s', counter
)
def download_photo(photo):
    """Download a photo to the path (global variable `directory`)

    :param photo: The photo information, including id and title
    :type photo: dict
    """
counter_lock = multiprocessing.Lock()
photo_id = photo['id']
photo_title = photo['title']
download_url = get_photo_url(photo_id)
photo_format = download_url.split('.')[-1]
photo_title = photo_title + '.' + photo_format
file_path = directory + os.sep + photo_title
logger.info('Download %s...', photo_title.encode('utf-8'))
resp = requests.get(download_url)
with open(file_path, 'w') as f:
f.write(resp.content)
with counter_lock:
global counter
counter -= 1
logger.info(
'The number of pictures remaining: %s', counter
)
def single_download_photos(photos):
"""Use single process to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
global counter
counter = len(photos)
for photo in photos:
download_photo(photo)
def event_download_photos(photos):
"""Use asynchronous I/O to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
try:
assert gevent
assert grequests
except NameError:
logger.error('You need install gevent module. Aborting...')
sys.exit(1)
global counter
counter = len(photos)
from gevent.pool import Pool
pool = Pool(multiprocessing.cpu_count())
jobs = [pool.spawn(download_photo_async, photo) for photo in photos]
pool.join()
def multithread_download_photos(photos):
"""Use multiple threads to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
from concurrent import futures
global counter
counter = len(photos)
cpu_num = multiprocessing.cpu_count()
with futures.ThreadPoolExecutor(max_workers=cpu_num) as executor:
for photo in photos:
executor.submit(download_photo, photo)
def init_logger():
"""Initialize the logger and set its format
"""
formatter = logging.Formatter('%(levelname)s: %(message)s')
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
def _parse_cli_args():
"""Parse the arguments from CLI using ArgumentParser
:return: The arguments parsed by ArgumentParser
:rtype: Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
help='The photoset id to be downloaded',
metavar='<photoset_id>'
)
parser.add_argument(
'-s',
default=1,
help=(
'Image size. 12 is smallest, 1 is original size. '
'Default: 1'
),
type=int,
choices=xrange(0, 10),
metavar='<num>'
)
parser.add_argument(
'-d',
default=None,
help=(
'The path to store the downloaded images. '
'Automatically create it if not exist. '
'Default use the photoset id as folder name under current path'
),
metavar='<path>'
)
parser.add_argument(
'-O',
default=1,
help=(
'0 for single process, '
'1 for multithread. '
'2 for event driven. '
'Default: 1'
),
type=int,
choices=xrange(0, 3),
metavar='<num>'
)
parser.add_argument(
'-u',
help=(
'Set your API key'
),
action='store_true'
)
args = parser.parse_args()
logger.debug(args)
return args
def set_image_size_mode(s):
"""Set the quality of the images to be downloaded
This set the global variable `image_size_mode`
:param s: quality level, 1 is original size, 12 is smallest
:type s: str
"""
global image_size_mode
image_size_mode = s
def _gevent_patch():
"""Patch the modules with gevent
    :return: GEVENT by default; MULTITHREAD if gevent is not available
:rtype: int
"""
try:
assert gevent
assert grequests
except NameError:
logger.warn('gevent not exist, fallback to multiprocess...')
return MULTITHREAD
else:
monkey.patch_all() # Must patch before get_photos_info
return GEVENT
def main():
"""The main procedure
"""
init_logger()
args = _parse_cli_args()
if args.u:
enter_api_key()
return
if args.O == GEVENT:
args.O = _gevent_patch()
set_image_size_mode(args.s)
photoset_id = args.g
global directory
directory = args.d if args.d else photoset_id
read_config()
photos = get_photos_info(photoset_id)
create_dir(directory)
if args.O == SINGLE_PROCESS:
single_download_photos(photos)
elif args.O == GEVENT:
event_download_photos(photos)
elif args.O == MULTITHREAD:
multithread_download_photos(photos)
else:
logger.error('Unknown Error')
if __name__ == '__main__':
main()
| mit |
selecsosi/django-cms | cms/management/commands/subcommands/delete_orphaned_plugins.py | 18 | 2261 | from django.core.management.base import NoArgsCommand
from django.utils.six.moves import input
from cms.management.commands.subcommands.list import plugin_report
class DeleteOrphanedPluginsCommand(NoArgsCommand):
    help = "Delete plugins from the CMSPlugins table that should have instances but don't, and ones for which a corresponding plugin model can no longer be found"

    def handle_noargs(self, **options):
        """
        Obtains a plugin report -
        cms.management.commands.subcommands.list.plugin_report - and uses it
        to delete orphaned plugins from the database, i.e. ones that are no
        longer installed, and ones that have no corresponding saved plugin
        instances (as will happen if a plugin is inserted into a placeholder,
        but not saved).
        """
        self.stdout.write(u"Obtaining plugin report\n")
        uninstalled_instances = []
        unsaved_instances = []

        for plugin in plugin_report():
            if not plugin["model"]:
                for instance in plugin["instances"]:
                    uninstalled_instances.append(instance)

            for instance in plugin["unsaved_instances"]:
                unsaved_instances.append(instance)

        if options.get('interactive'):
            confirm = input("""
You have requested to delete any instances of uninstalled plugins and unsaved plugin instances.
There are %d uninstalled plugins and %d unsaved_plugins.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (len(uninstalled_instances), len(unsaved_instances)))
        else:
            confirm = 'yes'

        if confirm == 'yes':
            # delete items whose plugin is uninstalled and items with unsaved instances
            self.stdout.write(u"... deleting any instances of uninstalled plugins and unsaved plugin instances\n")

            for instance in uninstalled_instances:
                instance.delete()

            for instance in unsaved_instances:
                instance.delete()

            self.stdout.write(u"Deleted instances of: \n %s uninstalled plugins \n %s plugins with unsaved instances\n" % (len(uninstalled_instances), len(unsaved_instances)))

        self.stdout.write(u"all done\n")
| bsd-3-clause |
siavooshpayandehazad/SoCDep2 | src/main/python/Clusterer/Clustering_Functions.py | 2 | 12178 | # Copyright (C) 2015 Siavoosh Payandeh Azad
import statistics
from Clusterer import Clustering_Reports
from ConfigAndPackages import Config
import random
def remove_task_from_ctg(tg, ctg, task):
"""
    Removes a task of the TG from the clustered task graph
:param tg: Task graph
:param ctg: Clustered Task Graph
:param task: Task ID
:return: None
"""
task_cluster = tg.node[task]['task'].cluster
# print("\tREMOVING TASK:", Task, " FROM CLUSTER:", task_cluster)
for edge in tg.edges():
if task in edge:
weight_to_remove = tg.edges[edge]['ComWeight']
source_cluster = tg.node[edge[0]]['task'].cluster
destination_cluster = tg.node[edge[1]]['task'].cluster
if source_cluster is not None and destination_cluster is not None:
if source_cluster != destination_cluster:
# print("\t\tREMOVING TG EDGE:", edge, "WITH WEIGHT", weight_to_remove, "FROM CLUSTER:", \
# source_cluster, "--->", destination_cluster)
if (source_cluster, destination_cluster) not in ctg.edges():
print("\t\033[31mERROR\033[0m:: EDGE ", source_cluster, "--->",
destination_cluster, "DOESNT EXIST")
Clustering_Reports.report_ctg(ctg, "CTG_Error.png")
raise ValueError("remove_task_from_ctg::EDGE DOESNT EXIST")
else:
if ctg.edges[(source_cluster, destination_cluster)]['Weight'] - weight_to_remove >= 0:
ctg.edges[(source_cluster, destination_cluster)]['Weight'] -= weight_to_remove
if ctg.edges[(source_cluster, destination_cluster)]['Weight'] == 0:
ctg.remove_edge(source_cluster, destination_cluster)
else:
print("\t\033[31mERROR\033[0m::FINAL WEIGHT IS NEGATIVE")
raise ValueError("remove_task_from_ctg::FINAL WEIGHT IS NEGATIVE")
tg.node[task]['task'].cluster = None
ctg.node[task_cluster]['TaskList'].remove(task)
if len(ctg.node[task_cluster]['TaskList']) == 0:
ctg.node[task_cluster]['Criticality'] = 'L'
ctg.node[task_cluster]['Utilization'] -= tg.node[task]['task'].wcet
return None
def add_task_to_ctg(tg, ctg, task, cluster):
"""
Takes a Task from a Task Graph and adds it to a cluster in Cluster graph
by adding related edges etc.
:param tg: Task graph
:param ctg: clustered task graph
:param task: Task to be added
    :param cluster: destination cluster for mapping the task
    :return: True if the addition succeeds, False otherwise
"""
# print("\tADDING TASK:", task, " TO CLUSTER:", cluster)
if len(ctg.node[cluster]['TaskList']) == 0:
ctg.node[cluster]['Criticality'] = tg.node[task]['task'].criticality
else:
if Config.EnablePartitioning:
if ctg.node[cluster]['Criticality'] == tg.node[task]['task'].criticality:
pass
else:
return False
ctg.node[cluster]['TaskList'].append(task)
ctg.node[cluster]['Utilization'] += tg.node[task]['task'].wcet
tg.node[task]['task'].cluster = cluster
for edge in tg.edges():
if task in edge:
weight_to_add = tg.edges[edge]['ComWeight']
source_cluster = tg.node[edge[0]]['task'].cluster
destination_cluster = tg.node[edge[1]]['task'].cluster
if source_cluster is not None and destination_cluster is not None:
if source_cluster != destination_cluster:
if (source_cluster, destination_cluster) in ctg.edges():
if Config.clustering.detailed_report:
print("\t\tEDGE", source_cluster, "--->", destination_cluster,
"ALREADY EXISTS... ADDING", weight_to_add, "TO WEIGHT...")
ctg.edges[(source_cluster, destination_cluster)]['Weight'] += weight_to_add
else:
if Config.clustering.detailed_report:
print("\t\tEDGE", source_cluster, destination_cluster,
"DOES NOT EXISTS... ADDING EDGE WITH WEIGHT:",
tg.edges[edge]['ComWeight'])
ctg.add_edge(source_cluster, destination_cluster, Weight=weight_to_add)
return True
def ctg_cost_function(ctg):
"""
    This function calculates the cost of a solution for the clustering optimization algorithm.
:param ctg: The Clustered task graph
:return: Cost
"""
com_weight_list = []
for edge in ctg.edges():
com_weight_list .append(ctg.edges[edge]['Weight'])
cluster_utilization = []
for node in ctg.nodes():
cluster_utilization.append(ctg.node[node]['Utilization'])
total_com_weight = sum(com_weight_list)
max_com_weight = max(com_weight_list)
max_util = max(cluster_utilization)
avg_util = sum(cluster_utilization)/len(cluster_utilization)
if Config.clustering.cost_function == 'SD':
cluster_util_sd = statistics.stdev(cluster_utilization)
com_weight_sd = statistics.stdev(com_weight_list)
cost = cluster_util_sd + com_weight_sd
elif Config.clustering.cost_function == 'SD+MAX':
cluster_util_sd = statistics.stdev(cluster_utilization)
com_weight_sd = statistics.stdev(com_weight_list)
cost = max_com_weight + com_weight_sd + max_util + cluster_util_sd
elif Config.clustering.cost_function == 'MAX':
cost = max_com_weight + max_util
elif Config.clustering.cost_function == 'MAXCOM':
cost = max_com_weight
elif Config.clustering.cost_function == 'AVGUTIL':
cost = avg_util
elif Config.clustering.cost_function == 'SUMCOM':
cost = total_com_weight
else:
raise ValueError("clustering cost function is not valid")
return cost
def clear_clustering(tg, ctg):
"""
    Clears a clustering that has been done, by removing the tasks from the task lists of the clusters,
    removing the parent cluster of each task, and deleting all the edges in the cluster graph.
:param tg: Task Graph
:param ctg: Clustered Task Graph
:return: None
"""
for node in tg.nodes():
tg.node[node]['task'].cluster = None
for cluster in ctg.nodes():
ctg.node[cluster]['TaskList'] = []
ctg.node[cluster]['Utilization'] = 0
ctg.node[cluster]['Criticality'] = None
for edge in ctg.edges():
ctg.remove_edge(edge[0], edge[1])
return None
def ctg_opt_move(tg, ctg, iteration, logging):
"""
Controls the Optimization moves for CTG optimization
:param tg: Task Graph
:param ctg: Clustered Task Graph
:param iteration: Iteration number that this move is happening in it.
:param logging: logging file
:return: None
"""
if Config.clustering.opt_move == 'RandomTaskMove':
random_task_move(tg, ctg, iteration, logging)
elif Config.clustering.opt_move == 'Swap':
task_swap(tg, ctg, iteration, logging)
elif Config.clustering.opt_move == 'Circulate':
task_circulation()
return None
def random_task_move(tg, ctg, iteration, logging):
"""
Randomly chooses one task from CTG and moves it from its cluster to another random cluster
:param tg: Task Graph
:param ctg: Clustered Task Graph
:param logging: logging file
:return: None
"""
random_seed = Config.clustering.random_seed
random.seed(Config.mapping_random_seed)
for i in range(0, iteration):
random_seed = random.randint(1, 100000)
random.seed(random_seed)
logging.info("Moving to next solution: random_seed: "+str(random_seed)+" iteration: "+str(iteration))
random_task = random.choice(tg.nodes())
random_task_cluster = tg.node[random_task]['task'].cluster
# remove it and all its connections from CTG
remove_task_from_ctg(tg, ctg, random_task)
# randomly choose another cluster
# move the task to the cluster and add the connections
random_cluster = random.choice(ctg.nodes())
while not add_task_to_ctg(tg, ctg, random_task, random_cluster):
# remove_task_from_ctg(tg, ctg, random_task)
add_task_to_ctg(tg, ctg, random_task, random_task_cluster)
# double_check_ctg(tg, ctg)
random_task = random.choice(tg.nodes())
random_task_cluster = tg.node[random_task]['task'].cluster
remove_task_from_ctg(tg, ctg, random_task)
random_cluster = random.choice(ctg.nodes())
logging.info("TASK"+str(random_task)+"MOVED TO CLUSTER"+str(random_cluster)+"RESULTS IN UTILIZATION:" +
str(ctg.node[random_cluster]['Utilization']+tg.node[random_task]['task'].wcet))
return None
def task_swap(tg, ctg, iteration, logging):
"""
randomly chooses 2 tasks in CTG and swaps them.
:param tg: Task Graph
:param ctg: Clustered Task Graph
:param logging: logging file
:return: None
"""
random_seed = Config.clustering.random_seed
random.seed(Config.mapping_random_seed)
for i in range(0, iteration):
random_seed = random.randint(1, 100000)
random.seed(random_seed)
logging.info("Moving to next solution: random_seed: "+str(random_seed)+" iteration: "+str(iteration))
random_cluster1 = None
random_cluster2 = None
while random_cluster1 == random_cluster2:
random_cluster1 = random.choice(ctg.nodes())
while len(ctg.node[random_cluster1]['TaskList']) == 0:
random_cluster1 = random.choice(ctg.nodes())
random_cluster2 = random.choice(ctg.nodes())
while len(ctg.node[random_cluster2]['TaskList']) == 0:
random_cluster2 = random.choice(ctg.nodes())
random_task1 = random.choice(ctg.node[random_cluster1]['TaskList'])
random_task2 = random.choice(ctg.node[random_cluster2]['TaskList'])
remove_task_from_ctg(tg, ctg, random_task1)
remove_task_from_ctg(tg, ctg, random_task2)
task1_clustering = add_task_to_ctg(tg, ctg, random_task1, random_cluster2)
task2_clustering = add_task_to_ctg(tg, ctg, random_task2, random_cluster1)
while not (task1_clustering and task2_clustering):
if task1_clustering:
remove_task_from_ctg(tg, ctg, random_task1)
if task2_clustering:
remove_task_from_ctg(tg, ctg, random_task2)
add_task_to_ctg(tg, ctg, random_task1, random_cluster1)
add_task_to_ctg(tg, ctg, random_task2, random_cluster2)
# here we are back to normal...
random_cluster1 = None
random_cluster2 = None
while random_cluster1 == random_cluster2:
random_cluster1 = random.choice(ctg.nodes())
while len(ctg.node[random_cluster1]['TaskList']) == 0:
random_cluster1 = random.choice(ctg.nodes())
random_cluster2 = random.choice(ctg.nodes())
while len(ctg.node[random_cluster2]['TaskList']) == 0:
random_cluster2 = random.choice(ctg.nodes())
random_task1 = random.choice(ctg.node[random_cluster1]['TaskList'])
random_task2 = random.choice(ctg.node[random_cluster2]['TaskList'])
remove_task_from_ctg(tg, ctg, random_task1)
remove_task_from_ctg(tg, ctg, random_task2)
task1_clustering = add_task_to_ctg(tg, ctg, random_task1, random_cluster2)
task2_clustering = add_task_to_ctg(tg, ctg, random_task2, random_cluster1)
logging.info("TASK "+str(random_task1) + " FROM CLUSTER " + str(random_cluster1) + " SWAPPED WITH TASK " +
str(random_task2)+" FROM CLUSTER "+str(random_cluster2))
return None
def task_circulation():
# todo... Circulate N tasks...
return None
def remove_empty_clusters(ctg):
"""
Takes a ctg and deletes the empty clusters
:param ctg: Clustered Task Graph
:return: None
"""
for cluster in ctg.nodes():
if len(ctg.node[cluster]['TaskList']) == 0:
ctg.remove_node(cluster)
return None
| gpl-2.0 |
vwvww/servo | components/script/dom/bindings/codegen/parser/tests/test_identifier_conflict.py | 53 | 1193 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
try:
parser.parse("""
enum Foo { "a" };
interface Foo;
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
harness.ok("Name collision" in e.message,
"Should have name collision for interface")
parser = parser.reset()
try:
parser.parse("""
dictionary Foo { long x; };
enum Foo { "a" };
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
harness.ok("Name collision" in e.message,
"Should have name collision for dictionary")
parser = parser.reset()
try:
parser.parse("""
enum Foo { "a" };
enum Foo { "b" };
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
        harness.ok("Multiple unresolvable definitions" in e.message,
                   "Should have name collision for enums")
| mpl-2.0 |
EvenStrangest/tensorflow | tensorflow/contrib/lookup/lookup_ops_test.py | 3 | 11467 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class HashTableOpTest(tf.test.TestCase):
def testHashTable(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableFindHighRank(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.test_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys,
values,
value_dtype=tf.int64),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.test_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.test_session() as sess:
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table1 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table2 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table3 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
tf.initialize_all_tables().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.test_session():
default_val = tf.constant(-1, tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.test_session() as sess:
default_val = tf.constant(-1, tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = tf.SparseTensor(
tf.constant(sp_indices, tf.int64),
tf.constant(["brain", "salad", "tank"]),
tf.constant(sp_shape, tf.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
input_string = tf.constant([1, 2, 3], tf.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.test_session():
default_val = -1
with self.assertRaises(TypeError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
["a"], [1], [tf.string], tf.int64), default_val)
def testNotInitialized(self):
with self.test_session():
default_val = -1
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
["a"],
[1],
value_dtype=tf.int64),
default_val)
input_string = tf.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
with self.assertRaisesOpError("Table already initialized"):
table.init.run()
def testInitializationWithInvalidDimensions(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2, 3, 4], tf.int64)
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys,
values), default_val)
class StringToIndexTest(tf.test.TestCase):
def test_string_to_index(self):
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
feats = tf.constant(["salad", "surgery", "tarkus"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings)
self.assertRaises(tf.OpError, indices.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = tf.constant(["hello", "hello"])
feats = tf.constant(["hello", "hola"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings)
self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
feats = tf.constant(["salad", "surgery", "tarkus"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings,
default_value=default_value)
self.assertRaises(tf.OpError, indices.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTest(tf.test.TestCase):
def test_index_to_string(self):
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
indices = tf.constant([0, 1, 2, 3], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings)
self.assertRaises(tf.OpError, feats.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), feats.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = tf.constant(["hello", "hello"])
indices = tf.constant([0, 1, 4], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings)
tf.initialize_all_tables().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
indices = tf.constant([1, 2, 4], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings,
default_value=default_value)
self.assertRaises(tf.OpError, feats.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
xmission/d-note | venv/lib/python2.7/site.py | 784 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
dir = os.path.join(*paths)
if _is_jython and (dir == '__classpath__' or
dir.startswith('__pyclasspath__')):
return dir, dir
dir = os.path.abspath(dir)
return dir, os.path.normcase(dir)
def abs__file__():
    """Set all modules' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if ((_is_jython and not isinstance(m, ModuleType)) or
hasattr(m, '__loader__')):
# only modules need the abspath in Jython. and don't mess
# with a PEP 302-supplied __file__
continue
f = getattr(m, '__file__', None)
if f is None:
continue
m.__file__ = os.path.abspath(f)
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
try:
for line in f:
if line.startswith("#"):
continue
if line.startswith("import"):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
finally:
f.close()
if reset:
known_paths = None
return known_paths
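# (Illustrative .pth contents, hypothetical: a file "foo.pth" inside a site dir holding
#     # foo package configuration
#     import os; os.environ.setdefault("FOO_CONFIGURED", "1")
#     foo
# would have the comment line skipped, the import line exec'd, and <sitedir>/foo appended
# to sys.path by addpackage() above, provided that directory exists.)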
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name.endswith(os.extsep + "pth"):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
builtins.copyright = _Printer("copyright", sys.copyright)
if _is_jython:
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
elif _is_pypy:
builtins.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
builtins.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
def virtual_install_main_packages():
f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
sys.real_prefix = f.read().strip()
f.close()
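    # (Descriptive note: 'orig-prefix.txt' is written by virtualenv next to this module and
    # records the prefix of the base interpreter this environment was created from; the code
    # below re-adds that interpreter's standard-library directories to sys.path, with
    # platform-specific layouts for Jython, PyPy, Windows and POSIX.)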
pos = 2
hardcoded_relative_dirs = []
if sys.path[0] == '':
pos += 1
if _is_jython:
paths = [os.path.join(sys.real_prefix, 'Lib')]
elif _is_pypy:
if sys.version_info > (3, 2):
cpyver = '%d' % sys.version_info[0]
elif sys.pypy_version_info >= (1, 5):
cpyver = '%d.%d' % sys.version_info[:2]
else:
cpyver = '%d.%d.%d' % sys.version_info[:3]
paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
os.path.join(sys.real_prefix, 'lib-python', cpyver)]
if sys.pypy_version_info < (1, 9):
paths.insert(1, os.path.join(sys.real_prefix,
'lib-python', 'modified-%s' % cpyver))
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
#
# This is hardcoded in the Python executable, but relative to sys.prefix:
for path in paths[:]:
plat_path = os.path.join(path, 'plat-%s' % sys.platform)
if os.path.exists(plat_path):
paths.append(plat_path)
elif sys.platform == 'win32':
paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
else:
paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
if os.path.exists(lib64_path):
if _is_64bit:
paths.insert(0, lib64_path)
else:
paths.append(lib64_path)
# This is hardcoded in the Python executable, but relative to
# sys.prefix. Debian change: we need to add the multiarch triplet
# here, which is where the real stuff lives. As per PEP 421, in
# Python 3.3+, this lives in sys.implementation, while in Python 2.7
# it lives in sys.
try:
arch = getattr(sys, 'implementation', sys)._multiarch
except AttributeError:
# This is a non-multiarch aware Python. Fallback to the old way.
arch = sys.platform
plat_path = os.path.join(sys.real_prefix, 'lib',
'python'+sys.version[:3],
'plat-%s' % arch)
if os.path.exists(plat_path):
paths.append(plat_path)
# This is hardcoded in the Python executable, but
# relative to sys.prefix, so we have to fix up:
for path in list(paths):
tk_dir = os.path.join(path, 'lib-tk')
if os.path.exists(tk_dir):
paths.append(tk_dir)
# These are hardcoded in the Apple's Python executable,
# but relative to sys.prefix, so we have to fix them up:
if sys.platform == 'darwin':
hardcoded_paths = [os.path.join(relative_dir, module)
for relative_dir in hardcoded_relative_dirs
for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
for path in hardcoded_paths:
if os.path.exists(path):
paths.append(path)
sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
force_global_eggs_after_local_site_packages()
return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths)
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
def main():
global ENABLE_USER_SITE
virtual_install_main_packages()
abs__file__()
paths_in_sys = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if _is_jython:
fixclasspath()
GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
if not GLOBAL_SITE_PACKAGES:
ENABLE_USER_SITE = False
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
paths_in_sys = addsitepackages(paths_in_sys)
paths_in_sys = addusersitepackages(paths_in_sys)
if GLOBAL_SITE_PACKAGES:
paths_in_sys = virtual_addsitepackages(paths_in_sys)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
def exists(path):
if os.path.isdir(path):
return "exists"
else:
return "doesn't exist"
print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
| agpl-3.0 |
grahamawelch/wordclock | svg/makediffuser.py | 1 | 1482 | from xml.etree.ElementTree import QName, ElementTree, Element, SubElement, register_namespace
from sys import stdout, stderr
# (Amt, Size)
RECTS = [
(1, 2.6), # 1
(6, 2.25), # 1
(5, 4.6), # 2
(5, 7), # 3
(7, 9.25), # 4
(3, 12), # 5
(3, 14.5), # 6
(1, 17.25), # 7
]
class SVG(object):
def __getattr__(self, name):
def f(*children, **kwargs):
qname = QName('http://www.w3.org/2000/svg', name)
e = Element(qname, **kwargs)
e.extend(children)
return e
return f
svg = SVG()
def MakeRectangle(x, y, width, height):
rect = svg.rect(
x=str(x),
y=str(y),
width=str(width),
height=str(height),
style='stroke:black;fill:none;stroke-width:.01;'
)
return rect
def BuildDiffuser():
register_namespace('svg', 'http://www.w3.org/2000/svg')
root = svg.svg(
width='30cm',
height='30cm',
viewBox = '0 0 30 30',
version='1.1',
)
x_off = 0.5
y_off = 0.5
y_inc = 2.25
x = 0
y = 0
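  # (Descriptive note: rectangles are laid out left to right in rows y_inc = 2.25 cm tall,
  # wrapping to the next row whenever the next rectangle would extend past x = 27 cm, so the
  # diffuser grid stays inside the 30 cm square canvas declared above.)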
for rect in RECTS:
amt, size = rect
while(amt > 0):
if (x + x_off + size) > 27:
# Go to the next line if we get too long
x = 0
y += 1
px = x + x_off
py = (y * y_inc) + y_off
rect = MakeRectangle(px, py, size, y_inc)
#print 'Rect %s of %s for size %s, (%s,%s)' % (i, amt, size, px, py)
root.append(rect)
amt -= 1
x += size
tree = ElementTree(root)
tree.write(stdout)
#Run everything
BuildDiffuser()
| mit |
krez13/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
steincastillo/img_utils | auto_canny.py | 1 | 1359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 17:43:30 2016
@author: Stein
"""
#import libraries
import numpy as np
import argparse
import glob
import cv2
def auto_canny(image, sigma=0.33):
#compute the median of a single channel pixel intensity
v = np.median(image)
    #apply automatic canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
#return edged image
return edged
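# Worked example (editor's illustration, not part of the original script): for an image whose
# median intensity v is 100, the default sigma of 0.33 gives lower = int(0.67 * 100) = 67 and
# upper = int(1.33 * 100) = 133, i.e. the call reduces to cv2.Canny(image, 67, 133); brighter
# images automatically get proportionally higher thresholds.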
#construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required = True,
help ="path to the dataset of images")
args = vars(ap.parse_args())
#loop over the images
for imagePath in glob.glob(args["images"] + "/*.jpg"):
#load the image, convert to grayscale and blur
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
#apply canny edge detection using wide, tight and auto thresholds
wide = cv2.Canny(blurred, 10, 200)
tight = cv2.Canny(blurred, 225, 250)
auto = auto_canny(blurred)
#show the images
cv2.imshow("Original", image)
cv2.imshow("Edges", np.hstack([wide, tight, auto]))
cv2.waitKey(0) | gpl-3.0 |
abridgett/boto | boto/cloudfront/signers.py | 170 | 2074 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Signer(object):
def __init__(self):
self.id = None
self.key_pair_ids = []
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Self':
self.id = 'Self'
elif name == 'AwsAccountNumber':
self.id = value
elif name == 'KeyPairId':
self.key_pair_ids.append(value)
class ActiveTrustedSigners(list):
def startElement(self, name, attrs, connection):
if name == 'Signer':
s = Signer()
self.append(s)
return s
def endElement(self, name, value, connection):
pass
class TrustedSigners(list):
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Self':
self.append(name)
elif name == 'AwsAccountNumber':
self.append(value)
| mit |
ecreall/nova-ideo | novaideo/views/challenge_management/create_challenge.py | 1 | 1710 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.schema import select
from novaideo.content.processes.challenge_management.behaviors import (
CreateChallenge, CrateAndPublish)
from novaideo.content.challenge import ChallengeSchema, Challenge
from novaideo.content.novaideo_application import NovaIdeoApplication
from novaideo import _
from novaideo.views.core import update_anonymous_schemanode
@view_config(
name='createchallenge',
context=NovaIdeoApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class CreateChallengeView(FormView):
title = _('Create a challenge')
schema = select(ChallengeSchema(factory=Challenge, editable=True, omit=('anonymous',)),
['title',
'description',
'keywords',
'image',
'text',
'is_restricted',
'invited_users',
'deadline',
'attached_files',
'anonymous'])
behaviors = [CrateAndPublish, CreateChallenge, Cancel]
formid = 'formcreatechallenge'
name = 'createchallenge'
css_class = 'panel-transparent'
def before_update(self):
self.schema = update_anonymous_schemanode(
self.request.root, self.schema)
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{CreateChallenge: CreateChallengeView})
| agpl-3.0 |
cliffano/swaggy-jenkins | clients/python-flask/generated/openapi_server/models/input_step_impllinks.py | 1 | 2330 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.link import Link # noqa: F401,E501
from openapi_server import util
class InputStepImpllinks(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _self: Link=None, _class: str=None): # noqa: E501
"""InputStepImpllinks - a model defined in OpenAPI
:param _self: The _self of this InputStepImpllinks. # noqa: E501
:type _self: Link
:param _class: The _class of this InputStepImpllinks. # noqa: E501
:type _class: str
"""
self.openapi_types = {
'_self': Link,
'_class': str
}
self.attribute_map = {
'_self': 'self',
'_class': '_class'
}
self.__self = _self
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'InputStepImpllinks':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The InputStepImpllinks of this InputStepImpllinks. # noqa: E501
:rtype: InputStepImpllinks
"""
return util.deserialize_model(dikt, cls)
@property
def _self(self) -> Link:
"""Gets the _self of this InputStepImpllinks.
:return: The _self of this InputStepImpllinks.
:rtype: Link
"""
return self.__self
@_self.setter
def _self(self, _self: Link):
"""Sets the _self of this InputStepImpllinks.
:param _self: The _self of this InputStepImpllinks.
:type _self: Link
"""
self.__self = _self
@property
def _class(self) -> str:
"""Gets the _class of this InputStepImpllinks.
:return: The _class of this InputStepImpllinks.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this InputStepImpllinks.
:param _class: The _class of this InputStepImpllinks.
:type _class: str
"""
self.__class = _class
| mit |
trabacus-softapps/openerp-8.0-cc | openerp/addons/marketing/__openerp__.py | 55 | 1677 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing',
'version': '1.1',
'depends': ['base', 'base_setup', 'crm'],
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
Menu for Marketing.
===================
Contains the installer for marketing-related modules.
""",
'website': 'http://www.openerp.com',
'data': [
'security/marketing_security.xml',
'security/ir.model.access.csv',
'marketing_view.xml',
'res_config_view.xml',
],
'demo': ['marketing_demo.xml'],
'installable': True,
'auto_install': False,
'images': ['images/config_marketing.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
codedecde/ImageQA | Src/TheanoModel/Code/san_att_conv_twolayer_theano.py | 1 | 16341 | #!/usr/bin/env python
import pdb
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
import numpy
import numpy as np
from collections import OrderedDict
import cPickle as pickle
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
floatX = config.floatX
def shared_to_cpu(shared_params, params):
for k, v in shared_params.iteritems():
params[k] = v.get_value()
def cpu_to_shared(params, shared_params):
for k, v in params.iteritems():
shared_params[k].set_value(v)
def save_model(filename, options, params, shared_params=None):
    if shared_params is not None:
        shared_to_cpu(shared_params, params)
model = OrderedDict()
model['options'] = options
model['params'] = params
pickle.dump(model, open(filename, 'w'))
def load_model(filename):
model = pickle.load(open(filename, 'rb'))
options = model['options']
params = model['params']
shared_params = init_shared_params(params)
return options, params, shared_params
# return options, params, shared_params
def ortho_weight(ndim):
"""
Random orthogonal weights, we take
the right matrix in the SVD.
Remember in SVD, u has the same # rows as W
and v has the same # of cols as W. So we
are ensuring that the rows are
orthogonal.
"""
W = numpy.random.randn(ndim, ndim)
u, _, _ = numpy.linalg.svd(W)
return u.astype('float32')
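# (Editor's note on the initializer above: u comes from the SVD of a square Gaussian matrix,
# so numpy.dot(u.T, u) is approximately the ndim x ndim identity -- the returned weights start
# with unit singular values, a common choice for recurrent weight matrices.)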
def init_weight(n, d, options):
''' initialize weight matrix
options['init_type'] determines
gaussian or uniform initlizaiton
'''
if options['init_type'] == 'gaussian':
return (numpy.random.randn(n, d).astype(floatX)) * options['std']
elif options['init_type'] == 'uniform':
# [-range, range]
return ((numpy.random.rand(n, d) * 2 - 1) * \
options['range']).astype(floatX)
def init_convweight(w_shape, options):
''' initialize weight matrix of convolutional layer
'''
if options['init_type'] == 'gaussian':
return numpy.random.randn(*w_shape).astype(floatX) * options['std']
elif options['init_type'] == 'uniform':
return ((numpy.random.rand(*w_shape) * 2 - 1) * options['range']).astype(floatX)
layers = {'ff': ('init_fflayer', 'fflayer'),
'lstm': ('init_lstm_layer', 'lstm_layer'),
'lstm_append': (None, 'lstm_append_layer')}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# initialize the parmaters
def init_params(options):
''' Initialize all the parameters
'''
params = OrderedDict()
n_words = options['n_words']
n_emb = options['n_emb']
n_dim = options['n_dim']
n_attention = options['n_attention']
n_image_feat = options['n_image_feat']
n_common_feat = options['n_common_feat']
n_output = options['n_output']
# embedding weights
# params['w_emb'] = init_weight(n_words, n_emb, options)
## use the same initialization as BOW
params['w_emb'] = ((numpy.random.rand(n_words, n_emb) * 2 - 1) * 0.5).astype(floatX)
n_filter = 0
if options['use_unigram_conv']:
params = init_fflayer(params, n_emb, options['num_filter_unigram'],
options, prefix='conv_unigram')
n_filter += options['num_filter_unigram']
if options['use_bigram_conv']:
params = init_fflayer(params, 2 * n_emb, options['num_filter_bigram'],
options, prefix='conv_bigram')
n_filter += options['num_filter_bigram']
if options['use_trigram_conv']:
params = init_fflayer(params, 3 * n_emb, options['num_filter_trigram'],
options, prefix='conv_trigram')
n_filter += options['num_filter_trigram']
params = init_fflayer(params, n_image_feat, n_filter, options,
prefix='image_mlp')
# attention model based parameters
params = init_fflayer(params, n_filter, n_attention, options,
prefix='image_att_mlp_1')
params = init_fflayer(params, n_filter, n_attention, options,
prefix='sent_att_mlp_1')
params = init_fflayer(params, n_attention, 1, options,
prefix='combined_att_mlp_1')
params = init_fflayer(params, n_filter, n_attention, options,
prefix='image_att_mlp_2')
params = init_fflayer(params, n_filter, n_attention, options,
prefix='sent_att_mlp_2')
params = init_fflayer(params, n_attention, 1, options,
prefix='combined_att_mlp_2')
for i in range(options['combined_num_mlp']):
if i == 0 and options['combined_num_mlp'] == 1:
params = init_fflayer(params, n_filter, n_output,
options, prefix='combined_mlp_%d'%(i))
elif i == 0 and options['combined_num_mlp'] != 1:
params = init_fflayer(params, n_filter, n_common_feat,
options, prefix='combined_mlp_%d'%(i))
elif i == options['combined_num_mlp'] - 1 :
params = init_fflayer(params, n_common_feat, n_output,
options, prefix='combined_mlp_%d'%(i))
else:
params = init_fflayer(params, n_common_feat, n_common_feat,
options, prefix='combined_mlp_%d'%(i))
return params
def init_shared_params(params):
''' return a shared version of all parameters
'''
shared_params = OrderedDict()
for k, p in params.iteritems():
shared_params[k] = theano.shared(params[k], name = k)
return shared_params
# activation function for ff layer
def tanh(x):
return T.tanh(x)
def relu(x):
return T.maximum(x, np.float32(0.))
def linear(x):
return x
def init_fflayer(params, nin, nout, options, prefix='ff'):
''' initialize ff layer
'''
params[prefix + '_w'] = init_weight(nin, nout, options)
params[prefix + '_b'] = np.zeros(nout, dtype='float32')
return params
def fflayer(shared_params, x, options, prefix='ff', act_func='tanh'):
''' fflayer: multiply weight then add bias
'''
return eval(act_func)(T.dot(x, shared_params[prefix + '_w']) +
shared_params[prefix + '_b'])
def init_convlayer(params, w_shape, options, prefix='conv'):
''' init conv layer
'''
params[prefix + '_w'] = init_convweight(w_shape, options)
params[prefix + '_b'] = np.zeros(w_shape[0]).astype(floatX)
return params
def convlayer(shared_params, x, options, prefix='conv', act_func='tanh'):
return eval(act_func)(conv.conv2d(x, shared_params[prefix + '_w']) +
shared_params[prefix + '_b'].dimshuffle('x', 0, 'x', 'x'))
def maxpool_layer(shared_params, x, maxpool_shape, options):
return downsample.max_pool_2d(x, maxpool_shape, ignore_border=False)
def dropout_layer(x, dropout, trng, drop_ratio=0.5):
''' dropout layer
'''
x_drop = T.switch(dropout,
(x * trng.binomial(x.shape,
p = 1 - drop_ratio,
n = 1,
dtype = x.dtype) \
/ (numpy.float32(1.0) - drop_ratio)),
x)
return x_drop
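# (Descriptive note: this is "inverted" dropout -- units kept during training are scaled up by
# 1 / (1 - drop_ratio), so at evaluation time the shared `dropout` flag is simply set to 0 and
# the input passes through unchanged.)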
def similarity_layer(feat, feat_seq):
def _step(x, y):
return T.sum(x*y, axis=1) / (T.sqrt(T.sum(x*x, axis=1) * \
T.sum(y*y, axis=1))
+ np.float(1e-7))
similarity, updates = theano.scan(fn = _step,
sequences = [feat_seq],
outputs_info = None,
non_sequences = [feat],
n_steps = feat_seq.shape[0])
return similarity
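# (Descriptive note: the scan above returns, for every time step of feat_seq, the cosine
# similarity sum(x * y) / (sqrt(sum(x * x) * sum(y * y)) + 1e-7) between `feat` and that step,
# computed batch-wise along axis 1.)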
def build_model(shared_params, options):
trng = RandomStreams(1234)
drop_ratio = options['drop_ratio']
batch_size = options['batch_size']
n_dim = options['n_dim']
w_emb = shared_params['w_emb']
dropout = theano.shared(numpy.float32(0.))
image_feat = T.ftensor3('image_feat')
# batch_size x T
input_idx = T.imatrix('input_idx')
input_mask = T.matrix('input_mask')
# label is the TRUE label
label = T.ivector('label')
empty_word = theano.shared(value=np.zeros((1, options['n_emb']),
dtype='float32'),
name='empty_word')
w_emb_extend = T.concatenate([empty_word, shared_params['w_emb']],
axis=0)
input_emb = w_emb_extend[input_idx]
# a trick here, set the maxpool_h/w to be large
# maxpool_shape = (options['maxpool_h'], options['maxpool_w'])
# turn those appending words into zeros
# batch_size x T x n_emb
input_emb = input_emb * input_mask[:, :, None]
if options['sent_drop']:
input_emb = dropout_layer(input_emb, dropout, trng, drop_ratio)
if options['use_unigram_conv']:
unigram_conv_feat = fflayer(shared_params, input_emb, options,
prefix='conv_unigram',
act_func=options.get('sent_conv_act', 'tanh'))
unigram_pool_feat = unigram_conv_feat.max(axis=1)
if options['use_bigram_conv']:
idx = T.concatenate([T.arange(input_emb.shape[1])[:-1],
T.arange(input_emb.shape[1])[1:]]).reshape((2, input_emb.shape[1] - 1)).transpose().flatten()
bigram_emb = T.reshape(input_emb[:, idx, :], (input_emb.shape[0],
input_emb.shape[1] - 1,
2 * input_emb.shape[2]))
bigram_conv_feat = fflayer(shared_params, bigram_emb,
options, prefix='conv_bigram',
act_func=options.get('sent_conv_act', 'tanh'))
bigram_pool_feat = bigram_conv_feat.max(axis=1)
if options['use_trigram_conv']:
idx = T.concatenate([T.arange(input_emb.shape[1])[:-2],
T.arange(input_emb.shape[1])[1:-1],
T.arange(input_emb.shape[1])[2:]]).reshape((3, input_emb.shape[1] - 2)).transpose().flatten()
trigram_emb = T.reshape(input_emb[:, idx, :], (input_emb.shape[0],
input_emb.shape[1] - 2,
3 * input_emb.shape[2]))
trigram_conv_feat = fflayer(shared_params, trigram_emb,
options, prefix='conv_trigram',
act_func=options.get('sent_conv_act', 'tanh'))
        trigram_pool_feat = trigram_conv_feat.max(axis=1)
pool_feat = T.concatenate([unigram_pool_feat,
bigram_pool_feat,
trigram_pool_feat], axis=1)
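    # (Descriptive note: the three max-pooled convolution branches form a CNN question encoder:
    # unigram, bigram and trigram filters slide over the word embeddings, a max over time keeps
    # the strongest phrase response per filter, and the concatenation is the question vector
    # that drives both attention layers below.)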
image_feat_down = fflayer(shared_params, image_feat, options,
prefix='image_mlp',
act_func=options.get('image_mlp_act',
'tanh'))
if options.get('use_before_attention_drop', False):
image_feat_down = dropout_layer(image_feat_down, dropout, trng, drop_ratio)
pool_feat = dropout_layer(pool_feat, dropout, trng, drop_ratio)
# attention model begins here
# first layer attention model
image_feat_attention_1 = fflayer(shared_params, image_feat_down, options,
prefix='image_att_mlp_1',
act_func=options.get('image_att_mlp_act',
'tanh'))
pool_feat_attention_1 = fflayer(shared_params, pool_feat, options,
prefix='sent_att_mlp_1',
act_func=options.get('sent_att_mlp_act',
'tanh'))
combined_feat_attention_1 = image_feat_attention_1 + \
pool_feat_attention_1[:, None, :]
if options['use_attention_drop']:
combined_feat_attention_1 = dropout_layer(combined_feat_attention_1,
dropout, trng, drop_ratio)
combined_feat_attention_1 = fflayer(shared_params,
combined_feat_attention_1, options,
prefix='combined_att_mlp_1',
act_func=options.get(
'combined_att_mlp_act',
'tanh'))
prob_attention_1 = T.nnet.softmax(combined_feat_attention_1[:, :, 0])
image_feat_ave_1 = (prob_attention_1[:, :, None] * image_feat_down).sum(axis=1)
combined_hidden_1 = image_feat_ave_1 + pool_feat
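    # (Descriptive note: adding the attention-weighted image summary to the question vector
    # yields a refined query; the second layer below repeats the same soft attention with this
    # refined query -- the two-hop "stacked attention" scheme of the model.)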
# second layer attention model
image_feat_attention_2 = fflayer(shared_params, image_feat_down, options,
prefix='image_att_mlp_2',
act_func=options.get('image_att_mlp_act',
'tanh'))
pool_feat_attention_2 = fflayer(shared_params, combined_hidden_1, options,
prefix='sent_att_mlp_2',
act_func=options.get('sent_att_mlp_act',
'tanh'))
combined_feat_attention_2 = image_feat_attention_2 + \
pool_feat_attention_2[:, None, :]
if options['use_attention_drop']:
combined_feat_attention_2 = dropout_layer(combined_feat_attention_2,
dropout, trng, drop_ratio)
combined_feat_attention_2 = fflayer(shared_params,
combined_feat_attention_2, options,
prefix='combined_att_mlp_2',
act_func=options.get(
'combined_att_mlp_act', 'tanh'))
prob_attention_2 = T.nnet.softmax(combined_feat_attention_2[:, :, 0])
image_feat_ave_2 = (prob_attention_2[:, :, None] * image_feat_down).sum(axis=1)
if options.get('use_final_image_feat_only', False):
combined_hidden = image_feat_ave_2 + pool_feat
else:
combined_hidden = image_feat_ave_2 + combined_hidden_1
for i in range(options['combined_num_mlp']):
if options.get('combined_mlp_drop_%d'%(i), False):
combined_hidden = dropout_layer(combined_hidden, dropout, trng,
drop_ratio)
if i == options['combined_num_mlp'] - 1:
combined_hidden = fflayer(shared_params, combined_hidden, options,
prefix='combined_mlp_%d'%(i),
act_func='linear')
else:
combined_hidden = fflayer(shared_params, combined_hidden, options,
prefix='combined_mlp_%d'%(i),
act_func=options.get('combined_mlp_act_%d'%(i),
'tanh'))
# drop the image output
prob = T.nnet.softmax(combined_hidden)
prob_y = prob[T.arange(prob.shape[0]), label]
pred_label = T.argmax(prob, axis=1)
# sum or mean?
cost = -T.mean(T.log(prob_y))
accu = T.mean(T.eq(pred_label, label))
# return image_feat, input_idx, input_mask, \
# label, dropout, cost, accu
return image_feat, input_idx, input_mask, \
label, dropout, cost, accu, pred_label, \
prob_attention_1, prob_attention_2
# return image_feat, input_idx, input_mask, \
# label, dropout, cost, accu, pred_label, \
# image_feat_down, pool_feat
# for debug
# return image_feat, input_idx, input_mask, label, dropout, cost, accu, \
# input_emb, bigram_emb, trigram_emb, trigram_pool_feat, pool_feat
| mit |
pla93/django-mantis | mantis/menus.py | 2 | 2181 | from menu import Menu, MenuItem
from django.core.urlresolvers import reverse
Menu.add_item( "mantis_main",
MenuItem("List, Filter & Search", "",
weight = 50,
children = (
MenuItem("Info Object List (generic filter)", reverse("url.dingos.list.infoobject.generic"), weight = 40 ),
MenuItem("Info Object List (filter by ID)", reverse("url.dingos.list.infoobject.by_id"), weight = 50 ),
MenuItem("Fact Search (simple)", reverse("url.dingos.search.fact.simple"), weight = 40 ),
MenuItem("Fact Search (unique)", reverse("url.dingos.search.fact.simple.unique"), weight = 40 ),
),
)
)
Menu.add_item( "mantis_main",
MenuItem("Saved Filters/Searches", "",
weight = 50,
children = ()
)
)
def user_name(request):
if request.user.is_authenticated():
return request.user.username
else:
return "Not logged in"
def login_name(request):
if request.user.is_authenticated():
return "Log out"
else:
return "Log in"
Menu.add_item( "mantis_main",
MenuItem(user_name,
"",
weight = 50,
children = (MenuItem("Edit user config", reverse("url.dingos.admin.view.userprefs"), weight = 40 ),
MenuItem("Edit saved searches", reverse("url.dingos.admin.edit.savedsearches"), weight = 40 ),
MenuItem(login_name,
reverse("admin:logout"),
weight = 40,
# Seems that the check functionality of simple menu
# is somehow broken.
#check = lambda request: request.user.is_authenticated())
)
)
)
)
| gpl-2.0 |
kurtrwall/wagtail | wagtail/wagtailembeds/rich_text.py | 9 | 1316 | from __future__ import absolute_import, unicode_literals
from wagtail.wagtailembeds import format
from wagtail.wagtailembeds.exceptions import EmbedException
class MediaEmbedHandler(object):
"""
MediaEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="media". The resulting element in the database
representation will be:
<embed embedtype="media" url="http://vimeo.com/XXXXX">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as a media embed (because it has a
data-embedtype="media" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'url': tag['data-url'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
if for_editor:
try:
return format.embed_to_editor_html(attrs['url'])
except EmbedException:
# Could be replaced with a nice error message
return ''
else:
return format.embed_to_frontend_html(attrs['url'])
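# ---------------------------------------------------------------------------
# Editor's note: minimal usage sketch only (not part of the original module).
# It exercises get_db_attributes with a plain dict standing in for the
# BeautifulSoup tag Wagtail would normally pass; the URL is made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fake_tag = {'data-url': 'http://vimeo.com/XXXXX'}
    print(MediaEmbedHandler.get_db_attributes(fake_tag))  # {'url': 'http://vimeo.com/XXXXX'}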
| bsd-3-clause |
petewarden/tensorflow | tensorflow/python/kernel_tests/constant_op_test.py | 9 | 38520 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.cached_session():
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testInvalidDType(self):
# Test case for GitHub issue 18474
with self.assertRaises(TypeError):
constant_op.constant(dtypes_lib.string, "[,]")
@test_util.run_deprecated_v1
def testBFloat16(self):
bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(np.empty((2, 0, 5)).astype(bfloat16))
@test_util.run_deprecated_v1
def testHalf(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16))
self._testAll(np.empty((2, 0, 5)).astype(np.float16))
@test_util.run_deprecated_v1
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
@test_util.run_deprecated_v1
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
@test_util.run_deprecated_v1
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
@test_util.run_deprecated_v1
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
@test_util.run_deprecated_v1
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
@test_util.run_deprecated_v1
def testComplex128(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.run_deprecated_v1
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
@test_util.run_deprecated_v1
def testVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const = constant_op.constant(variant_tensor)
const_value = const.op.get_attr("value")
# Ensure we stored the tensor proto properly.
self.assertProtoEquals(variant_tensor, const_value)
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
logging_const_op = logging_ops.Print(
const, [const],
message="Variant storing an int, decoded const value:").op
logging_const_op.run()
@test_util.run_deprecated_v1
def testStringWithNulls(self):
with self.cached_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.cached_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.cached_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
"""Tests PyObject refs are managed correctly when executing eagerly."""
constant_op.constant([[1.]])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with ops.Graph().as_default():
c = constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testPromotionShapes(self):
with ops.Graph().as_default():
c = constant_op.constant([7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
c = constant_op.constant(3, shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "Too many elements provided."):
constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[5])
with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegex(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegex(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
@test_util.run_deprecated_v1
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegex(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegex(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegex(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
with self.assertRaisesRegex(ValueError,
"a dimension is too large .2147483648."):
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int32)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
with self.assertRaisesRegex(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegex(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
@test_util.run_deprecated_v1
def testAsTensorForDimensionInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
shape = tensor_shape.TensorShape(None)
if shape._v2_behavior:
with self.assertRaisesRegex(ValueError, "None values not supported"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegex(ValueError, "None values not supported"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
else:
with self.assertRaisesRegex(ValueError, "unknown Dimension"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegex(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
class IdentityOpTest(test.TestCase):
def testIdTensor(self):
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.cached_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.cached_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.disable_tfrt("b/169901260")
def testQint8Dtype(self):
dtype = dtypes_lib.qint8
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
    # cast to int32 so that it can be compared with numpy
# where [qint|quint][8|16] are not available.
z_value = self.evaluate(math_ops.cast(z, dtypes_lib.int32))
self.assertFalse(np.any(z_value))
@test_util.disable_tfrt("b/169901260")
def testQint16Dtype(self):
dtype = dtypes_lib.qint16
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
    # cast to int32 so that it can be compared with numpy
# where [qint|quint][8|16] are not available.
z_value = self.evaluate(math_ops.cast(z, dtypes_lib.int32))
self.assertFalse(np.any(z_value))
@test_util.disable_tfrt("b/169901260")
def testQint32Dtype(self):
dtype = dtypes_lib.qint32
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
    # cast to int32 so that it can be compared with numpy
# where [qint|quint][8|16] are not available.
z_value = self.evaluate(math_ops.cast(z, dtypes_lib.int32))
self.assertFalse(np.any(z_value))
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
      # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
if fully_defined_shape:
d = constant_op.constant(
np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
else:
d = array_ops.placeholder(dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
if fully_defined_shape:
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
feed_dict = {}
if not fully_defined_shape:
feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
z_value = z_var.eval(feed_dict=feed_dict)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.run_deprecated_v1
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False)
@test_util.run_deprecated_v1
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.complex64,
dtypes_lib.complex128, dtypes_lib.bool
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True)
@test_util.run_deprecated_v1
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
@test_util.run_deprecated_v1
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.cached_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
@test_util.run_deprecated_v1
def testZerosLikeVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported AND we register a
# ZerosLike callback for GPU for Variant storing primitive types
# in variant_op_registry.cc.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const_variant = constant_op.constant(variant_tensor)
zeros_like = array_ops.zeros_like(const_variant)
zeros_like_op = logging_ops.Print(
zeros_like, [const_variant, zeros_like],
message="Variant storing an int, input and output of zeros_like:").op
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
zeros_like_op.run()
class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.cached_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.cached_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testAutoPack(self):
with self.cached_session():
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z, np.ones([2, 3]))
@test_util.disable_tfrt("b/169901260")
def testQintDtype(self):
@def_function.function(autograph=False)
def f():
return math_ops.cast(
array_ops.ones([2, 3], dtype=dtypes_lib.quint8), dtypes_lib.int32)
value = self.evaluate(f())
self.assertTrue(np.all(value))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32,
dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]:
numpy_dtype = dtype.as_numpy_dtype
with self.cached_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = self.evaluate(z_var)
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
@test_util.run_deprecated_v1
def testOnesLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex128(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex128)
self._compareAll([2, 3], np_ans[0][0], np_ans)
@test_util.run_deprecated_v1
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testFillNegative(self):
with self.cached_session():
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
class PlaceholderTest(test.TestCase):
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
self.evaluate(p_identity)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
@test_util.run_deprecated_v1
def testPartialShapeWhenNotFed(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
# Should trigger an operator error, not a shape error.
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testControlDependency(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.multiply(p, c)
val = np.array(2).astype(np.int)
self.assertEqual(10, d.eval(feed_dict={p: val}))
@test_util.run_deprecated_v1
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
@test_util.run_deprecated_v1
def testTensorStr(self):
a = array_ops.placeholder(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
if c.shape._v2_behavior:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, None, 2) dtype=qint32>", repr(c))
else:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
@test_util.run_deprecated_v1
def testOldGraph(self):
# Load graph generated from earlier version of TF where
# placeholder shape was not set.
#
# a = tf.compat.v1.placeholder(tf.float32)
# b = a + 1.0
#
# Older graph's default shape is 'shape {}', not 'shape {
# unknown_rank: true }'
graph = """
node {
name: "Placeholder"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
}
}
}
}
node {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
node {
name: "add"
op: "Add"
input: "Placeholder"
input: "add/y"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
"""
gdef = graph_pb2.GraphDef()
text_format.Merge(graph, gdef)
with self.cached_session():
p, ret = importer.import_graph_def(
gdef, return_elements=["Placeholder:0", "add:0"])
      # Feed in a vector of two elements. With a producer version of 21, a
      # shape of {} is interpreted as "any shape". If the producer version
      # were 22, then we'd get a shape mismatch error.
self.assertAllEqual([2.0, 3.0], ret.eval(feed_dict={p: [1.0, 2.0]}))
class PlaceholderWithDefaultTest(test.TestCase):
@test_util.run_deprecated_v1
def testFullShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], self.evaluate(a))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
@test_util.run_deprecated_v1
def testNoShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(force_gpu=test_util.is_gpu_available()):
x = array_ops.placeholder(dtypes_lib.float32, [5, 7])
y = array_ops.placeholder_with_default(x, None)
err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7])
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
jymannob/CouchPotatoServer | couchpotato/core/media/__init__.py | 9 | 3065 | import os
import traceback
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.plugins.base import Plugin
import six
log = CPLog(__name__)
class MediaBase(Plugin):
_type = None
def initType(self):
addEvent('media.types', self.getType)
def getType(self):
return self._type
def createOnComplete(self, media_id):
def onComplete():
try:
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())
return onComplete
def createNotifyFront(self, media_id):
def notifyFront():
try:
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.update' % media.get('type')
fireEvent('notify.frontend', type = event_name, data = media)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())
return notifyFront
    def getDefaultTitle(self, info):
# Set default title
default_title = toUnicode(info.get('title'))
titles = info.get('titles', [])
counter = 0
def_title = None
for title in titles:
if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
return def_title or 'UNKNOWN'
def getPoster(self, image_urls, existing_files):
image_type = 'poster'
# Remove non-existing files
file_type = 'image_%s' % image_type
# Make existing unique
unique_files = list(set(existing_files.get(file_type, [])))
# Remove files that can't be found
        unique_files = [ef for ef in unique_files if os.path.isfile(ef)]
# Replace new files list
existing_files[file_type] = unique_files
        if len(existing_files[file_type]) == 0:
del existing_files[file_type]
# Loop over type
for image in image_urls.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
existing_files[file_type] = [file_path]
break
else:
break
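# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (not part of the original plugin).
# A simplified, standalone restatement of the title-fallback order used in
# getDefaultTitle above (it drops the unicode-conversion details), so the
# selection rule is easier to follow. The sample data below is made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def pick_default_title(info):
        default_title = info.get('title') or ''
        titles = info.get('titles', [])
        for counter, title in enumerate(titles):
            if (not default_title and counter == 0) or len(titles) == 1 \
                    or title.lower() == default_title.lower():
                return title
        if titles:
            return titles[0]
        return default_title or 'UNKNOWN'

    print(pick_default_title({'title': 'Le Samourai', 'titles': ['Le Samourai', 'The Samurai']}))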
| gpl-3.0 |
y12uc231/edx-platform | common/djangoapps/reverification/tests/test_models.py | 17 | 2964 | """
Tests for Reverification models
"""
from datetime import timedelta, datetime
import pytz
from django.core.exceptions import ValidationError
from reverification.models import MidcourseReverificationWindow
from reverification.tests.factories import MidcourseReverificationWindowFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class TestMidcourseReverificationWindow(ModuleStoreTestCase):
""" Tests for MidcourseReverificationWindow objects """
def setUp(self, **kwargs):
super(TestMidcourseReverificationWindow, self).setUp()
self.course_id = CourseFactory.create().id
def test_window_open_for_course(self):
# Should return False if no windows exist for a course
self.assertFalse(MidcourseReverificationWindow.window_open_for_course(self.course_id))
# Should return False if a window exists, but it's not in the current timeframe
MidcourseReverificationWindowFactory(
course_id=self.course_id,
start_date=datetime.now(pytz.utc) - timedelta(days=10),
end_date=datetime.now(pytz.utc) - timedelta(days=5)
)
self.assertFalse(MidcourseReverificationWindow.window_open_for_course(self.course_id))
# Should return True if a non-expired window exists
MidcourseReverificationWindowFactory(
course_id=self.course_id,
start_date=datetime.now(pytz.utc) - timedelta(days=3),
end_date=datetime.now(pytz.utc) + timedelta(days=3)
)
self.assertTrue(MidcourseReverificationWindow.window_open_for_course(self.course_id))
def test_get_window(self):
# if no window exists, returns None
self.assertIsNone(MidcourseReverificationWindow.get_window(self.course_id, datetime.now(pytz.utc)))
# we should get the expected window otherwise
window_valid = MidcourseReverificationWindowFactory(
course_id=self.course_id,
start_date=datetime.now(pytz.utc) - timedelta(days=3),
end_date=datetime.now(pytz.utc) + timedelta(days=3)
)
self.assertEquals(
window_valid,
MidcourseReverificationWindow.get_window(self.course_id, datetime.now(pytz.utc))
)
def test_no_overlapping_windows(self):
window_valid = MidcourseReverificationWindow(
course_id=self.course_id,
start_date=datetime.now(pytz.utc) - timedelta(days=3),
end_date=datetime.now(pytz.utc) + timedelta(days=3)
)
window_valid.save()
with self.assertRaises(ValidationError):
window_invalid = MidcourseReverificationWindow(
course_id=self.course_id,
start_date=datetime.now(pytz.utc) - timedelta(days=2),
end_date=datetime.now(pytz.utc) + timedelta(days=4)
)
window_invalid.save()
| agpl-3.0 |
mcgachey/edx-platform | lms/djangoapps/commerce/views.py | 7 | 3654 | """ Commerce views. """
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from verify_student.models import SoftwareSecurePhotoVerification
from shoppingcart.processors.CyberSource2 import is_user_payment_error
from django.utils.translation import ugettext as _
log = logging.getLogger(__name__)
@csrf_exempt
def checkout_cancel(_request):
""" Checkout/payment cancellation view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_cancel.html", context)
@csrf_exempt
def checkout_error(_request):
""" Checkout/payment error view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_error.html", context)
@csrf_exempt
@login_required
def checkout_receipt(request):
""" Receipt view. """
page_title = _('Receipt')
is_payment_complete = True
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
payment_support_link = '<a href=\"mailto:{email}\">{email}</a>'.format(email=payment_support_email)
is_cybersource = all(k in request.POST for k in ('signed_field_names', 'decision', 'reason_code'))
if is_cybersource and request.POST['decision'] != 'ACCEPT':
# Cybersource may redirect users to this view if it couldn't recover
# from an error while capturing payment info.
is_payment_complete = False
page_title = _('Payment Failed')
reason_code = request.POST['reason_code']
# if the problem was with the info submitted by the user, we present more detailed messages.
if is_user_payment_error(reason_code):
error_summary = _("There was a problem with this transaction. You have not been charged.")
error_text = _(
"Make sure your information is correct, or try again with a different card or another form of payment."
)
else:
error_summary = _("A system error occurred while processing your payment. You have not been charged.")
error_text = _("Please wait a few minutes and then try again.")
for_help_text = _("For help, contact {payment_support_link}.").format(payment_support_link=payment_support_link)
else:
# if anything goes wrong rendering the receipt, it indicates a problem fetching order data.
error_summary = _("An error occurred while creating your receipt.")
error_text = None # nothing particularly helpful to say if this happens.
for_help_text = _(
"If your course does not appear on your dashboard, contact {payment_support_link}."
).format(payment_support_link=payment_support_link)
context = {
'page_title': page_title,
'is_payment_complete': is_payment_complete,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
'verified': SoftwareSecurePhotoVerification.verification_valid_or_pending(request.user).exists(),
'error_summary': error_summary,
'error_text': error_text,
'for_help_text': for_help_text,
'payment_support_email': payment_support_email,
'username': request.user.username,
'nav_hidden': True,
}
return render_to_response('commerce/checkout_receipt.html', context)
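# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (not part of the original view).
# It repeats the CyberSource-response detection used in checkout_receipt on a
# hand-built POST dict; all field values below are invented.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fake_post = {'signed_field_names': 'decision,reason_code', 'decision': 'DECLINE', 'reason_code': '102'}
    is_cybersource = all(k in fake_post for k in ('signed_field_names', 'decision', 'reason_code'))
    payment_failed = is_cybersource and fake_post['decision'] != 'ACCEPT'
    print("is_cybersource=%s payment_failed=%s" % (is_cybersource, payment_failed))  # True True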
| agpl-3.0 |
DasIch/subrosa | docs/conf.py | 1 | 10164 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Subrosa documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 19 10:00:23 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from subrosa import __version__, __version_info__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Subrosa'
copyright = '2016, Daniel Neuhäuser'
author = 'Daniel Neuhäuser'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(str(part) for part in __version_info__)
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Subrosa v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Subrosadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Subrosa.tex', 'Subrosa Documentation',
'Daniel Neuhäuser', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'subrosa', 'Subrosa Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Subrosa', 'Subrosa Documentation',
author, 'Subrosa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
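# Editor's note (illustrative only): intersphinx also accepts named entries
# mapping an identifier to an (URL, inventory) pair, e.g.:
#
# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}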
doctest_global_setup = 'from subrosa import *'
| bsd-3-clause |
yktoo/indicator-sound-switcher | lib/indicator_sound_switcher/stream.py | 1 | 1764 | from gi.repository import GObject
class Stream(GObject.GObject):
"""Base class for sink and source. Call it Stream to be consistent with Gnome Sound Panel."""
def get_is_active(self):
"""is_active: defines whether the associated sink/source is the active (default) one."""
return self._is_active
def set_is_active(self, value: bool):
self._is_active = value
# If activated, also activate the item that corresponds to the active port
if value:
for port in self.ports.values():
if port.is_active or port.is_dummy:
port.is_active = True
break
is_active = GObject.property(type=bool, default=False, getter=get_is_active, setter=set_is_active)
def __init__(self, index: int, name: str, display_name: str, description: str, ports: dict, card_index: int):
"""Constructor."""
GObject.GObject.__init__(self)
self.index = index
self.name = name
self.display_name = display_name
self.description = description
self.ports = ports
self.card_index = card_index
self._is_active = False
# Assign every port's owner_stream
for port in self.ports.values():
port.owner_stream = self
# Activates the specified port by its name
def activate_port_by_name(self, name: str):
for port in self.ports.values():
port.is_active = port.name == name
def get_display_name(self) -> str:
"""Returns display name for the stream."""
return self.display_name or self.description
class Source(Stream):
"""Source class."""
pass
class Sink(Stream):
"""Sink class."""
pass
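# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (not part of the original module).
# It wires a Sink to two stand-in port objects to show how
# activate_port_by_name() flips the ports' is_active flags. The _FakePort
# class is hypothetical and only mimics the attributes this module relies on
# (name, is_active, is_dummy, owner_stream); real ports are created elsewhere
# in the application.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _FakePort(object):
        def __init__(self, name):
            self.name = name
            self.is_active = False
            self.is_dummy = False
            self.owner_stream = None

    ports = {p.name: p for p in (_FakePort('analog-output'), _FakePort('hdmi-output'))}
    sink = Sink(index=0, name='alsa_output.pci', display_name='Built-in Audio',
                description='Built-in Audio Analog Stereo', ports=ports, card_index=0)
    sink.activate_port_by_name('hdmi-output')
    print([(p.name, p.is_active) for p in sink.ports.values()])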
| gpl-3.0 |
jeremiahyan/lammps | tools/moltemplate/src/nbody_Dihedrals.py | 19 | 2460 | from nbody_graph_search import Ugraph
# This file defines how dihedral interactions are generated by moltemplate.sh
# by default. It can be overridden by supplying your own custom file.
# To find 4-body "dihedral" interactions, we would use this subgraph:
#
#                              1st bond connects atoms 0 and 1
#  *---*---*---*           =>  2nd bond connects atoms 1 and 2
#  0   1   2   3               3rd bond connects atoms 2 and 3
#
bond_pattern = Ugraph([(0,1), (1,2), (2,3)])
# (Ugraph atom indices begin at 0, not 1)
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 4 atoms has already been created
(perhaps listed in a different, but equivalent order).
    If we don't check for this, we will create many unnecessary redundant
    interactions (which can slow down the simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
the energy of a dihedral interaction is a function of the dihedral-angle.
The dihedral-angle is usually defined as the angle between planes formed
by atoms 0,1,2 & 1,2,3. This angle does not change when reversing the
order of the atoms. So it does not make sense to define a separate
dihedral interaction between atoms 0,1,2,3 AS WELL AS between 3,2,1,0.
So we sort the atoms so that the first atom has a lower atomID than the
last atom. (Later we will check to see if we have already defined an
interaction between these 4 atoms. If not then we create a new one.)
"""
    # match[0][0:4] contains the ID numbers of the 4 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
atom3 = match[0][3]
    # match[1][0:3] contains the ID numbers of the 3 bonds
bond0 = match[1][0]
bond1 = match[1][1]
bond2 = match[1][2]
if atom0 < atom3:
#return ((atom0, atom1, atom2, atom3), (bond0, bond1, bond2)) same as:
return match
else:
return ((atom3, atom2, atom1, atom0), (bond2, bond1, bond0))
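# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (not part of the original file).
# It shows canonical_order() mapping a match and its reversed form onto the
# same canonical tuple, which is how duplicate dihedrals are avoided.
# The atom and bond ID numbers below are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    forward = ((10, 11, 12, 13), (100, 101, 102))
    backward = ((13, 12, 11, 10), (102, 101, 100))
    assert canonical_order(forward) == forward
    assert canonical_order(backward) == forward
    print(canonical_order(backward))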
| gpl-2.0 |
eggertec/lextend | configuration/config_manager.py | 1 | 8184 | # Lextend
# Copyright (c) 2014-2015 Egger Enertech <http://www.egger-enertech.ch>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
import os
from xmlsettings import XMLSettings
from lxml import etree
import pyinotify
import logging
import time
class dummy: pass
class EventHandler(pyinotify.ProcessEvent):
""" This class is used by inotify to handle filesystem changes events.
"""
def __init__(self, configManagerInstance):
super(EventHandler, self).__init__()
self.configManagerInstance = configManagerInstance
def process_IN_CLOSE_WRITE(self, event):
""" This is a callback handler. Used to handle filesystem events.
It reloads the configuration whenever config_filename is closed after
being written (IN_CLOSE_WRITE).
"""
if self.configManagerInstance.config_filename in event.pathname:
self.configManagerInstance.loadfile()
class ConfigManager():
""" This class is used to read, write, reset the global config,
It is used by sonosdoorbell service and by webfrontend.
Configuration is stored in an XML file.
Configuration is autoloaded when a file change is detected.
NOTE: When an exception occurs, the configuration is generally reset
and is saved again to the XML file. A backup is also created.
"""
def __init__(self, config_subdir, config_filename, lextend_ip, logger=None):
""" ConfigManager initializer.
This function will ensure that folder structure is created.
It will load (and save to ensure consistency in case of errors) the XML.
It will then start watching the config_file for changes.
"""
self.lextend_ip = lextend_ip
self.logger = logger or logging.getLogger(__name__)
self.config_filename = None
config_userconfig = os.path.join("/root",
".config", config_subdir, config_filename)
# Make sure config_file exists in the config directory.
try:
if not os.path.exists(config_userconfig):
try:
conf_dir = os.path.dirname(config_userconfig)
os.makedirs(conf_dir)
except:
self.logger.error("Cannot create %s." % conf_dir, exc_info=True)
self.config_filename = config_userconfig
except:
self.logger.error("Could not ensure %s exists." % config_userconfig,
exc_info=True)
# Try to load and save the config file : enforce consistency.
self.loadfile()
self.save()
# Start watching the config file for changes.
try:
self.wm = pyinotify.WatchManager()
mask = pyinotify.IN_CLOSE_WRITE
self.notifier = pyinotify.ThreadedNotifier(self.wm, EventHandler(self))
self.notifier.start()
self.wdd = self.wm.add_watch(os.path.dirname(self.config_filename),
mask,
rec=True)
except:
self.logger.error("Could not start observe on %s" % self.config_filename,
exc_info=True)
def loadfile(self):
""" Load config from the XML file, and reset and save in case of error.
"""
self.logger.info("Loading settings from %s." % self.config_filename)
try:
self.config = XMLSettings(self.config_filename)
except:
self.logger.error("Could not load Config from %s." % self.config_filename,
exc_info=True)
self.reset()
self.save()
self.load()
def load(self):
""" Load settings from the config file.
"""
def load_general():
section = "General"
# Prepare the structures
self.general = dummy()
self.general.lextend = dummy()
self.general.miniserver = dummy()
# read the settings
lextend_ip = "192.168.0.231"
if self.lextend_ip != "":
lextend_ip = self.lextend_ip
self.general.lextend.ip = self.config.get(section + "/Lextend/ip",
lextend_ip)
self.general.lextend.port = self.config.get(section + "/Lextend/port",
"5050")
self.general.miniserver.ip = self.config.get(section + "/Miniserver/ip",
"192.168.0.230")
self.general.miniserver.port = self.config.get(section + "/Miniserver/port",
"5050")
def load_sonos_doorbell():
section = "Services/Sonos_Doorbell"
# Prepare the structures
self.sonos_doorbell = dummy()
tmp = self.config.get(section + "/enable", "True")
self.sonos_doorbell.enable = True if "True" in tmp else False
tmp = self.config.get(section + "/volume_override", "False")
self.sonos_doorbell.volume_override = True if "True" in tmp else False
self.sonos_doorbell.volume = self.config.get(section + "/volume", 50)
self.sonos_doorbell.default_sound = self.config.get(section + "/default_sound", 1)
self.sonos_doorbell.sounds_filelist = []
for i in range(10):
key = section + "/Sounds/sound_%s" % i
self.sonos_doorbell.sounds_filelist.append(self.config.get(key, "default sound"))
self.sonos_doorbell.protocol = self.config.get(section + "/Protocol",
"10!x1")
load_general()
load_sonos_doorbell()
def save(self):
""" Save settings to the config file.
"""
self.logger.info("Saving Config to %s." % self.config_filename)
def put_general():
section = "General"
self.config.put(section + "/version", "1")
self.config.put(section + "/Lextend/ip", self.general.lextend.ip)
self.config.put(section + "/Lextend/port", self.general.lextend.port)
self.config.put(section + "/Miniserver/ip", self.general.miniserver.ip)
self.config.put(section + "/Miniserver/port", self.general.miniserver.port)
def put_sonos_doorbell():
section = "Services/Sonos_Doorbell"
self.config.put(section + "/enable", str(self.sonos_doorbell.enable))
self.config.put(section + "/volume_override",
str(self.sonos_doorbell.volume_override))
self.config.put(section + "/volume", self.sonos_doorbell.volume)
self.config.put(section + "/default_sound", self.sonos_doorbell.default_sound)
for i in range(10):
self.config.put(section + "/Sounds/sound_%s" % i, self.sonos_doorbell.sounds_filelist[i])
self.config.put(section + "/Protocol", self.sonos_doorbell.protocol)
put_general()
put_sonos_doorbell()
try:
self.config.save()
except:
self.logger.error("Could not save settings.", exc_info=True)
# Lazy attempt to solve the bug with using config before it is loaded again;
time.sleep(0.5)
def remove_xml_element(self, element_name):
try:
f = open(self.config_filename, "rw")
tree = etree.parse(f)
f.close()
for element in tree.xpath("//%s" % element_name):
element.getparent().remove(element)
fi = open(self.config_filename, "r+w")
fi.write(etree.tostring(tree))
except:
self.logger.error("While removing %s in %s" % (element_name,
self.config_filename),
exc_info=True)
def reset_service(self, service_name):
self.remove_xml_element(service_name)
self.load()
self.save()
def reset_general(self):
self.reset_service("General")
def reset_sonos_doorbell(self):
self.reset_service("Sonos_Doorbell")
def reset(self):
""" Reset settings and save them to the XML config file.
"""
self.logger.info("Resetting Config to %s" % self.config_filename)
try:
os.rename(self.config_filename, "%s.bak" % self.config_filename)
self.logger.info("Config file backed up to %s.bak" % self.config_filename)
except:
self.logger.warn("reset", exc_info=True)
try:
self.config = XMLSettings(self.config_filename)
except:
self.config = XMLSettings("")
self.logger.warn("reset", exc_info=True)
self.load()
self.save()
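# Illustrative usage sketch (not part of the original module); the directory,
# file name and IP address below are hypothetical:
#
#   cfg = ConfigManager("lextend", "config.xml", "192.168.0.231")
#   if cfg.sonos_doorbell.enable:
#       cfg.sonos_doorbell.volume = 75
#       cfg.save()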
| gpl-3.0 |
pfctdayelise/allseasons | tests/test_location.py | 1 | 2668 | import pytest
from libs import location
places = {'london': (51.507351, -0.127758),
'murmansk': (68.958524, 33.08266),
'buenos aires': (-34.603684, -58.381559),
'melbourne': (-37.813628, 144.963058),
'pontianak': (0.0, -109.20),
'mumbai': (19.0760, 72.8777),
'delhi': (28.7041, 77.1025),
'bangalore': (12.9716, 77.5946),
'hyderabad': (17.3850, 78.4867),
}
class TestLocation:
"""This class is just grouping the tests together,
to keep the structure parallel with the source class.
"""
@pytest.mark.europe
@pytest.mark.external
def test_country_uk(self):
lat, lng = places['london']
loc = location.Location(lat, lng)
assert loc.country == 'UK'
@pytest.mark.europe
@pytest.mark.external
def test_country_russia(self):
lat, lng = places['murmansk']
loc = location.Location(lat, lng)
assert loc.country == 'Russian Federation'
@pytest.mark.external
def test_country_argentina(self):
lat, lng = places['buenos aires']
loc = location.Location(lat, lng)
assert loc.country == 'Argentina'
@pytest.mark.oceania
@pytest.mark.external
def test_country_australia(self):
lat, lng = places['melbourne']
loc = location.Location(lat, lng)
assert loc.country == 'Australia'
@pytest.mark.xfail(reason="Not sure why this fails, can you figure out why?")
@pytest.mark.oceania
@pytest.mark.external
def test_country_indonesia(self):
lat, lng = 'pontianak'
loc = location.Location(lat, lng)
assert loc.country == 'Indonesia'
def test_hemisphere_melbourne(self):
lat, lng = places['melbourne']
loc = location.Location(lat, lng)
assert loc.hemisphere == 'northern'
@pytest.mark.parametrize(('placename', 'expected'), [
('london', 'northern'),
('murmansk', 'northern'),
('buenos aires', 'southern'),
('pontianak', 'southern'),
])
def test_hemisphere_bulk(self, placename, expected):
lat, lng = places[placename]
loc = location.Location(lat, lng)
assert loc.hemisphere == expected
def test_hemisphere_errors(self):
"""We don't validate the input, but we expect
lat and lng to be floats rather than eg strings.
It's not exactly intentional, but this test is
documenting that behaviour.
"""
lat = '37°S'
lng = '144°E'
loc = location.Location(lat, lng)
with pytest.raises(TypeError):
_hemisphere = loc.hemisphere
| bsd-3-clause |
kenwang815/KodiPlugins | script.module.youtube.dl/lib/youtube_dl/extractor/clyp.py | 89 | 1742 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
parse_iso8601,
)
class ClypIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)'
_TEST = {
'url': 'https://clyp.it/ojz2wfah',
'md5': '1d4961036c41247ecfdcc439c0cddcbb',
'info_dict': {
'id': 'ojz2wfah',
'ext': 'mp3',
'title': 'Krisson80 - bits wip wip',
'description': '#Krisson80BitsWipWip #chiptune\n#wip',
'duration': 263.21,
'timestamp': 1443515251,
'upload_date': '20150929',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
metadata = self._download_json(
'https://api.clyp.it/%s' % audio_id, audio_id)
formats = []
for secure in ('', 'Secure'):
for ext in ('Ogg', 'Mp3'):
format_id = '%s%s' % (secure, ext)
format_url = metadata.get('%sUrl' % format_id)
if format_url:
formats.append({
'url': format_url,
'format_id': format_id,
'vcodec': 'none',
})
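# Illustrative note (not part of the original extractor): the loop above probes
# the metadata for the keys 'OggUrl', 'Mp3Url', 'SecureOggUrl' and
# 'SecureMp3Url', emitting one audio-only format per URL that is present.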
self._sort_formats(formats)
title = metadata['Title']
description = metadata.get('Description')
duration = float_or_none(metadata.get('Duration'))
timestamp = parse_iso8601(metadata.get('DateCreated'))
return {
'id': audio_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
}
| gpl-2.0 |
yashLadha/coala | coalib/parsing/LineParser.py | 28 | 9973 | import logging
import re
from coala_utils.string_processing.StringConverter import StringConverter
from coala_utils.string_processing import (unescape, convert_to_raw,
position_is_escaped,
unescaped_rstrip)
class LineParser:
def __init__(self,
key_value_delimiters=('=',),
comment_separators=('#',),
key_delimiters=(',', ' '),
section_name_surroundings=None,
section_override_delimiters=('.',),
key_value_append_delimiters=('+=',)):
"""
Creates a new line parser. Please note that no delimiter or separator
may be an "o" or you may encounter undefined behaviour with the
escapes.
:param key_value_delimiters: Delimiters that delimit a key from
a value.
:param comment_separators: Used to initiate a comment.
:param key_delimiters: Delimiters between several keys.
:param section_name_surroundings: Dictionary, e.g. {"[", "]"} means a
section name is surrounded by [].
If None, {"[": "]"} is used as
default.
:param section_override_delimiters: Delimiter for a section override.
E.g. "." would mean that
section.key is a possible key that
puts the key into the section
"section" despite of the current
section.
:param key_value_append_delimiters: Delimiters to separate key and
value in setting arguments where
settings are being appended.
"""
section_name_surroundings = (
{'[': ']'} if section_name_surroundings is None
else section_name_surroundings)
self.key_value_delimiters = key_value_delimiters
self.key_value_append_delimiters = key_value_append_delimiters
self.comment_separators = comment_separators
self.key_delimiters = key_delimiters
self.section_name_surroundings = section_name_surroundings
self.section_override_delimiters = section_override_delimiters
def parse(self, line):
"""
Note that every value in the returned tuple *besides the value* is
unescaped. This is so since the value is meant to be put into a Setting
later thus the escapes may be needed there.
:param line: The line to parse.
:return: section_name (empty string if the line is not a section name),
[(section_override, key), ...], value, comment
"""
logging.warning('The parse method of LineParser is deprecated and will'
' be removed. Please use `_parse` which has a new '
'return type, a tuple containing 5 values instead of '
'4. Refer to the method documentation for further '
'information.')
section_name, key_tuples, value, _, comment = self._parse(line)
return section_name, key_tuples, value, comment
def _parse(self, line):
"""
Note that every value in the returned tuple *besides the value* is
unescaped. This is so since the value is meant to be put into a Setting
later thus the escapes may be needed there.
:param line: The line to parse.
:return: section_name (empty string if the line is not a section name),
[(section_override, key), ...], value, to_append (True if
append delimiter is found else False), comment
"""
for separator in self.comment_separators:
if (re.match('[^ ]' + separator, line)
or re.match(separator + '[^ ]', line)):
logging.warning('This comment does not have whitespace' +
' before or after ' + separator + ' in: ' +
repr(line.replace('\n', '')) + '. If you ' +
'didn\'t mean to make a comment, use a ' +
'backslash for escaping.')
line, comment = self.__separate_by_first_occurrence(
line,
self.comment_separators)
comment = unescape(comment)
if line == '':
return '', [], '', False, comment
section_name = unescape(self.__get_section_name(line))
if section_name != '':
return section_name, [], '', False, comment
# Escapes in value might be needed by the bears
append = True
keys, value = self.__extract_keys_and_value(
line, self.key_value_append_delimiters)
if not value:
keys, value = self.__extract_keys_and_value(
line, self.key_value_delimiters, True)
append = False
# Add all the delimiters that are stored as tuples
all_delimiters = self.key_value_delimiters
all_delimiters += self.key_value_append_delimiters
all_delimiters += self.key_delimiters
all_delimiters += self.comment_separators
all_delimiters += self.section_override_delimiters
all_delimiters = ''.join(all_delimiters)
# Add all keys and values in section_name_surroundings, which is
# stored as a dict
all_delimiters += ''.join(self.section_name_surroundings.keys())
all_delimiters += ''.join(self.section_name_surroundings.values())
value = convert_to_raw(value, all_delimiters)
key_tuples = []
for key in keys:
key = convert_to_raw(key, all_delimiters)
section, key = self.__separate_by_first_occurrence(
key,
self.section_override_delimiters,
True,
True)
key_tuples.append((unescape(section), unescape(key)))
return '', key_tuples, value, append, comment
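# Illustrative sketch (not part of the original class): with the default
# delimiters, a typical setting line is parsed roughly as
#
#   LineParser()._parse('bears = SpaceConsistencyBear  # style')
#   # -> ('', [('', 'bears')], 'SpaceConsistencyBear', False, 'style')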
@staticmethod
def __separate_by_first_occurrence(string,
delimiters,
strip_delim=False,
return_second_part_nonempty=False):
"""
Separates a string by the first of all given delimiters. Any whitespace
characters will be stripped away from the parts.
:param string: The string to separate.
:param delimiters: The delimiters.
:param strip_delim: Strips the delimiter from the
result if true.
:param return_second_part_nonempty: If no delimiter is found and this
is true the contents of the string
will be returned in the second part
of the tuple instead of the first
one.
:return: (first_part, second_part)
"""
temp_string = string.replace('\\\\', 'oo')
i = temp_string.find('\\')
while i != -1:
temp_string = temp_string[:i] + 'oo' + temp_string[i+2:]
i = temp_string.find('\\', i+2)
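# At this point every escaped character pair in temp_string has been masked
# with 'oo', preserving the string length so the delimiter positions found
# below map back onto `string`; this masking is also why the class docstring
# forbids using 'o' in any delimiter or separator.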
delim_pos = len(string)
used_delim = ''
for delim in delimiters:
pos = temp_string.find(delim)
if 0 <= pos < delim_pos:
delim_pos = pos
used_delim = delim
if return_second_part_nonempty and delim_pos == len(string):
return '', string.strip(' \n')
first_part = string[:delim_pos]
second_part = string[delim_pos + (
len(used_delim) if strip_delim else 0):]
if not position_is_escaped(second_part, len(second_part) - 1):
first_part = unescaped_rstrip(first_part)
second_part = unescaped_rstrip(second_part)
return (first_part.lstrip().rstrip('\n'),
second_part.lstrip().rstrip('\n'))
def __get_section_name(self, line):
for begin, end in self.section_name_surroundings.items():
if (line[0:len(begin)] == begin and
line[len(line) - len(end):len(line)] == end):
return line[len(begin):len(line) - len(end)].strip(' \n')
return ''
def __extract_keys_and_value(self,
line,
delimiters,
return_second_part_nonempty=False):
"""
This method extracts the keys and values from the given string by
splitting them based on the delimiters provided.
:param line: The input string.
:param delimiters: A list of delimiters to split the
strings on.
:param return_second_part_nonempty: If no delimiter is found and this
is true the contents of the string
will be returned as value
:return: The parsed keys and values from a
line.
"""
key_part, value = self.__separate_by_first_occurrence(
line,
delimiters,
True,
return_second_part_nonempty)
keys = list(StringConverter(
key_part,
list_delimiters=self.key_delimiters).__iter__(
remove_backslashes=False))
return keys, value
| agpl-3.0 |
girving/tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | 38 | 10762 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end benchmark for batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def batch_norm_op(tensor, mean, variance, beta, gamma, scale):
"""Fused kernel for batch normalization."""
# _batch_norm_with_global_normalization is deprecated in v9
test_util.set_producer_version(ops.get_default_graph(), 8)
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
tensor, mean, variance, beta, gamma, 0.001, scale)
# pylint: enable=protected-access
# Note that the naive implementation is much slower:
# batch_norm = (tensor - mean) * tf.rsqrt(variance + 0.001)
# if scale:
# batch_norm *= gamma
# return batch_norm + beta
def batch_norm_py(tensor, mean, variance, beta, gamma, scale):
"""Python implementation of batch normalization."""
return nn_impl.batch_normalization(tensor, mean, variance, beta, gamma if
scale else None, 0.001)
def batch_norm_slow(tensor, mean, variance, beta, gamma, scale):
batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
if scale:
batch_norm *= gamma
return batch_norm + beta
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
"""Build a graph containing a sequence of batch normalizations.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
Returns:
An array of tensors to run()
"""
moment_shape = []
keep_dims = mode == "py" or mode == "slow"
if keep_dims:
for axis in range(len(input_shape)):
if axis in axes:
moment_shape.append(1)
else:
moment_shape.append(input_shape[axis])
else:
for axis in range(len(input_shape)):
if axis not in axes:
moment_shape.append(input_shape[axis])
with ops.device("/%s:0" % device):
tensor = variables.Variable(random_ops.truncated_normal(input_shape))
for _ in range(num_layers):
if train:
mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
else:
mean = array_ops.zeros(moment_shape)
variance = array_ops.ones(moment_shape)
beta = variables.Variable(array_ops.zeros(moment_shape))
gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
if mode == "py":
tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)
elif mode == "op":
tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)
elif mode == "slow":
tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)
if train:
return gradients_impl.gradients([tensor], variables.trainable_variables())
else:
return [tensor]
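# Illustrative sketch (not part of the original benchmark): the benchmark below
# drives this builder with calls along the lines of
#
#   outputs = build_graph("cpu", [8, 128, 128, 32], [0, 1, 2],
#                         num_layers=10, mode="py", scale=True, train=False)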
def print_difference(mode, t1, t2):
"""Print the difference in timing between two runs."""
difference = (t2 - t1) / t1 * 100.0
print("=== %s: %.1f%% ===" % (mode, difference))
class BatchNormBenchmark(test.Benchmark):
"""Benchmark batch normalization."""
def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
train, num_iters):
"""Run the graph and print its execution time.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
num_iters: number of steps to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
train)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
_ = session.run([out.op for out in outputs]) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run([out.op for out in outputs])
duration = time.time() - start_time
print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
(device, len(input_shape), len(axes), num_layers, mode, scale, train,
duration / num_iters))
name_template = (
"batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
"layers_{num_layers}_scale_{scale}_"
"train_{train}")
self.report_benchmark(
name=name_template.format(
device=device,
mode=mode,
num_layers=num_layers,
scale=scale,
train=train,
shape=str(input_shape).replace(" ", ""),
axes=str(axes)).replace(" ", ""),
iters=num_iters,
wall_time=duration / num_iters)
return duration
def benchmark_batch_norm(self):
print("Forward convolution (lower layers).")
shape = [8, 128, 128, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (lower layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward convolution (higher layers).")
shape = [256, 17, 17, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (higher layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward fully-connected.")
shape = [1024, 32]
axes = [0]
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("py vs slow", t1, t2)
print("Forward/backward fully-connected.")
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--use_gpu",
type="bool",
nargs="?",
const=True,
default=True,
help="Run GPU benchmarks."
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
iModels/mbuild | mbuild/tests/test_packing.py | 2 | 13909 | import os
import numpy as np
import pytest
import mbuild as mb
from mbuild import Box
from mbuild.exceptions import MBuildError
from mbuild.tests.base_test import BaseTest
class TestPacking(BaseTest):
def test_fill_box(self, h2o):
filled = mb.fill_box(h2o, n_compounds=50, box=Box([2, 2, 2]))
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
assert np.array_equal(filled.box.lengths, [2, 2, 2])
assert np.array_equal(filled.box.angles, (90, 90, 90))
def test_fill_box_density_box(self, h2o):
filled = mb.fill_box(h2o, n_compounds=100, density=100)
assert np.all(
np.isclose(filled.box.lengths, np.ones(3) * 3.104281669169261)
)
def test_fill_box_aspect_ratio(self, h2o):
filled = mb.fill_box(
h2o, n_compounds=1000, density=1000, aspect_ratio=[1, 2, 1]
)
assert np.isclose(filled.box.lengths[0] / filled.box.lengths[1], 0.5)
assert np.isclose(filled.box.lengths[1] / filled.box.lengths[2], 2)
def test_fill_box_density_n_compounds(self, h2o):
filled = mb.fill_box(
h2o, density=100, box=Box([3.1042931, 3.1042931, 3.1042931])
)
assert filled.n_particles == 300
def test_fill_box_compound_ratio(self, h2o, ethane):
filled = mb.fill_box(
compound=[h2o, ethane],
density=800,
compound_ratio=[2, 1],
box=Box([2, 2, 2]),
)
n_ethane = len([c for c in filled.children if c.name == "Ethane"])
n_water = len([c for c in filled.children if c.name == "H2O"])
assert n_water / n_ethane == 2
def test_fill_sphere(self, h2o):
filled = mb.fill_sphere(h2o, sphere=[3, 3, 3, 1.5], n_compounds=50)
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
center = np.array([3.0, 3.0, 3.0])
assert np.alltrue(np.linalg.norm(filled.xyz - center, axis=1) < 1.5)
def test_fill_sphere_density(self, h2o):
filled = mb.fill_sphere(h2o, sphere=[3, 3, 3, 1.5], density=1000)
assert filled.n_particles == 921
def test_fill_sphere_compound_ratio(self, h2o, ethane):
filled = mb.fill_sphere(
compound=[h2o, ethane],
sphere=[3, 3, 3, 1.5],
density=800,
compound_ratio=[2, 1],
)
n_ethane = len([c for c in filled.children if c.name == "Ethane"])
n_water = len([c for c in filled.children if c.name == "H2O"])
assert n_water / n_ethane == 2
def test_fill_sphere_bad_args(self, h2o, ethane):
with pytest.raises(ValueError):
mb.fill_sphere(compound=h2o, sphere=[4, 4, 4, 1])
with pytest.raises(ValueError):
mb.fill_sphere(
compound=h2o, n_compounds=100, density=100, sphere=[4, 4, 4, 1]
)
with pytest.raises(TypeError):
mb.fill_sphere(compound=h2o, density=1000, sphere="yes")
with pytest.raises(ValueError):
mb.fill_sphere(
compound=[h2o, ethane], n_compounds=1000, sphere=[1, 1, 1, 4]
)
with pytest.raises(ValueError):
mb.fill_sphere(
compound=h2o, n_compounds=[10, 10], sphere=[1, 1, 1, 4]
)
with pytest.raises(ValueError):
mb.fill_sphere(compound=h2o, n_compounds=100, sphere=[1, 1, 1, 4])
def test_fill_region(self, h2o):
filled = mb.fill_region(
h2o,
n_compounds=50,
region=Box(lengths=[2, 3, 3], angles=[90.0, 90.0, 90.0]),
bounds=[[3, 2, 2, 5, 5, 5]],
)
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
assert np.min(filled.xyz[:, 0]) >= 3
assert np.min(filled.xyz[:, 1]) >= 2
assert np.min(filled.xyz[:, 2]) >= 2
assert np.max(filled.xyz[:, 0]) <= 5
assert np.max(filled.xyz[:, 1]) <= 5
assert np.max(filled.xyz[:, 2]) <= 5
def test_fill_region_box(self, h2o):
mybox = Box(lengths=[4, 4, 4], angles=[90.0, 90.0, 90.0])
filled = mb.fill_region(
h2o, n_compounds=50, region=mybox, bounds=[[0, 0, 0, 4, 4, 4]]
)
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
assert np.min(filled.xyz[:, 0]) >= 0
assert np.max(filled.xyz[:, 2]) <= 4
def test_fill_region_multiple(self, ethane, h2o):
box1 = mb.Box(lengths=[2, 2, 2], angles=[90.0, 90.0, 90.0])
box2 = mb.Box(lengths=[2, 2, 2], angles=[90.0, 90.0, 90.0])
filled = mb.fill_region(
compound=[ethane, h2o],
n_compounds=[2, 2],
region=[box1, box2],
bounds=[[2, 2, 2, 4, 4, 4], [4, 2, 2, 6, 4, 4]],
)
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_region_incorrect_type(self, ethane):
box1 = {"a": 1}
with pytest.raises(ValueError, match=r"expected a list of type:"):
mb.fill_region(
compound=[ethane], n_compounds=[2], region=box1, bounds=None
)
def test_box_no_bound(self, ethane):
box1 = Box(lengths=[2, 2, 2], angles=[90.0, 90.0, 90.0])
mb.fill_region(
compound=[ethane], n_compounds=[2], region=box1, bounds=None
)
def test_fill_region_multiple_bounds(self, ethane, h2o):
box1 = Box.from_mins_maxs_angles(
mins=[2, 2, 2], maxs=[4, 4, 4], angles=[90.0, 90.0, 90.0]
)
box2 = mb.Box.from_mins_maxs_angles(
mins=[4, 2, 2], maxs=[6, 4, 4], angles=[90.0, 90.0, 90.0]
)
filled = mb.fill_region(
compound=[ethane, h2o],
n_compounds=[2, 2],
region=[box1, box2],
bounds=[[2, 2, 2, 4, 4, 4], [4, 2, 2, 6, 4, 4]],
)
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_region_multiple_types(self, ethane, h2o):
box1 = mb.Box.from_mins_maxs_angles(
mins=[2, 2, 2], maxs=[4, 4, 4], angles=[90.0, 90.0, 90.0]
)
box2 = [4, 2, 2, 6, 4, 4]
filled = mb.fill_region(
compound=[ethane, h2o],
n_compounds=[2, 2],
region=[box1, box2],
bounds=[[2, 2, 2, 4, 4, 4], box2],
)
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_box_multiple(self, ethane, h2o):
n_solvent = 100
filled = mb.fill_box([ethane, h2o], [1, 100], box=[4, 4, 4])
assert filled.n_particles == 8 + n_solvent * 3
assert filled.n_bonds == 7 + n_solvent * 2
assert len(filled.children) == 101
def test_solvate(self, ethane, h2o):
n_solvent = 100
solvated = mb.solvate(ethane, h2o, n_solvent=n_solvent, box=[4, 4, 4])
assert solvated.n_particles == 8 + n_solvent * 3
assert solvated.n_bonds == 7 + n_solvent * 2
def test_solvate_multiple(self, methane, ethane, h2o):
init_box = mb.fill_box(methane, 2, box=[4, 4, 4])
solvated = mb.solvate(init_box, [ethane, h2o], [20, 20], box=[4, 4, 4])
assert solvated.n_particles == 2 * 5 + 20 * 8 + 20 * 3
assert len(solvated.children) == 41
def test_fill_box_seed(self, ethane):
filled = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2])
filled_same = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2])
filled_diff = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2], seed=2)
assert np.array_equal(filled.xyz, filled_same.xyz)
assert not np.array_equal(filled.xyz, filled_diff.xyz)
def test_wrong_box(self, h2o):
with pytest.raises(MBuildError):
filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2])
with pytest.raises(MBuildError):
filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2, 2])
def test_bad_args(self, h2o):
with pytest.raises(ValueError):
mb.fill_box(h2o, n_compounds=10)
with pytest.raises(ValueError):
mb.fill_box(h2o, density=1000)
with pytest.raises(ValueError):
mb.fill_box(h2o, box=[2, 2, 2])
with pytest.raises(ValueError):
mb.fill_box(h2o, n_compounds=10, density=1000, box=[2, 2, 2])
with pytest.raises(ValueError):
mb.fill_box(compound=[h2o, h2o], n_compounds=[10], density=1000)
with pytest.raises(ValueError):
mb.solvate(
solute=h2o, solvent=[h2o], n_solvent=[10, 10], box=[2, 2, 2]
)
with pytest.raises(ValueError):
mb.fill_region(h2o, n_compounds=[10, 10], region=[2, 2, 2, 4, 4, 4])
with pytest.raises(ValueError):
mb.fill_box(
compound=[h2o, h2o],
n_compounds=[10],
density=1000,
fix_orientation=[True, True, True],
)
def test_write_temp_file(self, h2o):
cwd = os.getcwd() # Must keep track of the temp dir that pytest creates
filled = mb.fill_box(
h2o, n_compounds=10, box=Box([4, 4, 4]), temp_file="temp_file1.pdb"
)
region = mb.fill_region(
h2o,
10,
[[2, 2, 2, 4, 4, 4]],
temp_file="temp_file2.pdb",
bounds=[[2, 2, 2, 4, 4, 4]],
)
solvated = mb.solvate(
filled, h2o, 10, box=[4, 4, 4], temp_file="temp_file3.pdb"
)
assert os.path.isfile(os.path.join(cwd, "temp_file1.pdb"))
assert os.path.isfile(os.path.join(cwd, "temp_file2.pdb"))
assert os.path.isfile(os.path.join(cwd, "temp_file3.pdb"))
def test_packmol_error(self, h2o):
with pytest.raises(MBuildError, match=r"co\-linear"):
mb.fill_box(h2o, n_compounds=10, box=[0, 0, 0])
def test_packmol_warning(self, h2o):
with pytest.warns(UserWarning):
mb.fill_box(h2o, n_compounds=10, box=[1, 1, 1], overlap=10)
def test_rotate(self, h2o):
filled = mb.fill_box(h2o, 2, box=[1, 1, 1], fix_orientation=True)
w0 = filled.xyz[:3]
w1 = filled.xyz[3:]
# Translate w0 and w1 to COM
w0 -= w0.sum(0) / len(w0)
w1 -= w1.sum(0) / len(w1)
assert np.isclose(w0, w1).all()
def test_no_rotate(self, h2o):
filled = mb.fill_box(
[h2o, h2o], [1, 1], box=[1, 1, 1], fix_orientation=[False, True]
)
w0 = filled.xyz[:3]
w1 = filled.xyz[3:]
# Translate w0 and w1 to COM
w0 -= w0.sum(0) / len(w0)
w1 -= w1.sum(0) / len(w1)
assert not np.isclose(w0, w1).all()
def test_remove_port(self):
from mbuild.lib.recipes import Alkane
butane = Alkane(n=4)
butane.remove(butane[-1])
box = mb.fill_box(butane, n_compounds=10, density=1)
def test_sidemax(self):
from mbuild.lib.molecules import Methane
ch4 = Methane()
# With default sidemax
box_of_methane = mb.fill_box(
ch4, box=[1000, 1000, 1000], n_compounds=500
)
sphere_of_methane = mb.fill_sphere(
ch4, sphere=[1000, 1000, 1000, 1000], n_compounds=500
)
assert all(
np.asarray(box_of_methane.get_boundingbox().lengths)
< [110, 110, 110]
)
assert all(
np.asarray(sphere_of_methane.get_boundingbox().lengths)
< [210, 210, 210]
)
# With adjusted sidemax
big_box_of_methane = mb.fill_box(
ch4, box=[1000, 1000, 1000], n_compounds=500, sidemax=1000.0
)
big_sphere_of_methane = mb.fill_sphere(
ch4,
sphere=[1000, 1000, 1000, 1000],
n_compounds=500,
sidemax=2000.0,
)
assert all(
np.asarray(big_box_of_methane.get_boundingbox().lengths)
> [900, 900, 900]
)
assert all(
np.asarray(big_sphere_of_methane.get_boundingbox().lengths)
> [1800, 1800, 1800]
)
def test_box_edge(self, h2o, methane):
system_box = mb.Box(lengths=(1.8, 1.8, 1.8))
packed = mb.fill_box(
compound=h2o, n_compounds=100, box=system_box, edge=0.2
)
edge_sizes = np.subtract(
system_box.lengths, packed.get_boundingbox().lengths
)
assert np.allclose(edge_sizes, np.array([0.4] * 3), atol=0.1)
region = mb.fill_region(
compound=h2o,
n_compounds=100,
region=system_box,
edge=0.2,
bounds=[system_box],
)
edge_sizes = np.subtract(
system_box.lengths, packed.get_boundingbox().lengths
)
assert np.allclose(edge_sizes, np.array([0.4] * 3), atol=0.1)
edge = 0.2
bounds = [2, 2, 2, 1]
sphere = mb.fill_sphere(
compound=h2o, n_compounds=100, sphere=bounds, edge=edge
)
target_diameter = (bounds[3] - edge) * 2
assert np.allclose(
sphere.maxs - sphere.mins, np.array([target_diameter] * 3), atol=0.1
)
solvated = mb.solvate(
solvent=h2o,
solute=methane,
n_solvent=100,
box=system_box,
overlap=0.2,
)
edge_sizes = np.subtract(
system_box.lengths, solvated.get_boundingbox().lengths
)
assert np.allclose(edge_sizes, np.array([0.4] * 3), atol=0.1)
| mit |
vrv/tensorflow | tensorflow/contrib/ffmpeg/__init__.py | 82 | 1251 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Working with audio using FFmpeg.
See the @{$python/contrib.ffmpeg} guide.
@@decode_audio
@@encode_audio
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ffmpeg_ops import decode_audio
from tensorflow.contrib.ffmpeg.ffmpeg_ops import encode_audio
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['decode_audio', 'encode_audio']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
vishwaprakashmishra/xmatrix | vumi/dispatchers/tests/utils.py | 3 | 2173 | from twisted.internet.defer import inlineCallbacks
from vumi.tests.utils import VumiWorkerTestCase, PersistenceMixin
# For backcompat
from .helpers import DummyDispatcher
DummyDispatcher # To keep pyflakes happy.
class DispatcherTestCase(VumiWorkerTestCase, PersistenceMixin):
"""
This is a base class for testing dispatcher workers.
"""
transport_name = None
dispatcher_name = "sphex_dispatcher"
dispatcher_class = None
def setUp(self):
self._persist_setUp()
super(DispatcherTestCase, self).setUp()
@inlineCallbacks
def tearDown(self):
yield super(DispatcherTestCase, self).tearDown()
yield self._persist_tearDown()
def get_dispatcher(self, config, cls=None, start=True):
"""
Get an instance of a dispatcher class.
:param config: Config dict.
:param cls: The Dispatcher class to instantiate.
Defaults to :attr:`dispatcher_class`
:param start: True to start the dispatcher (default), False otherwise.
Some default config values are helpfully provided in the
interests of reducing boilerplate:
* ``dispatcher_name`` defaults to :attr:`self.dispatcher_name`
"""
if cls is None:
cls = self.dispatcher_class
config = self.mk_config(config)
config.setdefault('dispatcher_name', self.dispatcher_name)
return self.get_worker(config, cls, start)
def rkey(self, name):
# We don't want the default behaviour for dispatchers.
return name
def get_dispatched_messages(self, transport_name, direction='outbound'):
return self._get_dispatched(
'%s.%s' % (transport_name, direction))
def wait_for_dispatched_messages(self, transport_name, amount,
direction='outbound'):
return self._wait_for_dispatched(
'%s.%s' % (transport_name, direction), amount)
def dispatch(self, message, transport_name, direction='inbound',
exchange='vumi'):
return self._dispatch(
message, '%s.%s' % (transport_name, direction), exchange)
| bsd-3-clause |
retomerz/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/tests/__init__.py | 231 | 1092 | from django.contrib.auth.tests.auth_backends import (BackendTest,
RowlevelBackendTest, AnonymousUserBackendTest, NoAnonymousUserBackendTest,
NoBackendsTest, InActiveUserBackendTest, NoInActiveUserBackendTest)
from django.contrib.auth.tests.basic import BasicTestCase
from django.contrib.auth.tests.decorators import LoginRequiredTestCase
from django.contrib.auth.tests.forms import (UserCreationFormTest,
AuthenticationFormTest, SetPasswordFormTest, PasswordChangeFormTest,
UserChangeFormTest, PasswordResetFormTest)
from django.contrib.auth.tests.remote_user import (RemoteUserTest,
RemoteUserNoCreateTest, RemoteUserCustomTest)
from django.contrib.auth.tests.models import ProfileTestCase
from django.contrib.auth.tests.signals import SignalTestCase
from django.contrib.auth.tests.tokens import TokenGeneratorTest
from django.contrib.auth.tests.views import (PasswordResetTest,
ChangePasswordTest, LoginTest, LogoutTest, LoginURLSettings)
from django.contrib.auth.tests.permissions import TestAuthPermissions
# The password for the fixture data users is 'password'
| apache-2.0 |
aleh-arol/tcollector | collectors/0/haproxy.py | 7 | 3715 | #!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2013 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
# Script uses UNIX socket opened by haproxy, you need to setup one with
# "stats socket" config parameter.
#
# You need to ensure that "stats timeout" (socket timeout) is big
# enough to work well with collector COLLECTION_INTERVAL constant.
# The default timeout on the "stats socket" is set to 10 seconds!
#
# See haproxy documentation for details:
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
# section 3.1. Process management and security.
"""HAproxy collector """
import os
import socket
import sys
import time
import stat
import subprocess
from collectors.lib import utils
COLLECTION_INTERVAL = 15
def haproxy_pid():
"""Finds out the pid of haproxy process"""
try:
pid = subprocess.check_output(["pidof", "haproxy"])
except subprocess.CalledProcessError:
return None
return pid.rstrip()
def find_conf_file(pid):
"""Returns the conf file of haproxy."""
try:
output = subprocess.check_output(["ps", "--no-headers", "-o", "cmd", pid])
except subprocess.CalledProcessError, e:
utils.err("HAProxy (pid %s) went away? %s" % (pid, e))
return None
return output.split("-f")[1].split()[0]
def find_sock_file(conf_file):
"""Returns the unix socket file of haproxy."""
try:
fd = open(conf_file)
except IOError, e:
utils.err("Error: %s. Config file path is relative: %s" % (e, conf_file))
return None
try:
for line in fd:
if line.lstrip(" \t").startswith("stats socket"):
sock_file = line.split()[2]
if utils.is_sockfile(sock_file):
return sock_file
finally:
fd.close()
def collect_stats(sock):
"""Collects stats from haproxy unix domain socket"""
sock.send("show stat\n")
stats = sock.recv(10240)
ts = time.time()
for line in stats.split("\n"):
var = line.split(",")
if var[0]:
# skip ready for next command value "> "
if var[0] == "> ":
continue
if var[1] in ("svname", "BACKEND", "FRONTEND"):
continue
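# The column indexing below assumes the HAProxy 1.4+ "show stat" CSV layout,
# where field 4 is scur (current sessions) and field 33 is rate
# (sessions per second).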
print ("haproxy.current_sessions %i %s server=%s cluster=%s"
% (ts, var[4], var[1], var[0]))
print ("haproxy.session_rate %i %s server=%s cluster=%s"
% (ts, var[33], var[1], var[0]))
def main():
pid = haproxy_pid()
if not pid:
utils.err("Error: HAProxy is not running")
return 13 # Ask tcollector to not respawn us.
conf_file = find_conf_file(pid)
if not conf_file:
return 13
sock_file = find_sock_file(conf_file)
if sock_file is None:
utils.err("Error: HAProxy is not listening on any unix domain socket")
return 13
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(sock_file)
# put haproxy to interactive mode, otherwise haproxy closes
# connection after first command.
# See haproxy documentation section 9.2. Unix Socket commands.
sock.send("prompt\n")
while True:
collect_stats(sock)
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
sys.exit(main())
| lgpl-3.0 |
jgeskens/django | django/conf/locale/fr/formats.py | 118 | 1448 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
cjastram/silverbot | archives/lib/trader.py | 1 | 8331 |
import sys
from ib.ext.Contract import Contract
from ib.ext.EWrapper import EWrapper
from ib.ext.EClientSocket import EClientSocket
from ib.ext.ExecutionFilter import ExecutionFilter
def showmessage(message, mapping):
try:
del(mapping['self'])
except (KeyError, ):
pass
items = mapping.items()
items.sort()
print '### %s' % (message, )
for k, v in items:
print ' %s:%s' % (k, v)
#def gen_tick_id():
#i = randint(100, 10000)
#while True:
#yield i
#i += 1
#gen_tick_id = gen_tick_id().next
class Wrapper(EWrapper):
orders = None
order_ids = [0]
parameters = None
connection = None
def __init__(self, parameters):
# Variable initialization
#self.orders = orders.OrderBook()
#self.price_log = data.PriceLog()
#self.parameters = parameters
self.connection = EClientSocket(self)
self.connection.eConnect('localhost', 7496, 0) # host, port, clientId
tick_id = 1
symbol = "SLV"
contract = self.makeContract(symbol)
self.connection.reqMktData(tick_id, contract, [], False)
def makeContract(self, symbol):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = 'STK'
contract.m_exchange = 'SMART'
contract.m_primaryExch = 'SMART'
contract.m_currency = 'USD'
contract.m_localSymbol = symbol
return contract
def tickPrice(self, tickerId, field, price, canAutoExecute):
#showmessage('tickPrice', vars())
# 1 = bid
# 2 = ask
# 4 = last
# 6 = high
# 7 = low
# 9 = close
priceLog = {}
side = ""
if field == 2:
print "a%0.2f " % price
elif field == 1:
print "b%0.2f " % price
if side != "":
print side, price
def openOrder(self, orderId, contract, order, state):
orderId = order.m_orderId
symbol = contract.m_symbol
qty = order.m_totalQuantity
price = order.m_lmtPrice
action = order.m_action
self.orders.add(orderId, symbol, qty, price, action)
order = [orderId, symbol, qty, price, action]
print "--> Open order:%s Status:%s Warning:%s" % (order, state.m_status, state.m_warningText)
def error(self, id=None, errorCode=None, errorMsg=None):
if errorCode == 2104:
print "--> %s" % errorMsg
else:
showmessage('error', vars())
def nextValidId(self, orderId):
self.order_ids.append(orderId)
def connectionClosed(self):
print "--> Connection closed, exiting..."
sys.exit(0)
def tickSize(self, tickerId, field, size): pass #showmessage('tickSize', vars())
def tickGeneric(self, tickerId, tickType, value): pass #showmessage('tickGeneric', vars())
def tickString(self, tickerId, tickType, value): pass #showmessage('tickString', vars())
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, impliedFuture, holdDays, futureExpiry, dividendImpact, dividendsToExpiry): showmessage('tickEFP', vars())
def tickOptionComputation(self, tickerId, field, impliedVolatility, delta): showmessage('tickOptionComputation', vars())
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld): pass #showmessage('orderStatus', vars())
def openOrderEnd(self): showmessage('openOrderEnd', vars())
def updateAccountValue(self, key, value, currency, accountName): showmessage('updateAccountValue', vars())
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName): showmessage('updatePortfolio', vars())
def updateAccountTime(self, timeStamp): showmessage('updateAccountTime', vars())
def accountDownloadEnd(self, accountName): showmessage('accountDownloadEnd', vars())
def contractDetails(self, contractDetails): showmessage('contractDetails', vars())
def bondContractDetails(self, contractDetails): showmessage('bondContractDetails', vars())
def contractDetailsEnd(self, reqId): showmessage('contractDetailsEnd', vars())
def execDetails(self, orderId, contract, execution): showmessage('execDetails', vars())
def execDetailsEnd(self, reqId): showmessage('execDetailsEnd', vars())
def error_0(self, strval): showmessage('error_0', vars())
def error_1(self, strval): showmessage('error_1', vars())
def updateMktDepth(self, tickerId, position, operation, side, price, size): showmessage('updateMktDepth', vars())
def updateMktDepthL2(self, tickerId, position, marketMaker, operation, side, price, size): showmessage('updateMktDepthL2', vars())
def updateNewsBulletin(self, msgId, msgType, message, origExchange): showmessage('updateNewsBulletin', vars())
def managedAccounts(self, accountsList): pass #showmessage('managedAccounts', vars())
def receiveFA(self, faDataType, xml): showmessage('receiveFA', vars())
def historicalData(self, reqId, date, open, high, low, close, volume, count, WAP, hasGaps): showmessage('historicalData', vars())
def scannerParameters(self, xml): showmessage('scannerParameters', vars())
def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection): showmessage('scannerData', vars())
def scannerDataEnd(self, reqId): showmessage('scannerDataEnd', vars())
def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count): showmessage('realtimeBar', vars())
def currentTime(self, time): showmessage('currentTime', vars())
def fundamentalData(self, reqId, data): showmessage('fundamentalData', vars())
def deltaNeutralValidation(self, reqId, underComp): showmessage('deltaNeutralValidation', vars())
def tickSnapshotEnd(self, reqId): showmessage('tickSnapshotEnd', vars())
def marketDataType(self, reqId, marketDataType): showmessage('marketDataType', vars())
def commissionReport(self, commissionReport): showmessage('commissionReport', vars())
#class App:
#parameters = None
#def __init__(self, host='localhost', port=7496, clientId=0):
#self.host = host
#self.port = port
#self.clientId = clientId
##self.parameters = settings.TradeParameters()
#self.wrapper = Wrapper(self.parameters)
#self.connection = EClientSocket(self.wrapper)
#
#def eConnect(self):
#self.connection.eConnect(self.host, self.port, self.clientId)
#
#def reqAccountUpdates(self):
#self.connection.reqAccountUpdates(1, '')
#
#def reqOpenOrders(self):
#self.connection.reqOpenOrders()
#
#def reqExecutions(self):
#filt = ExecutionFilter()
#self.connection.reqExecutions(filt)
##def reqIds(self):
##self.connection.reqIds(10)
##def reqNewsBulletins(self):
##self.connection.reqNewsBulletins(1)
##def cancelNewsBulletins(self):
##self.connection.cancelNewsBulletins()
##def setServerLogLevel(self):
##self.connection.setServerLogLevel(3)
##def reqAutoOpenOrders(self):
##self.connection.reqAutoOpenOrders(1)
##def reqAllOpenOrders(self):
##self.connection.reqAllOpenOrders()
##def reqManagedAccts(self):
##self.connection.reqManagedAccts()
##def requestFA(self):
##self.connection.requestFA(1)
##def reqMktData(self):
##tick_id = 1
##symbol = "SLV"
##contract = self.wrapper.makeContract(symbol)
##self.connection.reqMktData(tick_id, contract, [], False)
##def reqHistoricalData(self):
##contract = Contract()
##contract.m_symbol = 'QQQQ'
##contract.m_secType = 'STK'
##contract.m_exchange = 'SMART'
##endtime = strftime('%Y%m%d %H:%M:%S')
##self.connection.reqHistoricalData(
##tickerId=1,
##contract=contract,
##endDateTime=endtime,
##durationStr='1 D',
##barSizeSetting='1 min',
##whatToShow='TRADES',
##useRTH=0,
##formatDate=1)
#
#def eDisconnect(self):
#sleep(5)
#self.connection.eDisconnect()
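# Illustrative sketch (not part of the original module): the wrapper connects
# and subscribes to SLV market data in its constructor, so a minimal driver
# would simply be
#
#   wrapper = Wrapper(parameters=None)
#   # ...bid/ask ticks are then printed from tickPrice() as they arrive...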
| agpl-3.0 |
seocam/django | django/contrib/postgres/forms/ranges.py | 59 | 2762 | from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
if isinstance(value, self.range_type):
return [value.lower, value.upper]
if value is None:
return [None, None]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
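# Illustrative usage sketch (not part of the original module): a plain form
# declaring one of the range fields above. The RangeWidget subwidgets are
# posted as <name>_0 and <name>_1, and compress() returns a psycopg2 range
# object. The form and field names below are made up for the example.
def _example_range_form():
    class AgeForm(forms.Form):
        ages = IntegerRangeField()
    form = AgeForm(data={'ages_0': '18', 'ages_1': '65'})
    if form.is_valid():
        return form.cleaned_data['ages']  # NumericRange(18, 65)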
| bsd-3-clause |
Nivl/django-pipeline | tests/tests/compiler.py | 4 | 1182 | from django.test import TestCase
from pipeline.conf import settings
from pipeline.compilers import Compiler, CompilerBase
class DummyCompiler(CompilerBase):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
return
class CompilerTest(TestCase):
def setUp(self):
self.compiler = Compiler()
self.old_compilers = settings.PIPELINE_COMPILERS
settings.PIPELINE_COMPILERS = ['tests.tests.compiler.DummyCompiler']
def test_output_path(self):
output_path = self.compiler.output_path("js/helpers.coffee", "js")
self.assertEquals(output_path, "js/helpers.js")
def test_compilers_class(self):
compilers_class = self.compiler.compilers
self.assertEquals(compilers_class[0], DummyCompiler)
def test_compile(self):
paths = self.compiler.compile([
'js/dummy.coffee',
'js/application.js',
])
self.assertEquals(['js/dummy.js', 'js/application.js'], paths)
def tearDown(self):
settings.PIPELINE_COMPILERS = self.old_compilers
| mit |
40223149/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/highlight.py | 617 | 2518 | import keyword
import _jsre as re
from browser import html
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'_'
digits = '0123456789'
builtin_funcs = ("abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
"eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
"binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
"float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
"chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
"cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
"__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" +
"buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern")
kw_pattern = '^('+'|'.join(keyword.kwlist)+')$'
bf_pattern = '^('+builtin_funcs+')$'
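# Small illustration of how the two patterns above are used by highlight()
# below: once an identifier has been accumulated, a single re.search call
# against kw_pattern or bf_pattern decides its colour. This helper is only a
# sketch and is not called by the module itself.
def _classify_name(name):
    if re.search(kw_pattern, name):
        return 'keyword'   # e.g. 'def', 'while'
    if re.search(bf_pattern, name):
        return 'builtin'   # e.g. 'len', 'range'
    return 'plain'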
def highlight(txt, string_color="blue", comment_color="green",
keyword_color="purple"):
res = html.PRE()
i = 0
name = ''
while i<len(txt):
car = txt[i]
if car in ["'",'"']:
k = i+1
while k<len(txt):
if txt[k]==car:
nb_as = 0
j = k-1
while True:
if txt[j]=='\\':
nb_as+=1
j -= 1
else:
break
if nb_as % 2 == 0:
res <= html.SPAN(txt[i:k+1],
style=dict(color=string_color))
i = k
break
k += 1
elif car == '#': # comment
end = txt.find('\n', i)
if end== -1:
res <= html.SPAN(txt[i:],style=dict(color=comment_color))
break
else:
res <= html.SPAN(txt[i:end],style=dict(color=comment_color))
i = end-1
elif car in letters:
name += car
elif car in digits and name:
name += car
else:
if name:
if re.search(kw_pattern,name):
res <= html.SPAN(name,style=dict(color=keyword_color))
elif re.search(bf_pattern,name):
res <= html.SPAN(name,style=dict(color=keyword_color))
else:
res <= name
name = ''
res <= car
i += 1
res <= name
    return res
 | gpl-3.0 |
jordanemedlock/psychtruths | temboo/core/Library/Facebook/Actions/Video/WantsToWatch/ReadWantsToWatch.py | 5 | 5401 | # -*- coding: utf-8 -*-
###############################################################################
#
# ReadWantsToWatch
# Retrieves one or more video wants_to_watch actions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadWantsToWatch(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ReadWantsToWatch Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ReadWantsToWatch, self).__init__(temboo_session, '/Library/Facebook/Actions/Video/WantsToWatch/ReadWantsToWatch')
def new_input_set(self):
return ReadWantsToWatchInputSet()
def _make_result_set(self, result, path):
return ReadWantsToWatchResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ReadWantsToWatchChoreographyExecution(session, exec_id, path)
class ReadWantsToWatchInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ReadWantsToWatch
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((optional, string) The id of an action to retrieve. If an id is not provided, a list of all video wants_to_watch actions will be returned.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('ActionID', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
"""
super(ReadWantsToWatchInputSet, self)._set_input('Fields', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('Offset', value)
def set_ProfileID(self, value):
"""
Set the value of the ProfileID input for this Choreo. ((optional, string) The id of the user's profile. Defaults to "me" indicating the authenticated user.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('ProfileID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(ReadWantsToWatchInputSet, self)._set_input('ResponseFormat', value)
class ReadWantsToWatchResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ReadWantsToWatch Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
def get_HasNext(self):
"""
Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
"""
return self._output.get('HasNext', None)
def get_HasPrevious(self):
"""
Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
"""
return self._output.get('HasPrevious', None)
class ReadWantsToWatchChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ReadWantsToWatchResultSet(response, path)
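# Hedged usage sketch: Temboo Choreos are normally driven through a
# TembooSession and the execute_with_results() helper inherited from the
# Choreography base class in the SDK; the session and token values below are
# placeholders, not part of this generated class.
def _example_read_wants_to_watch(temboo_session, access_token):
    choreo = ReadWantsToWatch(temboo_session)
    inputs = choreo.new_input_set()
    inputs.set_AccessToken(access_token)
    inputs.set_Limit(10)
    results = choreo.execute_with_results(inputs)
    return results.get_Response()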
| apache-2.0 |
resmo/ansible | lib/ansible/modules/network/junos/junos_logging.py | 52 | 8820 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_logging
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging
on Juniper JUNOS devices.
options:
dest:
description:
- Destination of the logs.
choices: ['console', 'host', 'file', 'user']
name:
description:
- If value of C(dest) is I(file) it indicates file-name,
for I(user) it indicates username and for I(host) indicates
the host name to be notified.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
state:
description:
- State of the logging configuration.
default: present
choices: ['present', 'absent']
active:
description:
      - Specifies whether the configuration is active or deactivated.
default: True
type: bool
rotate_frequency:
description:
- Rotate log frequency in minutes, this is applicable if value
of I(dest) is C(file). The acceptable value is in range of 1 to 59.
This controls the frequency after which log file is rotated.
required: false
size:
description:
- Size of the file in archive, this is applicable if value
of I(dest) is C(file). The acceptable value is in range from 65536 to
1073741824 bytes.
required: false
files:
description:
- Number of files to be archived, this is applicable if value
of I(dest) is C(file). The acceptable value is in range from 1 to 1000.
required: false
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure console logging
junos_logging:
dest: console
facility: any
level: critical
- name: remove console logging configuration
junos_logging:
dest: console
state: absent
- name: configure file logging
junos_logging:
dest: file
name: test
facility: pfe
level: error
- name: configure logging parameter
junos_logging:
files: 30
size: 65536
rotate_frequency: 10
- name: Configure file logging using aggregate
junos_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
active: True
- name: Delete file logging using aggregate
junos_logging:
aggregate:
- { dest: file, name: test-1, facility: pfe, level: critical }
- { dest: file, name: test-2, facility: kernel, level: emergency }
state: absent
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit system syslog]
+ [edit system syslog]
file interactive-commands { ... }
+ file test {
+ pfe critical;
+ }
"""
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele, to_param_list
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def validate_files(value, module):
if value and not 1 <= value <= 1000:
module.fail_json(msg='files must be between 1 and 1000')
def validate_size(value, module):
if value and not 65536 <= value <= 1073741824:
module.fail_json(msg='size must be between 65536 and 1073741824')
def validate_rotate_frequency(value, module):
if value and not 1 <= value <= 59:
module.fail_json(msg='rotate_frequency must be between 1 and 59')
def validate_param_values(module, obj, param=None):
if not param:
param = module.params
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(param.get(key), module)
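# The dispatch above is purely name-based: for a key such as 'size' it looks
# up globals()['validate_size'] and calls it with the supplied value. A small
# sketch of that behaviour in isolation (FakeModule is only for illustration,
# it is not part of this module):
def _example_validation():
    class FakeModule(object):
        def fail_json(self, msg):
            raise ValueError(msg)
    module = FakeModule()
    validate_size(65536, module)   # within range, passes silently
    validate_files(2000, module)   # out of range, raises via fail_json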
def main():
""" main entry point for module execution
"""
element_spec = dict(
dest=dict(choices=['console', 'host', 'file', 'user']),
name=dict(),
facility=dict(),
level=dict(),
rotate_frequency=dict(type='int'),
size=dict(type='int'),
files=dict(type='int'),
src_addr=dict(),
state=dict(default='present', choices=['present', 'absent']),
active=dict(default=True, type='bool')
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
required_if = [('dest', 'host', ['name', 'facility', 'level']),
('dest', 'file', ['name', 'facility', 'level']),
('dest', 'user', ['name', 'facility', 'level']),
('dest', 'console', ['facility', 'level'])]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
params = to_param_list(module)
requests = list()
for param in params:
# if key doesn't exist in the item, get it from module.params
for key in param:
if param.get(key) is None:
param[key] = module.params[key]
module._check_required_if(required_if, param)
item = param.copy()
dest = item.get('dest')
if dest == 'console' and item.get('name'):
module.fail_json(msg="%s and %s are mutually exclusive" % ('console', 'name'))
top = 'system/syslog'
is_facility_key = False
field_top = None
if dest:
if dest == 'console':
field_top = dest
is_facility_key = True
else:
field_top = dest + '/contents'
is_facility_key = False
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('name', {'xpath': 'name', 'is_key': True, 'top': dest}),
('facility', {'xpath': 'name', 'is_key': is_facility_key, 'top': field_top}),
('size', {'xpath': 'size', 'leaf_only': True, 'is_key': True, 'top': 'archive'}),
('files', {'xpath': 'files', 'leaf_only': True, 'is_key': True, 'top': 'archive'}),
('rotate_frequency', {'xpath': 'log-rotate-frequency', 'leaf_only': True}),
])
if item.get('level'):
param_to_xpath_map['level'] = {'xpath': item.get('level'), 'tag_only': True, 'top': field_top}
validate_param_values(module, param_to_xpath_map, param=item)
want = map_params_to_obj(module, param_to_xpath_map, param=item)
requests.append(map_obj_to_ele(module, want, top, param=item))
diff = None
with locked_config(module):
for req in requests:
diff = load_config(module, tostring(req), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
EraYaN/CouchPotatoServer | libs/requests/api.py | 206 | 4935 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
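# A few hedged usage sketches for the helpers above (httpbin.org is used
# purely as an example endpoint; the values shown are placeholders):
def _example_usage():
    r = get('http://httpbin.org/get', params={'q': 'value'}, timeout=(3.05, 27))
    r = post('http://httpbin.org/post', json={'key': 'value'})
    r = head('http://httpbin.org/get')   # redirects are not followed by default
    return r.status_code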
| gpl-3.0 |
HenriHeinonen/barcoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
blablack/ams-lv2 | waflib/Node.py | 18 | 25509 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2018 (ita)
"""
Node: filesystem structure
#. Each file/folder is represented by exactly one node.
#. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc.
Unused class members can increase the `.wafpickle` file size sensibly.
#. Node objects should never be created directly, use
the methods :py:func:`Node.make_node` or :py:func:`Node.find_node` for the low-level operations
#. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` must be
used when a build context is present
#. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass required for serialization.
(:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context
owning a node is held as *self.ctx*
"""
import os, re, sys, shutil
from waflib import Utils, Errors
exclude_regs = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/*.swp
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.intlcache
**/.DS_Store'''
"""
Ant patterns for files and folders to exclude while doing the
recursive traversal in :py:meth:`waflib.Node.Node.ant_glob`
"""
def ant_matcher(s, ignorecase):
reflags = re.I if ignorecase else 0
ret = []
for x in Utils.to_list(s):
x = x.replace('\\', '/').replace('//', '/')
if x.endswith('/'):
x += '**'
accu = []
for k in x.split('/'):
if k == '**':
accu.append(k)
else:
k = k.replace('.', '[.]').replace('*','.*').replace('?', '.').replace('+', '\\+')
k = '^%s$' % k
try:
exp = re.compile(k, flags=reflags)
except Exception as e:
raise Errors.WafError('Invalid pattern: %s' % k, e)
else:
accu.append(exp)
ret.append(accu)
return ret
def ant_sub_filter(name, nn):
ret = []
for lst in nn:
if not lst:
pass
elif lst[0] == '**':
ret.append(lst)
if len(lst) > 1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def ant_sub_matcher(name, pats):
nacc = ant_sub_filter(name, pats[0])
nrej = ant_sub_filter(name, pats[1])
if [] in nrej:
nacc = []
return [nacc, nrej]
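# Small illustration of what the helpers above produce: ant_matcher compiles
# each '/'-separated component of a pattern into an anchored regex (keeping
# '**' as a literal marker), and ant_sub_matcher consumes one path component
# at a time. An empty list left in the "accept" part means the name matched.
# This sketch is explanatory only and is not used by waf itself.
def _example_ant_matching():
	pats = (ant_matcher('**/*.cpp', False), ant_matcher('**/.git/**', False))
	pats = ant_sub_matcher('src', pats)      # descend into a folder
	pats = ant_sub_matcher('main.cpp', pats) # test a file name
	return [] in pats[0]                     # True: 'src/main.cpp' is accepted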
class Node(object):
"""
This class is organized in two parts:
* The basic methods meant for filesystem access (compute paths, create folders, etc)
* The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``)
"""
dict_class = dict
"""
Subclasses can provide a dict class to enable case insensitivity for example.
"""
__slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir')
def __init__(self, name, parent):
"""
.. note:: Use :py:func:`Node.make_node` or :py:func:`Node.find_node` instead of calling this constructor
"""
self.name = name
self.parent = parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent))
parent.children[name] = self
def __setstate__(self, data):
"Deserializes node information, used for persistence"
self.name = data[0]
self.parent = data[1]
if data[2] is not None:
# Issue 1480
self.children = self.dict_class(data[2])
def __getstate__(self):
"Serializes node information, used for persistence"
return (self.name, self.parent, getattr(self, 'children', None))
def __str__(self):
"""
String representation (abspath), for debugging purposes
:rtype: string
"""
return self.abspath()
def __repr__(self):
"""
String representation (abspath), for debugging purposes
:rtype: string
"""
return self.abspath()
def __copy__(self):
"""
Provided to prevent nodes from being copied
:raises: :py:class:`waflib.Errors.WafError`
"""
raise Errors.WafError('nodes are not supposed to be copied')
def read(self, flags='r', encoding='latin-1'):
"""
Reads and returns the contents of the file represented by this node, see :py:func:`waflib.Utils.readf`::
def build(bld):
bld.path.find_node('wscript').read()
:param flags: Open mode
:type flags: string
:param encoding: encoding value for Python3
:type encoding: string
:rtype: string or bytes
:return: File contents
"""
return Utils.readf(self.abspath(), flags, encoding)
def write(self, data, flags='w', encoding='latin-1'):
"""
Writes data to the file represented by this node, see :py:func:`waflib.Utils.writef`::
def build(bld):
bld.path.make_node('foo.txt').write('Hello, world!')
:param data: data to write
:type data: string
:param flags: Write mode
:type flags: string
:param encoding: encoding value for Python3
:type encoding: string
"""
Utils.writef(self.abspath(), data, flags, encoding)
def read_json(self, convert=True, encoding='utf-8'):
"""
Reads and parses the contents of this node as JSON (Python ≥ 2.6)::
def build(bld):
bld.path.find_node('abc.json').read_json()
Note that this by default automatically decodes unicode strings on Python2, unlike what the Python JSON module does.
:type convert: boolean
:param convert: Prevents decoding of unicode strings on Python2
:type encoding: string
:param encoding: The encoding of the file to read. This default to UTF8 as per the JSON standard
:rtype: object
:return: Parsed file contents
"""
import json # Python 2.6 and up
object_pairs_hook = None
if convert and sys.hexversion < 0x3000000:
try:
_type = unicode
except NameError:
_type = str
def convert(value):
if isinstance(value, list):
return [convert(element) for element in value]
elif isinstance(value, _type):
return str(value)
else:
return value
def object_pairs(pairs):
return dict((str(pair[0]), convert(pair[1])) for pair in pairs)
object_pairs_hook = object_pairs
return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook)
def write_json(self, data, pretty=True):
"""
Writes a python object as JSON to disk (Python ≥ 2.6) as UTF-8 data (JSON standard)::
def build(bld):
bld.path.find_node('xyz.json').write_json(199)
:type data: object
:param data: The data to write to disk
:type pretty: boolean
:param pretty: Determines if the JSON will be nicely space separated
"""
import json # Python 2.6 and up
indent = 2
separators = (',', ': ')
sort_keys = pretty
newline = os.linesep
if not pretty:
indent = None
separators = (',', ':')
newline = ''
output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline
self.write(output, encoding='utf-8')
def exists(self):
"""
Returns whether the Node is present on the filesystem
:rtype: bool
"""
return os.path.exists(self.abspath())
def isdir(self):
"""
Returns whether the Node represents a folder
:rtype: bool
"""
return os.path.isdir(self.abspath())
def chmod(self, val):
"""
Changes the file/dir permissions::
def build(bld):
bld.path.chmod(493) # 0755
"""
os.chmod(self.abspath(), val)
def delete(self, evict=True):
"""
		Removes the file/folder from the filesystem (equivalent to `rm -rf`), and removes this object from the Node tree.
Do not use this object after calling this method.
"""
try:
try:
if os.path.isdir(self.abspath()):
shutil.rmtree(self.abspath())
else:
os.remove(self.abspath())
except OSError:
if os.path.exists(self.abspath()):
raise
finally:
if evict:
self.evict()
def evict(self):
"""
Removes this node from the Node tree
"""
del self.parent.children[self.name]
def suffix(self):
"""
Returns the file rightmost extension, for example `a.b.c.d → .d`
:rtype: string
"""
k = max(0, self.name.rfind('.'))
return self.name[k:]
def height(self):
"""
Returns the depth in the folder hierarchy from the filesystem root or from all the file drives
:returns: filesystem depth
:rtype: integer
"""
d = self
val = -1
while d:
d = d.parent
val += 1
return val
def listdir(self):
"""
Lists the folder contents
:returns: list of file/folder names ordered alphabetically
:rtype: list of string
"""
lst = Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
"""
Creates a folder represented by this node. Intermediate folders are created as needed.
:raises: :py:class:`waflib.Errors.WafError` when the folder is missing
"""
if self.isdir():
return
try:
self.parent.mkdir()
except OSError:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not self.isdir():
raise Errors.WafError('Could not create the directory %r' % self)
try:
self.children
except AttributeError:
self.children = self.dict_class()
def find_node(self, lst):
"""
Finds a node on the file system (files or folders), and creates the corresponding Node objects if it exists
:param lst: relative path
:type lst: string or list of string
:returns: The corresponding Node object or None if no entry was found on the filesystem
		:rtype: :py:class:`waflib.Node.Node`
"""
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
if lst and lst[0].startswith('\\\\') and not self.parent:
node = self.ctx.root.make_node(lst[0])
node.cache_isdir = True
return node.find_node(lst[1:])
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
try:
ch = cur.children
except AttributeError:
cur.children = self.dict_class()
else:
try:
cur = ch[x]
continue
except KeyError:
pass
# optimistic: create the node first then look if it was correct to do so
cur = self.__class__(x, cur)
if not cur.exists():
cur.evict()
return None
if not cur.exists():
cur.evict()
return None
return cur
def make_node(self, lst):
"""
Returns or creates a Node object corresponding to the input path without considering the filesystem.
:param lst: relative path
:type lst: string or list of string
		:rtype: :py:class:`waflib.Node.Node`
"""
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
try:
cur = cur.children[x]
except AttributeError:
cur.children = self.dict_class()
except KeyError:
pass
else:
continue
cur = self.__class__(x, cur)
return cur
def search_node(self, lst):
"""
Returns a Node previously defined in the data structure. The filesystem is not considered.
:param lst: relative path
:type lst: string or list of string
		:rtype: :py:class:`waflib.Node.Node` or None if there is no entry in the Node datastructure
"""
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
else:
try:
cur = cur.children[x]
except (AttributeError, KeyError):
return None
return cur
def path_from(self, node):
"""
Path of this node seen from the other::
def build(bld):
n1 = bld.path.find_node('foo/bar/xyz.txt')
n2 = bld.path.find_node('foo/stuff/')
n1.path_from(n2) # '../bar/xyz.txt'
:param node: path to use as a reference
:type node: :py:class:`waflib.Node.Node`
:returns: a relative path or an absolute one if that is better
:rtype: string
"""
c1 = self
c2 = node
c1h = c1.height()
c2h = c2.height()
lst = []
up = 0
while c1h > c2h:
lst.append(c1.name)
c1 = c1.parent
c1h -= 1
while c2h > c1h:
up += 1
c2 = c2.parent
c2h -= 1
while not c1 is c2:
lst.append(c1.name)
up += 1
c1 = c1.parent
c2 = c2.parent
if c1.parent:
lst.extend(['..'] * up)
lst.reverse()
return os.sep.join(lst) or '.'
else:
return self.abspath()
def abspath(self):
"""
		Returns the absolute path. The result is cached on the node as ``cache_abspath``.
:rtype: string
"""
try:
return self.cache_abspath
except AttributeError:
pass
# think twice before touching this (performance + complexity + correctness)
if not self.parent:
val = os.sep
elif not self.parent.name:
val = os.sep + self.name
else:
val = self.parent.abspath() + os.sep + self.name
self.cache_abspath = val
return val
if Utils.is_win32:
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if not self.parent:
val = ''
elif not self.parent.name:
val = self.name + os.sep
else:
val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name
self.cache_abspath = val
return val
def is_child_of(self, node):
"""
Returns whether the object belongs to a subtree of the input node::
def build(bld):
node = bld.path.find_node('wscript')
node.is_child_of(bld.path) # True
:param node: path to use as a reference
:type node: :py:class:`waflib.Node.Node`
:rtype: bool
"""
p = self
diff = self.height() - node.height()
while diff > 0:
diff -= 1
p = p.parent
return p is node
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False):
"""
Recursive method used by :py:meth:`waflib.Node.ant_glob`.
:param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion
:type accept: function
:param maxdepth: maximum depth in the filesystem (25)
:type maxdepth: int
:param pats: list of patterns to accept and list of patterns to exclude
:type pats: tuple
:param dir: return folders too (False by default)
:type dir: bool
:param src: return files (True by default)
:type src: bool
:param remove: remove files/folders that do not exist (True by default)
:type remove: bool
:param quiet: disable build directory traversal warnings (verbose mode)
:type quiet: bool
:returns: A generator object to iterate from
:rtype: iterator
"""
dircont = self.listdir()
dircont.sort()
try:
lst = set(self.children.keys())
except AttributeError:
self.children = self.dict_class()
else:
if remove:
for x in lst - set(dircont):
self.children[x].evict()
for name in dircont:
npats = accept(name, pats)
if npats and npats[0]:
accepted = [] in npats[0]
node = self.make_node([name])
isdir = node.isdir()
if accepted:
if isdir:
if dir:
yield node
elif src:
yield node
if isdir:
node.cache_isdir = True
if maxdepth:
for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove, quiet=quiet):
yield k
def ant_glob(self, *k, **kw):
"""
Finds files across folders and returns Node objects:
* ``**/*`` find all files recursively
* ``**/*.class`` find all files ending by .class
* ``..`` find files having two dot characters
For example::
def configure(cfg):
# find all .cpp files
cfg.path.ant_glob('**/*.cpp')
# find particular files from the root filesystem (can be slow)
cfg.root.ant_glob('etc/*.txt')
# simple exclusion rule example
cfg.path.ant_glob('*.c*', excl=['*.c'], src=True, dir=False)
For more information about the patterns, consult http://ant.apache.org/manual/dirtasks.html
Please remember that the '..' sequence does not represent the parent directory::
def configure(cfg):
cfg.path.ant_glob('../*.h') # incorrect
cfg.path.parent.ant_glob('*.h') # correct
The Node structure is itself a filesystem cache, so certain precautions must
be taken while matching files in the build or installation phases.
		Node objects that do not have a corresponding file or folder are garbage-collected by default.
This garbage collection is usually required to prevent returning files that do not
exist anymore. Yet, this may also remove Node objects of files that are yet-to-be built.
This typically happens when trying to match files in the build directory,
but there are also cases when files are created in the source directory.
		Run ``waf -v`` to display any warnings, and consider passing ``remove=False``
when matching files in the build directory.
Since ant_glob can traverse both source and build folders, it is a best practice
to call this method only from the most specific build node::
def build(bld):
# traverses the build directory, may need ``remove=False``:
bld.path.ant_glob('project/dir/**/*.h')
# better, no accidental build directory traversal:
bld.path.find_node('project/dir').ant_glob('**/*.h') # best
In addition, files and folders are listed immediately. When matching files in the
build folders, consider passing ``generator=True`` so that the generator object
returned can defer computation to a later stage. For example::
def build(bld):
bld(rule='tar xvf ${SRC}', source='arch.tar')
bld.add_group()
gen = bld.bldnode.ant_glob("*.h", generator=True, remove=True)
# files will be listed only after the arch.tar is unpacked
bld(rule='ls ${SRC}', source=gen, name='XYZ')
:param incl: ant patterns or list of patterns to include
:type incl: string or list of strings
:param excl: ant patterns or list of patterns to exclude
:type excl: string or list of strings
:param dir: return folders too (False by default)
:type dir: bool
:param src: return files (True by default)
:type src: bool
:param maxdepth: maximum depth of recursion
:type maxdepth: int
:param ignorecase: ignore case while matching (False by default)
:type ignorecase: bool
:param generator: Whether to evaluate the Nodes lazily
:type generator: bool
:param remove: remove files/folders that do not exist (True by default)
:type remove: bool
:param quiet: disable build directory traversal warnings (verbose mode)
:type quiet: bool
:returns: The corresponding Node objects as a list or as a generator object (generator=True)
:rtype: by default, list of :py:class:`waflib.Node.Node` instances
"""
src = kw.get('src', True)
dir = kw.get('dir')
excl = kw.get('excl', exclude_regs)
incl = k and k[0] or kw.get('incl', '**')
remove = kw.get('remove', True)
maxdepth = kw.get('maxdepth', 25)
ignorecase = kw.get('ignorecase', False)
quiet = kw.get('quiet', False)
pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase))
if kw.get('generator'):
return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet))
it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)
if kw.get('flat'):
# returns relative paths as a space-delimited string
# prefer Node objects whenever possible
return ' '.join(x.path_from(self) for x in it)
return list(it)
# ----------------------------------------------------------------------------
# the methods below require the source/build folders (bld.srcnode/bld.bldnode)
def is_src(self):
"""
Returns True if the node is below the source directory. Note that ``!is_src() ≠ is_bld()``
:rtype: bool
"""
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
while cur.parent:
if cur is y:
return False
if cur is x:
return True
cur = cur.parent
return False
def is_bld(self):
"""
Returns True if the node is below the build directory. Note that ``!is_bld() ≠ is_src()``
:rtype: bool
"""
cur = self
y = self.ctx.bldnode
while cur.parent:
if cur is y:
return True
cur = cur.parent
return False
def get_src(self):
"""
Returns the corresponding Node object in the source directory (or self if already
under the source directory). Use this method only if the purpose is to create
a Node object (this is common with folders but not with files, see ticket 1937)
:rtype: :py:class:`waflib.Node.Node`
"""
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
lst = []
while cur.parent:
if cur is y:
lst.reverse()
return x.make_node(lst)
if cur is x:
return self
lst.append(cur.name)
cur = cur.parent
return self
def get_bld(self):
"""
Return the corresponding Node object in the build directory (or self if already
under the build directory). Use this method only if the purpose is to create
a Node object (this is common with folders but not with files, see ticket 1937)
:rtype: :py:class:`waflib.Node.Node`
"""
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
lst = []
while cur.parent:
if cur is y:
return self
if cur is x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur = cur.parent
# the file is external to the current project, make a fake root in the current build directory
lst.reverse()
if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'):
lst[0] = lst[0][0]
return self.ctx.bldnode.make_node(['__root__'] + lst)
def find_resource(self, lst):
"""
Use this method in the build phase to find source files corresponding to the relative path given.
First it looks up the Node data structure to find any declared Node object in the build directory.
If None is found, it then considers the filesystem in the source directory.
:param lst: relative path
:type lst: string or list of string
:returns: the corresponding Node object or None
:rtype: :py:class:`waflib.Node.Node`
"""
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
node = self.get_bld().search_node(lst)
if not node:
node = self.get_src().find_node(lst)
if node and node.isdir():
return None
return node
def find_or_declare(self, lst):
"""
Use this method in the build phase to declare output files which
are meant to be written in the build directory.
This method creates the Node object and its parent folder
as needed.
:param lst: relative path
:type lst: string or list of string
"""
if isinstance(lst, str) and os.path.isabs(lst):
node = self.ctx.root.make_node(lst)
else:
node = self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self, lst):
"""
Searches for a folder on the filesystem (see :py:meth:`waflib.Node.Node.find_node`)
:param lst: relative path
:type lst: string or list of string
:returns: The corresponding Node object or None if there is no such folder
:rtype: :py:class:`waflib.Node.Node`
"""
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
node = self.find_node(lst)
if node and not node.isdir():
return None
return node
# helpers for building things
def change_ext(self, ext, ext_in=None):
"""
		Declares a build node with a distinct extension; this uses :py:meth:`waflib.Node.Node.find_or_declare`
:return: A build node of the same path, but with a different extension
:rtype: :py:class:`waflib.Node.Node`
"""
name = self.name
if ext_in is None:
k = name.rfind('.')
if k >= 0:
name = name[:k] + ext
else:
name = name + ext
else:
name = name[:- len(ext_in)] + ext
return self.parent.find_or_declare([name])
def bldpath(self):
"""
Returns the relative path seen from the build directory ``src/foo.cpp``
:rtype: string
"""
return self.path_from(self.ctx.bldnode)
def srcpath(self):
"""
Returns the relative path seen from the source directory ``../src/foo.cpp``
:rtype: string
"""
return self.path_from(self.ctx.srcnode)
def relpath(self):
"""
		If the node is in the build directory, returns :py:meth:`waflib.Node.Node.bldpath`,
else returns :py:meth:`waflib.Node.Node.srcpath`
:rtype: string
"""
cur = self
x = self.ctx.bldnode
while cur.parent:
if cur is x:
return self.bldpath()
cur = cur.parent
return self.srcpath()
def bld_dir(self):
"""
Equivalent to self.parent.bldpath()
:rtype: string
"""
return self.parent.bldpath()
def h_file(self):
"""
See :py:func:`waflib.Utils.h_file`
:return: a hash representing the file contents
:rtype: string or bytes
"""
return Utils.h_file(self.abspath())
def get_bld_sig(self):
"""
Returns a signature (see :py:meth:`waflib.Node.Node.h_file`) for the purpose
of build dependency calculation. This method uses a per-context cache.
:return: a hash representing the object contents
:rtype: string or bytes
"""
# previous behaviour can be set by returning self.ctx.node_sigs[self] when a build node
try:
cache = self.ctx.cache_sig
except AttributeError:
cache = self.ctx.cache_sig = {}
try:
ret = cache[self]
except KeyError:
p = self.abspath()
try:
ret = cache[self] = self.h_file()
except EnvironmentError:
if self.isdir():
# allow folders as build nodes, do not use the creation time
st = os.stat(p)
ret = cache[self] = Utils.h_list([p, st.st_ino, st.st_mode])
return ret
raise
return ret
pickle_lock = Utils.threading.Lock()
"""Lock mandatory for thread-safe node serialization"""
class Nod3(Node):
"""Mandatory subclass for thread-safe node serialization"""
pass # do not remove
| gpl-2.0 |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/json/tests/test_decode.py | 35 | 2127 | import decimal
from StringIO import StringIO
from collections import OrderedDict
from json.tests import PyTest, CTest
class TestDecode(object):
def test_decimal(self):
rval = self.loads('1.1', parse_float=decimal.Decimal)
self.assertTrue(isinstance(rval, decimal.Decimal))
self.assertEqual(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = self.loads('1', parse_int=float)
self.assertTrue(isinstance(rval, float))
self.assertEqual(rval, 1.0)
def test_decoder_optimizations(self):
# Several optimizations were made that skip over calls to
# the whitespace regex, so this test is designed to try and
# exercise the uncommon cases. The array cases are already covered.
rval = self.loads('{ "key" : "value" , "k":"v" }')
self.assertEqual(rval, {"key":"value", "k":"v"})
def test_empty_objects(self):
self.assertEqual(self.loads('{}'), {})
self.assertEqual(self.loads('[]'), [])
self.assertEqual(self.loads('""'), u"")
self.assertIsInstance(self.loads('""'), unicode)
def test_object_pairs_hook(self):
s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
("qrt", 5), ("pad", 6), ("hoy", 7)]
self.assertEqual(self.loads(s), eval(s))
self.assertEqual(self.loads(s, object_pairs_hook=lambda x: x), p)
self.assertEqual(self.json.load(StringIO(s),
object_pairs_hook=lambda x: x), p)
od = self.loads(s, object_pairs_hook=OrderedDict)
self.assertEqual(od, OrderedDict(p))
self.assertEqual(type(od), OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(self.loads(s,
object_pairs_hook=OrderedDict,
object_hook=lambda x: None),
OrderedDict(p))
class TestPyDecode(TestDecode, PyTest): pass
class TestCDecode(TestDecode, CTest): pass
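# Standalone illustration of the behaviour exercised above: object_pairs_hook
# receives the (key, value) pairs in document order and takes priority over
# object_hook. This sketch uses the stdlib json module directly instead of
# the test fixtures.
def _example_object_pairs_hook():
    import json
    s = '{"b": 1, "a": 2}'
    ordered = json.loads(s, object_pairs_hook=OrderedDict)
    ignored_hook = json.loads(s, object_pairs_hook=OrderedDict,
                              object_hook=lambda d: None)
    return ordered, ignored_hook   # both are OrderedDict([('b', 1), ('a', 2)])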
| mit |
kobejean/tensorflow | tensorflow/python/kernel_tests/matrix_band_part_op_test.py | 27 | 5786 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class MatrixBandPartTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
def Test(self):
mat = np.ones(shape_).astype(dtype_)
batch_mat = np.tile(mat, batch_shape_ + (1, 1))
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
        if batch_shape_ != ():
band_np = np.tile(band_np, batch_shape_ + (1, 1))
for index_dtype in [dtypes_lib.int32, dtypes_lib.int64]:
with self.test_session(use_gpu=False):
band = array_ops.matrix_band_part(
batch_mat,
constant_op.constant(lower, index_dtype),
constant_op.constant(upper, index_dtype))
self.assertAllEqual(band_np, band.eval())
return Test
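# The reference computation used by Test() above, spelled out on its own:
# matrix_band_part(A, lower, upper) keeps a band of A and is equivalent to
# composing numpy's triu/tril masks, where a negative bound means "keep the
# whole triangle on that side". Purely illustrative; only the generated tests
# above are executed.
def _numpy_band_part(mat, lower, upper):
  band = np.array(mat)
  if lower >= 0:
    band = np.triu(band, -lower)
  if upper >= 0:
    band = np.tril(band, upper)
  return band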
class MatrixBandPartGradTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
def Test(self):
shape = batch_shape_ + shape_
x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)
with self.test_session(use_gpu=False):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
y = array_ops.matrix_band_part(x, lower, upper)
error = gradient_checker.compute_gradient_error(
x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
return Test
class MatrixBandPartBenchmark(test_lib.Benchmark):
shapes = [
(10, 16, 16),
(10, 101, 101),
(10, 256, 256),
(10, 1000, 1000),
(10, 1024, 1024),
(10, 2048, 2048),
(10, 10, 4, 4),
(10, 10, 10, 10),
(10, 10, 16, 16),
(10, 10, 101, 101),
(10, 10, 256, 256),
(10, 10, 1000, 1000),
(10, 10, 1024, 1024),
(10, 10, 2048, 2048),
]
def benchmarkMatrixBandPartOp(self):
for shape_ in self.shapes:
for limits in (-1, -1), (-1, 0), (0, -1), (2, 2):
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_cpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if test_lib.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/gpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_gpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if __name__ == "__main__":
dtypes = (np.bool, np.int32, np.int64, np.float32, np.float64, np.complex64,
np.complex128)
for dtype in dtypes:
for batch_shape in ((), (2,), (1, 3, 2)):
for rows in 1, 2, 7:
for cols in 1, 2, 7:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartTest, "MatrixBandPart", name,
_GetMatrixBandPartTest(dtype, batch_shape, shape))
for dtype in (np.float32, np.float64):
for batch_shape in ((), (2,)):
for rows in 1, 2, 7:
for cols in 1, 2, 7:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartGradTest, "MatrixBandPartGrad", name,
_GetMatrixBandPartGradTest(dtype, batch_shape, shape))
test_lib.main()
| apache-2.0 |
catapult-project/catapult-csm | third_party/apiclient/googleapiclient/discovery.py | 25 | 38539 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs.
A client library for Google's discovery based APIs.
"""
from __future__ import absolute_import
import six
from six.moves import zip
__author__ = '[email protected] (Joe Gregorio)'
__all__ = [
'build',
'build_from_document',
'fix_method_name',
'key2param',
]
from six import StringIO
from six.moves.urllib.parse import urlencode, urlparse, urljoin, \
urlunparse, parse_qsl
# Standard library imports
import copy
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
import json
import keyword
import logging
import mimetypes
import os
import re
# Third-party imports
import httplib2
import uritemplate
# Local imports
from googleapiclient import mimeparse
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidJsonError
from googleapiclient.errors import MediaUploadSizeError
from googleapiclient.errors import UnacceptableMimeTypeError
from googleapiclient.errors import UnknownApiNameOrVersion
from googleapiclient.errors import UnknownFileType
from googleapiclient.http import HttpRequest
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaUpload
from googleapiclient.model import JsonModel
from googleapiclient.model import MediaModel
from googleapiclient.model import RawModel
from googleapiclient.schema import Schemas
from oauth2client.client import GoogleCredentials
from oauth2client.util import _add_query_parameter
from oauth2client.util import positional
# The client library requires a version of httplib2 that supports RETRIES.
httplib2.RETRIES = 1
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
BODY_PARAMETER_DEFAULT_VALUE = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
'description': ('The filename of the media request body, or an instance '
'of a MediaUpload object.'),
'type': 'string',
'required': False,
}
# Parameters accepted by the stack, but not visible via discovery.
# TODO(dhermes): Remove 'userip' in 'v2'.
STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
# Library-specific reserved words beyond Python keywords.
RESERVED_WORDS = frozenset(['body'])
def fix_method_name(name):
"""Fix method names to avoid reserved word conflicts.
Args:
name: string, method name.
Returns:
The name with a '_' prefixed if the name is a reserved word.
"""
if keyword.iskeyword(name) or name in RESERVED_WORDS:
return name + '_'
else:
return name
def key2param(key):
"""Converts key names into parameter names.
For example, converting "max-results" -> "max_results"
Args:
key: string, the method key name.
Returns:
A safe method name based on the key name.
"""
result = []
key = list(key)
if not key[0].isalpha():
result.append('x')
for c in key:
if c.isalnum():
result.append(c)
else:
result.append('_')
return ''.join(result)
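# A minimal illustration of the two helpers above; the expected values follow
# the rules documented in their docstrings.
def _example_name_mangling():
  assert fix_method_name('import') == 'import_'    # Python keyword
  assert fix_method_name('body') == 'body_'        # library reserved word
  assert fix_method_name('insert') == 'insert'     # left untouched
  assert key2param('max-results') == 'max_results'
  assert key2param('123abc') == 'x123abc'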
@positional(2)
def build(serviceName,
version,
http=None,
discoveryServiceUrl=DISCOVERY_URI,
developerKey=None,
model=None,
requestBuilder=HttpRequest,
credentials=None):
"""Construct a Resource for interacting with an API.
Construct a Resource object for interacting with an API. The serviceName and
version are the names from the Discovery service.
Args:
serviceName: string, name of the service.
version: string, the version of the service.
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
discoveryServiceUrl: string, a URI Template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URI to the discovery
document for that service.
developerKey: string, key obtained from
https://code.google.com/apis/console.
model: googleapiclient.Model, converts to and from the wire format.
requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP
request.
credentials: oauth2client.Credentials, credentials to be used for
authentication.
Returns:
A Resource object with methods for interacting with the service.
"""
params = {
'api': serviceName,
'apiVersion': version
}
if http is None:
http = httplib2.Http()
requested_url = uritemplate.expand(discoveryServiceUrl, params)
# REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
# variable that contains the network address of the client sending the
# request. If it exists then add that to the request for the discovery
# document to avoid exceeding the quota on discovery requests.
if 'REMOTE_ADDR' in os.environ:
requested_url = _add_query_parameter(requested_url, 'userIp',
os.environ['REMOTE_ADDR'])
logger.info('URL being requested: GET %s' % requested_url)
resp, content = http.request(requested_url)
if resp.status == 404:
raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
version))
if resp.status >= 400:
raise HttpError(resp, content, uri=requested_url)
try:
content = content.decode('utf-8')
except AttributeError:
pass
try:
service = json.loads(content)
except ValueError as e:
logger.error('Failed to parse as JSON: ' + content)
raise InvalidJsonError()
return build_from_document(content, base=discoveryServiceUrl, http=http,
developerKey=developerKey, model=model, requestBuilder=requestBuilder,
credentials=credentials)
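# --- Editor's note: illustrative sketch, not part of the original module. ---
# A typical call to build(); the service name, version, API key and the
# files().list() call below are placeholders, not values defined in this file.
# Wrapped in a function so nothing runs at import time (build() performs an
# HTTP request to the discovery service).
def _example_build_usage():  # hypothetical; for illustration only
  service = build('drive', 'v2', developerKey='YOUR-API-KEY')
  request = service.files().list(maxResults=10)
  return request.execute()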
@positional(1)
def build_from_document(
service,
base=None,
future=None,
http=None,
developerKey=None,
model=None,
requestBuilder=HttpRequest,
credentials=None):
"""Create a Resource for interacting with an API.
Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.
Args:
service: string or object, the JSON discovery document describing the API.
The value passed in may either be the JSON string or the deserialized
JSON.
base: string, base URI for all HTTP requests, usually the discovery URI.
This parameter is no longer used as rootUrl and servicePath are included
within the discovery document. (deprecated)
future: string, discovery document with future capabilities (deprecated).
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
developerKey: string, Key for controlling API usage, generated
from the API Console.
model: Model class instance that serializes and de-serializes requests and
responses.
requestBuilder: Takes an http request and packages it up to be executed.
credentials: object, credentials to be used for authentication.
Returns:
A Resource object with methods for interacting with the service.
"""
# future is no longer used.
future = {}
if isinstance(service, six.string_types):
service = json.loads(service)
base = urljoin(service['rootUrl'], service['servicePath'])
schema = Schemas(service)
if credentials:
# If credentials were passed in, we could have two cases:
# 1. the scopes were specified, in which case the given credentials
# are used for authorizing the http;
# 2. the scopes were not provided (meaning the Application Default
# Credentials are to be used). In this case, the Application Default
# Credentials are built and used instead of the original credentials.
# If there are no scopes found (meaning the given service requires no
# authentication), there is no authorization of the http.
if (isinstance(credentials, GoogleCredentials) and
credentials.create_scoped_required()):
scopes = service.get('auth', {}).get('oauth2', {}).get('scopes', {})
if scopes:
credentials = credentials.create_scoped(list(scopes.keys()))
else:
# No need to authorize the http object
# if the service does not require authentication.
credentials = None
if credentials:
http = credentials.authorize(http)
if model is None:
features = service.get('features', [])
model = JsonModel('dataWrapper' in features)
return Resource(http=http, baseUrl=base, model=model,
developerKey=developerKey, requestBuilder=requestBuilder,
resourceDesc=service, rootDesc=service, schema=schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
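# --- Editor's note: illustrative sketch, not part of the original module. ---
# How _cast() stringifies values before URI template expansion / query strings.
def _example_cast_usage():  # hypothetical; for illustration only
  assert _cast(5, 'integer') == '5'
  assert _cast(2.5, 'number') == '2.5'
  assert _cast(True, 'boolean') == 'true'
  assert _cast(7, 'string') == '7'     # non-strings fall back to str()
  assert _cast('abc', 'any') == 'abc'  # strings pass through unchanged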
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
if bit_shift is not None:
return int(maxSize[:-2]) << bit_shift
else:
return int(maxSize)
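# --- Editor's note: illustrative sketch, not part of the original module. ---
# Size strings as they appear in discovery documents and the integers produced.
def _example_media_size_usage():  # hypothetical; for illustration only
  assert _media_size_to_long('10GB') == 10 * 2 ** 30
  assert _media_size_to_long('3TB') == 3 * 2 ** 40
  assert _media_size_to_long('1024') == 1024  # no unit suffix: plain int()
  assert _media_size_to_long('') == 0         # too short to carry a unit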
def _media_path_url_from_info(root_desc, path_url):
"""Creates an absolute media path URL.
Constructed using the API root URI and service path from the discovery
document and the relative path for the API method.
Args:
root_desc: Dictionary; the entire original deserialized discovery document.
path_url: String; the relative URL for the API method. Relative to the API
root, which is specified in the discovery document.
Returns:
String; the absolute URI for media upload for the API method.
"""
return '%(root)supload/%(service_path)s%(path)s' % {
'root': root_desc['rootUrl'],
'service_path': root_desc['servicePath'],
'path': path_url,
}
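# --- Editor's note: illustrative sketch, not part of the original module. ---
# Building an upload URL from a made-up discovery document fragment.
def _example_media_path_url():  # hypothetical; for illustration only
  root_desc = {
      'rootUrl': 'https://www.googleapis.com/',
      'servicePath': 'drive/v2/',
  }
  assert (_media_path_url_from_info(root_desc, 'files/{fileId}') ==
          'https://www.googleapis.com/upload/drive/v2/files/{fileId}')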
def _fix_up_parameters(method_desc, root_desc, http_method):
"""Updates parameters of an API method with values specific to this library.
Specifically, adds whatever global parameters are specified by the API to the
parameters for the individual method. Also adds parameters which don't
appear in the discovery document, but are available to all discovery based
APIs (these are listed in STACK_QUERY_PARAMETERS).
SIDE EFFECTS: This updates the parameters dictionary object in the method
description.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
http_method: String; the HTTP method used to call the API method described
in method_desc.
Returns:
The updated Dictionary stored in the 'parameters' key of the method
description dictionary.
"""
parameters = method_desc.setdefault('parameters', {})
# Add in the parameters common to all methods.
for name, description in six.iteritems(root_desc.get('parameters', {})):
parameters[name] = description
# Add in undocumented query parameters.
for name in STACK_QUERY_PARAMETERS:
parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
# Add 'body' (our own reserved word) to parameters if the method supports
# a request payload.
if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
body = BODY_PARAMETER_DEFAULT_VALUE.copy()
body.update(method_desc['request'])
parameters['body'] = body
return parameters
def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
"""Updates parameters of API by adding 'media_body' if supported by method.
SIDE EFFECTS: If the method supports media upload and has a required body,
sets body to be optional (required=False) instead. Also, if there is a
'mediaUpload' in the method description, adds 'media_upload' key to
parameters.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
path_url: String; the relative URL for the API method. Relative to the API
root, which is specified in the discovery document.
parameters: A dictionary describing method parameters for method described
in method_desc.
Returns:
Triple (accept, max_size, media_path_url) where:
- accept is a list of strings representing what content types are
accepted for media upload. Defaults to empty list if not in the
discovery document.
- max_size is a long representing the max size in bytes allowed for a
media upload. Defaults to 0L if not in the discovery document.
- media_path_url is a String; the absolute URI for media upload for the
API method. Constructed using the API root URI and service path from
the discovery document and the relative path for the API method. If
media upload is not supported, this is None.
"""
media_upload = method_desc.get('mediaUpload', {})
accept = media_upload.get('accept', [])
max_size = _media_size_to_long(media_upload.get('maxSize', ''))
media_path_url = None
if media_upload:
media_path_url = _media_path_url_from_info(root_desc, path_url)
parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
if 'body' in parameters:
parameters['body']['required'] = False
return accept, max_size, media_path_url
def _fix_up_method_description(method_desc, root_desc):
"""Updates a method description in a discovery document.
SIDE EFFECTS: Changes the parameters dictionary in the method description with
extra parameters which are used locally.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
Returns:
Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)
where:
- path_url is a String; the relative URL for the API method. Relative to
the API root, which is specified in the discovery document.
- http_method is a String; the HTTP method used to call the API method
described in the method description.
- method_id is a String; the name of the RPC method associated with the
API method, and is in the method description in the 'id' key.
- accept is a list of strings representing what content types are
accepted for media upload. Defaults to empty list if not in the
discovery document.
- max_size is a long representing the max size in bytes allowed for a
media upload. Defaults to 0L if not in the discovery document.
- media_path_url is a String; the absolute URI for media upload for the
API method. Constructed using the API root URI and service path from
the discovery document and the relative path for the API method. If
media upload is not supported, this is None.
"""
path_url = method_desc['path']
http_method = method_desc['httpMethod']
method_id = method_desc['id']
parameters = _fix_up_parameters(method_desc, root_desc, http_method)
# Order is important. `_fix_up_media_upload` needs `method_desc` to have a
# 'parameters' key and needs to know if there is a 'body' parameter because it
# also sets a 'media_body' parameter.
accept, max_size, media_path_url = _fix_up_media_upload(
method_desc, root_desc, path_url, parameters)
return path_url, http_method, method_id, accept, max_size, media_path_url
def _urljoin(base, url):
"""Custom urljoin replacement supporting : before / in url."""
# In general, it's unsafe to simply join base and url. However, for
# the case of discovery documents, we know:
# * base will never contain params, query, or fragment
# * url will never contain a scheme or net_loc.
# In general, this means we can safely join on /; we just need to
# ensure we end up with precisely one / joining base and url. The
# exception here is the case of media uploads, where url will be an
# absolute url.
if url.startswith('http://') or url.startswith('https://'):
return urljoin(base, url)
new_base = base if base.endswith('/') else base + '/'
new_url = url[1:] if url.startswith('/') else url
return new_base + new_url
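# --- Editor's note: illustrative sketch, not part of the original module. ---
# The slash normalisation described in the comments above, plus the
# pass-through for absolute media-upload URLs.
def _example_urljoin_usage():  # hypothetical; for illustration only
  base = 'https://www.googleapis.com/drive/v2/'
  assert _urljoin(base, 'files') == 'https://www.googleapis.com/drive/v2/files'
  assert _urljoin(base, '/files') == 'https://www.googleapis.com/drive/v2/files'
  assert (_urljoin('https://www.googleapis.com/drive/v2', 'files') ==
          'https://www.googleapis.com/drive/v2/files')
  assert _urljoin(base, 'https://example.com/upload') == 'https://example.com/upload'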
# TODO(dhermes): Convert this class to ResourceMethod and make it callable
class ResourceMethodParameters(object):
"""Represents the parameters associated with a method.
Attributes:
argmap: Map from method parameter name (string) to query parameter name
(string).
required_params: List of required parameters (represented by parameter
name as string).
repeated_params: List of repeated parameters (represented by parameter
name as string).
pattern_params: Map from method parameter name (string) to regular
expression (as a string). If the pattern is set for a parameter, the
value for that parameter must match the regular expression.
query_params: List of parameters (represented by parameter name as string)
that will be used in the query string.
path_params: Set of parameters (represented by parameter name as string)
that will be used in the base URL path.
param_types: Map from method parameter name (string) to parameter type. Type
can be any valid JSON schema type; valid values are 'any', 'array',
'boolean', 'integer', 'number', 'object', or 'string'. Reference:
http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
enum_params: Map from method parameter name (string) to list of strings,
where each list of strings is the list of acceptable enum values.
"""
def __init__(self, method_desc):
"""Constructor for ResourceMethodParameters.
Sets default values and defers to set_parameters to populate.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document.
"""
self.argmap = {}
self.required_params = []
self.repeated_params = []
self.pattern_params = {}
self.query_params = []
# TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
# parsing is gotten rid of.
self.path_params = set()
self.param_types = {}
self.enum_params = {}
self.set_parameters(method_desc)
def set_parameters(self, method_desc):
"""Populates maps and lists based on method description.
Iterates through each parameter for the method and parses the values from
the parameter dictionary.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document.
"""
for arg, desc in six.iteritems(method_desc.get('parameters', {})):
param = key2param(arg)
self.argmap[param] = arg
if desc.get('pattern'):
self.pattern_params[param] = desc['pattern']
if desc.get('enum'):
self.enum_params[param] = desc['enum']
if desc.get('required'):
self.required_params.append(param)
if desc.get('repeated'):
self.repeated_params.append(param)
if desc.get('location') == 'query':
self.query_params.append(param)
if desc.get('location') == 'path':
self.path_params.add(param)
self.param_types[param] = desc.get('type', 'string')
# TODO(dhermes): Determine if this is still necessary. Discovery based APIs
# should have all path parameters already marked with
# 'location: path'.
for match in URITEMPLATE.finditer(method_desc['path']):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
self.path_params.add(name)
if name in self.query_params:
self.query_params.remove(name)
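# --- Editor's note: illustrative sketch, not part of the original module. ---
# Parsing a minimal, made-up method description into parameter metadata.
def _example_resource_method_parameters():  # hypothetical; for illustration only
  method_desc = {
      'path': 'files/{fileId}',
      'parameters': {
          'fileId': {'type': 'string', 'required': True, 'location': 'path'},
          'max-results': {'type': 'integer', 'location': 'query'},
      },
  }
  params = ResourceMethodParameters(method_desc)
  assert params.argmap == {'fileId': 'fileId', 'max_results': 'max-results'}
  assert params.required_params == ['fileId']
  assert 'fileId' in params.path_params
  assert params.query_params == ['max_results']
  assert params.param_types['max_results'] == 'integer'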
def createMethod(methodName, methodDesc, rootDesc, schema):
"""Creates a method for attaching to a Resource.
Args:
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
"""
methodName = fix_method_name(methodName)
(pathUrl, httpMethod, methodId, accept,
maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)
parameters = ResourceMethodParameters(methodDesc)
def method(self, **kwargs):
# Don't bother with doc string, it will be over-written by createMethod.
for name in six.iterkeys(kwargs):
if name not in parameters.argmap:
raise TypeError('Got an unexpected keyword argument "%s"' % name)
# Remove args that have a value of None.
keys = list(kwargs.keys())
for name in keys:
if kwargs[name] is None:
del kwargs[name]
for name in parameters.required_params:
if name not in kwargs:
raise TypeError('Missing required parameter "%s"' % name)
for name, regex in six.iteritems(parameters.pattern_params):
if name in kwargs:
if isinstance(kwargs[name], six.string_types):
pvalues = [kwargs[name]]
else:
pvalues = kwargs[name]
for pvalue in pvalues:
if re.match(regex, pvalue) is None:
raise TypeError(
'Parameter "%s" value "%s" does not match the pattern "%s"' %
(name, pvalue, regex))
for name, enums in six.iteritems(parameters.enum_params):
if name in kwargs:
# We need to handle the case of a repeated enum
# name differently, since we want to handle both
# arg='value' and arg=['value1', 'value2']
if (name in parameters.repeated_params and
not isinstance(kwargs[name], six.string_types)):
values = kwargs[name]
else:
values = [kwargs[name]]
for value in values:
if value not in enums:
raise TypeError(
'Parameter "%s" value "%s" is not an allowed value in "%s"' %
(name, value, str(enums)))
actual_query_params = {}
actual_path_params = {}
for key, value in six.iteritems(kwargs):
to_type = parameters.param_types.get(key, 'string')
# For repeated parameters we cast each member of the list.
if key in parameters.repeated_params and type(value) == type([]):
cast_value = [_cast(x, to_type) for x in value]
else:
cast_value = _cast(value, to_type)
if key in parameters.query_params:
actual_query_params[parameters.argmap[key]] = cast_value
if key in parameters.path_params:
actual_path_params[parameters.argmap[key]] = cast_value
body_value = kwargs.get('body', None)
media_filename = kwargs.get('media_body', None)
if self._developerKey:
actual_query_params['key'] = self._developerKey
model = self._model
if methodName.endswith('_media'):
model = MediaModel()
elif 'response' not in methodDesc:
model = RawModel()
headers = {}
headers, params, query, body = model.request(headers,
actual_path_params, actual_query_params, body_value)
expanded_url = uritemplate.expand(pathUrl, params)
url = _urljoin(self._baseUrl, expanded_url + query)
resumable = None
multipart_boundary = ''
if media_filename:
# Ensure we end up with a valid MediaUpload object.
if isinstance(media_filename, six.string_types):
(media_mime_type, encoding) = mimetypes.guess_type(media_filename)
if media_mime_type is None:
raise UnknownFileType(media_filename)
if not mimeparse.best_match([media_mime_type], ','.join(accept)):
raise UnacceptableMimeTypeError(media_mime_type)
media_upload = MediaFileUpload(media_filename,
mimetype=media_mime_type)
elif isinstance(media_filename, MediaUpload):
media_upload = media_filename
else:
raise TypeError('media_filename must be str or MediaUpload.')
# Check the maxSize
if media_upload.size() is not None and media_upload.size() > maxSize > 0:
raise MediaUploadSizeError("Media larger than: %s" % maxSize)
# Use the media path uri for media uploads
expanded_url = uritemplate.expand(mediaPathUrl, params)
url = _urljoin(self._baseUrl, expanded_url + query)
if media_upload.resumable():
url = _add_query_parameter(url, 'uploadType', 'resumable')
if media_upload.resumable():
# This is all we need to do for resumable, if the body exists it gets
# sent in the first request, otherwise an empty body is sent.
resumable = media_upload
else:
# A non-resumable upload
if body is None:
# This is a simple media upload
headers['content-type'] = media_upload.mimetype()
body = media_upload.getbytes(0, media_upload.size())
url = _add_query_parameter(url, 'uploadType', 'media')
else:
# This is a multipart/related upload.
msgRoot = MIMEMultipart('related')
          # msgRoot should not write out its own headers
setattr(msgRoot, '_write_headers', lambda self: None)
# attach the body as one part
msg = MIMENonMultipart(*headers['content-type'].split('/'))
msg.set_payload(body)
msgRoot.attach(msg)
# attach the media as the second part
msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
payload = media_upload.getbytes(0, media_upload.size())
msg.set_payload(payload)
msgRoot.attach(msg)
# encode the body: note that we can't use `as_string`, because
# it plays games with `From ` lines.
fp = StringIO()
g = Generator(fp, mangle_from_=False)
g.flatten(msgRoot, unixfrom=False)
body = fp.getvalue()
multipart_boundary = msgRoot.get_boundary()
headers['content-type'] = ('multipart/related; '
'boundary="%s"') % multipart_boundary
url = _add_query_parameter(url, 'uploadType', 'multipart')
    logger.info('URL being requested: %s %s' % (httpMethod, url))
return self._requestBuilder(self._http,
model.response,
url,
method=httpMethod,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(parameters.argmap) > 0:
docs.append('Args:\n')
# Skip undocumented params and params common to all methods.
skip_parameters = list(rootDesc.get('parameters', {}).keys())
skip_parameters.extend(STACK_QUERY_PARAMETERS)
all_args = list(parameters.argmap.keys())
args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
# Move body to the front of the line.
if 'body' in all_args:
args_ordered.append('body')
for name in all_args:
if name not in args_ordered:
args_ordered.append(name)
for arg in args_ordered:
if arg in skip_parameters:
continue
repeated = ''
if arg in parameters.repeated_params:
repeated = ' (repeated)'
required = ''
if arg in parameters.required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
if '$ref' in paramdesc:
docs.append(
(' %s: object, %s%s%s\n The object takes the'
' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
schema.prettyPrintByName(paramdesc['$ref'])))
else:
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
if 'response' in methodDesc:
if methodName.endswith('_media'):
docs.append('\nReturns:\n The media object as a string.\n\n ')
else:
docs.append('\nReturns:\n An object of the form:\n\n ')
docs.append(schema.prettyPrintSchema(methodDesc['response']))
setattr(method, '__doc__', ''.join(docs))
return (methodName, method)
def createNextMethod(methodName):
"""Creates any _next methods for attaching to a Resource.
The _next methods allow for easy iteration through list() responses.
Args:
methodName: string, name of the method to use.
"""
methodName = fix_method_name(methodName)
def methodNext(self, previous_request, previous_response):
"""Retrieves the next page of results.
Args:
previous_request: The request for the previous page. (required)
previous_response: The response from the request for the previous page. (required)
Returns:
A request object that you can call 'execute()' on to request the next
page. Returns None if there are no more items in the collection.
"""
# Retrieve nextPageToken from previous_response
# Use as pageToken in previous_request to create new request.
if 'nextPageToken' not in previous_response:
return None
request = copy.copy(previous_request)
pageToken = previous_response['nextPageToken']
parsed = list(urlparse(request.uri))
q = parse_qsl(parsed[4])
# Find and remove old 'pageToken' value from URI
newq = [(key, value) for (key, value) in q if key != 'pageToken']
newq.append(('pageToken', pageToken))
parsed[4] = urlencode(newq)
uri = urlunparse(parsed)
request.uri = uri
    logger.info('URL being requested: %s %s' % (methodName, uri))
return request
return (methodName, methodNext)
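# --- Editor's note: illustrative sketch, not part of the original module. ---
# The pagination loop the generated list()/list_next() pair is meant for;
# 'service' and the 'files' collection are placeholders.
def _example_pagination_loop(service):  # hypothetical; for illustration only
  items = []
  request = service.files().list()
  while request is not None:
    response = request.execute()
    items.extend(response.get('items', []))
    request = service.files().list_next(request, response)
  return items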
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
resourceDesc, rootDesc, schema):
"""Build a Resource from the API description.
Args:
http: httplib2.Http, Object to make http requests with.
baseUrl: string, base URL for the API. All requests are relative to this
URI.
model: googleapiclient.Model, converts to and from the wire format.
requestBuilder: class or callable that instantiates an
googleapiclient.HttpRequest object.
developerKey: string, key obtained from
https://code.google.com/apis/console
resourceDesc: object, section of deserialized discovery document that
describes a resource. Note that the top level discovery document
is considered a resource.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
"""
self._dynamic_attrs = []
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
self._resourceDesc = resourceDesc
self._rootDesc = rootDesc
self._schema = schema
self._set_service_methods()
def _set_dynamic_attr(self, attr_name, value):
"""Sets an instance attribute and tracks it in a list of dynamic attributes.
Args:
attr_name: string; The name of the attribute to be set
value: The value being set on the object and tracked in the dynamic cache.
"""
self._dynamic_attrs.append(attr_name)
self.__dict__[attr_name] = value
def __getstate__(self):
"""Trim the state down to something that can be pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
state_dict = copy.copy(self.__dict__)
for dynamic_attr in self._dynamic_attrs:
del state_dict[dynamic_attr]
del state_dict['_dynamic_attrs']
return state_dict
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
self.__dict__.update(state)
self._dynamic_attrs = []
self._set_service_methods()
def _set_service_methods(self):
self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
self._add_next_methods(self._resourceDesc, self._schema)
def _add_basic_methods(self, resourceDesc, rootDesc, schema):
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
fixedMethodName, method = createMethod(
methodName, methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
fixedMethodName, method = createMethod(
methodName + '_media', methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_nested_resources(self, resourceDesc, rootDesc, schema):
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(methodName, methodDesc):
"""Create a method on the Resource to access a nested Resource.
Args:
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
"""
methodName = fix_method_name(methodName)
def methodResource(self):
return Resource(http=self._http, baseUrl=self._baseUrl,
model=self._model, developerKey=self._developerKey,
requestBuilder=self._requestBuilder,
resourceDesc=methodDesc, rootDesc=rootDesc,
schema=schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
return (methodName, methodResource)
for methodName, methodDesc in six.iteritems(resourceDesc['resources']):
fixedMethodName, method = createResourceMethod(methodName, methodDesc)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_next_methods(self, resourceDesc, schema):
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
fixedMethodName, method = createNextMethod(methodName + '_next')
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
| bsd-3-clause |
j3parker/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
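# --- Editor's note: illustrative sketch, not part of the original file. ---
# One way to drive this walker, assuming this html5lib version registers it
# under the name "pulldom"; the XML snippet is a made-up example.
def _example_pulldom_walk():  # hypothetical; for illustration only
    from xml.dom import pulldom
    from html5lib.treewalkers import getTreeWalker
    events = pulldom.parseString("<p>hello<br/>world</p>")
    walker = getTreeWalker("pulldom")
    return [token["type"] for token in walker(events)]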
| mpl-2.0 |
40223134/w16b_test | static/Brython3.1.3-20150514-095342/Lib/xml/dom/__init__.py | 873 | 4019 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
"""Class giving the NodeType constants."""
__slots__ = ()
# DOM implementations may use this as a base class for their own
# Node implementations. If they don't, the constants defined here
# should still be used as the canonical definitions as they match
# the values given in the W3C recommendation. Client code can
# safely refer to these values in all tests of Node.nodeType
# values.
ELEMENT_NODE = 1
ATTRIBUTE_NODE = 2
TEXT_NODE = 3
CDATA_SECTION_NODE = 4
ENTITY_REFERENCE_NODE = 5
ENTITY_NODE = 6
PROCESSING_INSTRUCTION_NODE = 7
COMMENT_NODE = 8
DOCUMENT_NODE = 9
DOCUMENT_TYPE_NODE = 10
DOCUMENT_FRAGMENT_NODE = 11
NOTATION_NODE = 12
#ExceptionCode
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
"""Abstract base class for DOM exceptions.
Exceptions with specific codes are specializations of this class."""
def __init__(self, *args, **kw):
if self.__class__ is DOMException:
raise RuntimeError(
"DOMException should not be instantiated directly")
Exception.__init__(self, *args, **kw)
def _get_code(self):
return self.code
class IndexSizeErr(DOMException):
code = INDEX_SIZE_ERR
class DomstringSizeErr(DOMException):
code = DOMSTRING_SIZE_ERR
class HierarchyRequestErr(DOMException):
code = HIERARCHY_REQUEST_ERR
class WrongDocumentErr(DOMException):
code = WRONG_DOCUMENT_ERR
class InvalidCharacterErr(DOMException):
code = INVALID_CHARACTER_ERR
class NoDataAllowedErr(DOMException):
code = NO_DATA_ALLOWED_ERR
class NoModificationAllowedErr(DOMException):
code = NO_MODIFICATION_ALLOWED_ERR
class NotFoundErr(DOMException):
code = NOT_FOUND_ERR
class NotSupportedErr(DOMException):
code = NOT_SUPPORTED_ERR
class InuseAttributeErr(DOMException):
code = INUSE_ATTRIBUTE_ERR
class InvalidStateErr(DOMException):
code = INVALID_STATE_ERR
class SyntaxErr(DOMException):
code = SYNTAX_ERR
class InvalidModificationErr(DOMException):
code = INVALID_MODIFICATION_ERR
class NamespaceErr(DOMException):
code = NAMESPACE_ERR
class InvalidAccessErr(DOMException):
code = INVALID_ACCESS_ERR
class ValidationErr(DOMException):
code = VALIDATION_ERR
class UserDataHandler:
"""Class giving the operation constants for UserDataHandler.handle()."""
# Based on DOM Level 3 (WD 9 April 2002)
NODE_CLONED = 1
NODE_IMPORTED = 2
NODE_DELETED = 3
NODE_RENAMED = 4
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
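# --- Editor's note: illustrative sketch, not part of the original module. ---
# Typical use of the package: obtain a DOM implementation, build a document,
# and read back one of the Node constants defined above.
def _example_dom_usage():  # hypothetical; for illustration only
    impl = getDOMImplementation()
    doc = impl.createDocument(EMPTY_NAMESPACE, "root", None)
    assert doc.documentElement.nodeType == Node.ELEMENT_NODE
    return doc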
| agpl-3.0 |
bmillham/DJDB | djdb.py | 1 | 1111 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from gi.repository import Gtk
from os.path import dirname, realpath, join, expanduser
import cPickle
from gladeobjects import get_glade_objects, field_names
from LibScan import LibScan
# Hack to fix unicode issues in Python 2.7
import sys
reload(sys)
sys.setdefaultencoding("UTF8")
home = expanduser("~")
configfile = join(home, ".djdb.p")
gladefile = join(dirname(realpath(__file__)), "djdb.glade")
try:
options = cPickle.load(open(configfile, "rb"))
except:
options = None
builder = Gtk.Builder()
try:
builder.add_from_file(gladefile)
except Exception as e:
print "Unable to find glade file: ", gladefile, e
exit()
gobject = get_glade_objects(builder)
builder.connect_signals(LibScan(builder, configfile, options, gobject))
if options is None:
print "No options file, running setup"
#gobject['firstrun_assistant'].show_all()
gobject['assistant1'].show_all()
else:
gobject['main_window'].show_all()
gobject['main_window'].set_title('DJDB: {}@{}'.format(options['db']['database'], options['db']['server']))
Gtk.main()
| gpl-2.0 |
mozilla/kitsune | kitsune/questions/tests/test_models.py | 1 | 22191 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from unittest import mock
from actstream.models import Action, Follow
from django.core.management import call_command
from django.db.models import Q
from nose.tools import eq_, ok_, raises
from taggit.models import Tag
import kitsune.sumo.models
from kitsune.flagit.models import FlaggedObject
from kitsune.questions import config, models
from kitsune.questions.models import (
AlreadyTakenException,
Answer,
InvalidUserException,
Question,
QuestionMetaData,
QuestionVisits,
VoteMetadata,
_has_beta,
_tenths_version,
)
from kitsune.questions.tasks import update_answer_pages
from kitsune.questions.tests import (
AnswerFactory,
QuestionFactory,
QuestionVoteFactory,
TestCaseBase,
tags_eq,
)
from kitsune.search.tests import Elastic7TestCase
from kitsune.sumo import googleanalytics
from kitsune.sumo.tests import TestCase
from kitsune.tags.tests import TagFactory
from kitsune.tags.utils import add_existing_tag
from kitsune.users.tests import UserFactory
from kitsune.wiki.tests import TranslatedRevisionFactory
class TestAnswer(TestCaseBase):
"""Test the Answer model"""
def test_new_answer_updates_question(self):
"""Test saving a new answer updates the corresponding question.
Specifically, last_post and num_replies should update."""
q = QuestionFactory(title="Test Question", content="Lorem Ipsum Dolor")
updated = q.updated
eq_(0, q.num_answers)
eq_(None, q.last_answer)
a = AnswerFactory(question=q, content="Test Answer")
a.save()
q = Question.objects.get(pk=q.id)
eq_(1, q.num_answers)
eq_(a, q.last_answer)
self.assertNotEqual(updated, q.updated)
def test_delete_question_removes_flag(self):
"""Deleting a question also removes the flags on that question."""
q = QuestionFactory(title="Test Question", content="Lorem Ipsum Dolor")
u = UserFactory()
FlaggedObject.objects.create(
status=0, content_object=q, reason="language", creator_id=u.id
)
eq_(1, FlaggedObject.objects.count())
q.delete()
eq_(0, FlaggedObject.objects.count())
def test_delete_answer_removes_flag(self):
"""Deleting an answer also removes the flags on that answer."""
q = QuestionFactory(title="Test Question", content="Lorem Ipsum Dolor")
a = AnswerFactory(question=q, content="Test Answer")
u = UserFactory()
FlaggedObject.objects.create(
status=0, content_object=a, reason="language", creator_id=u.id
)
eq_(1, FlaggedObject.objects.count())
a.delete()
eq_(0, FlaggedObject.objects.count())
def test_delete_last_answer_of_question(self):
"""Deleting the last_answer of a Question should update the question."""
yesterday = datetime.now() - timedelta(days=1)
q = AnswerFactory(created=yesterday).question
last_answer = q.last_answer
# add a new answer and verify last_answer updated
a = AnswerFactory(question=q, content="Test Answer")
q = Question.objects.get(pk=q.id)
eq_(q.last_answer.id, a.id)
# delete the answer and last_answer should go back to previous value
a.delete()
q = Question.objects.get(pk=q.id)
eq_(q.last_answer.id, last_answer.id)
eq_(Answer.objects.filter(pk=a.id).count(), 0)
def test_delete_solution_of_question(self):
"""Deleting the solution of a Question should update the question."""
# set a solution to the question
q = AnswerFactory().question
solution = q.last_answer
q.solution = solution
q.save()
# delete the solution and question.solution should go back to None
solution.delete()
q = Question.objects.get(pk=q.id)
eq_(q.solution, None)
def test_update_page_task(self):
a = AnswerFactory()
a.page = 4
a.save()
a = Answer.objects.get(pk=a.id)
assert a.page == 4
update_answer_pages(a.question.id)
a = Answer.objects.get(pk=a.id)
assert a.page == 1
def test_delete_updates_pages(self):
a1 = AnswerFactory()
a2 = AnswerFactory(question=a1.question)
AnswerFactory(question=a1.question)
a1.page = 7
a1.save()
a2.delete()
a3 = Answer.objects.filter(question=a1.question)[0]
assert a3.page == 1, "Page was %s" % a3.page
def test_creator_num_answers(self):
a = AnswerFactory()
eq_(a.creator_num_answers, 1)
AnswerFactory(creator=a.creator)
eq_(a.creator_num_answers, 2)
def test_creator_num_solutions(self):
a = AnswerFactory()
q = a.question
q.solution = a
q.save()
eq_(a.creator_num_solutions, 1)
def test_content_parsed_with_locale(self):
"""Make sure links to localized articles work."""
rev = TranslatedRevisionFactory(
is_approved=True, document__title="Un mejor títuolo", document__locale="es"
)
a = AnswerFactory(question__locale="es", content="[[%s]]" % rev.document.title)
assert "es/kb/%s" % rev.document.slug in a.content_parsed
def test_creator_follows(self):
a = AnswerFactory()
follows = Follow.objects.filter(user=a.creator)
# It's a pain to filter this from the DB, since follow_object is a
# ContentType field, so instead, do it in Python.
eq_(len(follows), 2)
answer_follow = [f for f in follows if f.follow_object == a][0]
question_follow = [f for f in follows if f.follow_object == a.question][0]
eq_(question_follow.actor_only, False)
eq_(answer_follow.actor_only, False)
class TestQuestionMetadata(TestCaseBase):
"""Tests handling question metadata"""
def setUp(self):
super(TestQuestionMetadata, self).setUp()
# add a new Question to test with
self.question = QuestionFactory(title="Test Question", content="Lorem Ipsum Dolor")
def test_add_metadata(self):
"""Test the saving of metadata."""
metadata = {"version": "3.6.3", "os": "Windows 7"}
self.question.add_metadata(**metadata)
saved = QuestionMetaData.objects.filter(question=self.question)
eq_(dict((x.name, x.value) for x in saved), metadata)
def test_metadata_property(self):
"""Test the metadata property on Question model."""
self.question.add_metadata(crash_id="1234567890")
eq_("1234567890", self.question.metadata["crash_id"])
def test_product_property(self):
"""Test question.product property."""
self.question.add_metadata(product="desktop")
eq_(config.products["desktop"], self.question.product_config)
def test_category_property(self):
"""Test question.category property."""
self.question.add_metadata(product="desktop")
self.question.add_metadata(category="fix-problems")
eq_(
config.products["desktop"]["categories"]["fix-problems"],
self.question.category_config,
)
def test_clear_mutable_metadata(self):
"""Make sure it works and clears the internal cache.
crash_id should get cleared, while product, category, and useragent
should remain.
"""
q = self.question
q.add_metadata(
product="desktop",
category="fix-problems",
useragent="Fyerfocks",
crash_id="7",
)
q.metadata
q.clear_mutable_metadata()
md = q.metadata
assert "crash_id" not in md, "clear_mutable_metadata() didn't clear the cached metadata."
eq_(dict(product="desktop", category="fix-problems", useragent="Fyerfocks"), md)
def test_auto_tagging(self):
"""Make sure tags get applied based on metadata on first save."""
Tag.objects.create(slug="green", name="green")
Tag.objects.create(slug="Fix problems", name="fix-problems")
q = self.question
q.add_metadata(product="desktop", category="fix-problems", ff_version="3.6.8", os="GREen")
q.save()
q.auto_tag()
tags_eq(q, ["desktop", "fix-problems", "Firefox 3.6.8", "Firefox 3.6", "green"])
def test_auto_tagging_aurora(self):
"""Make sure versions with prerelease suffix are tagged properly."""
q = self.question
q.add_metadata(ff_version="18.0a2")
q.save()
q.auto_tag()
tags_eq(q, ["Firefox 18.0"])
def test_auto_tagging_restraint(self):
"""Auto-tagging shouldn't tag unknown Firefox versions or OSes."""
q = self.question
q.add_metadata(ff_version="allyourbase", os="toaster 1.0")
q.save()
q.auto_tag()
tags_eq(q, [])
def test_tenths_version(self):
"""Test the filter that turns 1.2.3 into 1.2."""
eq_(_tenths_version("1.2.3beta3"), "1.2")
eq_(_tenths_version("1.2rc"), "1.2")
eq_(_tenths_version("1.w"), "")
def test_has_beta(self):
"""Test the _has_beta helper."""
assert _has_beta("5.0", {"5.0b3": "2011-06-01"})
assert not _has_beta("6.0", {"5.0b3": "2011-06-01"})
assert not _has_beta("5.5", {"5.0b3": "2011-06-01"})
assert _has_beta("5.7", {"5.7b1": "2011-06-01"})
assert _has_beta("11.0", {"11.0b7": "2011-06-01"})
assert not _has_beta("10.0", {"11.0b7": "2011-06-01"})
class QuestionTests(TestCaseBase):
"""Tests for Question model"""
def test_save_updated(self):
"""Saving with the `update` option should update `updated`."""
q = QuestionFactory()
updated = q.updated
q.save(update=True)
self.assertNotEqual(updated, q.updated)
def test_save_no_update(self):
"""Saving without the `update` option shouldn't update `updated`."""
q = QuestionFactory()
updated = q.updated
q.save()
eq_(updated, q.updated)
def test_default_manager(self):
"""Assert Question's default manager is SUMO's ManagerBase.
This is easy to get wrong when mixing in taggability.
"""
eq_(
Question._default_manager.__class__,
kitsune.questions.managers.QuestionManager,
)
def test_is_solved_property(self):
a = AnswerFactory()
q = a.question
assert not q.is_solved
q.solution = a
q.save()
assert q.is_solved
def test_recent_counts(self):
"""Verify recent_asked_count and recent unanswered count."""
# create a question for each of past 4 days
now = datetime.now()
QuestionFactory(created=now)
QuestionFactory(created=now - timedelta(hours=12), is_locked=True)
q = QuestionFactory(created=now - timedelta(hours=23))
AnswerFactory(question=q)
# 25 hours instead of 24 to avoid random test fails.
QuestionFactory(created=now - timedelta(hours=25))
# Only 3 are recent from last 72 hours, 1 has an answer.
eq_(3, Question.recent_asked_count())
eq_(1, Question.recent_unanswered_count())
def test_recent_counts_with_filter(self):
"""Verify that recent_asked_count and recent_unanswered_count
respect filters passed."""
now = datetime.now()
QuestionFactory(created=now, locale="en-US")
q = QuestionFactory(created=now, locale="en-US")
AnswerFactory(question=q)
QuestionFactory(created=now, locale="pt-BR")
QuestionFactory(created=now, locale="pt-BR")
q = QuestionFactory(created=now, locale="pt-BR")
AnswerFactory(question=q)
# 5 asked recently, 3 are unanswered
eq_(5, Question.recent_asked_count())
eq_(3, Question.recent_unanswered_count())
# check english (2 asked, 1 unanswered)
locale_filter = Q(locale="en-US")
eq_(2, Question.recent_asked_count(locale_filter))
eq_(1, Question.recent_unanswered_count(locale_filter))
# check pt-BR (3 asked, 2 unanswered)
locale_filter = Q(locale="pt-BR")
eq_(3, Question.recent_asked_count(locale_filter))
eq_(2, Question.recent_unanswered_count(locale_filter))
def test_from_url(self):
"""Verify question returned from valid URL."""
q = QuestionFactory()
eq_(q, Question.from_url("/en-US/questions/%s" % q.id))
eq_(q, Question.from_url("/es/questions/%s" % q.id))
eq_(q, Question.from_url("/questions/%s" % q.id))
def test_from_url_id_only(self):
"""Verify question returned from valid URL."""
# When requesting the id, the existence of the question isn't checked.
eq_(123, Question.from_url("/en-US/questions/123", id_only=True))
eq_(234, Question.from_url("/es/questions/234", id_only=True))
eq_(345, Question.from_url("/questions/345", id_only=True))
def test_from_invalid_url(self):
"""Verify question returned from valid URL."""
q = QuestionFactory()
eq_(None, Question.from_url("/en-US/questions/%s/edit" % q.id))
eq_(None, Question.from_url("/en-US/kb/%s" % q.id))
eq_(None, Question.from_url("/random/url"))
eq_(None, Question.from_url("/en-US/questions/dashboard/metrics"))
def test_editable(self):
q = QuestionFactory()
assert q.editable # unlocked/unarchived
q.is_archived = True
assert not q.editable # unlocked/archived
q.is_locked = True
assert not q.editable # locked/archived
q.is_archived = False
assert not q.editable # locked/unarchived
q.is_locked = False
assert q.editable # unlocked/unarchived
def test_age(self):
now = datetime.now()
ten_days_ago = now - timedelta(days=10)
thirty_seconds_ago = now - timedelta(seconds=30)
q1 = QuestionFactory(created=ten_days_ago)
q2 = QuestionFactory(created=thirty_seconds_ago)
# This test relies on datetime.now() being called in the age
# property, so this delta check makes it less likely to fail
# randomly.
assert abs(q1.age - 10 * 24 * 60 * 60) < 2, "q1.age (%s) != 10 days" % q1.age
assert abs(q2.age - 30) < 2, "q2.age (%s) != 30 seconds" % q2.age
def test_is_taken(self):
q = QuestionFactory()
u = UserFactory()
eq_(q.is_taken, False)
q.taken_by = u
q.taken_until = datetime.now() + timedelta(seconds=600)
q.save()
eq_(q.is_taken, True)
q.taken_by = None
q.taken_until = None
q.save()
eq_(q.is_taken, False)
def test_take(self):
u = UserFactory()
q = QuestionFactory()
q.take(u)
eq_(q.taken_by, u)
ok_(q.taken_until is not None)
@raises(InvalidUserException)
def test_take_creator(self):
q = QuestionFactory()
q.take(q.creator)
@raises(AlreadyTakenException)
def test_take_twice_fails(self):
u1 = UserFactory()
u2 = UserFactory()
q = QuestionFactory()
q.take(u1)
q.take(u2)
def test_take_twice_same_user_refreshes_time(self):
u = UserFactory()
first_taken_until = datetime.now() - timedelta(minutes=5)
q = QuestionFactory(taken_by=u, taken_until=first_taken_until)
q.take(u)
ok_(q.taken_until > first_taken_until)
def test_take_twice_forced(self):
u1 = UserFactory()
u2 = UserFactory()
q = QuestionFactory()
q.take(u1)
q.take(u2, force=True)
eq_(q.taken_by, u2)
def test_taken_until_is_set(self):
u = UserFactory()
q = QuestionFactory()
q.take(u)
assert q.taken_until > datetime.now()
def test_is_taken_clears(self):
u = UserFactory()
taken_until = datetime.now() - timedelta(seconds=30)
q = QuestionFactory(taken_by=u, taken_until=taken_until)
        # Testing q.is_taken should clear out ``taken_by`` and ``taken_until``,
# since taken_until is in the past.
eq_(q.is_taken, False)
eq_(q.taken_by, None)
eq_(q.taken_until, None)
def test_creator_follows(self):
q = QuestionFactory()
f = Follow.objects.get(user=q.creator)
eq_(f.follow_object, q)
eq_(f.actor_only, False)
class AddExistingTagTests(TestCaseBase):
"""Tests for the add_existing_tag helper function."""
def setUp(self):
super(AddExistingTagTests, self).setUp()
self.untagged_question = QuestionFactory()
def test_tags_manager(self):
"""Make sure the TaggableManager exists.
Full testing of functionality is a matter for taggit's tests.
"""
tags_eq(self.untagged_question, [])
def test_add_existing_case_insensitive(self):
"""Assert add_existing_tag works case-insensitively."""
TagFactory(name="lemon", slug="lemon")
add_existing_tag("LEMON", self.untagged_question.tags)
tags_eq(self.untagged_question, ["lemon"])
@raises(Tag.DoesNotExist)
def test_add_existing_no_such_tag(self):
"""Assert add_existing_tag doesn't work when the tag doesn't exist."""
add_existing_tag("nonexistent tag", self.untagged_question.tags)
class OldQuestionsArchiveTest(Elastic7TestCase):
search_tests = True
def test_archive_old_questions(self):
last_updated = datetime.now() - timedelta(days=100)
# created just now
q1 = QuestionFactory()
# created 200 days ago
q2 = QuestionFactory(created=datetime.now() - timedelta(days=200), updated=last_updated)
# created 200 days ago, already archived
q3 = QuestionFactory(
created=datetime.now() - timedelta(days=200),
is_archived=True,
updated=last_updated,
)
self.refresh()
call_command("auto_archive_old_questions")
# There are three questions.
eq_(len(list(Question.objects.all())), 3)
# q2 and q3 are now archived and updated times are the same
archived_questions = list(Question.objects.filter(is_archived=True))
eq_(
sorted([(q.id, q.updated.date()) for q in archived_questions]),
[(q.id, q.updated.date()) for q in [q2, q3]],
)
# q1 is still unarchived.
archived_questions = list(Question.objects.filter(is_archived=False))
eq_(sorted([q.id for q in archived_questions]), [q1.id])
class QuestionVisitsTests(TestCase):
"""Tests for the pageview statistics gathering."""
# Need to monkeypatch close_old_connections out because it
# does something screwy with the testing infra around transactions.
@mock.patch.object(models, "close_old_connections")
@mock.patch.object(
googleanalytics,
"pageviews_by_question",
)
def test_visit_count_from_analytics(self, pageviews_by_question, close_old_connections):
"""Verify stored visit counts from mocked data."""
q1 = QuestionFactory()
q2 = QuestionFactory()
q3 = QuestionFactory()
pageviews_by_question.return_value = {
q1.id: 42,
q2.id: 27,
q3.id: 1337,
123459: 3,
}
QuestionVisits.reload_from_analytics()
eq_(3, QuestionVisits.objects.count())
eq_(42, QuestionVisits.objects.get(question_id=q1.id).visits)
eq_(27, QuestionVisits.objects.get(question_id=q2.id).visits)
eq_(1337, QuestionVisits.objects.get(question_id=q3.id).visits)
# Change the data and run again to cover the update case.
pageviews_by_question.return_value = {
q1.id: 100,
q2.id: 200,
q3.id: 300,
}
QuestionVisits.reload_from_analytics()
eq_(3, QuestionVisits.objects.count())
eq_(100, QuestionVisits.objects.get(question_id=q1.id).visits)
eq_(200, QuestionVisits.objects.get(question_id=q2.id).visits)
eq_(300, QuestionVisits.objects.get(question_id=q3.id).visits)
class QuestionVoteTests(TestCase):
def test_add_metadata_over_1000_chars(self):
qv = QuestionVoteFactory()
qv.add_metadata("test1", "a" * 1001)
metadata = VoteMetadata.objects.all()[0]
eq_("a" * 1000, metadata.value)
class TestActions(TestCase):
def test_question_create_action(self):
"""When a question is created, an Action is created too."""
q = QuestionFactory()
a = Action.objects.action_object(q).get()
eq_(a.actor, q.creator)
eq_(a.verb, "asked")
eq_(a.target, None)
def test_answer_create_action(self):
"""When an answer is created, an Action is created too."""
q = QuestionFactory()
ans = AnswerFactory(question=q)
act = Action.objects.action_object(ans).get()
eq_(act.actor, ans.creator)
eq_(act.verb, "answered")
eq_(act.target, q)
def test_question_change_no_action(self):
"""When a question is changed, no Action should be created."""
q = QuestionFactory()
Action.objects.all().delete()
q.save() # trigger another post_save hook
eq_(Action.objects.count(), 0)
def test_answer_change_no_action(self):
"""When an answer is changed, no Action should be created."""
q = QuestionFactory()
Action.objects.all().delete()
q.save() # trigger another post_save hook
eq_(Action.objects.count(), 0)
def test_question_solved_makes_action(self):
"""When an answer is marked as the solution to a question, an Action should be created."""
ans = AnswerFactory()
Action.objects.all().delete()
ans.question.set_solution(ans, ans.question.creator)
act = Action.objects.action_object(ans).get()
eq_(act.actor, ans.question.creator)
eq_(act.verb, "marked as a solution")
eq_(act.target, ans.question)
| bsd-3-clause |
BaconPancakes/valor | lib/cffi/vengine_cpy.py | 19 | 41325 | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
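            # e.g. a hypothetical cdef "int snprintf(char *, size_t, const char *, ...);"
            # simply shows up on the library object as a <cdata> function-pointer
            # constant, with no generated CPython wrapper around it.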
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
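    # For instance, a hypothetical cdef "typedef struct { int x, y; } point_t;"
    # reaches this code as an anonymous struct whose name, 'point_t', comes
    # from the typedef rather than from a struct tag.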
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = (tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
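        # e.g. for a hypothetical cdef "int global_flag;", reading
        # library.global_flag returns ptr[0] and assigning library.global_flag
        # writes into ptr[0], i.e. directly into the C variable.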
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| gpl-3.0 |
ulrichard/electrum | gui/kivy/uix/dialogs/choice_dialog.py | 2 | 1927 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.widget import Widget
Builder.load_string('''
<ChoiceDialog@Popup>
id: popup
title: ''
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
ScrollView:
orientation: 'vertical'
size_hint: 1, 0.8
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 2
size_hint: 1, 1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(popup.value)
popup.dismiss()
''')
class ChoiceDialog(Factory.Popup):
def __init__(self, title, choices, key, callback):
Factory.Popup.__init__(self)
for k, v in choices.items():
l = Label(text=v)
l.height = '48dp'
cb = CheckBox(group='choices')
cb.value = k
cb.height = '48dp'
def f(cb, x):
if x: self.value = cb.value
cb.bind(active=f)
if k == key:
cb.active = True
self.ids.choices.add_widget(l)
self.ids.choices.add_widget(cb)
self.ids.choices.add_widget(Widget(size_hint_y=1))
self.callback = callback
self.title = title
self.value = key
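# Minimal usage sketch (assumed caller code, not part of this module): 'choices'
# maps values to their displayed labels, 'key' pre-selects one entry, and the
# value of the ticked choice is passed to 'callback' when OK is pressed.
#
#   dialog = ChoiceDialog('Language', {'en': 'English', 'fr': 'French'}, 'en',
#                         on_language_chosen)
#   dialog.open()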
| gpl-3.0 |
w1kke/pylearn2 | pylearn2/scripts/tutorials/tests/test_dbm.py | 49 | 1067 | """
This module tests dbm_demo/rbm.yaml
"""
import os
from pylearn2.testing import skip
from pylearn2.testing import no_debug_mode
from pylearn2.config import yaml_parse
@no_debug_mode
def train_yaml(yaml_file):
train = yaml_parse.load(yaml_file)
train.main_loop()
def train(yaml_file_path, save_path):
yaml = open("{0}/rbm.yaml".format(yaml_file_path), 'r').read()
hyper_params = {'detector_layer_dim': 5,
'monitoring_batches': 2,
'train_stop': 500,
'max_epochs': 7,
'save_path': save_path}
yaml = yaml % (hyper_params)
train_yaml(yaml)
def test_dbm():
skip.skip_if_no_data()
yaml_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../dbm_demo'))
save_path = os.path.dirname(os.path.realpath(__file__))
train(yaml_file_path, save_path)
try:
os.remove("{}/dbm.pkl".format(save_path))
except:
pass
if __name__ == '__main__':
test_dbm()
| bsd-3-clause |
tianyu0915/pythoner.net | pythoner/home/views.py | 3 | 3552 | #encoding:utf-8
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.http import HttpResponse,HttpResponseRedirect,Http404
from django.template import RequestContext
from django.shortcuts import render_to_response as render
from django.contrib.auth.models import User
from django.core.paginator import Paginator,EmptyPage,InvalidPage
from accounts.models import UserProfile
from django.contrib.auth.decorators import login_required
from models import UserRlation,Develop
@login_required
def index(request,page=1):
"""
    User center
"""
pre_url = 'home'
current_page = 'develop'
profile = UserProfile.objects.get(user=request.user)
relation_all = UserRlation.objects.filter(source_user=request.user)
follows_id_list = [r.target_user.id for r in relation_all]
    follows_id_list.append(request.user.id) # include the current user so their own activity is pulled in too
develop_all = Develop.objects.filter(user__in=follows_id_list).order_by('-sub_time')
paginator = Paginator(develop_all,15)
try:
develops = paginator.page(page)
except (EmptyPage,InvalidPage):
develops = paginator.page(paginator.num_pages)
return render('home_index.html',locals(),context_instance=RequestContext(request))
def members(request,page=1):
"""
    Members
"""
member_all = User.objects.order_by('-id').filter(is_active=True)
paginatior = Paginator(member_all,42)
pre_url = 'home/members'
try:
entrys = paginatior.page(page)
except(EmptyPage,InvalidPage):
entrys = paginatior.page(paginatior.num_pages)
return render('account_members.html',locals(),context_instance=RequestContext(request))
def code(request,user_id,page=1):
"""
    Code posted by the user
"""
try:
user = User.objects.get(id=user_id)
except User.DoesNotExist:
raise Http404()
from code.models import Base
url = 'home/%d/code' %user.id
current_page = 'user_code'
code_all = Base.objects.filter(display=True,author=user)
paginator = Paginator(code_all,20)
try:
entrys = paginator.page(page)
except (EmptyPage,InvalidPage):
entrys = paginator.page(paginator.num_pages)
return render('home_code.html',locals(),context_instance=RequestContext(request))
def topic(request,user_id,page=1):
from topic.models import Topic
try:
user = User.objects.get(id=user_id)
except User.DoesNotExist:
raise Http404()
topic_all = Topic.objects.filter(author=user)
paginator = Paginator(topic_all,20)
url = 'home/%d/topic' %user.id
current_page = 'user_topic'
try:
entrys = paginator.page(page)
except (EmptyPage,InvalidPage):
entrys = paginator.page(paginator.num_pages)
return render('home_topic.html',locals(),context_instance=RequestContext(request))
| gpl-3.0 |
dd00/commandergenius | project/jni/python/src/Tools/pybench/Imports.py | 45 | 2947 | from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondPackageImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondSubmoduleImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
def calibrate(self):
for i in xrange(self.rounds):
pass
| lgpl-2.1 |
cmelange/ansible | lib/ansible/modules/database/vertica/vertica_role.py | 37 | 8561 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: vertica_role
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
    - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
required: false
default: null
state:
description:
        - Whether to create C(present) or drop C(absent) a role.
required: false
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def update_roles(role_facts, cursor, role,
existing, required):
for assigned_role in set(existing) - set(required):
cursor.execute("revoke {0} from {1}".format(assigned_role, role))
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
return False
if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
return False
return True
def present(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
cursor.execute("create role {0}".format(role))
update_roles(role_facts, cursor, role, [], assigned_roles)
role_facts.update(get_role_facts(cursor, role))
return True
else:
changed = False
if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], assigned_roles)
changed = True
if changed:
role_facts.update(get_role_facts(cursor, role))
return changed
def absent(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key in role_facts:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], [])
cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
del role_facts[role_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
role=dict(required=True, aliases=['name']),
assigned_roles=dict(default=None, aliases=['assigned_role']),
state=dict(default='present', choices=['absent', 'present']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
role = module.params['role']
assigned_roles = []
if module.params['assigned_roles']:
assigned_roles = module.params['assigned_roles'].split(',')
assigned_roles = filter(None, assigned_roles)
state = module.params['state']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
role_facts = get_role_facts(cursor)
if module.check_mode:
changed = not check(role_facts, role, assigned_roles)
elif state == 'absent':
try:
changed = absent(role_facts, cursor, role, assigned_roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
elif state == 'present':
try:
changed = present(role_facts, cursor, role, assigned_roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
except CannotDropError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg=e)
module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
if __name__ == '__main__':
main()
| gpl-3.0 |
hradec/gaffer | python/GafferAppleseedUI/AppleseedShaderBallUI.py | 11 | 2742 | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferAppleseed
Gaffer.Metadata.registerNode(
GafferAppleseed.AppleseedShaderBall,
"description",
"""
Generates scenes suitable for rendering shader balls with Appleseed.
""",
plugs = {
"environment" : [
"description",
"""
An environment map used for lighting. Should be in latlong
format.
""",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", True,
"path:valid", True,
"path:bookmarks", "texture",
],
"maxSamples" : [
"description",
"""
The maximum number of samples used by appleseed to render the
shader ball. A value of 0 disables the limit.
"""
],
"threads" : [
"description",
"""
The number of threads used by appleseed to render the
shader ball. A value of 0 uses all cores, and negative
		values reserve cores for other uses - for the rest
		of the UI, for instance.
"""
],
}
)
| bsd-3-clause |
JamesMura/sentry | src/sentry/south_migrations/0001_initial.py | 36 | 11182 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupedMessage'
db.create_table('sentry_groupedmessage', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
))
db.send_create_signal('sentry', ['GroupedMessage'])
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
# Adding model 'Message'
db.create_table('sentry_message', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('sentry', ['Message'])
# Adding model 'User'
db.create_table('auth_user', (
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['User'])
def backwards(self, orm):
# Deleting model 'GroupedMessage'
db.delete_table('sentry_groupedmessage')
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
# Deleting model 'Message'
db.delete_table('sentry_message')
# Deleting model 'User'
db.delete_table('sentry_user')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
}
complete_apps = ['sentry']
| bsd-3-clause |
dnlm92/chokoretto | temp/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
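# Illustrative usage sketch, not part of the original module: concrete group
# probers fill self._mProbers in __init__ and then call reset(); the child
# prober classes named below are hypothetical placeholders, not real chardet
# classes.
#
#   class MyGroupProber(CharSetGroupProber):
#       def __init__(self):
#           CharSetGroupProber.__init__(self)
#           self._mProbers = [SomeProberA(), SomeProberB()]
#           self.reset()
#
#   prober = MyGroupProber()
#   prober.feed(raw_bytes)
#   print prober.get_charset_name(), prober.get_confidence()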
| mit |
openstack/networking-sfc | networking_sfc/tests/functional/services/sfc/agent/extensions/test_ovs_agent_sfc_extension.py | 1 | 1138 | # Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.functional.agent.l2 import base
class TestOVSAgentSfcExtension(base.OVSAgentTestFramework):
def setUp(self):
super(TestOVSAgentSfcExtension, self).setUp()
self.config.set_override('extensions', ['sfc'], 'agent')
self.agent = self.create_agent()
def test_run(self):
self.agent._report_state()
agent_state = self.agent.state_rpc.report_state.call_args[0][1]
self.assertEqual(['sfc'], agent_state['configurations']['extensions'])
| apache-2.0 |
S2R2/viper | viper/modules/pymacho/MachOSourceVersionCommand.py | 6 | 1633 | # encoding: utf-8
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack, pack
from viper.modules.pymacho.MachOLoadCommand import MachOLoadCommand
from viper.modules.pymacho.Utils import int64_to_version, green
class MachOSourceVersionCommand(MachOLoadCommand):
version = 0
def __init__(self, macho_file=None, cmd=0):
self.cmd = cmd
if macho_file is not None:
self.parse(macho_file)
def parse(self, macho_file):
self.version = unpack('<Q', macho_file.read(8))[0]
def write(self, macho_file):
before = macho_file.tell()
macho_file.write(pack('<II', self.cmd, 0x0))
macho_file.write(pack('<Q', self.version))
after = macho_file.tell()
macho_file.seek(before+4)
macho_file.write(pack('<I', after-before))
macho_file.seek(after)
def display(self, before=''):
print before + green("[+]")+" LC_SOURCE_VERSION"
print before + "\t- version : %s" % int64_to_version(self.version)
| bsd-3-clause |
IDEO-coLAB/vehicle-rec-microservice | ConnecttoBLE/ScanDevices.py | 1 | 1362 |
# File : ScanDevices.py
# Program uses the BLE microservice to scan the BLE devices and returns the Mac Address of the specified BLE Device
# Author: Sooraj Bopanna
#!/usr/bin/env python
import os
import json
import urllib2
import socket
import ReadOBDSensorName
def setup():
global BLEDeviceName,DeviceFound
BLEDeviceName=ReadOBDSensorName.fileread()
print "BLEDevice = ",BLEDeviceName
def ScanBLEDevices():
setup()
macaddr=""
print "BLE Devince In ScanBLEDevices :%s",BLEDeviceName
Resp = urllib2.urlopen('http://localhost:10500/devices')
Resp_obj = json.load(Resp)
#print " Resp_obj = %s " %Resp_obj
#print "Length of String =%d" %len(Resp_obj)
macaddr="Device Not Found"
for i in Resp_obj:
json_dmps=json.dumps(i)
for key, di in json.loads(json_dmps).iteritems():
if key=="local_name":
if di==BLEDeviceName:
print "BLEDevice JSON String = %s",json_dmps
json_MacAddr=json_dmps
for k, v in json.loads(json_MacAddr).iteritems():
if k=="mac_address":
macaddr=v
print "MacAddr Found for BLEDevice ",macaddr
return macaddr
def loop():
print "In Main Loop :"
MAC=ScanBLEDevices()
print " MAC Address in Main Loop is ",MAC
def destroy():
pass
if __name__=='__main__':
try:
#setup()
loop()
        except KeyboardInterrupt:
destroy()
| mit |
evanma92/routeh | flask/lib/python2.7/site-packages/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
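# Illustrative usage sketch, not part of the original module: the concrete
# subclasses of MultiByteCharSetProber (UTF-8, GB2312, Big5, ...) are normally
# driven through the package's high-level detect() helper rather than used
# directly; the import below assumes the standalone chardet distribution.
#
#   from chardet import detect
#   print detect(b'\xe4\xbd\xa0\xe5\xa5\xbd')  # e.g. {'encoding': 'utf-8', 'confidence': ...}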
| bsd-3-clause |
pombredanne/pyelftools | test/test_utils.py | 2 | 2438 | #-------------------------------------------------------------------------------
# elftools tests
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
import unittest
from random import randint
from elftools.common.py3compat import int2byte, BytesIO
from elftools.common.utils import (parse_cstring_from_stream, merge_dicts,
preserve_stream_pos)
class Test_parse_cstring_from_stream(unittest.TestCase):
def _make_random_bytes(self, n):
return b''.join(int2byte(randint(32, 127)) for i in range(n))
def test_small1(self):
sio = BytesIO(b'abcdefgh\x0012345')
self.assertEqual(parse_cstring_from_stream(sio), b'abcdefgh')
self.assertEqual(parse_cstring_from_stream(sio, 2), b'cdefgh')
self.assertEqual(parse_cstring_from_stream(sio, 8), b'')
def test_small2(self):
sio = BytesIO(b'12345\x006789\x00abcdefg\x00iii')
self.assertEqual(parse_cstring_from_stream(sio), b'12345')
self.assertEqual(parse_cstring_from_stream(sio, 5), b'')
self.assertEqual(parse_cstring_from_stream(sio, 6), b'6789')
def test_large1(self):
text = b'i' * 400 + b'\x00' + b'bb'
sio = BytesIO(text)
self.assertEqual(parse_cstring_from_stream(sio), b'i' * 400)
self.assertEqual(parse_cstring_from_stream(sio, 150), b'i' * 250)
def test_large2(self):
text = self._make_random_bytes(5000) + b'\x00' + b'jujajaja'
sio = BytesIO(text)
self.assertEqual(parse_cstring_from_stream(sio), text[:5000])
self.assertEqual(parse_cstring_from_stream(sio, 2348), text[2348:5000])
class Test_preserve_stream_pos(unittest.TestCase):
def test_basic(self):
sio = BytesIO(b'abcdef')
with preserve_stream_pos(sio):
sio.seek(4)
self.assertEqual(sio.tell(), 0)
sio.seek(5)
with preserve_stream_pos(sio):
sio.seek(0)
self.assertEqual(sio.tell(), 5)
class Test_merge_dicts(unittest.TestCase):
def test_basic(self):
md = merge_dicts({10: 20, 20: 30}, {30: 40, 50: 60})
self.assertEqual(md, {10: 20, 20: 30, 30: 40, 50: 60})
def test_keys_resolve(self):
md = merge_dicts({10: 20, 20: 30}, {20: 40, 50: 60})
self.assertEqual(md, {10: 20, 20: 40, 50: 60})
if __name__ == '__main__':
unittest.main()
| unlicense |
rackerlabs/melange | melange/tests/__init__.py | 1 | 3875 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)
import os
import unittest
import urlparse
import mox
from melange.db import db_api
from melange.common import utils
def melange_root_path():
return os.path.join(os.path.dirname(__file__), "..", "..")
def melange_bin_path(filename="."):
return os.path.join(melange_root_path(), "bin", filename)
def melange_etc_path(filename="."):
return os.path.join(melange_root_path(), "etc", "melange", filename)
def test_config_file():
return melange_etc_path("melange.conf.sample")
class BaseTest(unittest.TestCase):
def setUp(self):
        # maxDiff=None ensures diff output of assert methods is not truncated
self.maxDiff = None
self.mock = mox.Mox()
db_api.clean_db()
super(BaseTest, self).setUp()
def tearDown(self):
self.mock.UnsetStubs()
self.mock.VerifyAll()
super(BaseTest, self).tearDown()
def assertRaisesExcMessage(self, exception, message,
func, *args, **kwargs):
"""This is similar to assertRaisesRegexp in python 2.7"""
try:
func(*args, **kwargs)
self.fail("Expected {0} to raise {1}".format(func,
repr(exception)))
except exception as error:
self.assertIn(message, str(error))
def assertIn(self, expected, actual):
"""This is similar to assertIn in python 2.7"""
self.assertTrue(
expected in actual,
"{0} does not contain {1}".format(repr(actual), repr(expected)))
def assertNotIn(self, expected, actual):
self.assertFalse(
expected in actual,
"{0} does contains {1}".format(repr(actual), repr(expected)))
def assertIsNone(self, actual):
"""This is similar to assertIsNone in python 2.7"""
self.assertEqual(actual, None)
def assertIsNotNone(self, actual):
"""This is similar to assertIsNotNone in python 2.7"""
self.assertNotEqual(actual, None)
def assertItemsEqual(self, expected, actual):
self.assertEqual(sorted(expected), sorted(actual))
def assertModelsEqual(self, expected, actual):
self.assertEqual(sorted(expected, key=lambda model: model.id),
sorted(actual, key=lambda model: model.id))
def assertUrlEqual(self, expected, actual):
self.assertEqual(expected.partition("?")[0], actual.partition("?")[0])
# params ordering might be different in the urls
self.assertEqual(urlparse.parse_qs(expected.partition("?")[2]),
urlparse.parse_qs(actual.partition("?")[2]))
def assertErrorResponse(self, response, error_type, expected_error):
self.assertEqual(response.status_int, error_type().code)
self.assertIn(expected_error, response.body)
def setup_uuid_with(self, fake_uuid):
self.mock.StubOutWithMock(utils, "generate_uuid")
utils.generate_uuid().MultipleTimes().AndReturn(fake_uuid)
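# Illustrative sketch, not part of the original module: how a concrete test
# case might lean on the helpers defined above.
class ExampleUsageTest(BaseTest):
    def test_helpers(self):
        # int() raises ValueError whose message contains "invalid literal"
        self.assertRaisesExcMessage(ValueError, "invalid literal",
                                    int, "not-a-number")
        self.assertIn("10", repr([10, 20]))
        # query string ordering is ignored by assertUrlEqual
        self.assertUrlEqual("http://h/path?a=1&b=2", "http://h/path?b=2&a=1")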
| apache-2.0 |
libclipunl/clipfiles | login.py | 1 | 1960 | # coding=utf-8
import Tkinter as tk
import ttk
import tkSimpleDialog
# FIXME: Give alternate icon
class LoginForm(tkSimpleDialog.Dialog):
def __init__(self, parent, creds, status):
self._creds = creds
self._status = status
tkSimpleDialog.Dialog.__init__(self, parent)
def body(self, master):
self.result = None
self.resizable(False, False)
self.title("Autenticação no CLIP")
ttk.Label(master, text=self._status).grid(row=0, columnspan=2)
ttk.Label(master, text="Identificador CLIP:").grid(row=1, sticky=tk.W)
ttk.Label(master, text="Palavra passe:").grid(row=2, stick=tk.W)
creds = self._creds
self.save_user = tk.IntVar()
self.save_pass = tk.IntVar()
self.e_username = ttk.Entry(master)
self.e_username.grid(row=1, column=1)
if "username" in creds.keys():
self.e_username.delete(0, tk.END)
self.e_username.insert(0, creds["username"])
self.save_user.set(1)
self.e_password = ttk.Entry(master, show="*")
self.e_password.grid(row=2, column=1)
if "password" in creds.keys():
self.e_password.delete(0, tk.END)
self.e_password.insert(0, creds["password"])
self.save_pass.set(1)
c = ttk.Checkbutton(master, text="Guardar identificador", variable=self.save_user)
c.grid(columnspan=2, sticky=tk.W)
c = ttk.Checkbutton(master, text="Guardar palavra-passe", variable=self.save_pass)
c.grid(columnspan=2, sticky=tk.W)
return self.e_username
def apply(self):
self.username = self.e_username.get()
self.password = self.e_password.get()
self.result = {
"username": self.username,
"password": self.password,
"save_user": self.save_user.get(),
"save_pass": self.save_pass.get()
}
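if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: the dialog blocks
    # inside its constructor and leaves the collected values in .result
    # (None if the user cancelled). Requires a display; the credentials and
    # status text below are placeholders.
    root = tk.Tk()
    root.withdraw()
    form = LoginForm(root, {"username": "clipuser"}, u"status message (placeholder)")
    print form.result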
| mit |
savoirfairelinux/django | django/contrib/sites/models.py | 9 | 3732 | import string
from contextlib import suppress
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.signals import pre_delete, pre_save
from django.http.request import split_domain_port
from django.utils.translation import gettext_lazy as _
SITE_CACHE = {}
def _simple_domain_name_validator(value):
"""
Validate that the given value contains no whitespaces to prevent common
typos.
"""
if not value:
return
checks = ((s in value) for s in string.whitespace)
if any(checks):
raise ValidationError(
_("The domain name cannot contain any spaces or tabs."),
code='invalid',
)
class SiteManager(models.Manager):
use_in_migrations = True
def _get_site_by_id(self, site_id):
if site_id not in SITE_CACHE:
site = self.get(pk=site_id)
SITE_CACHE[site_id] = site
return SITE_CACHE[site_id]
def _get_site_by_request(self, request):
host = request.get_host()
try:
# First attempt to look up the site by host with or without port.
if host not in SITE_CACHE:
SITE_CACHE[host] = self.get(domain__iexact=host)
return SITE_CACHE[host]
except Site.DoesNotExist:
# Fallback to looking up site after stripping port from the host.
domain, port = split_domain_port(host)
if domain not in SITE_CACHE:
SITE_CACHE[domain] = self.get(domain__iexact=domain)
return SITE_CACHE[domain]
def get_current(self, request=None):
"""
Return the current Site based on the SITE_ID in the project's settings.
If SITE_ID isn't defined, return the site with domain matching
request.get_host(). The ``Site`` object is cached the first time it's
retrieved from the database.
"""
from django.conf import settings
if getattr(settings, 'SITE_ID', ''):
site_id = settings.SITE_ID
return self._get_site_by_id(site_id)
elif request:
return self._get_site_by_request(request)
raise ImproperlyConfigured(
"You're using the Django \"sites framework\" without having "
"set the SITE_ID setting. Create a site in your database and "
"set the SITE_ID setting or pass a request to "
"Site.objects.get_current() to fix this error."
)
def clear_cache(self):
"""Clear the ``Site`` object cache."""
global SITE_CACHE
SITE_CACHE = {}
def get_by_natural_key(self, domain):
return self.get(domain=domain)
class Site(models.Model):
domain = models.CharField(
_('domain name'),
max_length=100,
validators=[_simple_domain_name_validator],
unique=True,
)
name = models.CharField(_('display name'), max_length=50)
objects = SiteManager()
class Meta:
db_table = 'django_site'
verbose_name = _('site')
verbose_name_plural = _('sites')
ordering = ('domain',)
def __str__(self):
return self.domain
def natural_key(self):
return (self.domain,)
def clear_site_cache(sender, **kwargs):
"""
Clear the cache (if primed) each time a site is saved or deleted.
"""
instance = kwargs['instance']
using = kwargs['using']
with suppress(KeyError):
del SITE_CACHE[instance.pk]
with suppress(KeyError, Site.DoesNotExist):
del SITE_CACHE[Site.objects.using(using).get(pk=instance.pk).domain]
pre_save.connect(clear_site_cache, sender=Site)
pre_delete.connect(clear_site_cache, sender=Site)
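# Illustrative sketch, not part of the original module: typical use of the
# manager defined above from view code; ``request`` is assumed to be the
# incoming HttpRequest.
#
#   from django.contrib.sites.models import Site
#
#   def current_site_label(request):
#       site = Site.objects.get_current(request)  # SITE_ID if set, else host
#       return '%s (%s)' % (site.name, site.domain)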
| bsd-3-clause |
tedelhourani/ansible | lib/ansible/module_utils/facts/timeout.py | 135 | 2215 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import signal
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT = None
DEFAULT_GATHER_TIMEOUT = 10
class TimeoutError(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
msg = 'Timer expired after %s seconds' % globals().get('GATHER_TIMEOUT')
raise TimeoutError(msg)
def wrapper(*args, **kwargs):
local_seconds = seconds
if local_seconds is None:
local_seconds = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(local_seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
# If we were called as @timeout, then the first parameter will be the
# function we are to wrap instead of the number of seconds. Detect this
# and correct it by setting seconds to our default value and return the
# inner decorator function manually wrapped around the function
if callable(seconds):
func = seconds
seconds = None
return decorator(func)
# If we were called as @timeout([...]) then python itself will take
# care of wrapping the inner decorator around the function
return decorator
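if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: the two supported
    # call forms of the decorator above. Relies on SIGALRM, so POSIX only.
    @timeout
    def quick_fact():
        return 'bounded by the default GATHER_TIMEOUT'

    @timeout(2)
    def bounded_fact():
        return 'bounded by an explicit 2 second limit'

    print(quick_fact())
    print(bounded_fact())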
| gpl-3.0 |
sinotradition/meridian | meridian/channels/gallbladder.py | 1 | 2065 | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
from meridian.acupoints import tongziliao232
from meridian.acupoints import tinghui14
from meridian.acupoints import shangguan41
from meridian.acupoints import heyan24
from meridian.acupoints import xuanlu22
from meridian.acupoints import xuanli22
from meridian.acupoints import qubin14
from meridian.acupoints import shuaigu43
from meridian.acupoints import tianchong11
from meridian.acupoints import fubai22
from meridian.acupoints import touqiaoyin241
from meridian.acupoints import wangu23
from meridian.acupoints import benshen32
from meridian.acupoints import yangbai22
from meridian.acupoints import toulinqi221
from meridian.acupoints import muchuang41
from meridian.acupoints import zhengying42
from meridian.acupoints import chengling22
from meridian.acupoints import naokong31
from meridian.acupoints import fengchi12
from meridian.acupoints import jianjing13
from meridian.acupoints import yuanye14
from meridian.acupoints import zhejin21
from meridian.acupoints import riyue44
from meridian.acupoints import jingmen12
from meridian.acupoints import daimai44
from meridian.acupoints import wushu31
from meridian.acupoints import weidao24
from meridian.acupoints import juliao12
from meridian.acupoints import huantiao24
from meridian.acupoints import fengshi14
from meridian.acupoints import zhongdu12
from meridian.acupoints import xiyangguan121
from meridian.acupoints import yanglingquan222
from meridian.acupoints import yangjiao21
from meridian.acupoints import waiqiu41
from meridian.acupoints import guangming12
from meridian.acupoints import yangfu23
from meridian.acupoints import xuanzhong21
from meridian.acupoints import qiuxu11
from meridian.acupoints import zulinqi224
from meridian.acupoints import diwuhui434
from meridian.acupoints import xiaxi21
from meridian.acupoints import zuqiaoyin241
SPELL=u'zúshàoyángdǎnjīng'
CN=u'足少阳胆经'
ABBR=u'GB'
NAME='gallbladder'
FULLNAME='GallbladderChannelofFoot-Shaoyang'
SEQ=8
if __name__ == '__main__':
pass
| apache-2.0 |
mastizada/kuma | vendor/packages/ipython/IPython/kernel/tests/test_engineservice.py | 7 | 2586 | # encoding: utf-8
"""This file contains unittests for the kernel.engineservice.py module.
Things that should be tested:
- Should the EngineService return Deferred objects?
- Run the same tests that are run in shell.py.
- Make sure that the Interface is really implemented.
- The startService and stopService methods.
"""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
# Tell nose to skip this module
__test__ = {}
from twisted.internet import defer
from twisted.application.service import IService
from IPython.kernel import engineservice as es
from IPython.testing.util import DeferredTestCase
from IPython.kernel.tests.engineservicetest import \
IEngineCoreTestCase, \
IEngineSerializedTestCase, \
IEngineQueuedTestCase, \
IEnginePropertiesTestCase
class BasicEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase):
def setUp(self):
self.engine = es.EngineService()
self.engine.startService()
def tearDown(self):
return self.engine.stopService()
class ThreadedEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase):
def setUp(self):
self.engine = es.ThreadedEngineService()
self.engine.startService()
def tearDown(self):
return self.engine.stopService()
class QueuedEngineServiceTest(DeferredTestCase,
IEngineCoreTestCase,
IEngineSerializedTestCase,
IEnginePropertiesTestCase,
IEngineQueuedTestCase):
def setUp(self):
self.rawEngine = es.EngineService()
self.rawEngine.startService()
self.engine = es.IEngineQueued(self.rawEngine)
def tearDown(self):
return self.rawEngine.stopService()
| mpl-2.0 |
kisna72/django | tests/select_for_update/tests.py | 203 | 9626 | from __future__ import unicode_literals
import threading
import time
from multiple_database.routers import TestRouter
from django.conf import settings
from django.db import connection, router, transaction
from django.db.utils import DEFAULT_DB_ALIAS, ConnectionHandler, DatabaseError
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import Person
# We need to set settings.DEBUG to True so we can capture the output SQL
# to examine.
@override_settings(DEBUG=True)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.person = Person.objects.create(name='Reinhardt')
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
Test that a TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
with self.assertRaises(transaction.TransactionManagementError):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
Test that no TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
with self.assertRaises(transaction.TransactionManagementError):
list(people)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.isAlive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
| bsd-3-clause |
imtapps/python-suds-0.4.IMT | suds/sax/parser.py | 180 | 4461 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name), parent=top)
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
currentqname = current.qname()
if name == currentqname:
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(StringIO(string))
sax.parse(source)
timer.stop()
metrics.log.debug('%s\nsax duration: %s', string, timer)
            return handler.nodes[0]
 | lgpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/PIL/ImageQt.py | 26 | 4678 | #
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 ([email protected])
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import PIL
from PIL._util import isPath
from io import BytesIO
qt_is_installed = True
qt_version = None
try:
from PyQt5.QtGui import QImage, qRgba, QPixmap
from PyQt5.QtCore import QBuffer, QIODevice
qt_version = '5'
except ImportError:
try:
from PyQt4.QtGui import QImage, qRgba, QPixmap
from PyQt4.QtCore import QBuffer, QIODevice
qt_version = '4'
except ImportError:
try:
from PySide.QtGui import QImage, qRgba, QPixmap
from PySide.QtCore import QBuffer, QIODevice
qt_version = 'side'
except ImportError:
qt_is_installed = False
def rgb(r, g, b, a=255):
"""(Internal) Turns an RGB color into a Qt compatible color integer."""
    # use qRgba to pack the colors, then mask the result so the full
    # unsigned 32-bit bit pattern is returned.
return (qRgba(r, g, b, a) & 0xffffffff)
# :param im A PIL Image object, or a file name
# (given either as Python string or a PyQt string object)
def fromqimage(im):
buffer = QBuffer()
buffer.open(QIODevice.ReadWrite)
im.save(buffer, 'ppm')
b = BytesIO()
try:
b.write(buffer.data())
except TypeError:
# workaround for Python 2
b.write(str(buffer.data()))
buffer.close()
b.seek(0)
return PIL.Image.open(b)
def fromqpixmap(im):
return fromqimage(im)
# buffer = QBuffer()
# buffer.open(QIODevice.ReadWrite)
# # im.save(buffer)
# # What if png doesn't support some image features like animation?
# im.save(buffer, 'ppm')
# bytes_io = BytesIO()
# bytes_io.write(buffer.data())
# buffer.close()
# bytes_io.seek(0)
# return PIL.Image.open(bytes_io)
def _toqclass_helper(im):
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
if str is bytes:
im = unicode(im.toUtf8(), "utf-8")
else:
im = str(im.toUtf8(), "utf-8")
if isPath(im):
im = PIL.Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i:i+3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
try:
data = im.tobytes("raw", "BGRA")
except SystemError:
# workaround for earlier versions
r, g, b, a = im.split()
im = PIL.Image.merge("RGBA", (b, g, r, a))
format = QImage.Format_ARGB32
else:
raise ValueError("unsupported image mode %r" % im.mode)
# must keep a reference, or Qt will crash!
__data = data or im.tobytes()
return {
'data': __data, 'im': im, 'format': format, 'colortable': colortable
}
##
# A PIL image wrapper for Qt. This is a subclass of PyQt's QImage
# class.
#
# @param im A PIL Image object, or a file name (given either as Python
# string or a PyQt string object).
if qt_is_installed:
class ImageQt(QImage):
def __init__(self, im):
im_data = _toqclass_helper(im)
QImage.__init__(self,
im_data['data'], im_data['im'].size[0],
im_data['im'].size[1], im_data['format'])
if im_data['colortable']:
self.setColorTable(im_data['colortable'])
def toqimage(im):
return ImageQt(im)
def toqpixmap(im):
# # This doesn't work. For now using a dumb approach.
# im_data = _toqclass_helper(im)
# result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])
# result.loadFromData(im_data['data'])
# Fix some strange bug that causes
if im.mode == 'RGB':
im = im.convert('RGBA')
qimage = toqimage(im)
return QPixmap.fromImage(qimage)
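# Illustrative sketch, not part of the original module: round-tripping between
# PIL and Qt with the helpers above. Requires one of the supported Qt bindings,
# and the QPixmap conversion additionally needs a running QApplication.
#
#   from PIL import Image
#   im = Image.new("RGB", (16, 16), "red")
#   qim = toqimage(im)        # PIL -> QImage
#   back = fromqimage(qim)    # QImage -> PIL
#   pix = toqpixmap(im)       # PIL -> QPixmap (QApplication required)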
| mit |
pombredanne/http-repo.gem5.org-gem5- | src/mem/slicc/util.py | 83 | 3124 | # Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
class PairContainer(object):
def __init__(self, pairs=None):
self.pairs = {}
if pairs:
self.pairs.update(pairs)
def __contains__(self, item):
return item in self.pairs
def __getitem__(self, item):
return self.pairs[item]
def __setitem__(self, item, value):
self.pairs[item] = value
def get(self, item, failobj=None):
return self.pairs.get(item, failobj)
class Location(object):
def __init__(self, filename, lineno, no_warning=False):
if not isinstance(filename, basestring):
raise AttributeError, \
"filename must be a string, found '%s'" % (type(filename), )
if not isinstance(lineno, (int, long)):
raise AttributeError, \
"filename must be an integer, found '%s'" % (type(lineno), )
self.filename = filename
self.lineno = lineno
self.no_warning = no_warning
def __str__(self):
return '%s:%d' % (os.path.basename(self.filename), self.lineno)
def warning(self, message, *args):
if self.no_warning:
return
if args:
message = message % args
#raise Exception, "%s: Warning: %s" % (self, message)
print >>sys.stderr, "%s: Warning: %s" % (self, message)
def error(self, message, *args):
if args:
message = message % args
raise Exception, "%s: Error: %s" % (self, message)
sys.exit("\n%s: Error: %s" % (self, message))
__all__ = [ 'PairContainer', 'Location' ]
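# Illustrative sketch, not part of the original module: how SLICC passes
# typically combine the two helpers above.
def _example_usage():
    pairs = PairContainer({'latency': '1'})
    pairs['buffer_size'] = '16'
    loc = Location('L1Cache.sm', 42)
    if 'latency' not in pairs:
        loc.error('missing required latency pair')
    loc.warning('buffer_size defaulted to %s', pairs['buffer_size'])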
| bsd-3-clause |
SeerLabs/new-csx-extractor | run_extraction.py | 2 | 1500 | from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import csxextract.extractors.grobid as grobid
import csxextract.extractors.pdfbox as pdfbox
import csxextract.extractors.tei as tei
import csxextract.extractors.parscit as parscit
import csxextract.extractors.figures as figures
import csxextract.extractors.algorithms as algorithms
import csxextract.filters as filters
def get_extraction_runner():
runner = ExtractionRunner()
runner.enable_logging('~/logs/results', '~/logs/runnables')
runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
runner.add_runnable(grobid.GrobidHeaderTEIExtractor)
runner.add_runnable(tei.TEItoHeaderExtractor)
runner.add_runnable(parscit.ParsCitCitationExtractor)
runner.add_runnable(figures.PDFFiguresExtractor)
runner.add_runnable(algorithms.AlgorithmsExtractor)
return runner
if __name__ == '__main__':
runner = get_extraction_runner()
argc = len(sys.argv)
if argc == 2:
file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
runner.run_from_file(sys.argv[1], file_prefix=file_name)
elif argc == 3:
file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2], file_prefix=file_name)
else:
print("USAGE: python {0} path_to_pdf [output_directory]".format(sys.argv[0]))
| apache-2.0 |
jannson/Similar | gistfile2.py | 1 | 3594 | import sys, os, os.path
import logging
import pprint
import gensim
from gensim import models, corpora, similarities
from collections import defaultdict
import json
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster, ward
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.cluster import KMeans
import numpy as np
import codecs
django_path = '/home/gan/project/source/svn_project/pull/1'
sys.path.insert(13, django_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pull.settings'
from django.db.models import Count
from django.db.models import Q
from pull.models import HtmlContent
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(levelname)s:%(name)s:%(threadName)s: %(message)s")
logger = logging.getLogger(__name__)
GENSIM_DATA_ROOT = "/home/gan/project/source/testgit/Similar"
LSI_MODEL_FILE = os.path.join(GENSIM_DATA_ROOT, "wiki_en_model.lsi")
TFIDF_MODEL_FILE = os.path.join(GENSIM_DATA_ROOT, "wiki_en_tfidf.model")
CORPUS_FILE = os.path.join(GENSIM_DATA_ROOT, "wiki_en_corpus.mm")
DICTIONARY_FILE = os.path.join(GENSIM_DATA_ROOT, "wiki_en_wordids.txt")
SHARD_DIR = os.path.join('/tmp', "index_shards")
def load_gensim_tools():
"""Load serialized objects."""
dictionary = corpora.Dictionary.load_from_text(DICTIONARY_FILE)
# TODO chain transformations
tfidf_transformation = models.tfidfmodel.TfidfModel.load(TFIDF_MODEL_FILE)
lsi_transformation = models.lsimodel.LsiModel.load(LSI_MODEL_FILE)
return dictionary, tfidf_transformation, lsi_transformation
def create_corpus():
#return [word2id.doc2bow(gensim.utils.tokenize(text, lower=True)) for text in docs]
return corpora.MmCorpus(CORPUS_FILE)
def create_index(corpus, tfidf_transformation, lsi_transformation):
"""Create an index given a corpus and transformation(s).
:param corpus: The index corpus (documents against which new unseen documents will be compared)
:param tfidf_transformation: A vector space transformation model
:param lsi_transformation: A vector space transformation model
"""
# Ensure a dir exists to store the shards
index_dir = SHARD_DIR
if not os.path.exists(index_dir):
os.makedirs(index_dir)
# Create the index
index = similarities.Similarity(index_dir + "/shard",
corpus=lsi_transformation[tfidf_transformation[corpus]],
num_features=400, # TODO don't hard code this
)
return index
if __name__ == "__main__":
dictionary, tfidf_transformation, lsi_transformation = load_gensim_tools()
corpus = create_corpus()
index = create_index(corpus, tfidf_transformation, lsi_transformation)
tfidf_vec_doc = tfidf_transformation[corpus]
lsi_vec_doc = lsi_transformation[tfidf_vec_doc]
#lsi_transformation.print_topics(10)
index_doc = index[lsi_vec_doc]
sims = [s for s in index_doc]
cluster_centers_indices, labels = affinity_propagation(sims)
#print 'start kmeans calcing'
#k = KMeans(init='k-means++', n_init=10)
#k.fit(sims)
#centroids = k.cluster_centers_
#labels = k.labels_
docs = []
for obj in HtmlContent.objects.filter(~Q(retry=3)).filter(~Q(content='')):
docs.append(obj.title.split('|')[0])
doc_arr = np.array(range(len(labels)))
with codecs.open('zzz','w','utf-8') as file:
for i in range(np.max(labels)):
output = 'group:'+str(i+1)+'\n'
for doc_num in doc_arr[labels==i]:
output += docs[doc_num] + ' / '
output += '\n'
file.write(output)
| mit |
mancoast/CPythonPyc_test | fail/342_test_platform.py | 88 | 12413 | from unittest import mock
import os
import platform
import subprocess
import sys
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self): # issue3762
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["Path"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["Path"])
def get(python):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_mercurial = sys._mercurial
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._mercurial = self.save_mercurial
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
):
# branch and revision are not "parsed", but fetched
# from sys._mercurial. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, subversion, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if subversion is None:
if hasattr(sys, "_mercurial"):
del sys._mercurial
else:
sys._mercurial = subversion
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
res = platform.dist()
def test_libc_ver(self):
import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
else:
executable = sys.executable
res = platform.libc_ver(executable)
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
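# os.popen().close() returns the child's exit code directly on Windows, but a
# wait()-style status word on POSIX, where the exit code sits in the high byte
# (hence the shift below).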
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
# Issue #17429
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
def test_main():
support.run_unittest(
PlatformTest
)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
sysadmin75/ansible | lib/ansible/plugins/lookup/template.py | 27 | 4601 | # Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: template
author: Michael DeHaan <[email protected]>
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
options:
_terms:
description: list of files to template
convert_data:
type: bool
description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
version_added: '2.8'
type: str
variable_end_string:
description: The string marking the end of a print statement.
default: '}}'
version_added: '2.8'
type: str
"""
EXAMPLES = """
- name: show templating results
debug:
msg: "{{ lookup('template', './some_template.j2') }}"
- name: show templating results with different variable start and end string
debug:
msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
"""
RETURN = """
_raw:
description: file(s) content after templating
"""
from copy import deepcopy
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
convert_data_p = kwargs.get('convert_data', True)
lookup_template_vars = kwargs.get('template_vars', {})
ret = []
variable_start_string = kwargs.get('variable_start_string', None)
variable_end_string = kwargs.get('variable_end_string', None)
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
b_template_data, show_data = self._loader._get_file_contents(lookupfile)
template_data = to_text(b_template_data, errors='surrogate_or_strict')
# set jinja2 internal search path for includes
searchpath = variables.get('ansible_search_path', [])
if searchpath:
# our search paths aren't actually the proper ones for jinja includes.
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
searchpath.insert(0, os.path.dirname(lookupfile))
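# Illustrative example (assumed paths): a search path of ['/play/roles/x']
# ends up as [dirname(lookupfile), '/play/roles/x/templates', '/play/roles/x'],
# so Jinja2 includes are resolved relative to the template's own directory first.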
# The template will have access to all existing variables,
# plus some added by ansible (e.g., template_{path,mtime}),
# plus anything passed to the lookup with the template_vars=
# argument.
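# For example (hypothetical call):
#   lookup('template', 'motd.j2', template_vars=dict(greeting='hi'))
# makes `greeting` resolvable inside motd.j2.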
vars = deepcopy(variables)
vars.update(generate_ansible_template_vars(lookupfile))
vars.update(lookup_template_vars)
# do the templating
with self._templar.set_temporary_context(variable_start_string=variable_start_string,
variable_end_string=variable_end_string,
available_variables=vars, searchpath=searchpath):
res = self._templar.template(template_data, preserve_trailing_newlines=True,
convert_data=convert_data_p, escape_backslashes=False)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
| gpl-3.0 |
olgabrani/synnefo | snf-cyclades-app/synnefo/db/migrations/0085_nic_build_state.py | 10 | 18323 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
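# Normalize existing NIC rows: the transient "BUILDING" state becomes "BUILD",
# and an empty firewall profile string is stored as NULL instead.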
orm.NetworkInterface.objects.filter(state="BUILDING").update(state="BUILD")
orm.NetworkInterface.objects.filter(firewall_profile="").update(firewall_profile=None)
def backwards(self, orm):
"Write your backwards methods here."
orm.NetworkInterface.objects.filter(state="BUILD").update(state="BUILDING")
orm.NetworkInterface.objects.filter(firewall_profile=None).update(firewall_profile="")
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Network']"}),
'nic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.NetworkInterface']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Subnet']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ip_pools'", 'null': 'True', 'to': "orm['db.Subnet']"})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'external_router': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'floating_ip_pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'device_owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'security_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.SecurityGroup']", 'null': 'True', 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.quotaholderserial': {
'Meta': {'ordering': "['serial']", 'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.securitygroup': {
'Meta': {'object_name': 'SecurityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'db.subnet': {
'Meta': {'object_name': 'Subnet'},
'cidr': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_nameservers': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'host_routes': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subnets'", 'to': "orm['db.Network']"})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']", 'on_delete': 'models.PROTECT'}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'BUILD'", 'max_length': '30'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'task_job_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'ordering': "['-created']", 'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db']
symmetrical = True
| gpl-3.0 |
giovaroma/bootstrap4 | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | 1786 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
| mit |
MaxKellermann/xbmc | tools/EventClients/Clients/PS3BDRemote/ps3_remote.py | 38 | 7006 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# This is a quick port of brandonj's PS3 remote script to use the event server
# for sending input events.
#
# The original script and documentation regarding the remote can be found at:
# http://forum.kodi.tv/showthread.php?tid=28765
#
#
# TODO:
# 1. Send keepalive ping at least once every 60 seconds to prevent timeouts
# 2. Permanent pairing
# 3. Detect if Kodi has been restarted (non-trivial until broadcasting is
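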
# implemented, until then maybe the HELO packet could be used instead of
# PING as keepalive
#
import sys
try:
# try loading modules from source directory
sys.path.append("../../lib/python")
from xbmcclient import *
from ps3.keymaps import keymap_remote as g_keymap # look here to change the keymapping
from bt.bt import *
ICON_PATH = "../../icons/"
except:
# fallback to system wide modules
from kodi.xbmcclient import *
from kodi.ps3.keymaps import keymap_remote as g_keymap # look here to change the keymapping
from kodi.bt.bt import *
from kodi.defs import *
import os
import time
xbmc = None
bticon = ICON_PATH + "/bluetooth.png"
def get_remote_address(remote, target_name = "BD Remote Control"):
global xbmc
target_connected = False
target_address = None
while target_connected is False:
xbmc.send_notification("Action Required!",
"Hold Start+Enter on your remote.",
bticon)
print("Searching for %s" % target_name)
print("(Hold Start + Enter on remote to make it discoverable)")
time.sleep(2)
if not target_address:
try:
nearby_devices = bt_discover_devices()
except Exception as e:
print("Error performing bluetooth discovery")
print(str(e))
xbmc.send_notification("Error", "Unable to find devices.", bticon)
time.sleep(5)
continue
for bdaddr in nearby_devices:
bname = bt_lookup_name( bdaddr )
addr = bt_lookup_addr ( bdaddr )
print("%s (%s) in range" % (bname,addr))
if target_name == bname:
target_address = addr
break
if target_address is not None:
print("Found %s with address %s" % (target_name, target_address))
xbmc.send_notification("Found Device",
"Pairing %s, please wait." % target_name,
bticon)
print("Attempting to pair with remote")
try:
remote.connect((target_address,19))
target_connected = True
print("Remote Paired.\a")
xbmc.send_notification("Pairing Successful",
"Your remote was successfully "\
"paired and is ready to be used.",
bticon)
except:
del remote
remote = bt_create_socket()
target_address = None
xbmc.send_notification("Pairing Failed",
"An error occurred while attempting to "\
"pair.", bticon)
print("ERROR - Could Not Connect. Trying again...")
time.sleep(2)
else:
xbmc.send_notification("Error", "No remotes were found.", bticon)
print("Could not find BD Remote Control. Trying again...")
time.sleep(2)
return (remote,target_address)
def usage():
print("""
PS3 Blu-Ray Remote Control Client for XBMC v0.1
Usage: ps3_remote.py <address> [port]
address => address of system that XBMC is running on
("localhost" if it is this machine)
port => port to send packets to
(default 9777)
""")
def process_keys(remote, xbmc):
"""
Return codes:
0 - key was processed normally
2 - socket read timeout
3 - PS and then Skip Plus was pressed (sequentially)
4 - PS and then Skip Minus was pressed (sequentially)
FIXME: move to enums
"""
done = 0
try:
xbmc.previous_key
except:
xbmc.previous_key = ""
xbmc.connect()
datalen = 0
try:
data = remote.recv(1024)
datalen = len(data)
except Exception as e:
if str(e)=="timed out":
return 2
time.sleep(2)
# some other read exception occurred, so raise it
raise e
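# A complete report from the BD remote is 13 bytes; the key code is the sixth
# byte of the payload ("ff" means the previously pressed key was released).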
if datalen == 13:
keycode = data.encode("hex")[10:12]
if keycode == "ff":
xbmc.release_button()
return done
try:
# if the user presses the PS button followed by skip + or skip -
# return different codes.
if xbmc.previous_key == "43":
xbmc.previous_key = keycode
if keycode == "31": # skip +
return 3
elif keycode == "30": # skip -
return 4
# save previous key press
xbmc.previous_key = keycode
if g_keymap[keycode]:
xbmc.send_remote_button(g_keymap[keycode])
except Exception as e:
print("Unknown data: %s" % str(e))
return done
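# A possible constants sketch for the return codes above (hypothetical, not
# part of the original script), as hinted by the FIXME in process_keys():
#
#     KEY_HANDLED, READ_TIMEOUT, PS_SKIP_PLUS, PS_SKIP_MINUS = 0, 2, 3, 4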
def main():
global xbmc, bticon
host = "127.0.0.1"
port = 9777
if len(sys.argv)>1:
try:
host = sys.argv[1]
port = sys.argv[2]
except:
pass
else:
return usage()
loop_forever = True
xbmc = XBMCClient("PS3 Bluetooth Remote",
icon_file=bticon)
while loop_forever is True:
target_connected = False
remote = bt_create_socket()
xbmc.connect(host, port)
(remote,target_address) = get_remote_address(remote)
while True:
if process_keys(remote, xbmc):
break
print("Disconnected.")
try:
remote.close()
except:
print("Cannot close.")
if __name__=="__main__":
main()
| gpl-2.0 |
twidi/satchmo | satchmo/apps/product/modules/downloadable/migrations/0001_split.py | 5 | 19016 | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
('product', '0010_add_discountable_categories'),
)
needed_by = (
('product', '0011_split_products'),
)
def forwards(self, orm):
db.rename_table('product_downloadableproduct', 'downloadable_downloadableproduct')
# check if the table exists; might be a fresh, post 0.9 installation
try:
from django.db import connection
cursor = connection.cursor()
if not cursor:
raise Exception
table_names = connection.introspection.get_table_list(cursor)
except:
raise Exception("unable to determine if the table 'shop_downloadlink' exists")
else:
if not 'shop_downloadlink' in table_names:
# create the table
# create commands were obtained from a fresh --initial migration
db.create_table('downloadable_downloadlink', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('downloadable_product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['downloadable.DownloadableProduct'])),
('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shop.Order'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('num_attempts', self.gf('django.db.models.fields.IntegerField')()),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')()),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('downloadable', ['DownloadLink'])
else:
db.rename_table('shop_downloadlink', 'downloadable_downloadlink')
def backwards(self, orm):
db.rename_table('downloadable_downloadableproduct', 'product_downloadableproduct')
db.rename_table('downloadable_downloadlink', 'shop_downloadlink')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contact.contact': {
'Meta': {'object_name': 'Contact'},
'create_date': ('django.db.models.fields.DateField', [], {}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '500', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Organization']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactRole']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'contact.contactorganization': {
'Meta': {'object_name': 'ContactOrganization'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.contactorganizationrole': {
'Meta': {'object_name': 'ContactOrganizationRole'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.contactrole': {
'Meta': {'object_name': 'ContactRole'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.organization': {
'Meta': {'object_name': 'Organization'},
'create_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactOrganizationRole']", 'null': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactOrganization']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'downloadable.downloadableproduct': {
'Meta': {'object_name': 'DownloadableProduct'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expire_minutes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'num_allowed_downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'downloadable.downloadlink': {
'Meta': {'object_name': 'DownloadLink'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'downloadable_product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['downloadable.DownloadableProduct']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'num_attempts': ('django.db.models.fields.IntegerField', [], {}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Order']"}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {})
},
'product.category': {
'Meta': {'unique_together': "(('site', 'slug'),)", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'related_categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_categories'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'product.product': {
'Meta': {'unique_together': "(('site', 'sku'), ('site', 'slug'))", 'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'also_purchased': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'also_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Category']", 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'height_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_in_stock': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'length_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_items': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'shipclass': ('django.db.models.fields.CharField', [], {'default': "'DEFAULT'", 'max_length': '10'}),
'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'taxClass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.TaxClass']", 'null': 'True', 'blank': 'True'}),
'taxable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'total_sold': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'weight_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'width_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'product.taxclass': {
'Meta': {'object_name': 'TaxClass'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'shop.order': {
'Meta': {'object_name': 'Order'},
'bill_addressee': ('django.db.models.fields.CharField', [], {'max_length': '61', 'blank': 'True'}),
'bill_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'bill_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'bill_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'bill_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'bill_street1': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'bill_street2': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Contact']"}),
'discount': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'}),
'discount_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ship_addressee': ('django.db.models.fields.CharField', [], {'max_length': '61', 'blank': 'True'}),
'ship_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ship_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'ship_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'ship_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ship_street1': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'ship_street2': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'shipping_cost': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'}),
'shipping_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'shipping_discount': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'shipping_model': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'sub_total': ('satchmo_utils.fields.CurrencyField', [], {'display_decimal': '4', 'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'}),
'tax': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'total': ('satchmo_utils.fields.CurrencyField', [], {'display_decimal': '4', 'null': 'True', 'max_digits': '18', 'decimal_places': '10', 'blank': 'True'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['downloadable']
| bsd-3-clause |
sigma-random/asuswrt-merlin | release/src/router/samba36/lib/testtools/testtools/tests/test_helpers.py | 20 | 3580 | # Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
from testtools import TestCase
from testtools.helpers import (
try_import,
try_imports,
)
from testtools.matchers import (
Equals,
Is,
)
class TestTryImport(TestCase):
def test_doesnt_exist(self):
# try_import('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_import('doesntexist', marker)
self.assertThat(result, Is(marker))
def test_None_is_default_alternative(self):
# try_import('thing') returns None if 'thing' doesn't exist.
result = try_import('doesntexist')
self.assertThat(result, Is(None))
def test_existing_module(self):
# try_import('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_import('os', object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_import('os.path', object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_import('os.doesntexist', marker)
self.assertThat(result, Is(marker))
def test_object_from_module(self):
# try_import('thing.object') imports 'thing' and returns
# 'thing.object' if 'thing' is a module and 'object' is not.
result = try_import('os.path.join')
import os
self.assertThat(result, Is(os.path.join))
class TestTryImports(TestCase):
def test_doesnt_exist(self):
# try_imports('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_imports(['doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback(self):
result = try_imports(['doesntexist', 'os'])
import os
self.assertThat(result, Is(os))
def test_None_is_default_alternative(self):
# try_imports('thing') returns None if 'thing' doesn't exist.
e = self.assertRaises(
ImportError, try_imports, ['doesntexist', 'noreally'])
self.assertThat(
str(e),
Equals("Could not import any of: doesntexist, noreally"))
def test_existing_module(self):
# try_imports('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_imports(['os'], object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_imports(['os.path'], object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_imports(['os.doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback_submodule(self):
result = try_imports(['os.doesntexist', 'os.path'])
import os
self.assertThat(result, Is(os.path))
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| gpl-2.0 |