repo_name stringlengths 5-100 | path stringlengths 4-375 | copies stringclasses 991 values | size stringlengths 4-7 | content stringlengths 666-1M | license stringclasses 15 values |
---|---|---|---|---|---|
wisperwinter/OpenCC | deps/gtest-1.7.0/test/gtest_color_test.py | 3259 | 4911 |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
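# e.g. color_flag='yes' yields args == ['--gtest_color=yes']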
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
erdincay/youtube-dl | youtube_dl/extractor/bbc.py | 52 | 32832 |
# coding: utf-8
from __future__ import unicode_literals
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_duration,
parse_iso8601,
)
from ..compat import compat_HTTPError
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
_MEDIASELECTOR_URLS = [
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
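# The URLs above are tried in order by _download_media_selector(); a
# 'notukerror' media selection error falls through to the next URL, while
# any other error aborts (see _download_media_selector below).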
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Kaleidoscope, Leonard Cohen',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
'duration': 1740,
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
'note': 'Audio',
'info_dict': {
'id': 'p02frcch',
'ext': 'flv',
'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
'duration': 3507,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
'info_dict': {
'id': 'p02n76xf',
'ext': 'flv',
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
'info_dict': {
'id': 'b05zmgw1',
'ext': 'flv',
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
'title': 'Royal Academy Summer Exhibition',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
}
]
class MediaSelectionError(Exception):
def __init__(self, id):
self.id = id
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
def _extract_connection(self, connection, programme_id):
formats = []
protocol = connection.get('protocol')
supplier = connection.get('supplier')
if protocol == 'http':
href = connection.get('href')
transfer_format = connection.get('transferFormat')
# ASX playlist
if supplier == 'asx':
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
formats.append({
'url': ref,
'format_id': 'ref%s_%s' % (i, supplier),
})
# Skip DASH until supported
elif transfer_format == 'dash':
pass
# Direct link
else:
formats.append({
'url': href,
'format_id': supplier,
})
elif protocol == 'rtmp':
application = connection.get('application', 'ondemand')
auth_string = connection.get('authString')
identifier = connection.get('identifier')
server = connection.get('server')
formats.append({
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
'play_path': identifier,
'app': '%s?%s' % (application, auth_string),
'page_url': 'http://www.bbc.co.uk',
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
'rtmp_live': False,
'ext': 'flv',
'format_id': supplier,
})
return formats
def _extract_items(self, playlist):
return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
def _extract_medias(self, media_selection):
error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
def _extract_connections(self, media):
return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
def _extract_video(self, media, programme_id):
formats = []
vbr = int_or_none(media.get('bitrate'))
vcodec = media.get('encoding')
service = media.get('service')
width = int_or_none(media.get('width'))
height = int_or_none(media.get('height'))
file_size = int_or_none(media.get('media_file_size'))
for connection in self._extract_connections(media):
conn_formats = self._extract_connection(connection, programme_id)
for format in conn_formats:
format.update({
'format_id': '%s_%s' % (service, format['format_id']),
'width': width,
'height': height,
'vbr': vbr,
'vcodec': vcodec,
'filesize': file_size,
})
formats.extend(conn_formats)
return formats
def _extract_audio(self, media, programme_id):
formats = []
abr = int_or_none(media.get('bitrate'))
acodec = media.get('encoding')
service = media.get('service')
for connection in self._extract_connections(media):
conn_formats = self._extract_connection(connection, programme_id)
for format in conn_formats:
format.update({
'format_id': '%s_%s' % (service, format['format_id']),
'abr': abr,
'acodec': acodec,
})
formats.extend(conn_formats)
return formats
def _get_subtitles(self, media, programme_id):
subtitles = {}
for connection in self._extract_connections(media):
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
subtitles[lang] = [
{
'url': connection.get('href'),
'ext': 'ttml',
},
]
return subtitles
def _raise_extractor_error(self, media_selection_error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
expected=True)
def _download_media_selector(self, programme_id):
last_exception = None
for mediaselector_url in self._MEDIASELECTOR_URLS:
try:
return self._download_media_selector_url(
mediaselector_url % programme_id, programme_id)
except BBCCoUkIE.MediaSelectionError as e:
if e.id == 'notukerror':
last_exception = e
continue
self._raise_extractor_error(e)
self._raise_extractor_error(last_exception)
def _download_media_selector_url(self, url, programme_id=None):
try:
media_selection = self._download_xml(
url, programme_id, 'Downloading media selection XML')
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
else:
raise
return self._process_media_selector(media_selection, programme_id)
def _process_media_selector(self, media_selection, programme_id):
formats = []
subtitles = None
for media in self._extract_medias(media_selection):
kind = media.get('kind')
if kind == 'audio':
formats.extend(self._extract_audio(media, programme_id))
elif kind == 'video':
formats.extend(self._extract_video(media, programme_id))
elif kind == 'captions':
subtitles = self.extract_subtitles(media, programme_id)
return formats, subtitles
def _download_playlist(self, playlist_id):
try:
playlist = self._download_json(
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
playlist_id, 'Downloading playlist JSON')
version = playlist.get('defaultAvailableVersion')
if version:
smp_config = version['smpConfig']
title = smp_config['title']
description = smp_config['summary']
for item in smp_config['items']:
kind = item['kind']
if kind != 'programme' and kind != 'radioProgramme':
continue
programme_id = item.get('vpid')
duration = int_or_none(item.get('duration'))
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
except ExtractorError as ee:
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
raise
# fallback to legacy playlist
return self._process_legacy_playlist(playlist_id)
def _process_legacy_playlist_url(self, url, display_id):
playlist = self._download_legacy_playlist_url(url, display_id)
return self._extract_from_legacy_playlist(playlist, display_id)
def _process_legacy_playlist(self, playlist_id):
return self._process_legacy_playlist_url(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
def _download_legacy_playlist_url(self, url, playlist_id=None):
return self._download_xml(
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
msg = 'Episode %s is not yet available' % playlist_id
elif reason == 'postAvailability':
msg = 'Episode %s is no longer available' % playlist_id
elif reason == 'noMedia':
msg = 'Episode %s is not currently available' % playlist_id
else:
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
raise ExtractorError(msg, expected=True)
for item in self._extract_items(playlist):
kind = item.get('kind')
if kind != 'programme' and kind != 'radioProgramme':
continue
title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
def get_programme_id(item):
def get_from_attributes(item):
for p in ('identifier', 'group'):
value = item.get(p)
if value and re.match(r'^[pb][\da-z]{7}$', value):
return value
programme_id = get_from_attributes(item)
if programme_id:
return programme_id
mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator')
if mediator is not None:
return get_from_attributes(mediator)
programme_id = get_programme_id(item)
duration = int_or_none(item.get('duration'))
# TODO: programme_id can be None and media items can be incorporated right inside
# playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
# as f4m and m3u8
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
def _real_extract(self, url):
group_id = self._match_id(url)
webpage = self._download_webpage(url, group_id, 'Downloading video page')
programme_id = None
tviplayer = self._search_regex(
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
webpage, 'player', default=None)
if tviplayer:
player = self._parse_json(tviplayer, group_id).get('player', {})
duration = int_or_none(player.get('duration'))
programme_id = player.get('vpid')
if not programme_id:
programme_id = self._search_regex(
r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
title = self._og_search_title(webpage)
description = self._search_regex(
r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
webpage, 'description', fatal=False)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
class BBCIE(BBCCoUkIE):
IE_NAME = 'bbc'
IE_DESC = 'BBC'
_VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
_MEDIASELECTOR_URLS = [
# Provides more formats, namely direct mp4 links, but fails on some videos with
# notukerror for non UK (?) users (e.g.
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
# Provides fewer formats, but works everywhere for everybody (hopefully)
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
]
_TESTS = [{
# article with multiple videos embedded with data-media-meta containing
# playlist.sxml, externalId and no direct video links
'url': 'http://www.bbc.com/news/world-europe-32668511',
'info_dict': {
'id': 'world-europe-32668511',
'title': 'Russia stages massive WW2 parade despite Western boycott',
'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
},
'playlist_count': 2,
}, {
# article with multiple videos embedded with data-media-meta (more videos)
'url': 'http://www.bbc.com/news/business-28299555',
'info_dict': {
'id': 'business-28299555',
'title': 'Farnborough Airshow: Video highlights',
'description': 'BBC reports and video highlights at the Farnborough Airshow.',
},
'playlist_count': 9,
'skip': 'Save time',
}, {
# article with multiple videos embedded with `new SMP()`
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
'info_dict': {
'id': '3662a707-0af9-3149-963f-47bea720b460',
'title': 'BBC Blogs - Adam Curtis - BUGGER',
},
'playlist_count': 18,
}, {
# single video embedded with mediaAssetPage.init()
'url': 'http://www.bbc.com/news/world-europe-32041533',
'info_dict': {
'id': 'p02mprgb',
'ext': 'mp4',
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
'duration': 47,
'timestamp': 1427219242,
'upload_date': '20150324',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with single video embedded with data-media-meta containing
# direct video links (for now these are extracted) and playlist.xml (with
# media items as f4m and m3u8 - currently unsupported)
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
'info_dict': {
'id': '150615_telabyad_kentin_cogu',
'ext': 'mp4',
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
'duration': 47,
'timestamp': 1434397334,
'upload_date': '20150615',
},
'params': {
'skip_download': True,
}
}, {
# single video embedded with mediaAssetPage.init() (regional section)
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
'info_dict': {
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
'ext': 'mp4',
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
'duration': 87,
'timestamp': 1434713142,
'upload_date': '20150619',
},
'params': {
'skip_download': True,
}
}, {
# single video from video playlist embedded with vxp-playlist-data JSON
'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
'info_dict': {
'id': 'p02w6qjc',
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
},
'params': {
'skip_download': True,
}
}, {
# single video story with digitalData
'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
'info_dict': {
'id': 'p02q6gc4',
'ext': 'flv',
'title': 'Sri Lanka’s spicy secret',
'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
'timestamp': 1437674293,
'upload_date': '20150723',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video story without digitalData
'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
'info_dict': {
'id': 'p018zqqg',
'ext': 'mp4',
'title': 'Hyundai Santa Fe Sport: Rock star',
'description': 'md5:b042a26142c4154a6e472933cf20793d',
'timestamp': 1368473503,
'upload_date': '20130513',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video with playlist.sxml URL
'url': 'http://www.bbc.com/sport/0/football/33653409',
'info_dict': {
'id': 'p02xycnp',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'md5:398fca0e2e701c609d726e034fa1fc89',
'duration': 140,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video with playlist URL from weather section
'url': 'http://www.bbc.com/weather/features/33601775',
'only_matching': True,
}, {
# custom redirection to www.bbc.com
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)
def _extract_from_media_meta(self, media_meta, video_id):
# Direct links to media in media metadata (e.g.
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
# TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
source_files = media_meta.get('sourceFiles')
if source_files:
return [{
'url': f['url'],
'format_id': format_id,
'ext': f.get('encoding'),
'tbr': float_or_none(f.get('bitrate'), 1000),
'filesize': int_or_none(f.get('filesize')),
} for format_id, f in source_files.items() if f.get('url')], []
programme_id = media_meta.get('externalId')
if programme_id:
return self._download_media_selector(programme_id)
# Process playlist.sxml as legacy playlist
href = media_meta.get('href')
if href:
playlist = self._download_legacy_playlist_url(href)
_, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
return formats, subtitles
return [], []
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
timestamp = parse_iso8601(self._search_regex(
[r'"datePublished":\s*"([^"]+)',
r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
r'itemprop="datePublished"[^>]+datetime="([^"]+)"'],
webpage, 'date', default=None))
# single video with playlist.sxml URL (e.g. http://www.bbc.com/sport/0/football/3365340ng)
playlist = self._search_regex(
r'<param[^>]+name="playlist"[^>]+value="([^"]+)"',
webpage, 'playlist', default=None)
if playlist:
programme_id, title, description, duration, formats, subtitles = \
self._process_legacy_playlist_url(playlist, playlist_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
programme_id = self._search_regex(
[r'data-video-player-vpid="([\da-z]{8})"',
r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
webpage, 'vpid', default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
# digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
digital_data = self._parse_json(
self._search_regex(
r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
programme_id, fatal=False)
page_info = digital_data.get('page', {}).get('pageInfo', {})
title = page_info.get('pageName') or self._og_search_title(webpage)
description = page_info.get('description') or self._og_search_description(webpage)
timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
return {
'id': programme_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
playlist_title = self._html_search_regex(
r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
playlist_description = self._og_search_description(webpage, default=None)
def extract_all(pattern):
return list(filter(None, map(
lambda s: self._parse_json(s, playlist_id, fatal=False),
re.findall(pattern, webpage))))
# Multiple video article (e.g.
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
entries = []
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
if embed_url and re.match(EMBED_URL, embed_url):
entries.append(embed_url)
entries.extend(re.findall(
r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
if entries:
return self.playlist_result(
[self.url_result(entry, 'BBCCoUk') for entry in entries],
playlist_id, playlist_title, playlist_description)
# Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
medias = extract_all(r"data-media-meta='({[^']+})'")
if not medias:
# Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
media_asset = self._search_regex(
r'mediaAssetPage\.init\(\s*({.+?}), "/',
webpage, 'media asset', default=None)
if media_asset:
media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
medias = []
for video in media_asset_page.get('videos', {}).values():
medias.extend(video.values())
if not medias:
# Multiple video playlist with single `now playing` entry (e.g.
# http://www.bbc.com/news/video_and_audio/must_see/33767813)
vxp_playlist = self._parse_json(
self._search_regex(
r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
webpage, 'playlist data'),
playlist_id)
playlist_medias = []
for item in vxp_playlist:
media = item.get('media')
if not media:
continue
playlist_medias.append(media)
# Download single video if found media with asset id matching the video id from URL
if item.get('advert', {}).get('assetId') == playlist_id:
medias = [media]
break
# Fallback to the whole playlist
if not medias:
medias = playlist_medias
entries = []
for num, media_meta in enumerate(medias, start=1):
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
if not formats:
continue
self._sort_formats(formats)
video_id = media_meta.get('externalId')
if not video_id:
video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
title = media_meta.get('caption')
if not title:
title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
images = []
for image in media_meta.get('images', {}).values():
images.extend(image.values())
if 'image' in media_meta:
images.append(media_meta['image'])
thumbnails = [{
'url': image.get('href'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in images]
entries.append({
'id': video_id,
'title': title,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
| unlicense |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/space_view3d_ruler_chromoly/va/math.py | 4 | 5702 |
# coding: utf-8
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import math
from collections import OrderedDict
from itertools import combinations # , permutations
import bpy
from bpy.props import *
import mathutils as Math
from mathutils import Matrix, Euler, Vector, Quaternion
#geo = mathutils.geometry
MIN_NUMBER = 1E-8
def is_nan(f):
return not f == f
#return f < f or f <= f
def is_inf(f):
try:
int(f)
return False
except OverflowError:
return True
except:
return False
def saacos(fac):
if fac <= -1.0:
return math.pi
elif fac >= 1.0:
return 0.0
else:
return math.acos(fac)
def saasin(fac):
if fac <= -1.0:
return -math.pi / 2.0
elif fac >= 1.0:
return math.pi / 2.0
else:
return math.asin(fac)
def angle_normalized_v3v3(v1, v2):
# helper for rotation_between_vecs_to_quat
# (not strictly needed: equivalent to v1.angle(v2))
if v1.dot(v2) < 0.0:
vec = Vector(-v2)
return math.pi - 2.0 * saasin((vec - v1).length / 2.0)
else:
return 2.0 * saasin((v2 - v1).length / 2.0)
def cross2D(v1, v2):
return v1.x * v2.y - v1.y * v2.x
def dot2D(v1, v2):
return v1.x * v2.x + v1.y * v2.y
def axis_angle_to_quat(axis, angle):
if axis.length < MIN_NUMBER:
return Quaternion([1, 0, 0, 0])
nor = axis.normalized()
angle = angle / 2
si = math.sin(angle)
return Quaternion([math.cos(angle), nor[0] * si, nor[1] * si, nor[2] * si])
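# Example (illustrative): axis_angle_to_quat(Vector((0, 0, 1)), math.pi / 2)
# builds the quaternion for a 90-degree rotation about +Z, i.e. roughly
# Quaternion((0.7071, 0.0, 0.0, 0.7071)).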
def rotation_between_vecs_to_quat(vec1, vec2):
axis = vec1.cross(vec2)
# angle = angle_normalized_v3v3(vec1, vec2)
angle = vec1.angle(vec2)
return axis_angle_to_quat(axis, angle)
def removed_same_coordinate(vecs):
d = OrderedDict(zip((tuple(v) for v in vecs), range(len(vecs))))
return [vecs[i] for i in d.values()]
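# Example (illustrative): duplicates by coordinate are dropped, first-seen order kept:
# removed_same_coordinate([Vector((0, 0, 0)), Vector((1, 0, 0)), Vector((0, 0, 0))])
# -> vectors with coordinates [(0, 0, 0), (1, 0, 0)]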
'''def is_plane(*vecs):
# check whether the given vectors all lie on a single plane
# returns None if a plane cannot be defined
vecs = removed_same_coordinate(vecs)
if len(vecs) <= 2:
return None
v0 = vecs[0]
vec1 = vecs[1] - v0
axis_list = [vec1.cross(v2 - v0).normalized() for v2 in vecs[2:]]
return True
'''
### legacy ###
def intersect(vec1, vec2, vec3, ray, orig, clip=1):
v1 = vec1.copy()
v2 = vec2.copy()
v3 = vec3.copy()
dir = ray.normalized()
orig = orig.copy()
# find vectors for two edges sharing v1
e1 = v2 - v1
e2 = v3 - v1
# begin calculating determinant - also used to calculated U parameter
pvec = dir.cross(e2)
# if determinant is near zero, ray lies in plane of triangle
det = e1.dot(pvec)
if (-1E-6 < det < 1E-6):
return None
inv_det = 1.0 / det
# calculate distance from v1 to ray origin
tvec = orig - v1
# calculate U parameter and test bounds
u = tvec.dot(pvec) * inv_det
if (clip and (u < 0.0 or u > 1.0)):
return None
# prepare to test the V parameter
qvec = tvec.cross(e1)
# calculate V parameter and test bounds
v = dir.dot(qvec) * inv_det
if (clip and (v < 0.0 or u + v > 1.0)):
return None
# calculate t, ray intersects triangle
t = e2.dot(qvec) * inv_det
dir = dir * t
pvec = orig + dir
return pvec
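# Minimal usage sketch (hypothetical values): a ray cast along -Z from above
# the unit triangle hits it at z == 0:
#     intersect(Vector((0, 0, 0)), Vector((1, 0, 0)), Vector((0, 1, 0)),
#               Vector((0, 0, -1)), Vector((0.2, 0.2, 1.0)))
#     -> Vector((0.2, 0.2, 0.0)); None is returned when the ray misses.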
def plane_intersect(loc, normalvec, seg1, seg2, returnstatus=0):
zaxis = Vector([0.0, 0.0, 1.0])
normal = normalvec.copy()
normal.normalize()
quat = rotation_between_vecs_to_quat(normal, zaxis)
s1 = (seg1 - loc) * quat
s2 = (seg2 - loc) * quat
t = crossStatus = None
if abs(s1[2] - s2[2]) < 1E-6:
crossPoint = None
if abs(s1[2]) < 1E-6:
crossStatus = 3 # segment lies in the plane
else:
crossStatus = -1 # segment is parallel to the plane
elif s1[2] == 0.0:
crossPoint = seg1
t = 0
elif s2[2] == 0.0:
crossPoint = seg2
t = 1
else:
t = -(s1[2] / (s2[2] - s1[2]))
crossPoint = (seg2 - seg1) * t + seg1
if t is not None:
if 0 <= t <= 1:
crossStatus = 2 # seg
elif t > 0:
crossStatus = 1 # ray
else:
crossStatus = 0 # line
if returnstatus:
return crossPoint, crossStatus
else:
return crossPoint
def vedistance(vecp, vec1, vec2, segment=1, returnVerticalPoint=False):
dist = None
if segment:
if (vec2 - vec1).dot(vecp - vec1) < 0.0:
dist = (vecp - vec1).length
elif (vec1 - vec2).dot(vecp - vec2) < 0.0:
dist = (vecp - vec2).length
vec = vec2 - vec1
p = vecp - vec1
zaxis = Vector([0.0, 0.0, 1.0])
quat = rotation_between_vecs_to_quat(vec, zaxis)
p2 = p * quat
if dist is None:
dist = math.sqrt(p2[0] ** 2 + p2[1] ** 2)
t = p2[2] / (vec2 - vec1).length
verticalPoint = vec1 + vec * t
if returnVerticalPoint:
return dist, verticalPoint
else:
return dist
| gpl-3.0 |
undefinedv/Jingubang | sqlmap/plugins/dbms/postgresql/fingerprint.py | 2 | 6416 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.session import setDbms
from lib.core.settings import PGSQL_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.PGSQL)
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp:
value += "%s\n" % dbmsOsFp
value += "back-end DBMS: "
if not conf.extensiveFp:
value += DBMS.PGSQL
return value
actVer = Format.getDbms()
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
banVer = Format.getDbms([banVer])
value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
"""
References for fingerprint:
* http://www.postgresql.org/docs/9.1/interactive/release.html (up to 9.1.3)
"""
if not conf.extensiveFp and (Backend.isDbmsWithin(PGSQL_ALIASES) or (conf.dbms or "").lower() in PGSQL_ALIASES):
setDbms(DBMS.PGSQL)
self.getBanner()
return True
infoMsg = "testing %s" % DBMS.PGSQL
logger.info(infoMsg)
result = inject.checkBooleanExpression("[RANDNUM]::int=[RANDNUM]")
if result:
infoMsg = "confirming %s" % DBMS.PGSQL
logger.info(infoMsg)
result = inject.checkBooleanExpression("COALESCE([RANDNUM], NULL)=[RANDNUM]")
if not result:
warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
logger.warn(warnMsg)
return False
setDbms(DBMS.PGSQL)
self.getBanner()
if not conf.extensiveFp:
return True
infoMsg = "actively fingerprinting %s" % DBMS.PGSQL
logger.info(infoMsg)
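# The version is inferred by probing for functions/operators introduced in
# each PostgreSQL release (e.g. REVERSE() in 9.1, TO_CHAR(..., 'EEEE') in 9.0,
# DIV() in 8.4), walking from newest to oldest until a probe evaluates true.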
if inject.checkBooleanExpression("REVERSE('sqlmap')='pamlqs'"):
Backend.setVersion(">= 9.1.0")
elif inject.checkBooleanExpression("LENGTH(TO_CHAR(1,'EEEE'))>0"):
Backend.setVersionList([">= 9.0.0", "< 9.1.0"])
elif inject.checkBooleanExpression("2=(SELECT DIV(6,3))"):
Backend.setVersionList([">= 8.4.0", "< 9.0.0"])
elif inject.checkBooleanExpression("EXTRACT(ISODOW FROM CURRENT_TIMESTAMP)<8"):
Backend.setVersionList([">= 8.3.0", "< 8.4.0"])
elif inject.checkBooleanExpression("ISFINITE(TRANSACTION_TIMESTAMP())"):
Backend.setVersionList([">= 8.2.0", "< 8.3.0"])
elif inject.checkBooleanExpression("9=(SELECT GREATEST(5,9,1))"):
Backend.setVersionList([">= 8.1.0", "< 8.2.0"])
elif inject.checkBooleanExpression("3=(SELECT WIDTH_BUCKET(5.35,0.024,10.06,5))"):
Backend.setVersionList([">= 8.0.0", "< 8.1.0"])
elif inject.checkBooleanExpression("'d'=(SELECT SUBSTR(MD5('sqlmap'),1,1))"):
Backend.setVersionList([">= 7.4.0", "< 8.0.0"])
elif inject.checkBooleanExpression("'p'=(SELECT SUBSTR(CURRENT_SCHEMA(),1,1))"):
Backend.setVersionList([">= 7.3.0", "< 7.4.0"])
elif inject.checkBooleanExpression("8=(SELECT BIT_LENGTH(1))"):
Backend.setVersionList([">= 7.2.0", "< 7.3.0"])
elif inject.checkBooleanExpression("'a'=(SELECT SUBSTR(QUOTE_LITERAL('a'),2,1))"):
Backend.setVersionList([">= 7.1.0", "< 7.2.0"])
elif inject.checkBooleanExpression("8=(SELECT POW(2,3))"):
Backend.setVersionList([">= 7.0.0", "< 7.1.0"])
elif inject.checkBooleanExpression("'a'=(SELECT MAX('a'))"):
Backend.setVersionList([">= 6.5.0", "< 6.5.3"])
elif inject.checkBooleanExpression("VERSION()=VERSION()"):
Backend.setVersionList([">= 6.4.0", "< 6.5.0"])
elif inject.checkBooleanExpression("2=(SELECT SUBSTR(CURRENT_DATE,1,1))"):
Backend.setVersionList([">= 6.3.0", "< 6.4.0"])
elif inject.checkBooleanExpression("'s'=(SELECT SUBSTRING('sqlmap',1,1))"):
Backend.setVersionList([">= 6.2.0", "< 6.3.0"])
else:
Backend.setVersion("< 6.2.0")
return True
else:
warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
logger.warn(warnMsg)
return False
def checkDbmsOs(self, detailed=False):
if Backend.getOs():
return
infoMsg = "fingerprinting the back-end DBMS operating system"
logger.info(infoMsg)
self.createSupportTbl(self.fileTblName, self.tblField, "character(10000)")
inject.goStacked("INSERT INTO %s(%s) VALUES (%s)" % (self.fileTblName, self.tblField, "VERSION()"))
# Windows executables should always have ' Visual C++' or ' mingw'
# patterns within the banner
osWindows = (" Visual C++", "mingw")
for osPattern in osWindows:
query = "(SELECT LENGTH(%s) FROM %s WHERE %s " % (self.tblField, self.fileTblName, self.tblField)
query += "LIKE '%" + osPattern + "%')>0"
if inject.checkBooleanExpression(query):
Backend.setOs(OS.WINDOWS)
break
if Backend.getOs() is None:
Backend.setOs(OS.LINUX)
infoMsg = "the back-end DBMS operating system is %s" % Backend.getOs()
logger.info(infoMsg)
self.cleanup(onlyFileTbl=True)
| gpl-3.0 |
yesudeep/themoneybees | app/jinja2/tests/test_tests.py | 6 | 2295 |
# -*- coding: utf-8 -*-
"""
unit test for the test functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2009 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import Environment, Markup
env = Environment()
DEFINED = '''{{ missing is defined }}|{{ true is defined }}'''
EVEN = '''{{ 1 is even }}|{{ 2 is even }}'''
LOWER = '''{{ "foo" is lower }}|{{ "FOO" is lower }}'''
ODD = '''{{ 1 is odd }}|{{ 2 is odd }}'''
SEQUENCE = '''{{ [1, 2, 3] is sequence }}|\
{{ "foo" is sequence }}|\
{{ 42 is sequence }}'''
UPPER = '''{{ "FOO" is upper }}|{{ "foo" is upper }}'''
SAMEAS = '''{{ foo is sameas false }}|{{ 0 is sameas false }}'''
NOPARENFORARG1 = '''{{ foo is sameas none }}'''
TYPECHECKS = '''\
{{ 42 is undefined }}
{{ 42 is defined }}
{{ 42 is none }}
{{ none is none }}
{{ 42 is number }}
{{ 42 is string }}
{{ "foo" is string }}
{{ "foo" is sequence }}
{{ [1] is sequence }}
{{ range is callable }}
{{ 42 is callable }}
{{ range(5) is iterable }}'''
def test_defined():
tmpl = env.from_string(DEFINED)
assert tmpl.render() == 'False|True'
def test_even():
tmpl = env.from_string(EVEN)
assert tmpl.render() == 'False|True'
def test_odd():
tmpl = env.from_string(ODD)
assert tmpl.render() == 'True|False'
def test_lower():
tmpl = env.from_string(LOWER)
assert tmpl.render() == 'True|False'
def test_sequence():
tmpl = env.from_string(SEQUENCE)
assert tmpl.render() == 'True|True|False'
def test_upper():
tmpl = env.from_string(UPPER)
assert tmpl.render() == 'True|False'
def test_sameas():
tmpl = env.from_string(SAMEAS)
assert tmpl.render(foo=False) == 'True|False'
def test_typechecks():
tmpl = env.from_string(TYPECHECKS)
assert tmpl.render() == (
'False\nTrue\nFalse\nTrue\nTrue\nFalse\n'
'True\nTrue\nTrue\nTrue\nFalse\nTrue'
)
def test_no_paren_for_arg1():
tmpl = env.from_string(NOPARENFORARG1)
assert tmpl.render(foo=None) == 'True'
def test_escaped():
env = Environment(autoescape=True)
tmpl = env.from_string('{{ x is escaped }}|{{ y is escaped }}')
assert tmpl.render(x='foo', y=Markup('foo')) == 'False|True'
| mit |
OpenNetworking/gcoin-community | qa/rpc-tests/test_framework/bignum.py | 230 | 1995 |
#
#
# bignum.py
#
# This file is copied from python-bitcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
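# Example round-trip (illustrative values):
#     vch2bn(bn2vch(0)) == 0
#     vch2bn(bn2vch(-1000)) == -1000      # sign stored in the high bit
#     vch2bn(bn2vch(0x80)) == 0x80        # needs the extra extension byte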
| apache-2.0 |
def-/commandergenius | project/jni/python/src/Lib/distutils/command/bdist_rpm.py | 53 | 20189 |
"""distutils.command.bdist_rpm
Implements the Distutils 'bdist_rpm' command (create RPM source and binary
distributions)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_rpm.py 61000 2008-02-23 17:40:11Z christian.heimes $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.util import get_platform
from distutils.file_util import write_file
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_rpm (Command):
description = "create an RPM distribution"
user_options = [
('bdist-base=', None,
"base directory for creating built distributions"),
('rpm-base=', None,
"base directory for creating RPMs (defaults to \"rpm\" under "
"--bdist-base; must be specified for RPM 2)"),
('dist-dir=', 'd',
"directory to put final RPM files in "
"(and .spec files if --spec-only)"),
('python=', None,
"path to Python interpreter to hard-code in the .spec file "
"(default: \"python\")"),
('fix-python', None,
"hard-code the exact path to the current Python interpreter in "
"the .spec file"),
('spec-only', None,
"only regenerate spec file"),
('source-only', None,
"only generate source RPM"),
('binary-only', None,
"only generate binary RPM"),
('use-bzip2', None,
"use bzip2 instead of gzip to create source distribution"),
# More meta-data: too RPM-specific to put in the setup script,
# but needs to go in the .spec file -- so we make these options
# to "bdist_rpm". The idea is that packagers would put this
# info in setup.cfg, although they are of course free to
# supply it on the command line (see the example after this option list).
('distribution-name=', None,
"name of the (Linux) distribution to which this "
"RPM applies (*not* the name of the module distribution!)"),
('group=', None,
"package classification [default: \"Development/Libraries\"]"),
('release=', None,
"RPM release number"),
('serial=', None,
"RPM serial number"),
('vendor=', None,
"RPM \"vendor\" (eg. \"Joe Blow <[email protected]>\") "
"[default: maintainer or author from setup script]"),
('packager=', None,
"RPM packager (eg. \"Jane Doe <[email protected]>\")"
"[default: vendor]"),
('doc-files=', None,
"list of documentation files (space or comma-separated)"),
('changelog=', None,
"RPM changelog"),
('icon=', None,
"name of icon file"),
('provides=', None,
"capabilities provided by this package"),
('requires=', None,
"capabilities required by this package"),
('conflicts=', None,
"capabilities which conflict with this package"),
('build-requires=', None,
"capabilities required to build this package"),
('obsoletes=', None,
"capabilities made obsolete by this package"),
('no-autoreq', None,
"do not automatically calculate dependencies"),
# Actions to take when building RPM
('keep-temp', 'k',
"don't clean up RPM build directory"),
('no-keep-temp', None,
"clean up RPM build directory [default]"),
('use-rpm-opt-flags', None,
"compile with RPM_OPT_FLAGS when building from source RPM"),
('no-rpm-opt-flags', None,
"do not pass any RPM CFLAGS to compiler"),
('rpm3-mode', None,
"RPM 3 compatibility mode (default)"),
('rpm2-mode', None,
"RPM 2 compatibility mode"),
# Add the hooks necessary for specifying custom scripts
('prep-script=', None,
"Specify a script for the PREP phase of RPM building"),
('build-script=', None,
"Specify a script for the BUILD phase of RPM building"),
('pre-install=', None,
"Specify a script for the pre-INSTALL phase of RPM building"),
('install-script=', None,
"Specify a script for the INSTALL phase of RPM building"),
('post-install=', None,
"Specify a script for the post-INSTALL phase of RPM building"),
('pre-uninstall=', None,
"Specify a script for the pre-UNINSTALL phase of RPM building"),
('post-uninstall=', None,
"Specify a script for the post-UNINSTALL phase of RPM building"),
('clean-script=', None,
"Specify a script for the CLEAN phase of RPM building"),
('verify-script=', None,
"Specify a script for the VERIFY phase of the RPM build"),
# Allow a packager to explicitly force an architecture
('force-arch=', None,
"Force an architecture onto the RPM build process"),
]
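# Example (hypothetical values): the RPM-specific metadata above is usually
# supplied through a [bdist_rpm] section in setup.cfg rather than on the
# command line, e.g.
#
#     [bdist_rpm]
#     release = 1
#     packager = Jane Doe <[email protected]>
#     doc_files = CHANGELOG.txt README.txt
#     requires = python >= 2.1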
boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
'no-autoreq']
negative_opt = {'no-keep-temp': 'keep-temp',
'no-rpm-opt-flags': 'use-rpm-opt-flags',
'rpm2-mode': 'rpm3-mode'}
def initialize_options (self):
self.bdist_base = None
self.rpm_base = None
self.dist_dir = None
self.python = None
self.fix_python = None
self.spec_only = None
self.binary_only = None
self.source_only = None
self.use_bzip2 = None
self.distribution_name = None
self.group = None
self.release = None
self.serial = None
self.vendor = None
self.packager = None
self.doc_files = None
self.changelog = None
self.icon = None
self.prep_script = None
self.build_script = None
self.install_script = None
self.clean_script = None
self.verify_script = None
self.pre_install = None
self.post_install = None
self.pre_uninstall = None
self.post_uninstall = None
self.prep = None
self.provides = None
self.requires = None
self.conflicts = None
self.build_requires = None
self.obsoletes = None
self.keep_temp = 0
self.use_rpm_opt_flags = 1
self.rpm3_mode = 1
self.no_autoreq = 0
self.force_arch = None
# initialize_options()
def finalize_options (self):
self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
if self.rpm_base is None:
if not self.rpm3_mode:
raise DistutilsOptionError, \
"you must specify --rpm-base in RPM 2 mode"
self.rpm_base = os.path.join(self.bdist_base, "rpm")
if self.python is None:
if self.fix_python:
self.python = sys.executable
else:
self.python = "python"
elif self.fix_python:
raise DistutilsOptionError, \
"--python and --fix-python are mutually exclusive options"
if os.name != 'posix':
raise DistutilsPlatformError, \
("don't know how to create RPM "
"distributions on platform %s" % os.name)
if self.binary_only and self.source_only:
raise DistutilsOptionError, \
"cannot supply both '--source-only' and '--binary-only'"
# don't pass CFLAGS to pure python distributions
if not self.distribution.has_ext_modules():
self.use_rpm_opt_flags = 0
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
self.finalize_package_data()
# finalize_options()
def finalize_package_data (self):
self.ensure_string('group', "Development/Libraries")
self.ensure_string('vendor',
"%s <%s>" % (self.distribution.get_contact(),
self.distribution.get_contact_email()))
self.ensure_string('packager')
self.ensure_string_list('doc_files')
if type(self.doc_files) is ListType:
for readme in ('README', 'README.txt'):
if os.path.exists(readme) and readme not in self.doc_files:
self.doc_files.append(readme)
self.ensure_string('release', "1")
self.ensure_string('serial') # should it be an int?
self.ensure_string('distribution_name')
self.ensure_string('changelog')
# Format changelog correctly
self.changelog = self._format_changelog(self.changelog)
self.ensure_filename('icon')
self.ensure_filename('prep_script')
self.ensure_filename('build_script')
self.ensure_filename('install_script')
self.ensure_filename('clean_script')
self.ensure_filename('verify_script')
self.ensure_filename('pre_install')
self.ensure_filename('post_install')
self.ensure_filename('pre_uninstall')
self.ensure_filename('post_uninstall')
# XXX don't forget we punted on summaries and descriptions -- they
# should be handled here eventually!
# Now *this* is some meta-data that belongs in the setup script...
self.ensure_string_list('provides')
self.ensure_string_list('requires')
self.ensure_string_list('conflicts')
self.ensure_string_list('build_requires')
self.ensure_string_list('obsoletes')
self.ensure_string('force_arch')
# finalize_package_data ()
def run (self):
if DEBUG:
print "before _get_package_data():"
print "vendor =", self.vendor
print "packager =", self.packager
print "doc_files =", self.doc_files
print "changelog =", self.changelog
# make directories
if self.spec_only:
spec_dir = self.dist_dir
self.mkpath(spec_dir)
else:
rpm_dir = {}
for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
rpm_dir[d] = os.path.join(self.rpm_base, d)
self.mkpath(rpm_dir[d])
spec_dir = rpm_dir['SPECS']
# Spec file goes into 'dist_dir' if '--spec-only' specified,
# build/rpm.<plat> otherwise.
spec_path = os.path.join(spec_dir,
"%s.spec" % self.distribution.get_name())
self.execute(write_file,
(spec_path,
self._make_spec_file()),
"writing '%s'" % spec_path)
if self.spec_only: # stop if requested
return
# Make a source distribution and copy to SOURCES directory with
# optional icon.
saved_dist_files = self.distribution.dist_files[:]
sdist = self.reinitialize_command('sdist')
if self.use_bzip2:
sdist.formats = ['bztar']
else:
sdist.formats = ['gztar']
self.run_command('sdist')
self.distribution.dist_files = saved_dist_files
source = sdist.get_archive_files()[0]
source_dir = rpm_dir['SOURCES']
self.copy_file(source, source_dir)
if self.icon:
if os.path.exists(self.icon):
self.copy_file(self.icon, source_dir)
else:
raise DistutilsFileError, \
"icon file '%s' does not exist" % self.icon
# build package
log.info("building RPMs")
rpm_cmd = ['rpm']
if os.path.exists('/usr/bin/rpmbuild') or \
os.path.exists('/bin/rpmbuild'):
rpm_cmd = ['rpmbuild']
if self.source_only: # what kind of RPMs?
rpm_cmd.append('-bs')
elif self.binary_only:
rpm_cmd.append('-bb')
else:
rpm_cmd.append('-ba')
if self.rpm3_mode:
rpm_cmd.extend(['--define',
'_topdir %s' % os.path.abspath(self.rpm_base)])
if not self.keep_temp:
rpm_cmd.append('--clean')
rpm_cmd.append(spec_path)
# Determine the binary rpm names that should be built out of this spec
# file
# Note that some of these may not be really built (if the file
# list is empty)
nvr_string = "%{name}-%{version}-%{release}"
src_rpm = nvr_string + ".src.rpm"
non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
src_rpm, non_src_rpm, spec_path)
out = os.popen(q_cmd)
binary_rpms = []
source_rpm = None
while 1:
line = out.readline()
if not line:
break
l = string.split(string.strip(line))
assert(len(l) == 2)
binary_rpms.append(l[1])
# The source rpm is named after the first entry in the spec file
if source_rpm is None:
source_rpm = l[0]
status = out.close()
if status:
raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
self.spawn(rpm_cmd)
if not self.dry_run:
if not self.binary_only:
srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
assert(os.path.exists(srpm))
self.move_file(srpm, self.dist_dir)
if not self.source_only:
for rpm in binary_rpms:
rpm = os.path.join(rpm_dir['RPMS'], rpm)
if os.path.exists(rpm):
self.move_file(rpm, self.dist_dir)
# run()
def _dist_path(self, path):
return os.path.join(self.dist_dir, os.path.basename(path))
def _make_spec_file(self):
"""Generate the text of an RPM spec file and return it as a
list of strings (one per line).
"""
# definitions and headers
spec_file = [
'%define name ' + self.distribution.get_name(),
'%define version ' + self.distribution.get_version().replace('-','_'),
'%define unmangled_version ' + self.distribution.get_version(),
'%define release ' + self.release.replace('-','_'),
'',
'Summary: ' + self.distribution.get_description(),
]
# put locale summaries into spec file
# XXX not supported for now (hard to put a dictionary
# in a config file -- arg!)
#for locale in self.summaries.keys():
# spec_file.append('Summary(%s): %s' % (locale,
# self.summaries[locale]))
spec_file.extend([
'Name: %{name}',
'Version: %{version}',
'Release: %{release}',])
# XXX yuck! this filename is available from the "sdist" command,
# but only after it has run: and we create the spec file before
# running "sdist", in case of --spec-only.
if self.use_bzip2:
spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
else:
spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
spec_file.extend([
'License: ' + self.distribution.get_license(),
'Group: ' + self.group,
'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
'Prefix: %{_prefix}', ])
if not self.force_arch:
# noarch if no extension modules
if not self.distribution.has_ext_modules():
spec_file.append('BuildArch: noarch')
else:
spec_file.append( 'BuildArch: %s' % self.force_arch )
for field in ('Vendor',
'Packager',
'Provides',
'Requires',
'Conflicts',
'Obsoletes',
):
val = getattr(self, string.lower(field))
if type(val) is ListType:
spec_file.append('%s: %s' % (field, string.join(val)))
elif val is not None:
spec_file.append('%s: %s' % (field, val))
if self.distribution.get_url() != 'UNKNOWN':
spec_file.append('Url: ' + self.distribution.get_url())
if self.distribution_name:
spec_file.append('Distribution: ' + self.distribution_name)
if self.build_requires:
spec_file.append('BuildRequires: ' +
string.join(self.build_requires))
if self.icon:
spec_file.append('Icon: ' + os.path.basename(self.icon))
if self.no_autoreq:
spec_file.append('AutoReq: 0')
spec_file.extend([
'',
'%description',
self.distribution.get_long_description()
])
# put locale descriptions into spec file
# XXX again, suppressed because config file syntax doesn't
# easily support this ;-(
#for locale in self.descriptions.keys():
# spec_file.extend([
# '',
# '%description -l ' + locale,
# self.descriptions[locale],
# ])
# rpm scripts
# figure out default build script
def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
def_build = "%s build" % def_setup_call
if self.use_rpm_opt_flags:
def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
# insert contents of files
# XXX this is kind of misleading: user-supplied options are files
# that we open and interpolate into the spec file, but the defaults
# are just text that we drop in as-is. Hmmm.
script_options = [
('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
('build', 'build_script', def_build),
('install', 'install_script',
("%s install "
"--root=$RPM_BUILD_ROOT "
"--record=INSTALLED_FILES") % def_setup_call),
('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
('verifyscript', 'verify_script', None),
('pre', 'pre_install', None),
('post', 'post_install', None),
('preun', 'pre_uninstall', None),
('postun', 'post_uninstall', None),
]
for (rpm_opt, attr, default) in script_options:
            # Insert the contents of the file referred to; if no file is
            # referred to, use 'default' as the contents of the script.
val = getattr(self, attr)
if val or default:
spec_file.extend([
'',
'%' + rpm_opt,])
if val:
spec_file.extend(string.split(open(val, 'r').read(), '\n'))
else:
spec_file.append(default)
# files section
spec_file.extend([
'',
'%files -f INSTALLED_FILES',
'%defattr(-,root,root)',
])
if self.doc_files:
spec_file.append('%doc ' + string.join(self.doc_files))
if self.changelog:
spec_file.extend([
'',
'%changelog',])
spec_file.extend(self.changelog)
return spec_file
# _make_spec_file ()
def _format_changelog(self, changelog):
"""Format the changelog correctly and convert it to a list of strings
"""
if not changelog:
return changelog
new_changelog = []
for line in string.split(string.strip(changelog), '\n'):
line = string.strip(line)
if line[0] == '*':
new_changelog.extend(['', line])
elif line[0] == '-':
new_changelog.append(line)
else:
new_changelog.append(' ' + line)
# strip trailing newline inserted by first changelog entry
if not new_changelog[0]:
del new_changelog[0]
return new_changelog
# _format_changelog()
# class bdist_rpm
|
lgpl-2.1
|
cr0wbar/fishnet
|
SettingsDialogUi.py
|
1
|
3651
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Settings.ui'
#
# Created: Sun Oct 26 18:16:48 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtWidgets
class Ui_SettingsDialog(object):
def setupUi(self, SettingsDialog):
SettingsDialog.setObjectName("SettingsDialog")
SettingsDialog.resize(400, 192)
self.verticalLayout = QtWidgets.QVBoxLayout(SettingsDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(SettingsDialog)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.loadProvidersAtStartupCheckbox = QtWidgets.QCheckBox(self.groupBox)
self.loadProvidersAtStartupCheckbox.setChecked(True)
self.loadProvidersAtStartupCheckbox.setObjectName("loadProvidersAtStartupCheckbox")
self.verticalLayout_2.addWidget(self.loadProvidersAtStartupCheckbox)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.urlOfThelistLabel = QtWidgets.QLabel(self.groupBox)
self.urlOfThelistLabel.setObjectName("urlOfThelistLabel")
self.horizontalLayout.addWidget(self.urlOfThelistLabel)
self.urlOfThelistInput = QtWidgets.QLineEdit(self.groupBox)
self.urlOfThelistInput.setObjectName("urlOfThelistInput")
self.horizontalLayout.addWidget(self.urlOfThelistInput)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.pagesSpinBox = QtWidgets.QSpinBox(self.groupBox)
self.pagesSpinBox.setMinimum(1)
self.pagesSpinBox.setProperty("value", 3)
self.pagesSpinBox.setObjectName("pagesSpinBox")
self.horizontalLayout_2.addWidget(self.pagesSpinBox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.verticalLayout.addWidget(self.groupBox)
self.buttonBox = QtWidgets.QDialogButtonBox(SettingsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(SettingsDialog)
self.buttonBox.accepted.connect(SettingsDialog.accept)
self.buttonBox.rejected.connect(SettingsDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SettingsDialog)
def retranslateUi(self, SettingsDialog):
_translate = QtCore.QCoreApplication.translate
SettingsDialog.setWindowTitle(_translate("SettingsDialog", "Settings"))
        self.groupBox.setTitle(_translate("SettingsDialog", "General Settings"))
self.loadProvidersAtStartupCheckbox.setText(_translate("SettingsDialog", "Load providers from list at startup"))
self.urlOfThelistLabel.setText(_translate("SettingsDialog", "URL of the list:"))
self.label_2.setText(_translate("SettingsDialog", "Pages to fetch:"))
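# Hedged usage sketch (not part of the generated file): the usual way a
# pyuic5-generated class is consumed -- instantiate a plain QDialog, apply
# setupUi() to it, then run the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_SettingsDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())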
|
gpl-3.0
|
zackslash/scrapy
|
scrapy/utils/signal.py
|
133
|
2969
|
"""Helper functions for working with signals"""
import logging
from twisted.internet.defer import maybeDeferred, DeferredList, Deferred
from twisted.python.failure import Failure
from pydispatch.dispatcher import Any, Anonymous, liveReceivers, \
getAllReceivers, disconnect
from pydispatch.robustapply import robustApply
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
class _IgnoredException(Exception):
pass
def send_catch_log(signal=Any, sender=Anonymous, *arguments, **named):
"""Like pydispatcher.robust.sendRobust but it also logs errors and returns
Failures instead of exceptions.
"""
dont_log = named.pop('dont_log', _IgnoredException)
spider = named.get('spider', None)
responses = []
for receiver in liveReceivers(getAllReceivers(sender, signal)):
try:
response = robustApply(receiver, signal=signal, sender=sender,
*arguments, **named)
if isinstance(response, Deferred):
logger.error("Cannot return deferreds from signal handler: %(receiver)s",
{'receiver': receiver}, extra={'spider': spider})
except dont_log:
result = Failure()
except Exception:
result = Failure()
logger.error("Error caught on signal handler: %(receiver)s",
{'receiver': receiver},
exc_info=True, extra={'spider': spider})
else:
result = response
responses.append((receiver, result))
return responses
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
"""Like send_catch_log but supports returning deferreds on signal handlers.
Returns a deferred that gets fired once all signal handlers deferreds were
fired.
"""
def logerror(failure, recv):
if dont_log is None or not isinstance(failure.value, dont_log):
logger.error("Error caught on signal handler: %(receiver)s",
{'receiver': recv},
exc_info=failure_to_exc_info(failure),
extra={'spider': spider})
return failure
dont_log = named.pop('dont_log', None)
spider = named.get('spider', None)
dfds = []
for receiver in liveReceivers(getAllReceivers(sender, signal)):
d = maybeDeferred(robustApply, receiver, signal=signal, sender=sender,
*arguments, **named)
d.addErrback(logerror, receiver)
d.addBoth(lambda result: (receiver, result))
dfds.append(d)
d = DeferredList(dfds)
d.addCallback(lambda out: [x[1] for x in out])
return d
def disconnect_all(signal=Any, sender=Any):
"""Disconnect all signal handlers. Useful for cleaning up after running
tests
"""
for receiver in liveReceivers(getAllReceivers(sender, signal)):
disconnect(receiver, signal=signal, sender=sender)
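# Hedged usage sketch (not part of this module): connecting a plain receiver
# through pydispatch and dispatching it via send_catch_log. The signal object
# and the 'item' keyword argument are illustrative assumptions, not scrapy names.
if __name__ == '__main__':
    from pydispatch import dispatcher
    example_signal = object()  # any unique object can serve as a signal
    def on_example(item, **kwargs):
        return 'received %r' % (item,)
    dispatcher.connect(on_example, signal=example_signal)
    # send_catch_log returns a list of (receiver, result-or-Failure) tuples;
    # exceptions raised by receivers are logged and wrapped in Failure.
    print(send_catch_log(signal=example_signal, item={'title': 'example'}))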
|
bsd-3-clause
|
icarito/sugar-git-activity
|
console/interactiveconsole.py
|
1
|
10357
|
#!/usr/bin/env python
# GTK Interactive Console
# (C) 2003, Jon Anderson
# See www.python.org/2.2/license.html for
# license details.
#
import pygtk
pygtk.require('2.0')
import gtk
import gtk.gdk
import code
import sys
import pango
import __builtin__
import __main__
from sugar.graphics import style
banner = """Taller de Artesano de Videojuegos
Python %s
""" % sys.version
class Completer:
"""
Taken from rlcompleter, with readline references stripped, and a local dictionary to use.
"""
def __init__(self, locals):
self.locals = locals
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names
        currently defined in __main__ that match.
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,__builtin__.__dict__.keys(),__main__.__dict__.keys(), self.locals.keys()]:
for word in list:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return sorted(matches)
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
        completions. (For class instances, class members are also
considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, __main__.__dict__, self.locals)
words = dir(object)
if hasattr(object,'__class__'):
words.append('__class__')
words = words + get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
matches = list(set(matches)) #make unique
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
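# Hedged usage sketch (not part of the original console): driving Completer the
# way readline would, calling complete() with state 0, 1, 2, ... until it
# returns None. The namespace argument is an illustrative assumption.
def _all_completions(prefix, namespace=None):
    completer = Completer(namespace or {})
    matches = []
    while True:
        match = completer.complete(prefix, len(matches))
        if match is None:
            return matches
        matches.append(match)
# e.g. _all_completions("pri") lists the keywords, builtins and globals whose
# names start with "pri".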
class OutputStream:
"""
A Multiplexing output stream.
    It can replace another stream, and tee output to the original stream and to
a GTK textview.
"""
def __init__(self,view,old_out,style):
self.view = view
self.buffer = view.get_buffer()
self.mark = self.buffer.create_mark("End",self.buffer.get_end_iter(), gtk.FALSE )
self.out = old_out
self.style = style
self.tee = 1
def write(self,text):
if self.tee:
self.out.write(text)
end = self.buffer.get_end_iter()
        if self.view is not None:
self.view.scroll_to_mark(self.mark, 0, gtk.TRUE, 1, 1)
self.buffer.insert_with_tags(end,text,self.style)
class GTKInterpreterConsole(gtk.ScrolledWindow):
"""
An InteractiveConsole for GTK. It's an actual widget,
so it can be dropped in just about anywhere.
"""
def __init__(self, frame, callback=None):
gtk.ScrolledWindow.__init__(self)
self.set_policy (gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.text = gtk.TextView()
self.text.set_wrap_mode(True)
self.callback = callback
font = "Monospace " + str(int(11/style.ZOOM_FACTOR))
self.text.modify_font(pango.FontDescription(font))
# use exception trick to pick up the current frame
#try:
# raise None
#except:
# frame = sys.exc_info()[2].tb_frame.f_back
# evaluate commands in current namespace
namespace = frame.f_globals.copy()
namespace.update(frame.f_locals)
self.interpreter = code.InteractiveInterpreter(namespace)
self.completer = Completer(self.interpreter.locals)
self.buffer = []
self.history = []
self.banner = banner
self.ps1 = ">>> "
self.ps2 = "... "
self.text.add_events( gtk.gdk.KEY_PRESS_MASK )
self.text.connect( "key_press_event", self.key_pressed )
self.current_history = -1
self.mark = self.text.get_buffer().create_mark("End",self.text.get_buffer().get_end_iter(), gtk.FALSE )
#setup colors
self.style_banner = gtk.TextTag("banner")
self.style_banner.set_property( "foreground", "saddle brown" )
self.style_ps1 = gtk.TextTag("ps1")
self.style_ps1.set_property( "foreground", "DarkOrchid4" )
self.style_ps1.set_property( "editable", gtk.FALSE )
self.style_ps1.set_property("font", "courier" )
self.style_ps2 = gtk.TextTag("ps2")
self.style_ps2.set_property( "foreground", "DarkOliveGreen" )
self.style_ps2.set_property( "editable", gtk.FALSE )
self.style_ps2.set_property("font", "courier" )
self.style_out = gtk.TextTag("stdout")
self.style_out.set_property( "foreground", "midnight blue" )
self.style_err = gtk.TextTag("stderr")
self.style_err.set_property( "style", pango.STYLE_ITALIC )
self.style_err.set_property( "foreground", "red" )
self.text.get_buffer().get_tag_table().add(self.style_banner)
self.text.get_buffer().get_tag_table().add(self.style_ps1)
self.text.get_buffer().get_tag_table().add(self.style_ps2)
self.text.get_buffer().get_tag_table().add(self.style_out)
self.text.get_buffer().get_tag_table().add(self.style_err)
self.stdout = OutputStream(self.text,sys.stdout,self.style_out)
self.stderr = OutputStream(self.text,sys.stderr,self.style_err)
sys.stderr = self.stderr
sys.stdout = self.stdout
self.current_prompt = None
self.write_line(self.banner, self.style_banner)
self.prompt_ps1()
self.add(self.text)
self.text.show()
def reset_history(self):
self.history = []
def reset_buffer(self):
self.buffer = []
def prompt_ps1(self):
self.current_prompt = self.prompt_ps1
self.write_line(self.ps1,self.style_ps1)
def prompt_ps2(self):
self.current_prompt = self.prompt_ps2
self.write_line(self.ps2,self.style_ps2)
def write_line(self,text,style=None):
start,end = self.text.get_buffer().get_bounds()
        if style is None:
self.text.get_buffer().insert(end,text)
else:
self.text.get_buffer().insert_with_tags(end,text,style)
self.text.scroll_to_mark(self.mark, 0, gtk.TRUE, 1, 1)
def push(self, line):
self.buffer.append(line)
if len(line) > 0:
self.history.append(line)
source = "\n".join(self.buffer)
more = self.interpreter.runsource(source, "<<console>>")
if not more:
self.reset_buffer()
return more
def key_pressed(self,widget,event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
return self.execute_line()
if event.keyval == gtk.gdk.keyval_from_name('Up'):
self.current_history = self.current_history - 1
if self.current_history < - len(self.history):
self.current_history = - len(self.history)
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name('Down'):
self.current_history = self.current_history + 1
if self.current_history > 0:
self.current_history = 0
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name( 'Home'):
l = self.text.get_buffer().get_line_count() - 1
start = self.text.get_buffer().get_iter_at_line_offset(l,4)
self.text.get_buffer().place_cursor(start)
return gtk.TRUE
elif event.keyval == gtk.gdk.keyval_from_name( 'Tab'):
if self.current_line():
return self.complete_line()
return gtk.FALSE
def show_history(self):
if self.current_history == 0:
return gtk.TRUE
else:
self.replace_line( self.history[self.current_history] )
return gtk.TRUE
def current_line(self):
start,end = self.current_line_bounds()
return self.text.get_buffer().get_text(start,end, gtk.TRUE)
def current_line_bounds(self):
txt_buffer = self.text.get_buffer()
l = txt_buffer.get_line_count() - 1
start = txt_buffer.get_iter_at_line(l)
if start.get_chars_in_line() >= 4:
start.forward_chars(4)
end = txt_buffer.get_end_iter()
return start,end
def replace_line(self,txt):
start,end = self.current_line_bounds()
self.text.get_buffer().delete(start,end)
self.write_line(txt)
def execute_line(self):
line = self.current_line()
self.write_line("\n")
more = self.push(line)
self.text.get_buffer().place_cursor(self.text.get_buffer().get_end_iter())
if more:
self.prompt_ps2()
else:
self.prompt_ps1()
self.current_history = 0
if self.callback:
self.callback()
return gtk.TRUE
def complete_line(self):
line = self.current_line()
tokens = line.split()
        try:
            token = tokens[-1]
        except IndexError:
            # Nothing to complete on a blank line; consume the Tab keypress.
            return gtk.TRUE
completions = []
p = self.completer.complete(token,len(completions))
while p != None:
completions.append(p)
p = self.completer.complete(token, len(completions))
if len(completions) != 1:
self.write_line("\n")
self.write_line("\n".join(completions), self.style_ps1)
self.write_line("\n")
self.current_prompt()
self.write_line(line)
else:
i = line.rfind(token)
line = line[0:i] + completions[0]
self.replace_line(line)
return gtk.TRUE
def main():
w = gtk.Window()
    # The constructor requires a frame whose namespace the console evaluates in.
    console = GTKInterpreterConsole(sys._getframe())
console.set_size_request(640,480)
w.add(console)
def destroy(arg=None):
gtk.mainquit()
def key_event(widget,event):
if gtk.gdk.keyval_name( event.keyval) == 'd' and event.state & gtk.gdk.CONTROL_MASK:
destroy()
return gtk.FALSE
w.connect("destroy", destroy)
w.add_events( gtk.gdk.KEY_PRESS_MASK )
w.connect( 'key_press_event', key_event)
w.show_all()
gtk.main()
if __name__ == '__main__':
main()
|
gpl-3.0
|
ehabkost/avocado-vt
|
selftests/unit/test_nfs.py
|
8
|
4071
|
#!/usr/bin/python
import unittest
import os
import sys
from avocado.utils import path
from avocado.utils import process
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest.unittest_utils import mock
from virttest import nfs
from virttest import utils_misc
from virttest.staging import service
class FakeService(object):
def __init__(self, service_name):
self.fake_cmds = [{"cmd": "status", "stdout": True},
{"cmd": "restart", "stdout": ""}]
def get_stdout(self, cmd):
for fake_cmd in self.fake_cmds:
if fake_cmd['cmd'] == cmd:
return fake_cmd['stdout']
raise ValueError("Could not locate locate '%s' on fake cmd db" % cmd)
def status(self):
return self.get_stdout("status")
def restart(self):
return self.get_stdout("restart")
class nfs_test(unittest.TestCase):
def setup_stubs_init(self):
path.find_command.expect_call("mount")
path.find_command.expect_call("service")
path.find_command.expect_call("exportfs")
service.Factory.create_service.expect_call("nfs").and_return(
FakeService("nfs"))
mount_src = self.nfs_params.get("nfs_mount_src")
export_dir = (self.nfs_params.get("export_dir") or
mount_src.split(":")[-1])
export_ip = self.nfs_params.get("export_ip", "*")
export_options = self.nfs_params.get("export_options", "").strip()
nfs.Exportfs.expect_new(export_dir, export_ip, export_options)
def setup_stubs_setup(self, nfs_obj):
os.makedirs.expect_call(nfs_obj.export_dir)
nfs_obj.exportfs.export.expect_call()
os.makedirs.expect_call(nfs_obj.mount_dir)
utils_misc.mount.expect_call(nfs_obj.mount_src, nfs_obj.mount_dir,
"nfs", perm=nfs_obj.mount_options)
def setup_stubs_is_mounted(self, nfs_obj):
utils_misc.is_mounted.expect_call(nfs_obj.mount_src,
nfs_obj.mount_dir,
"nfs").and_return(True)
def setup_stubs_cleanup(self, nfs_obj):
utils_misc.umount.expect_call(nfs_obj.mount_src,
nfs_obj.mount_dir,
"nfs")
nfs_obj.exportfs.reset_export.expect_call()
def setUp(self):
self.nfs_params = {"nfs_mount_dir": "/mnt/nfstest",
"nfs_mount_options": "rw",
"nfs_mount_src": "127.0.0.1:/mnt/nfssrc",
"setup_local_nfs": "yes",
"export_options": "rw,no_root_squash"}
self.god = mock.mock_god()
self.god.stub_function(path, "find_command")
self.god.stub_function(process, "system")
self.god.stub_function(process, "system_output")
self.god.stub_function(os.path, "isfile")
self.god.stub_function(os, "makedirs")
self.god.stub_function(utils_misc, "is_mounted")
self.god.stub_function(utils_misc, "mount")
self.god.stub_function(utils_misc, "umount")
self.god.stub_function(service.Factory, "create_service")
attr = getattr(nfs, "Exportfs")
setattr(attr, "already_exported", False)
mock_class = self.god.create_mock_class_obj(attr, "Exportfs")
self.god.stub_with(nfs, "Exportfs", mock_class)
def tearDown(self):
self.god.unstub_all()
def test_nfs_setup(self):
self.setup_stubs_init()
nfs_local = nfs.Nfs(self.nfs_params)
self.setup_stubs_setup(nfs_local)
nfs_local.setup()
self.setup_stubs_is_mounted(nfs_local)
self.assertTrue(nfs_local.is_mounted())
self.setup_stubs_cleanup(nfs_local)
nfs_local.cleanup()
self.god.check_playback()
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
EricMuller/mywebmarks-backend
|
requirements/twisted/Twisted-17.1.0/src/twisted/test/test_usage.py
|
13
|
23645
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.usage}, a command line option parsing library.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.python import usage
class WellBehaved(usage.Options):
optParameters = [['long', 'w', 'default', 'and a docstring'],
['another', 'n', 'no docstring'],
['longonly', None, 'noshort'],
['shortless', None, 'except',
'this one got docstring'],
]
optFlags = [['aflag', 'f',
"""
flagallicious docstringness for this here
"""],
['flout', 'o'],
]
def opt_myflag(self):
self.opts['myflag'] = "PONY!"
def opt_myparam(self, value):
self.opts['myparam'] = "%s WITH A PONY!" % (value,)
class ParseCorrectnessTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for correct values under
good conditions.
"""
def setUp(self):
"""
Instantiate and parseOptions a well-behaved Options class.
"""
self.niceArgV = ("--long Alpha -n Beta "
"--shortless Gamma -f --myflag "
"--myparam Tofu").split()
self.nice = WellBehaved()
self.nice.parseOptions(self.niceArgV)
def test_checkParameters(self):
"""
Parameters have correct values.
"""
self.assertEqual(self.nice.opts['long'], "Alpha")
self.assertEqual(self.nice.opts['another'], "Beta")
self.assertEqual(self.nice.opts['longonly'], "noshort")
self.assertEqual(self.nice.opts['shortless'], "Gamma")
def test_checkFlags(self):
"""
Flags have correct values.
"""
self.assertEqual(self.nice.opts['aflag'], 1)
self.assertEqual(self.nice.opts['flout'], 0)
def test_checkCustoms(self):
"""
Custom flags and parameters have correct values.
"""
self.assertEqual(self.nice.opts['myflag'], "PONY!")
self.assertEqual(self.nice.opts['myparam'], "Tofu WITH A PONY!")
class TypedOptions(usage.Options):
optParameters = [
['fooint', None, 392, 'Foo int', int],
['foofloat', None, 4.23, 'Foo float', float],
['eggint', None, None, 'Egg int without default', int],
['eggfloat', None, None, 'Egg float without default', float],
]
def opt_under_score(self, value):
"""
This option has an underscore in its name to exercise the _ to -
translation.
"""
self.underscoreValue = value
opt_u = opt_under_score
class TypedTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for options with forced types.
"""
def setUp(self):
self.usage = TypedOptions()
def test_defaultValues(self):
"""
Default values are parsed.
"""
argV = []
self.usage.parseOptions(argV)
self.assertEqual(self.usage.opts['fooint'], 392)
self.assertIsInstance(self.usage.opts['fooint'], int)
self.assertEqual(self.usage.opts['foofloat'], 4.23)
self.assertIsInstance(self.usage.opts['foofloat'], float)
self.assertIsNone(self.usage.opts['eggint'])
self.assertIsNone(self.usage.opts['eggfloat'])
def test_parsingValues(self):
"""
int and float values are parsed.
"""
argV = ("--fooint 912 --foofloat -823.1 "
"--eggint 32 --eggfloat 21").split()
self.usage.parseOptions(argV)
self.assertEqual(self.usage.opts['fooint'], 912)
self.assertIsInstance(self.usage.opts['fooint'], int)
self.assertEqual(self.usage.opts['foofloat'], -823.1)
self.assertIsInstance(self.usage.opts['foofloat'], float)
self.assertEqual(self.usage.opts['eggint'], 32)
self.assertIsInstance(self.usage.opts['eggint'], int)
self.assertEqual(self.usage.opts['eggfloat'], 21.)
self.assertIsInstance(self.usage.opts['eggfloat'], float)
def test_underscoreOption(self):
"""
A dash in an option name is translated to an underscore before being
dispatched to a handler.
"""
self.usage.parseOptions(['--under-score', 'foo'])
self.assertEqual(self.usage.underscoreValue, 'foo')
def test_underscoreOptionAlias(self):
"""
An option name with a dash in it can have an alias.
"""
self.usage.parseOptions(['-u', 'bar'])
self.assertEqual(self.usage.underscoreValue, 'bar')
def test_invalidValues(self):
"""
Passing wrong values raises an error.
"""
argV = "--fooint egg".split()
self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)
class WrongTypedOptions(usage.Options):
optParameters = [
['barwrong', None, None, 'Bar with wrong coerce', 'he']
]
class WeirdCallableOptions(usage.Options):
def _bar(value):
raise RuntimeError("Ouch")
def _foo(value):
raise ValueError("Yay")
optParameters = [
['barwrong', None, None, 'Bar with strange callable', _bar],
['foowrong', None, None, 'Foo with strange callable', _foo]
]
class WrongTypedTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for wrong coerce options.
"""
def test_nonCallable(self):
"""
Using a non-callable type fails.
"""
us = WrongTypedOptions()
argV = "--barwrong egg".split()
self.assertRaises(TypeError, us.parseOptions, argV)
def test_notCalledInDefault(self):
"""
The coerce functions are not called if no values are provided.
"""
us = WeirdCallableOptions()
argV = []
us.parseOptions(argV)
def test_weirdCallable(self):
"""
Errors raised by coerce functions are handled properly.
"""
us = WeirdCallableOptions()
argV = "--foowrong blah".split()
# ValueError is swallowed as UsageError
e = self.assertRaises(usage.UsageError, us.parseOptions, argV)
self.assertEqual(str(e), "Parameter type enforcement failed: Yay")
us = WeirdCallableOptions()
argV = "--barwrong blah".split()
# RuntimeError is not swallowed
self.assertRaises(RuntimeError, us.parseOptions, argV)
class OutputTests(unittest.TestCase):
def test_uppercasing(self):
"""
Error output case adjustment does not mangle options
"""
opt = WellBehaved()
e = self.assertRaises(usage.UsageError,
opt.parseOptions, ['-Z'])
self.assertEqual(str(e), 'option -Z not recognized')
class InquisitionOptions(usage.Options):
optFlags = [
('expect', 'e'),
]
optParameters = [
('torture-device', 't',
'comfy-chair',
'set preferred torture device'),
]
class HolyQuestOptions(usage.Options):
optFlags = [('horseback', 'h',
'use a horse'),
('for-grail', 'g'),
]
class SubCommandOptions(usage.Options):
optFlags = [('europian-swallow', None,
'set default swallow type to Europian'),
]
subCommands = [
('inquisition', 'inquest', InquisitionOptions,
'Perform an inquisition'),
('holyquest', 'quest', HolyQuestOptions,
'Embark upon a holy quest'),
]
class SubCommandTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for options with subcommands.
"""
def test_simpleSubcommand(self):
"""
A subcommand is recognized.
"""
o = SubCommandOptions()
o.parseOptions(['--europian-swallow', 'inquisition'])
self.assertTrue(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertFalse(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'comfy-chair')
def test_subcommandWithFlagsAndOptions(self):
"""
Flags and options of a subcommand are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['inquisition', '--expect', '--torture-device=feather'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertTrue(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'feather')
def test_subcommandAliasWithFlagsAndOptions(self):
"""
Flags and options of a subcommand alias are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['inquest', '--expect', '--torture-device=feather'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertTrue(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'feather')
def test_anotherSubcommandWithFlagsAndOptions(self):
"""
Flags and options of another subcommand are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['holyquest', '--for-grail'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'holyquest')
self.assertIsInstance(o.subOptions, HolyQuestOptions)
self.assertFalse(o.subOptions['horseback'])
self.assertTrue(o.subOptions['for-grail'])
def test_noSubcommand(self):
"""
If no subcommand is specified and no default subcommand is assigned,
a subcommand will not be implied.
"""
o = SubCommandOptions()
o.parseOptions(['--europian-swallow'])
self.assertTrue(o['europian-swallow'])
self.assertIsNone(o.subCommand)
self.assertFalse(hasattr(o, 'subOptions'))
def test_defaultSubcommand(self):
"""
Flags and options in the default subcommand are assigned.
"""
o = SubCommandOptions()
o.defaultSubCommand = 'inquest'
o.parseOptions(['--europian-swallow'])
self.assertTrue(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertFalse(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'comfy-chair')
def test_subCommandParseOptionsHasParent(self):
"""
The parseOptions method from the Options object specified for the
given subcommand is called.
"""
class SubOpt(usage.Options):
def parseOptions(self, *a, **kw):
self.sawParent = self.parent
usage.Options.parseOptions(self, *a, **kw)
class Opt(usage.Options):
subCommands = [
('foo', 'f', SubOpt, 'bar'),
]
o = Opt()
o.parseOptions(['foo'])
self.assertTrue(hasattr(o.subOptions, 'sawParent'))
self.assertEqual(o.subOptions.sawParent , o)
def test_subCommandInTwoPlaces(self):
"""
The .parent pointer is correct even when the same Options class is
used twice.
"""
class SubOpt(usage.Options):
pass
class OptFoo(usage.Options):
subCommands = [
('foo', 'f', SubOpt, 'quux'),
]
class OptBar(usage.Options):
subCommands = [
('bar', 'b', SubOpt, 'quux'),
]
oFoo = OptFoo()
oFoo.parseOptions(['foo'])
oBar=OptBar()
oBar.parseOptions(['bar'])
self.assertTrue(hasattr(oFoo.subOptions, 'parent'))
self.assertTrue(hasattr(oBar.subOptions, 'parent'))
self.failUnlessIdentical(oFoo.subOptions.parent, oFoo)
self.failUnlessIdentical(oBar.subOptions.parent, oBar)
class HelpStringTests(unittest.TestCase):
"""
Test generated help strings.
"""
def setUp(self):
"""
Instantiate a well-behaved Options class.
"""
self.niceArgV = ("--long Alpha -n Beta "
"--shortless Gamma -f --myflag "
"--myparam Tofu").split()
self.nice = WellBehaved()
def test_noGoBoom(self):
"""
__str__ shouldn't go boom.
"""
try:
self.nice.__str__()
except Exception as e:
self.fail(e)
def test_whitespaceStripFlagsAndParameters(self):
"""
Extra whitespace in flag and parameters docs is stripped.
"""
        # We test this by making sure aflag and its help string are on the
# same line.
lines = [s for s in str(self.nice).splitlines() if s.find("aflag")>=0]
self.assertTrue(len(lines) > 0)
self.assertTrue(lines[0].find("flagallicious") >= 0)
class PortCoerceTests(unittest.TestCase):
"""
Test the behavior of L{usage.portCoerce}.
"""
def test_validCoerce(self):
"""
Test the answers with valid input.
"""
self.assertEqual(0, usage.portCoerce("0"))
self.assertEqual(3210, usage.portCoerce("3210"))
self.assertEqual(65535, usage.portCoerce("65535"))
def test_errorCoerce(self):
"""
Test error path.
"""
self.assertRaises(ValueError, usage.portCoerce, "")
self.assertRaises(ValueError, usage.portCoerce, "-21")
self.assertRaises(ValueError, usage.portCoerce, "212189")
self.assertRaises(ValueError, usage.portCoerce, "foo")
class ZshCompleterTests(unittest.TestCase):
"""
Test the behavior of the various L{twisted.usage.Completer} classes
for producing output usable by zsh tab-completion system.
"""
def test_completer(self):
"""
Completer produces zsh shell-code that produces no completion matches.
"""
c = usage.Completer()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:')
c = usage.Completer(descr='some action', repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:')
def test_files(self):
"""
CompleteFiles produces zsh shell-code that completes file names
according to a glob.
"""
c = usage.CompleteFiles()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option (*):_files -g "*"')
c = usage.CompleteFiles('*.py')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option (*.py):_files -g "*.py"')
c = usage.CompleteFiles('*.py', descr="some action", repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action (*.py):_files -g "*.py"')
def test_dirs(self):
"""
CompleteDirs produces zsh shell-code that completes directory names.
"""
c = usage.CompleteDirs()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_directories')
c = usage.CompleteDirs(descr="some action", repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:_directories')
def test_list(self):
"""
CompleteList produces zsh shell-code that completes words from a fixed
list of possibilities.
"""
c = usage.CompleteList('ABC')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:(A B C)')
c = usage.CompleteList(['1', '2', '3'])
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:(1 2 3)')
c = usage.CompleteList(['1', '2', '3'], descr='some action',
repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:(1 2 3)')
def test_multiList(self):
"""
CompleteMultiList produces zsh shell-code that completes multiple
comma-separated words from a fixed list of possibilities.
"""
c = usage.CompleteMultiList('ABC')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_values -s , \'some-option\' A B C')
c = usage.CompleteMultiList(['1','2','3'])
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_values -s , \'some-option\' 1 2 3')
c = usage.CompleteMultiList(['1','2','3'], descr='some action',
repeat=True)
got = c._shellCode('some-option', usage._ZSH)
expected = '*:some action:_values -s , \'some action\' 1 2 3'
self.assertEqual(got, expected)
def test_usernames(self):
"""
CompleteUsernames produces zsh shell-code that completes system
usernames.
"""
c = usage.CompleteUsernames()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_users')
c = usage.CompleteUsernames(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_users')
def test_groups(self):
"""
CompleteGroups produces zsh shell-code that completes system group
names.
"""
c = usage.CompleteGroups()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':group:_groups')
c = usage.CompleteGroups(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_groups')
def test_hostnames(self):
"""
CompleteHostnames produces zsh shell-code that completes hostnames.
"""
c = usage.CompleteHostnames()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_hosts')
c = usage.CompleteHostnames(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_hosts')
def test_userAtHost(self):
"""
CompleteUserAtHost produces zsh shell-code that completes hostnames or
a word of the form <username>@<hostname>.
"""
c = usage.CompleteUserAtHost()
out = c._shellCode('some-option', usage._ZSH)
self.assertTrue(out.startswith(':host | user@host:'))
c = usage.CompleteUserAtHost(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertTrue(out.startswith('*:some action:'))
def test_netInterfaces(self):
"""
CompleteNetInterfaces produces zsh shell-code that completes system
network interface names.
"""
c = usage.CompleteNetInterfaces()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_net_interfaces')
c = usage.CompleteNetInterfaces(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_net_interfaces')
class CompleterNotImplementedTests(unittest.TestCase):
"""
Using an unknown shell constant with the various Completer() classes
should raise NotImplementedError
"""
def test_unknownShell(self):
"""
Using an unknown shellType should raise NotImplementedError
"""
classes = [usage.Completer, usage.CompleteFiles,
usage.CompleteDirs, usage.CompleteList,
usage.CompleteMultiList, usage.CompleteUsernames,
usage.CompleteGroups, usage.CompleteHostnames,
usage.CompleteUserAtHost, usage.CompleteNetInterfaces]
for cls in classes:
try:
action = cls()
except:
action = cls(None)
self.assertRaises(NotImplementedError, action._shellCode,
None, "bad_shell_type")
class FlagFunctionTests(unittest.TestCase):
"""
Tests for L{usage.flagFunction}.
"""
class SomeClass(object):
"""
Dummy class for L{usage.flagFunction} tests.
"""
def oneArg(self, a):
"""
A one argument method to be tested by L{usage.flagFunction}.
@param a: a useless argument to satisfy the function's signature.
"""
def noArg(self):
"""
A no argument method to be tested by L{usage.flagFunction}.
"""
def manyArgs(self, a, b, c):
"""
A multiple arguments method to be tested by L{usage.flagFunction}.
@param a: a useless argument to satisfy the function's signature.
@param b: a useless argument to satisfy the function's signature.
@param c: a useless argument to satisfy the function's signature.
"""
def test_hasArg(self):
"""
L{usage.flagFunction} returns C{False} if the method checked allows
exactly one argument.
"""
self.assertIs(False, usage.flagFunction(self.SomeClass().oneArg))
def test_noArg(self):
"""
L{usage.flagFunction} returns C{True} if the method checked allows
exactly no argument.
"""
self.assertIs(True, usage.flagFunction(self.SomeClass().noArg))
def test_tooManyArguments(self):
"""
L{usage.flagFunction} raises L{usage.UsageError} if the method checked
allows more than one argument.
"""
exc = self.assertRaises(
usage.UsageError, usage.flagFunction, self.SomeClass().manyArgs)
self.assertEqual("Invalid Option function for manyArgs", str(exc))
def test_tooManyArgumentsAndSpecificErrorMessage(self):
"""
L{usage.flagFunction} uses the given method name in the error message
raised when the method allows too many arguments.
"""
exc = self.assertRaises(
usage.UsageError,
usage.flagFunction, self.SomeClass().manyArgs, "flubuduf")
self.assertEqual("Invalid Option function for flubuduf", str(exc))
class OptionsInternalTests(unittest.TestCase):
"""
Tests internal behavior of C{usage.Options}.
"""
def test_optionsAliasesOrder(self):
"""
Options which are synonyms to another option are aliases towards the
longest option name.
"""
class Opts(usage.Options):
def opt_very_very_long(self):
"""
This is an option method with a very long name, that is going to
be aliased.
"""
opt_short = opt_very_very_long
opt_s = opt_very_very_long
opts = Opts()
self.assertEqual(
dict.fromkeys(
["s", "short", "very-very-long"], "very-very-long"), {
"s": opts.synonyms["s"],
"short": opts.synonyms["short"],
"very-very-long": opts.synonyms["very-very-long"],
})
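# Hedged usage sketch (not part of the Twisted test suite): a minimal Options
# subclass parsed outside a test case, mirroring what ParseCorrectnessTests
# exercises above. The option names, defaults and values are illustrative
# assumptions only.
class _ExampleOptions(usage.Options):
    optFlags = [['verbose', 'v', 'Log more detail.']]
    optParameters = [['port', 'p', 8080, 'Port to listen on.', int]]
if __name__ == '__main__':
    _opts = _ExampleOptions()
    _opts.parseOptions(['--verbose', '--port', '9000'])
    # Flags become 1/0; typed parameters are coerced (here, to int).
    print("verbose=%r port=%r" % (_opts['verbose'], _opts['port']))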
|
mit
|
xodus7/tensorflow
|
tensorflow/python/training/checkpointable/base_test.py
|
18
|
2609
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import util
class InterfaceTests(test.TestCase):
def testOverwrite(self):
root = base.CheckpointableBase()
leaf = base.CheckpointableBase()
root._track_checkpointable(leaf, name="leaf")
(current_name, current_dependency), = root._checkpoint_dependencies
self.assertIs(leaf, current_dependency)
self.assertEqual("leaf", current_name)
duplicate_name_dep = base.CheckpointableBase()
with self.assertRaises(ValueError):
root._track_checkpointable(duplicate_name_dep, name="leaf")
root._track_checkpointable(duplicate_name_dep, name="leaf", overwrite=True)
(current_name, current_dependency), = root._checkpoint_dependencies
self.assertIs(duplicate_name_dep, current_dependency)
self.assertEqual("leaf", current_name)
def testAddVariableOverwrite(self):
root = base.CheckpointableBase()
a = root._add_variable_with_custom_getter(
name="v", shape=[], getter=variable_scope.get_variable)
self.assertEqual([root, a], util.list_objects(root))
with ops.Graph().as_default():
b = root._add_variable_with_custom_getter(
name="v", shape=[], overwrite=True,
getter=variable_scope.get_variable)
self.assertEqual([root, b], util.list_objects(root))
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, "already declared as a dependency"):
root._add_variable_with_custom_getter(
name="v", shape=[], overwrite=False,
getter=variable_scope.get_variable)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
SunBuild/openoutreach-azure-webapp
|
profiles/openoutreach/libraries/openlayers/build/build.py
|
86
|
6674
|
#!/usr/bin/env python
import sys
import os
sys.path.append("../tools")
import mergejs
import optparse
def build(config_file = None, output_file = None, options = None):
have_compressor = []
try:
import jsmin
have_compressor.append("jsmin")
except ImportError:
print "No jsmin"
try:
# tools/closure_library_jscompiler.py from:
# http://code.google.com/p/closure-library/source/browse/trunk/closure/bin/build/jscompiler.py
import closure_library_jscompiler as closureCompiler
have_compressor.append("closure")
except Exception, E:
print "No closure (%s)" % E
try:
import closure_ws
have_compressor.append("closure_ws")
except ImportError:
print "No closure_ws"
try:
import minimize
have_compressor.append("minimize")
except ImportError:
print "No minimize"
try:
import uglify_js
uglify_js.check_available()
have_compressor.append("uglify-js")
except Exception, E:
print "No uglify-js (%s)" % E
use_compressor = None
if options.compressor and options.compressor in have_compressor:
use_compressor = options.compressor
sourceDirectory = "../lib"
configFilename = "full.cfg"
outputFilename = "OpenLayers.js"
if config_file:
configFilename = config_file
extension = configFilename[-4:]
if extension != ".cfg":
configFilename = config_file + ".cfg"
if output_file:
outputFilename = output_file
print "Merging libraries."
try:
if use_compressor == "closure" or use_compressor == 'uglify-js':
sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
else:
merged = mergejs.run(sourceDirectory, None, configFilename)
except mergejs.MissingImport, E:
print "\nAbnormal termination."
sys.exit("ERROR: %s" % E)
if options.amdname:
options.amdname = "'" + options.amdname + "',"
else:
options.amdname = ""
if options.amd == 'pre':
print "\nAdding AMD function."
merged = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, merged)
print "Compressing using %s" % use_compressor
if use_compressor == "jsmin":
minimized = jsmin.jsmin(merged)
elif use_compressor == "minimize":
minimized = minimize.minimize(merged)
elif use_compressor == "closure_ws":
if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
print "\nPre-compressing using jsmin"
merged = jsmin.jsmin(merged)
print "\nIs being compressed using Closure Compiler Service."
try:
minimized = closure_ws.minimize(merged)
except Exception, E:
print "\nAbnormal termination."
sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
if len(minimized) <= 2:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation using Web service failed!")
else:
print "Closure Compilation using Web service has completed successfully."
elif use_compressor == "closure":
jscompilerJar = "../tools/closure-compiler.jar"
if not os.path.isfile(jscompilerJar):
print "\nNo closure-compiler.jar; read README.txt!"
sys.exit("ERROR: Closure Compiler \"%s\" does not exist! Read README.txt" % jscompilerJar)
minimized = closureCompiler.Compile(
jscompilerJar,
sourceFiles, [
"--externs", "closure-compiler/Externs.js",
"--jscomp_warning", "checkVars", # To enable "undefinedVars"
"--jscomp_error", "checkRegExp", # Also necessary to enable "undefinedVars"
"--jscomp_error", "undefinedVars"
]
)
if minimized is None:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation failed! See compilation errors.")
print "Closure Compilation has completed successfully."
elif use_compressor == "uglify-js":
minimized = uglify_js.compile(sourceFiles)
if minimized is None:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Uglify JS compilation failed! See compilation errors.")
print "Uglify JS compilation has completed successfully."
else: # fallback
minimized = merged
if options.amd == 'post':
print "\nAdding AMD function."
minimized = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, minimized)
if options.status:
print "\nAdding status file."
minimized = "// status: " + file(options.status).read() + minimized
print "\nAdding license file."
minimized = file("license.txt").read() + minimized
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
print "Done."
if __name__ == '__main__':
opt = optparse.OptionParser(usage="%s [options] [config_file] [output_file]\n Default config_file is 'full.cfg', Default output_file is 'OpenLayers.js'")
opt.add_option("-c", "--compressor", dest="compressor", help="compression method: one of 'jsmin' (default), 'minimize', 'closure_ws', 'closure', or 'none'", default="jsmin")
opt.add_option("-s", "--status", dest="status", help="name of a file whose contents will be added as a comment at the front of the output file. For example, when building from a git repo, you can save the output of 'git describe --tags' in this file. Default is no file.", default=False)
opt.add_option("--amd", dest="amd", help="output should be AMD module; wrap merged files in define function; can be either 'pre' (before compilation) or 'post' (after compilation). Wrapping the OpenLayers var in a function means the filesize can be reduced by the closure compiler using 'pre', but be aware that a few functions depend on the OpenLayers variable being present. Either option can be used with jsmin or minimize compression. Default false, not AMD.", default=False)
opt.add_option("--amdname", dest="amdname", help="only useful with amd option. Name of AMD module. Default no name, anonymous module.", default=False)
(options, args) = opt.parse_args()
if not len(args):
build(options=options)
elif len(args) == 1:
build(args[0], options=options)
elif len(args) == 2:
build(args[0], args[1], options=options)
else:
print "Wrong number of arguments"
|
gpl-2.0
|
niknow/scipy
|
scipy/stats/kde.py
|
17
|
17717
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
            If the dimensionality of the mean or covariance of the input
            Gaussian differs from the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / norm_const / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const * large.n * small.n
return result
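    # Worked note (added for clarity): the loop above evaluates the closed form
    #     integral p(x) q(x) dx
    #         = (1 / (n_p * n_q)) * sum_i sum_j N(x_i - y_j; 0, cov_p + cov_q),
    # iterating over whichever KDE has fewer points.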
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
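# Minimal usage sketch (illustrative only, not part of the library code): the
# sample data, grid and sizes below are made up.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.normal(size=200)                  # 1-D dataset
    kde = gaussian_kde(data)
    xs = np.linspace(-4, 4, 101)
    density = kde(xs)                            # same as kde.evaluate(xs)
    mass = kde.integrate_box_1d(-1.0, 1.0)       # estimated P(-1 < X < 1)
    samples = kde.resample(size=50)              # (1, 50) array of draws
    kde.set_bandwidth(bw_method='silverman')     # switch the bandwidth rule
    print("mass in [-1, 1]: %r" % mass)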
|
bsd-3-clause
|
fengbaicanhe/intellij-community
|
plugins/hg4idea/testData/bin/hgext/convert/monotone.py
|
94
|
12947
|
# monotone.py - monotone support for the convert extension
#
# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <[email protected]> and
# others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, re
from mercurial import util
from common import NoRepo, commit, converter_source, checktool
from common import commandline
from mercurial.i18n import _
class monotone_source(converter_source, commandline):
def __init__(self, ui, path=None, rev=None):
converter_source.__init__(self, ui, path, rev)
commandline.__init__(self, ui, 'mtn')
self.ui = ui
self.path = path
self.automatestdio = False
self.rev = rev
norepo = NoRepo(_("%s does not look like a monotone repository")
% path)
if not os.path.exists(os.path.join(path, '_MTN')):
# Could be a monotone repository (SQLite db file)
try:
f = file(path, 'rb')
header = f.read(16)
f.close()
except IOError:
header = ''
if header != 'SQLite format 3\x00':
raise norepo
# regular expressions for parsing monotone output
space = r'\s*'
name = r'\s+"((?:\\"|[^"])*)"\s*'
value = name
revision = r'\s+\[(\w+)\]\s*'
lines = r'(?:.|\n)+'
self.dir_re = re.compile(space + "dir" + name)
self.file_re = re.compile(space + "file" + name +
"content" + revision)
self.add_file_re = re.compile(space + "add_file" + name +
"content" + revision)
self.patch_re = re.compile(space + "patch" + name +
"from" + revision + "to" + revision)
self.rename_re = re.compile(space + "rename" + name + "to" + name)
self.delete_re = re.compile(space + "delete" + name)
self.tag_re = re.compile(space + "tag" + name + "revision" +
revision)
self.cert_re = re.compile(lines + space + "name" + name +
"value" + value)
attr = space + "file" + lines + space + "attr" + space
self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
space + '"true"')
# cached data
self.manifest_rev = None
self.manifest = None
self.files = None
self.dirs = None
checktool('mtn', abort=False)
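    # Illustrative only: manifest stanzas of the following shape (as emitted by
    # "mtn automate get_manifest_of") are what dir_re, file_re and
    # attr_execute_re above are written to match; the names and hashes are
    # made up.
    #
    #   dir ""
    #
    #   file "src/main.c"
    #    content [da39a3ee5e6b4b0d3255bfef95601890afd80709]
    #
    #   file "run.sh"
    #    content [2fd4e1c67a2d28fced849ee1bb76e7391b93eb12]
    #      attr "mtn:execute" "true"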
def mtnrun(self, *args, **kwargs):
if self.automatestdio:
return self.mtnrunstdio(*args, **kwargs)
else:
return self.mtnrunsingle(*args, **kwargs)
def mtnrunsingle(self, *args, **kwargs):
kwargs['d'] = self.path
return self.run0('automate', *args, **kwargs)
def mtnrunstdio(self, *args, **kwargs):
# Prepare the command in automate stdio format
command = []
for k, v in kwargs.iteritems():
command.append("%s:%s" % (len(k), k))
if v:
command.append("%s:%s" % (len(v), v))
if command:
command.insert(0, 'o')
command.append('e')
command.append('l')
for arg in args:
command += "%s:%s" % (len(arg), arg)
command.append('e')
command = ''.join(command)
self.ui.debug("mtn: sending '%s'\n" % command)
self.mtnwritefp.write(command)
self.mtnwritefp.flush()
return self.mtnstdioreadcommandoutput(command)
def mtnstdioreadpacket(self):
read = None
commandnbr = ''
while read != ':':
read = self.mtnreadfp.read(1)
if not read:
raise util.Abort(_('bad mtn packet - no end of commandnbr'))
commandnbr += read
commandnbr = commandnbr[:-1]
stream = self.mtnreadfp.read(1)
if stream not in 'mewptl':
raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
read = self.mtnreadfp.read(1)
if read != ':':
raise util.Abort(_('bad mtn packet - no divider before size'))
read = None
lengthstr = ''
while read != ':':
read = self.mtnreadfp.read(1)
if not read:
raise util.Abort(_('bad mtn packet - no end of packet size'))
lengthstr += read
try:
length = long(lengthstr[:-1])
except TypeError:
raise util.Abort(_('bad mtn packet - bad packet size %s')
% lengthstr)
read = self.mtnreadfp.read(length)
if len(read) != length:
raise util.Abort(_("bad mtn packet - unable to read full packet "
"read %s of %s") % (len(read), length))
return (commandnbr, stream, length, read)
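    # Based on the parsing above, each packet on the automate stdio channel is
    # framed as "<commandnbr>:<stream>:<size>:<payload>". For example the
    # (made-up) bytes "0:m:5:hello" would be returned as ('0', 'm', 5, 'hello'),
    # and a closing "0:l:1:0" packet reports successful completion of command
    # number 0.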
def mtnstdioreadcommandoutput(self, command):
retval = []
while True:
commandnbr, stream, length, output = self.mtnstdioreadpacket()
self.ui.debug('mtn: read packet %s:%s:%s\n' %
(commandnbr, stream, length))
if stream == 'l':
# End of command
if output != '0':
raise util.Abort(_("mtn command '%s' returned %s") %
(command, output))
break
elif stream in 'ew':
# Error, warning output
self.ui.warn(_('%s error:\n') % self.command)
self.ui.warn(output)
elif stream == 'p':
# Progress messages
self.ui.debug('mtn: ' + output)
elif stream == 'm':
# Main stream - command output
retval.append(output)
return ''.join(retval)
def mtnloadmanifest(self, rev):
if self.manifest_rev == rev:
return
self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
self.manifest_rev = rev
self.files = {}
self.dirs = {}
for e in self.manifest:
m = self.file_re.match(e)
if m:
attr = ""
name = m.group(1)
node = m.group(2)
if self.attr_execute_re.match(e):
attr += "x"
self.files[name] = (node, attr)
m = self.dir_re.match(e)
if m:
self.dirs[m.group(1)] = True
def mtnisfile(self, name, rev):
# a non-file could be a directory or a deleted or renamed file
self.mtnloadmanifest(rev)
return name in self.files
def mtnisdir(self, name, rev):
self.mtnloadmanifest(rev)
return name in self.dirs
def mtngetcerts(self, rev):
certs = {"author":"<missing>", "date":"<missing>",
"changelog":"<missing>", "branch":"<missing>"}
certlist = self.mtnrun("certs", rev)
# mtn < 0.45:
# key "[email protected]"
# mtn >= 0.45:
# key [ff58a7ffb771907c4ff68995eada1c4da068d328]
certlist = re.split('\n\n key ["\[]', certlist)
for e in certlist:
m = self.cert_re.match(e)
if m:
name, value = m.groups()
value = value.replace(r'\"', '"')
value = value.replace(r'\\', '\\')
certs[name] = value
# Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
# and all times are stored in UTC
certs["date"] = certs["date"].split('.')[0] + " UTC"
return certs
# implement the converter_source interface:
def getheads(self):
if not self.rev:
return self.mtnrun("leaves").splitlines()
else:
return [self.rev]
def getchanges(self, rev):
revision = self.mtnrun("get_revision", rev).split("\n\n")
files = {}
ignoremove = {}
renameddirs = []
copies = {}
for e in revision:
m = self.add_file_re.match(e)
if m:
files[m.group(1)] = rev
ignoremove[m.group(1)] = rev
m = self.patch_re.match(e)
if m:
files[m.group(1)] = rev
# Delete/rename is handled later when the convert engine
# discovers an IOError exception from getfile,
# but only if we add the "from" file to the list of changes.
m = self.delete_re.match(e)
if m:
files[m.group(1)] = rev
m = self.rename_re.match(e)
if m:
toname = m.group(2)
fromname = m.group(1)
if self.mtnisfile(toname, rev):
ignoremove[toname] = 1
copies[toname] = fromname
files[toname] = rev
files[fromname] = rev
elif self.mtnisdir(toname, rev):
renameddirs.append((fromname, toname))
# Directory renames can be handled only once we have recorded
# all new files
for fromdir, todir in renameddirs:
renamed = {}
for tofile in self.files:
if tofile in ignoremove:
continue
if tofile.startswith(todir + '/'):
renamed[tofile] = fromdir + tofile[len(todir):]
# Avoid chained moves like:
# d1(/a) => d3/d1(/a)
# d2 => d3
ignoremove[tofile] = 1
for tofile, fromfile in renamed.items():
self.ui.debug(_("copying file in renamed directory "
"from '%s' to '%s'")
% (fromfile, tofile), '\n')
files[tofile] = rev
copies[tofile] = fromfile
for fromfile in renamed.values():
files[fromfile] = rev
return (files.items(), copies)
def getfile(self, name, rev):
if not self.mtnisfile(name, rev):
raise IOError # file was deleted or renamed
try:
data = self.mtnrun("get_file_of", name, r=rev)
except Exception:
raise IOError # file was deleted or renamed
self.mtnloadmanifest(rev)
node, attr = self.files.get(name, (None, ""))
return data, attr
def getcommit(self, rev):
extra = {}
certs = self.mtngetcerts(rev)
if certs.get('suspend') == certs["branch"]:
extra['close'] = '1'
return commit(
author=certs["author"],
date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
desc=certs["changelog"],
rev=rev,
parents=self.mtnrun("parents", rev).splitlines(),
branch=certs["branch"],
extra=extra)
def gettags(self):
tags = {}
for e in self.mtnrun("tags").split("\n\n"):
m = self.tag_re.match(e)
if m:
tags[m.group(1)] = m.group(2)
return tags
def getchangedfiles(self, rev, i):
# This function is only needed to support --filemap
# ... and we don't support that
raise NotImplementedError
def before(self):
# Check if we have a new enough version to use automate stdio
version = 0.0
try:
versionstr = self.mtnrunsingle("interface_version")
version = float(versionstr)
except Exception:
raise util.Abort(_("unable to determine mtn automate interface "
"version"))
if version >= 12.0:
self.automatestdio = True
self.ui.debug("mtn automate version %s - using automate stdio\n" %
version)
# launch the long-running automate stdio process
self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
'-d', self.path)
# read the headers
read = self.mtnreadfp.readline()
if read != 'format-version: 2\n':
raise util.Abort(_('mtn automate stdio header unexpected: %s')
% read)
while read != '\n':
read = self.mtnreadfp.readline()
if not read:
raise util.Abort(_("failed to reach end of mtn automate "
"stdio headers"))
else:
self.ui.debug("mtn automate version %s - not using automate stdio "
"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
def after(self):
if self.automatestdio:
self.mtnwritefp.close()
self.mtnwritefp = None
self.mtnreadfp.close()
self.mtnreadfp = None
|
apache-2.0
|
vstoykov/django-cms
|
cms/tests/management.py
|
6
|
19612
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import uuid
from django.contrib.sites.models import Site
from django.core import management
from django.core.management import CommandError
from django.utils.six.moves import StringIO
from cms.api import create_page, add_plugin, create_title
from cms.management.commands import cms
from cms.management.commands.subcommands.list import plugin_report
from cms.models import Page, StaticPlaceholder
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.fixtures.navextenders import NavextendersFixture
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from djangocms_text_ckeditor.cms_plugins import TextPlugin
APPHOOK = "SampleApp"
PLUGIN = "TextPlugin"
class ManagementTestCase(CMSTestCase):
def test_list_apphooks(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("list", "apphooks", interactive=False)
self.assertEqual(out.getvalue(), "SampleApp\n")
def test_uninstall_apphooks_without_apphook(self):
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("uninstall", "apphooks", APPHOOK, interactive=False)
self.assertEqual(out.getvalue(), "no 'SampleApp' apphooks found\n")
def test_fix_tree(self):
create_page("home", "nav_playground.html", "en")
page1 = create_page("page", "nav_playground.html", "en")
page1.depth = 3
page1.numchild = 4
page1.path = "00100010"
page1.save()
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("fix-tree", interactive=False)
self.assertEqual(out.getvalue(), 'fixing page treefixing plugin treeall done')
page1 = page1.reload()
self.assertEqual(page1.path, "0002")
self.assertEqual(page1.depth, 1)
self.assertEqual(page1.numchild, 0)
def test_uninstall_apphooks_with_apphook(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("uninstall", "apphooks", APPHOOK, interactive=False)
self.assertEqual(out.getvalue(), "1 'SampleApp' apphooks uninstalled\n")
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 0)
def test_list_plugins(self):
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
link_plugin = add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
self.assertEqual(
CMSPlugin.objects.filter(plugin_type=PLUGIN).count(),
2)
self.assertEqual(
CMSPlugin.objects.filter(plugin_type="LinkPlugin").count(),
1)
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
bogus_plugins_report["model"],
None)
self.assertEqual(
bogus_plugins_report["type"],
u'BogusPlugin')
self.assertEqual(
bogus_plugins_report["instances"][0],
bogus_plugin)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
link_plugins_report["model"],
link_plugin.__class__)
self.assertEqual(
link_plugins_report["type"],
u'LinkPlugin')
self.assertEqual(
link_plugins_report["instances"][0].get_plugin_instance()[0],
link_plugin)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
text_plugins_report["model"],
TextPlugin.model)
self.assertEqual(
text_plugins_report["type"],
u'TextPlugin')
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
text_plugins_report["instances"][2],
instanceless_plugin)
self.assertEqual(
text_plugins_report["unsaved_instances"],
[instanceless_plugin])
def test_delete_orphaned_plugins(self):
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
instanceless_plugin = CMSPlugin(
language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
len(bogus_plugins_report["instances"]),
1)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
1)
management.call_command(
'cms', 'delete_orphaned_plugins',
stdout=StringIO(), interactive=False)
report = plugin_report()
# there should be reports for two plugin types (one should have been deleted)
self.assertEqual(
len(report),
2)
# check the link plugin
link_plugins_report = report[0]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[1]
self.assertEqual(
len(text_plugins_report["instances"]),
2)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
0)
def test_uninstall_plugins_without_plugin(self):
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("uninstall", "plugins", PLUGIN, interactive=False)
self.assertEqual(out.getvalue(), "no 'TextPlugin' plugins found\n")
def test_uninstall_plugins_with_plugin(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("uninstall", "plugins", PLUGIN, interactive=False)
self.assertEqual(out.getvalue(), "1 'TextPlugin' plugins uninstalled\n")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 0)
class PageFixtureManagementTestCase(NavextendersFixture, CMSTestCase):
def _fill_page_body(self, page, lang):
ph_en = page.placeholders.get(slot="body")
# add misc plugins
mcol1 = add_plugin(ph_en, "MultiColumnPlugin", lang, position="first-child")
add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol1)
col2 = add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol1)
mcol2 = add_plugin(ph_en, "MultiColumnPlugin", lang, position="first-child", target=col2)
add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol2)
col4 = add_plugin(ph_en, "ColumnPlugin", lang, position="first-child", target=mcol2)
# add a *nested* link plugin
add_plugin(ph_en, "LinkPlugin", lang, target=col4,
name="A Link", url="https://www.django-cms.org")
static_placeholder = StaticPlaceholder(code=str(uuid.uuid4()), site_id=1)
static_placeholder.save()
add_plugin(static_placeholder.draft, "TextPlugin", lang, body="example content")
def setUp(self):
pages = Page.objects.drafts()
for page in pages:
self._fill_page_body(page, "en")
def test_copy_langs(self):
"""
Various checks here:
* plugins are exactly doubled, half per language, with no orphaned plugins
* the bottom-most plugins in the nesting chain keep the same position and the same content
* the top-most plugins are of the same type
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "en", "de")
pages = Page.objects.on_site(site).drafts()
for page in pages:
self.assertEqual(set((u'en', u'de')), set(page.get_languages()))
# These assert that no orphaned plugins exist
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins*2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins)
root_page = Page.objects.on_site(site).get_home()
root_plugins = CMSPlugin.objects.filter(placeholder=root_page.placeholders.get(slot="body"))
first_plugin_en, _ = root_plugins.get(language='en', parent=None).get_plugin_instance()
first_plugin_de, _ = root_plugins.get(language='de', parent=None).get_plugin_instance()
self.assertEqual(first_plugin_en.plugin_type, first_plugin_de.plugin_type)
link_en, _ = root_plugins.get(language='en', plugin_type='LinkPlugin').get_plugin_instance()
link_de, _ = root_plugins.get(language='de', plugin_type='LinkPlugin').get_plugin_instance()
self.assertEqual(link_en.url, link_de.url)
self.assertEqual(link_en.get_position_in_placeholder(), link_de.get_position_in_placeholder())
stack_plugins = CMSPlugin.objects.filter(placeholder=StaticPlaceholder.objects.order_by('?')[0].draft)
stack_text_en, _ = stack_plugins.get(language='en', plugin_type='TextPlugin').get_plugin_instance()
stack_text_de, _ = stack_plugins.get(language='de', plugin_type='TextPlugin').get_plugin_instance()
self.assertEqual(stack_text_en.plugin_type, stack_text_de.plugin_type)
self.assertEqual(stack_text_en.body, stack_text_de.body)
def test_copy_existing_title(self):
"""
Even if a title already exists, the copy is successful and the original
title remains untouched.
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.on_site(site).get_home()
create_title("de", "root page de", root_page)
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "en", "de")
pages = Page.objects.on_site(site).drafts()
for page in pages:
self.assertEqual(set((u'en', u'de')), set(page.get_languages()))
# Original Title untouched
self.assertEqual("root page de", Page.objects.on_site(site).get_home().get_title("de"))
# Plugins still copied
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins*2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins)
def test_copy_filled_placeholder(self):
"""
If an existing title in the target language has plugins in a placeholder,
that placeholder is skipped.
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.on_site(site).get_home()
create_title("de", "root page de", root_page)
ph = root_page.placeholders.get(slot="body")
add_plugin(ph, "TextPlugin", "de", body="Hello World")
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "en", "de")
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# one placeholder (holding 7 'en' plugins) is skipped, but it already contains
# the single 'de' plugin added above, so the 'de' count is 6 below the 'en' count
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins-6)
def test_copy_filled_placeholder_force_copy(self):
"""
If an existing title in the target language has plugins in a placeholder
and the command is called with *force-copy*, the plugins are copied on
top of the existing ones.
"""
site = 1
number_start_plugins = CMSPlugin.objects.all().count()
# create an empty title language
root_page = Page.objects.on_site(site).get_home()
create_title("de", "root page de", root_page)
ph = root_page.placeholders.get(slot="body")
add_plugin(ph, "TextPlugin", "de", body="Hello World")
root_plugins = CMSPlugin.objects.filter(placeholder=ph)
text_de_orig, _ = root_plugins.get(language='de', plugin_type='TextPlugin').get_plugin_instance()
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "en", "de", "force-copy")
CMSPlugin.objects.filter(placeholder=root_page.placeholders.get(slot="body"))
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# we have an existing plugin in one placeholder, so we have one more
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_start_plugins+1)
def test_copy_from_non_existing_lang(self):
"""
If the source language does not exist on the pages, each page is skipped
and, in verbose mode, a message is printed for every skipped page.
"""
site = 1
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "de", "fr", "verbose")
text = out.getvalue()
page_count = Page.objects.on_site(site).drafts().count() + 1
for idx in range(1, page_count):
self.assertTrue(text.find("Skipping page page%d, language de not defined" % idx) > -1)
def test_copy_site_safe(self):
"""
Check that copy of languages on one site does not interfere with other
sites
"""
site_other = 1
site_active = 2
original_site1_langs = {}
number_start_plugins = CMSPlugin.objects.all().count()
site_obj = Site.objects.create(domain="sample2.com", name="sample2.com", pk=site_active)
for page in Page.objects.on_site(1).drafts():
original_site1_langs[page.pk] = set(page.get_languages())
p1 = create_page('page1', published=True, in_navigation=True, language='de', template='nav_playground.html', site=site_obj)
create_page('page4', published=True, in_navigation=True, language='de', template='nav_playground.html', site=site_obj)
create_page('page2', published=True, in_navigation=True, parent=p1, language='de', template='nav_playground.html', site=site_obj)
for page in Page.objects.on_site(site_active).drafts():
self._fill_page_body(page, 'de')
number_site2_plugins = CMSPlugin.objects.all().count() - number_start_plugins
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("copy-lang", "de", "fr", "site=%s" % site_active)
for page in Page.objects.on_site(site_other).drafts():
self.assertEqual(original_site1_langs[page.pk], set(page.get_languages()))
for page in Page.objects.on_site(site_active).drafts():
self.assertEqual(set(('de', 'fr')), set(page.get_languages()))
# plugins for site 1
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
# plugins for site 2 de
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), number_site2_plugins)
# plugins for site 2 fr
self.assertEqual(CMSPlugin.objects.filter(language='fr').count(), number_site2_plugins)
# global number of plugins
self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins + number_site2_plugins*2)
def test_copy_bad_languages(self):
out = StringIO()
command = cms.Command()
command.stdout = out
with self.assertRaises(CommandError) as command_error:
command.handle("copy-lang", "it", "fr")
self.assertEqual(str(command_error.exception), 'Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES')
|
bsd-3-clause
|
seijim/cloud-robotics-azure-platform-v1-sdk
|
CloudRoboticsApi/ClientCode_Pepper/HeadWaters/PepperCode2/lib/cloudrobotics/message.py
|
4
|
2665
|
# -*- coding: utf-8 -*-
#
# Cloud Robotics FX message
#
# @author: Hiroki Wakabayashi <[email protected]>
# @version: 0.0.1
import json
import datetime
ROUTING_TYPE_D2D = 'D2D'
ROUTING_TYPE_LOG = 'LOG'
ROUTING_TYPE_CALL = 'CALL'
API_APP_ID = 'SbrApiServices'
class CRFXMessage(object):
def __init__(self):
self.header = {
'RoutingType': None
,'RoutingKeyword': 'Default'
,'AppId': API_APP_ID
,'AppProcessingId': None
,'MessageId': None
,'MessageSeqno': None
}
self.body = {}
# Set the sequence number.
def set_seq(self, seqno):
self.header['MessageSeqno'] = seqno #str(seqno)
# Get the sequence number.
def get_seq(self):
return self.header['MessageSeqno']
# Build the message payload.
def payload(self, seqNo=None):
now = datetime.datetime.now()
self.header['SendDateTime'] = now.strftime('%Y-%m-%d %H:%M:%S.')+'%03d' % (now.microsecond // 1000)
# Switch handling depending on the type of value set in the body.
if isinstance(self.body, str):
body = self.body.decode('utf-8')
elif isinstance(self.body, dict):
body = json.loads(json.dumps(self.body))
pyld = {
'RbHeader':self.header
,'RbBody':body
}
return json.dumps(pyld, ensure_ascii=False, separators=(',', ':'))
# Load a JSON string.
#
def loads(self, jsontext):
try:
print jsontext
data = json.loads(jsontext)
self.header = {}
self.body = {}
for k, v in data[u'RbHeader'].iteritems():
self.header[k.encode('utf-8')] = self.encode(v)
for k, v in data[u'RbBody'].iteritems():
self.body[k.encode('utf-8')] = self.encode(v)
except Exception as e:
print(e)
self.header = None
self.body = None
# Encode unicode values as UTF-8.
#
def encode(self, val):
if val is None:
return None
elif isinstance(val, unicode):
return val.encode('utf-8')
elif isinstance(val, list):
newVal = []
for v in val:
newVal.append(self.encode(v))
return newVal
elif isinstance(val, dict):
newVal = {}
for i, v in val.iteritems():
newVal[i.encode('utf-8')] = self.encode(v)
return newVal
else:
return val
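# Minimal usage sketch (illustrative only; the routing keyword, sequence number
# and body below are made up).
if __name__ == '__main__':
    msg = CRFXMessage()
    msg.header['RoutingType'] = ROUTING_TYPE_D2D
    msg.header['RoutingKeyword'] = 'SampleRobot'
    msg.set_seq(1)
    msg.body = {'Command': 'hello'}
    print(msg.payload())  # JSON string with RbHeader / RbBody sections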
|
mit
|
sclabs/sitestatus-nonrel
|
django/db/models/deletion.py
|
79
|
11027
|
from operator import attrgetter
from django.db import connections, transaction, IntegrityError
from django.db.models import signals, sql
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.utils.datastructures import SortedDict
from django.utils.functional import wraps
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
# TODO change this to use super() when we drop Python 2.4
IntegrityError.__init__(self, msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(sub_objs, source=field.rel.to,
source_attr=field.name, nullable=field.null)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError("Cannot delete some instances of model '%s' because "
"they are referenced through a protected foreign key: '%s.%s'" % (
field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name
),
sub_objs
)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
return set_on_delete
SET_NULL = SET(None)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
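# Illustrative only: these handlers are not called directly. They are passed as
# the on_delete argument of a relation field and invoked by the Collector below
# while related objects are gathered ('Author' here is a hypothetical model):
#
#   class Book(models.Model):
#       author = models.ForeignKey('Author', on_delete=PROTECT)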
def force_managed(func):
@wraps(func)
def decorated(self, *args, **kwargs):
if not transaction.is_managed(using=self.using):
transaction.enter_transaction_management(using=self.using)
forced_managed = True
else:
forced_managed = False
try:
func(self, *args, **kwargs)
if forced_managed:
transaction.commit(using=self.using)
else:
transaction.commit_unless_managed(using=self.using)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.using)
return decorated
class Collector(object):
def __init__(self, using):
self.using = using
# Initially, {model: set([instances])}, later values become lists.
self.data = {}
self.batches = {} # {model: {field: set([instances])}}
self.field_updates = {} # {model: {(field, value): set([instances])}}
self.dependencies = {} # {model: set([models])}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Adds 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it
and 'nullable' should be set to True, if the relation can be null.
Returns a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data.setdefault(model, set())
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if new_objs and source is not None and not nullable:
if reverse_dependency:
source, model = model, source
self.dependencies.setdefault(source, set()).add(model)
return new_objs
def add_batch(self, model, field, objs):
"""
Schedules a batch delete. Every instance of 'model' that is related to
an instance of 'obj' through 'field' will be deleted.
"""
self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)
def add_field_update(self, field, value, objs):
"""
Schedules a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates.setdefault(
model, {}).setdefault(
(field, value), set()).update(objs)
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False):
"""
Adds 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
that caused it and 'nullable' should be set to True, if the relation
can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
"""
if not connections[self.using].features.supports_deleting_related_objects:
collect_related = False
new_objs = self.add(objs, source, nullable,
reverse_dependency=reverse_dependency)
if not new_objs:
return
model = new_objs[0].__class__
# Recursively collect parent models, but not their related objects.
# These will be found by meta.get_all_related_objects()
for parent_model, ptr in model._meta.parents.iteritems():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(parent_objs, source=model,
source_attr=ptr.rel.related_name,
collect_related=False,
reverse_dependency=True)
if collect_related:
for related in model._meta.get_all_related_objects(include_hidden=True):
field = related.field
if related.model._meta.auto_created:
self.add_batch(related.model, field, new_objs)
else:
sub_objs = self.related_objects(related, new_objs)
if not sub_objs:
continue
field.rel.on_delete(self, field, sub_objs, self.using)
# TODO This entire block is only needed as a special case to
# support cascade-deletes for GenericRelation. It should be
# removed/fixed when the ORM gains a proper abstraction for virtual
# or composite fields, and GFKs are reworked to fit into that.
for relation in model._meta.many_to_many:
if not relation.rel.through:
sub_objs = relation.bulk_related_objects(new_objs, self.using)
self.collect(sub_objs,
source=model,
source_attr=relation.rel.related_name,
nullable=True)
def related_objects(self, related, objs):
"""
Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
"""
return related.model._base_manager.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
def instances_with_model(self):
for model, instances in self.data.iteritems():
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
models = self.data.keys()
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model)
if not (dependencies and dependencies.difference(sorted_models)):
sorted_models.append(model)
found = True
if not found:
return
self.data = SortedDict([(model, self.data[model])
for model in sorted_models])
@force_managed
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models into an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# update fields
for model, instances_for_fieldvalues in self.field_updates.iteritems():
query = sql.UpdateQuery(model)
for (field, value), instances in instances_for_fieldvalues.iteritems():
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in self.data.itervalues():
instances.reverse()
# delete batches
for model, batches in self.batches.iteritems():
query = sql.DeleteQuery(model)
for field, instances in batches.iteritems():
query.delete_batch([obj.pk for obj in instances], self.using, field)
# delete instances
for model, instances in self.data.iteritems():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
query.delete_batch(pk_list, self.using)
# send post_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for model, instances_for_fieldvalues in self.field_updates.iteritems():
for (field, value), instances in instances_for_fieldvalues.iteritems():
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in self.data.iteritems():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
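# Illustrative only: Model.delete() and QuerySet.delete() drive this class
# roughly as sketched below ('default' is assumed to be a configured database
# alias and SomeModel a hypothetical model):
#
#   collector = Collector(using='default')
#   collector.collect(SomeModel.objects.filter(pk__in=[1, 2, 3]))
#   collector.delete()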
|
bsd-3-clause
|
adlnet-archive/edx-platform
|
lms/djangoapps/shoppingcart/migrations/0002_auto__add_field_paidcourseregistration_mode.py
|
182
|
8687
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PaidCourseRegistration.mode'
db.add_column('shoppingcart_paidcourseregistration', 'mode',
self.gf('django.db.models.fields.SlugField')(default='honor', max_length=50),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PaidCourseRegistration.mode'
db.delete_column('shoppingcart_paidcourseregistration', 'mode')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
agpl-3.0
|
cmunk/protwis
|
residue/views.py
|
1
|
38438
|
from django.conf import settings
from django.core.cache import cache
from django.db.models import Count, F, Q
from django.shortcuts import render
from django.views.generic import TemplateView
from common.views import AbsTargetSelection
from common.definitions import FULL_AMINO_ACIDS, STRUCTURAL_RULES, STRUCTURAL_SWITCHES
from common.selection import Selection
Alignment = getattr(__import__(
'common.alignment_' + settings.SITE_NAME,
fromlist=['Alignment']
), 'Alignment')
from seqsign.sequence_signature import SequenceSignature, SignatureMatch
from alignment.functions import get_proteins_from_selection
from construct.views import create_structural_rule_trees, ConstructMutation
from contactnetwork.models import InteractingResiduePair
from interaction.models import ResidueFragmentInteraction
from mutation.models import MutationExperiment
from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs, NHSPrescribings
from protein.models import ProteinSegment, Protein, ProteinGProtein, ProteinGProteinPair
from residue.models import Residue,ResidueNumberingScheme, ResiduePositionSet, ResidueSet
from collections import OrderedDict
import re
import time
class TargetSelection(AbsTargetSelection):
pass
class ResidueTablesSelection(AbsTargetSelection):
# Left panel
step = 1
number_of_steps = 2
docs = 'generic_numbering.html'
description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \
+ ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,' \
+ ' where you can edit the list.\n\nSelect which numbering schemes to use in the middle column.\n\nOnce you' \
+ ' have selected all your receptors, click the green button.'
# Middle section
numbering_schemes = True
# Buttons
buttons = {
'continue' : {
'label' : 'Show residue numbers',
'url' : '/residue/residuetabledisplay',
'color' : 'success',
}
}
class ResidueTablesDisplay(TemplateView):
"""
A class rendering the residue numbering table.
"""
template_name = 'residue_table.html'
def get_context_data(self, **kwargs):
"""
Get the selection data (proteins and numbering schemes) and prepare it for display.
"""
context = super().get_context_data(**kwargs)
# get the user selection from session
simple_selection = self.request.session.get('selection', False)
# local protein list
proteins = []
# flatten the selection into individual proteins
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
elif target.type == 'family':
# species filter
species_list = []
for species in simple_selection.species:
species_list.append(species.item)
# annotation filter
protein_source_list = []
for protein_source in simple_selection.annotation:
protein_source_list.append(protein_source.item)
if species_list:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
species__in=(species_list),
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
else:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
for fp in family_proteins:
proteins.append(fp)
longest_name = 0
species_list = {}
for protein in proteins:
if protein.species.common_name not in species_list:
if len(protein.species.common_name)>10 and len(protein.species.common_name.split())>1:
name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])
if len(" ".join(protein.species.common_name.split()[1:]))>11:
name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])[:8]+".."
else:
name = protein.species.common_name
species_list[protein.species.common_name] = name
else:
name = species_list[protein.species.common_name]
if len(re.sub('<[^>]*>', '', protein.name)+" "+name)>longest_name:
longest_name = len(re.sub('<[^>]*>', '', protein.name)+" "+name)
# get the selection from session
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
# extract numbering schemes and proteins
numbering_schemes = [x.item for x in selection.numbering_schemes]
# get the helices (TMs only at first)
segments = ProteinSegment.objects.filter(category='helix', proteinfamily='GPCR')
if ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME) in numbering_schemes:
default_scheme = ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME)
else:
default_scheme = numbering_schemes[0]
# prepare the dictionary
# each helix has a dictionary of positions
# default_generic_number or first scheme on the list is the key
# value is a dictionary of other gn positions and residues from selected proteins
data = OrderedDict()
for segment in segments:
data[segment.slug] = OrderedDict()
residues = Residue.objects.filter(protein_segment=segment, protein_conformation__protein__in=proteins).prefetch_related('protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
'generic_number__scheme', 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')
for scheme in numbering_schemes:
if scheme == default_scheme and scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
elif scheme == default_scheme:
for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
for residue in residues:
alternatives = residue.alternative_generic_numbers.all()
pos = residue.generic_number
for alternative in alternatives:
scheme = alternative.scheme
if default_scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
pos = residue.generic_number
if scheme == pos.scheme:
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
else:
if scheme.slug not in data[segment.slug][pos.label].keys():
data[segment.slug][pos.label][scheme.slug] = alternative.label
if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
else:
if scheme.slug not in data[segment.slug][pos.label].keys():
data[segment.slug][pos.label][scheme.slug] = alternative.label
if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
# Prepare the dictionary of lists of lists; dealing with a triple-nested dictionary in Django templates is a nightmare
flattened_data = OrderedDict.fromkeys([x.slug for x in segments], [])
for s in iter(flattened_data):
flattened_data[s] = [[data[s][x][y.slug] for y in numbering_schemes]+data[s][x]['seq'] for x in sorted(data[s])]
context['header'] = zip([x.short_name for x in numbering_schemes] + [x.name+" "+species_list[x.species.common_name] for x in proteins], [x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])
context['segments'] = [x.slug for x in segments]
context['data'] = flattened_data
context['number_of_schemes'] = len(numbering_schemes)
context['longest_name'] = {'div' : longest_name*2, 'height': longest_name*2+80}
return context
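# Illustrative only (scheme slug, position labels and residue strings below are
# made up): for one selected protein and a single numbering scheme, the
# structures built in get_context_data() have roughly this shape:
#
#   data = {'TM1': {'1.50': {'gpcrdb': '1.50', 'seq': ['N55']}, ...}, ...}
#   flattened_data = {'TM1': [['1.50', 'N55'], ...], ...}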
class ResidueFunctionBrowser(TemplateView):
"""
Per generic position summary of functional information
"""
template_name = 'residue_function_browser.html'
def get_context_data (self, **kwargs):
# setup caches
cache_name = "RFB"
rfb_panel = cache.get(cache_name)
# rfb_panel = None
        if rfb_panel is None:
rfb_panel = {}
# Signatures
rfb_panel["signatures"] = {}
# Grab relevant segments
segments = list(ProteinSegment.objects.filter(proteinfamily='GPCR'))
# Grab High/Low CA GPCRs (class A)
high_ca = ["5ht2c_human", "acm4_human", "drd1_human", "fpr1_human", "ghsr_human", "cnr1_human", "aa1r_human", "gpr6_human", "gpr17_human", "gpr87_human"]
low_ca = ["agtr1_human", "ednrb_human", "gnrhr_human", "acthr_human", "v2r_human", "gp141_human", "gp182_human"]
# Signature High vs Low CA
high_ca_gpcrs = Protein.objects.filter(entry_name__in=high_ca).select_related('residue_numbering_scheme', 'species')
low_ca_gpcrs = Protein.objects.filter(entry_name__in=low_ca).select_related('residue_numbering_scheme', 'species')
signature = SequenceSignature()
signature.setup_alignments(segments, high_ca_gpcrs, low_ca_gpcrs)
signature.calculate_signature()
rfb_panel["signatures"]["cah"] = signature.signature
rfb_panel["signatures"]["cah_positions"] = signature.common_gn
signature = SequenceSignature()
signature.setup_alignments(segments, low_ca_gpcrs, high_ca_gpcrs)
signature.calculate_signature()
rfb_panel["signatures"]["cal"] = signature.signature
rfb_panel["signatures"]["cal_positions"] = signature.common_gn
# Grab Gi/Gs/Gq/GI12 GPCR sets (class A)
human_class_a_gpcrs = Protein.objects.filter(species_id=1, sequence_type_id=1, family__slug__startswith='001').distinct().prefetch_related('proteingprotein_set', 'residue_numbering_scheme')
gs = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_001"))
gio = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_002"))
gq = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_003"))
g12 = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_004"))
            all_receptors = set(gs + gio + gq + g12)
# Create sequence signatures for the G-protein sets
for gprotein in ["gs", "gio", "gq", "g12"]:
# print("Processing " + gprotein)
# Signature receptors specific for a G-protein vs all others
signature = SequenceSignature()
                signature.setup_alignments(segments, locals()[gprotein], all_receptors.difference(locals()[gprotein]))
signature.calculate_signature()
rfb_panel["signatures"][gprotein] = signature.signature
rfb_panel["signatures"][gprotein + "_positions"] = signature.common_gn
# Add class A alignment features
signature = SequenceSignature()
signature.setup_alignments(segments, human_class_a_gpcrs, [list(human_class_a_gpcrs)[0]])
signature.calculate_signature()
rfb_panel["class_a_positions"] = signature.common_gn
rfb_panel["class_a_aa"] = signature.aln_pos.consensus
rfb_panel["class_a_prop"] = signature.features_consensus_pos
# Add X-ray ligand contacts
# Optionally include the curation with the following filter: structure_ligand_pair__annotated=True
class_a_interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__family__slug__startswith="001").exclude(interaction_type__type='hidden')\
.values("rotamer__residue__generic_number__label").annotate(unique_receptors=Count("rotamer__residue__protein_conformation__protein__family_id", distinct=True))
rfb_panel["ligand_binding"] = {entry["rotamer__residue__generic_number__label"] : entry["unique_receptors"] for entry in list(class_a_interactions)}
# Add genetic variations
all_nat_muts = NaturalMutations.objects.filter(protein__family__slug__startswith="001").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["natural_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_nat_muts)}
# Add PTMs
all_ptms = PTMs.objects.filter(protein__family__slug__startswith="001").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["ptms"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ptms)}
all_phos = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Phosphorylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["phos"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_phos)}
all_palm = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Palmitoylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["palm"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_palm)}
all_glyc = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification__endswith="Glycosylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["glyc"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_glyc)}
all_ubiq = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Ubiquitylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["ubiq"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ubiq)}
# Thermostabilizing
all_thermo = ConstructMutation.objects.filter(construct__protein__family__slug__startswith="001", effects__slug='thermostabilising')\
.values("residue__generic_number__label").annotate(unique_receptors=Count("construct__protein__family_id", distinct=True))
rfb_panel["thermo_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_thermo)}
# Class A ligand mutations >5 fold effect - count unique receptors
all_ligand_mutations = MutationExperiment.objects.filter(Q(foldchange__gte = 5) | Q(foldchange__lte = -5), protein__family__slug__startswith="001")\
.values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["ligand_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ligand_mutations)}
# Class A mutations with >30% increase/decrease basal activity
all_basal_mutations = MutationExperiment.objects.filter(Q(opt_basal_activity__gte = 130) | Q(opt_basal_activity__lte = 70), protein__family__slug__startswith="001")\
.values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
rfb_panel["basal_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_basal_mutations)}
# Intrasegment contacts
all_contacts = InteractingResiduePair.objects.filter(~Q(res1__protein_segment_id = F('res2__protein_segment_id')), referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
.values("res1__generic_number__label").annotate(unique_receptors=Count("referenced_structure__protein_conformation__protein__family_id", distinct=True))
rfb_panel["intrasegment_contacts"] = {entry["res1__generic_number__label"] : entry["unique_receptors"] for entry in list(all_contacts)}
# Active/Inactive contacts
all_active_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\
referenced_structure__state__slug = "active", referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
.values("res1__generic_number__label", "res2__generic_number__label")
# OPTIMIZE
active_contacts = {}
for entry in list(all_active_contacts):
if entry["res1__generic_number__label"] not in active_contacts:
active_contacts[entry["res1__generic_number__label"]] = set()
active_contacts[entry["res1__generic_number__label"]].update([entry["res2__generic_number__label"]])
rfb_panel["active_contacts"] = active_contacts
all_inactive_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\
referenced_structure__state__slug = "inactive", referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
.values("res1__generic_number__label", "res2__generic_number__label")
# OPTIMIZE
inactive_contacts = {}
for entry in list(all_inactive_contacts):
if entry["res1__generic_number__label"] not in inactive_contacts:
inactive_contacts[entry["res1__generic_number__label"]] = set()
inactive_contacts[entry["res1__generic_number__label"]].update([entry["res2__generic_number__label"]])
rfb_panel["inactive_contacts"] = inactive_contacts
cache.set(cache_name, rfb_panel, 3600*24*7) # cache a week
# Other rules
# structural_rule_tree = create_structural_rule_trees(STRUCTURAL_RULES)
######## CREATE REFERENCE sets (or use structural rules)
## MICROSWITCHES
ms_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="State (micro-)switches").residue_position.all()]
## SODIUM POCKET
sp_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="Sodium ion pocket").residue_position.all()]
## ROTAMER SWITCHES
rotamer_labels = []
for entry in STRUCTURAL_SWITCHES["A"]:
if entry["Rotamer Switch"] != "-":
rotamer_labels.append(entry["AA1 Pos"])
rotamer_labels.append(entry["AA2 Pos"])
## G PROTEIN INTERACTION POSITIONS
# gprotein_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="Signalling protein pocket").residue_position.all()]
# Class A G-protein X-ray contacts
# TODO: replace with automatically generated sets from X-rays stored in database
gprotein_labels = {"1x60": {"001_006_001_001", " 001_006_001_002"},
"12x48": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"12x49": {"001_001_003_008", " 001_006_001_002"},
"12x51": {"001_006_001_002"},
"2x37": {"001_006_001_001"},
"2x39": {"001_002_022_003"},
"2x40": {"001_006_001_001"},
"3x49": {"001_001_003_008", " 001_002_022_003"},
"3x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"3x53": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"3x54": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"3x55": {"001_001_003_008", " 001_006_001_002"},
"3x56": {"001_006_001_002", " 001_009_001_001"},
"34x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"34x51": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"34x52": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"34x53": {"001_001_003_008", " 001_006_001_002"},
"34x54": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"34x55": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"34x57": {"001_001_001_002", " 001_002_022_003"},
"4x40": {"001_002_022_003"},
"5x61": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5x64": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
"5x65": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5x67": {"001_001_003_008"},
"5x68": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
"5x69": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"5x71": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"5x72": {"001_001_003_008", " 001_006_001_002", " 001_009_001_001"},
"5x74": {"001_001_003_008"},
"6x23": {"001_002_022_003"},
"6x24": {"001_009_001_001"},
"6x25": {"001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
"6x26": {"001_002_022_003", " 001_009_001_001"},
"6x28": {"001_009_001_001"},
"6x29": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6x32": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6x33": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"6x36": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"6x37": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
"7x56": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
"8x47": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
"8x48": {"001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
"8x49": {"001_006_001_001", " 001_006_001_002"},
"8x51": {"001_006_001_002"},
"8x56": {"001_006_001_001"}}
# TODO: replace with automatically generated sets from X-rays stored in database
# Class A Arrestin X-ray contacts
arrestin_labels = {"12x49": {"001_009_001_001"},
"2x37": {"001_009_001_001"},
"2x38": {"001_009_001_001"},
"2x39": {"001_009_001_001"},
"2x40": {"001_009_001_001"},
"2x43": {"001_009_001_001"},
"3x50": {"001_009_001_001"},
"3x54": {"001_009_001_001"},
"3x55": {"001_009_001_001"},
"3x56": {"001_009_001_001"},
"34x50": {"001_009_001_001"},
"34x51": {"001_009_001_001"},
"34x53": {"001_009_001_001"},
"34x54": {"001_009_001_001"},
"34x55": {"001_009_001_001"},
"34x56": {"001_009_001_001"},
"4x38": {"001_009_001_001"},
"5x61": {"001_009_001_001"},
"5x64": {"001_009_001_001"},
"5x68": {"001_009_001_001"},
"5x69": {"001_009_001_001"},
"5x71": {"001_009_001_001"},
"5x72": {"001_009_001_001"},
"6x24": {"001_009_001_001"},
"6x25": {"001_009_001_001"},
"6x26": {"001_009_001_001"},
"6x28": {"001_009_001_001"},
"6x29": {"001_009_001_001"},
"6x32": {"001_009_001_001"},
"6x33": {"001_009_001_001"},
"6x36": {"001_009_001_001"},
"6x37": {"001_009_001_001"},
"6x40": {"001_009_001_001"},
"8x47": {"001_009_001_001"},
"8x48": {"001_009_001_001"},
"8x49": {"001_009_001_001"},
"8x50": {"001_009_001_001"}}
# Positions in center of membrane selected using 4BVN (ADRB1) together with OPM membrane positioning
# Reference: ['1x44', '2x52', '3x36', '4x54', '5x46', '6x48', '7x43']
mid_membrane_classA = {'TM1': 44,'TM2': 52,'TM3': 36,'TM4': 54,'TM5': 46, 'TM6': 48, 'TM7': 43}
# NOTE: We might need to split this into B1 and B2 when adhesion X-rays are published
# Positions in center of membrane selected using 5XEZ (GCGR) together with OPM membrane positioning
# Reference: ['1x51', '2x58', '3x41', '4x54', '5x45', '6x49', '7x50']
mid_membrane_classB = {'TM1': 51,'TM2': 58,'TM3': 41,'TM4': 54,'TM5': 45, 'TM6': 49, 'TM7': 50}
# Positions in center of membrane selected using 4OR2 (mGLUR1) together with OPM membrane positioning
# Reference: ['1x49', '2x48', '3x40', '4x41', '5x48', '6x48', '7.39x40']
mid_membrane_classC = {'TM1': 49,'TM2': 48,'TM3': 40,'TM4': 41,'TM5': 48, 'TM6': 48, 'TM7': 40}
# Positions in center of membrane selected using 6BD4 (FZD4) together with OPM membrane positioning
# Reference: ['1x43', '2x53', '3x38', '4x53', '5x53', '6x43', '7x47']
mid_membrane_classF = {'TM1': 43,'TM2': 53,'TM3': 38,'TM4': 53,'TM5': 53, 'TM6': 43, 'TM7': 47}
# Positions within membrane layer selected using 4BVN together with OPM membrane positioning
core_membrane_classA = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
# TODO: other classes
core_membrane_classB = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
core_membrane_classC = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
core_membrane_classF = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
# Residue oriented outward of bundle (based on inactive 4BVN and active 3SN6)
outward_orientation = {
'TM1' : [29, 30, 33, 34, 36, 37, 38, 40, 41, 44, 45, 48, 51, 52, 54, 55, 58],
'TM2' : [38, 41, 45, 48, 52, 55, 56, 58, 59, 60, 62, 63, 66],
'TM3' : [23, 24, 27, 31, 48, 51, 52, 55],
'TM4' : [40, 41, 43, 44, 47, 48, 50, 51, 52, 54, 55, 58, 59, 62, 63, 81],
'TM5' : [36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 52, 53, 55, 56, 57, 59, 60, 62, 63, 64, 66, 67, 68, 70, 71, 73, 74],
'TM6' : [25, 28, 29, 31, 32, 34, 35, 38, 39, 42, 43, 45, 46, 49, 50, 53, 54, 56, 57, 60],
'TM7' : [33, 34, 35, 37, 40, 41, 43, 44, 48, 51, 52, 54, 55]
}
########
# prepare context for output
context = {"signatures" : []}
index = 0
for h, segment in enumerate(rfb_panel["signatures"]["gs_positions"]["gpcrdba"]):
segment_first = True
for i, position in enumerate(rfb_panel["signatures"]["gs_positions"]["gpcrdba"][segment]):
if len(position) <= 5:
# To filter segment headers with non-GN numbering
if segment_first:
context["signatures"].append({"position" : segment})
index += 1
segment_first = False
# Add data
context["signatures"].append({})
context["signatures"][index]["segment"] = segment
context["signatures"][index]["sort"] = index
context["signatures"][index]["position"] = position
# Normalized position in TM
partial_position = int(position.split('x')[1][:2])
# RESIDUE PLACEMENT
context["signatures"][index]["membane_placement"] = "-"
context["signatures"][index]["membane_segment"] = "Extracellular"
context["signatures"][index]["residue_orientation"] = "-"
if segment in mid_membrane_classA: # TM helix
# parse position
context["signatures"][index]["membane_placement"] = partial_position - mid_membrane_classA[segment]
# negative is toward cytoplasm
if segment in ['TM1', 'TM3', 'TM5', 'TM7']: # downwards
context["signatures"][index]["membane_placement"] = -1 * context["signatures"][index]["membane_placement"]
# Segment selection
if partial_position >= core_membrane_classA[segment][0] and partial_position <= core_membrane_classA[segment][1]:
context["signatures"][index]["membane_segment"] = "Membrane"
elif segment in ['TM1', 'TM3', 'TM5', 'TM7']:
if partial_position > core_membrane_classA[segment][1]:
context["signatures"][index]["membane_segment"] = "Intracellular"
else:
if partial_position < core_membrane_classA[segment][0]:
context["signatures"][index]["membane_segment"] = "Intracellular"
# Orientation
if partial_position in outward_orientation[segment]:
context["signatures"][index]["residue_orientation"] = "Outward"
else:
if partial_position > min(outward_orientation[segment]) and partial_position < max(outward_orientation[segment]):
context["signatures"][index]["residue_orientation"] = "Inward"
# Intracellular segments
elif segment in ['ICL1', 'ICL2', 'ICL3', 'TM8', 'C-term']:
context["signatures"][index]["membane_segment"] = "Intracellular"
                    # COUNTS: all db results in a single loop
for key in ["ligand_binding", "natural_mutations", "thermo_mutations", "ligand_mutations", "basal_mutations", "intrasegment_contacts", "phos", "palm", "glyc", "ubiq" ]: # Add in future "gprotein_interface", "arrestin_interface"
context["signatures"][index][key] = 0
if position in rfb_panel[key]:
context["signatures"][index][key] = rfb_panel[key][position]
# G-protein interface
context["signatures"][index]["gprotein_interface"] = 0
if position in gprotein_labels:
context["signatures"][index]["gprotein_interface"] = len(gprotein_labels[position])
# Arrestin interface
context["signatures"][index]["arrestin_interface"] = 0
if position in arrestin_labels:
context["signatures"][index]["arrestin_interface"] = len(arrestin_labels[position])
# BINARY
# Microswitch
context["signatures"][index]["microswitch"] = position in ms_labels
# Sodium pocket
context["signatures"][index]["sodium"] = position in sp_labels
# Rotamer switch
context["signatures"][index]["rotamer_switch"] = position in rotamer_labels
# contacts
context["signatures"][index]["active_contacts"] = 0
if position in rfb_panel["active_contacts"]:
if position in rfb_panel["inactive_contacts"]:
context["signatures"][index]["active_contacts"] = len(rfb_panel["active_contacts"][position].difference(rfb_panel["inactive_contacts"][position]))
else:
context["signatures"][index]["active_contacts"] = len(rfb_panel["active_contacts"][position])
context["signatures"][index]["inactive_contacts"] = 0
if position in rfb_panel["inactive_contacts"]:
if position in rfb_panel["active_contacts"]:
context["signatures"][index]["inactive_contacts"] = len(rfb_panel["inactive_contacts"][position].difference(rfb_panel["active_contacts"][position]))
else:
context["signatures"][index]["inactive_contacts"] = len(rfb_panel["inactive_contacts"][position])
# CLASS A sequence + property consensus
if position in rfb_panel["class_a_positions"]["gpcrdba"][segment]:
ca_index = list(rfb_panel["class_a_positions"]["gpcrdba"][segment]).index(position)
# Sequence consensus
context["signatures"][index]["class_a_aa"] = rfb_panel["class_a_aa"][segment][position][0]
context["signatures"][index]["class_a_aa_name"] = FULL_AMINO_ACIDS[rfb_panel["class_a_aa"][segment][position][0]]
if context["signatures"][index]["class_a_aa"] == '+':
context["signatures"][index]["class_a_aa_name"] += ": "+rfb_panel["class_a_aa"][segment][position][3]
context["signatures"][index]["class_a_aa_cons"] = rfb_panel["class_a_aa"][segment][position][2]
# Property consensus
context["signatures"][index]["class_a_symb"] = rfb_panel["class_a_prop"][segment][i][0]
context["signatures"][index]["class_a_prop"] = rfb_panel["class_a_prop"][segment][i][1]
context["signatures"][index]["class_a_prop_cons"] = rfb_panel["class_a_prop"][segment][i][2]
# SEQUENCE SIGNATURES
for signature_type in ["cah", "cal", "gs", "gio", "gq", "g12"]:
if position in rfb_panel["signatures"][signature_type + "_positions"]["gpcrdba"][segment]:
ca_index = list(rfb_panel["signatures"][signature_type + "_positions"]["gpcrdba"][segment]).index(position)
context["signatures"][index][signature_type + "_score"] = rfb_panel["signatures"][signature_type][segment][ca_index][2]
context["signatures"][index][signature_type + "_prop"] = rfb_panel["signatures"][signature_type][segment][ca_index][1]
context["signatures"][index][signature_type + "_symb"] = rfb_panel["signatures"][signature_type][segment][ca_index][0]
index += 1
# Human Class A alignment - consensus/conservation
return context
|
apache-2.0
|
Zerknechterer/pyload
|
module/lib/simplejson/scanner.py
|
674
|
2560
|
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
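# For example, NUMBER_RE.match('-12.5e3').groups() == ('-12', '.5', 'e3');
# the fraction and exponent groups are None for plain integers such as '42'.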
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
|
gpl-3.0
|
opencloudinfra/orchestrator
|
venv/Lib/site-packages/django/db/backends/sqlite3/features.py
|
194
|
2656
|
from __future__ import unicode_literals
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils import six
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
can_clone_databases = True
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def can_release_savepoints(self):
return self.uses_savepoints
@cached_property
def can_share_in_memory_db(self):
return (
six.PY3 and
Database.__name__ == 'sqlite3.dbapi2' and
Database.sqlite_version_info >= (3, 7, 13)
)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_expression_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
|
gpl-3.0
|
puzan/ansible
|
lib/ansible/modules/windows/win_regedit.py
|
25
|
4763
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Keech <[email protected]>, Josh Ludwig <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_regedit
version_added: "2.0"
short_description: Add, change, or remove registry keys and values
description:
- Add, modify or remove registry keys and values.
- More information about the windows registry from Wikipedia (https://en.wikipedia.org/wiki/Windows_Registry).
options:
path:
description:
- Name of registry path.
- 'Should be in one of the following registry hives: HKCC, HKCR, HKCU, HKLM, HKU.'
required: true
aliases: [ key ]
name:
description:
- Name of registry entry in C(path).
- This is an entry in the above C(key) parameter.
    - If not provided or empty, the default name '(default)' is used.
aliases: [ entry ]
data:
description:
- Value of the registry entry C(name) in C(path).
    - Binary data should be expressed as a yaml byte array or as comma-separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file, binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional.
type:
description:
- Registry value data type.
choices:
- binary
- dword
- expandstring
- multistring
- string
- qword
default: string
aliases: [ datatype ]
state:
description:
- State of registry entry.
choices:
- present
- absent
default: present
notes:
- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before applying changes.
- Beware that some registry hives (HKEY_USERS in particular) do not allow the creation of new registry paths.
author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)"
'''
EXAMPLES = r'''
- name: Create registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: world
- name: Add or update registry path MyCompany, with entry 'hello', and containing 1337
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 1337
type: dword
- name: Add or update registry path MyCompany, with entry 'hello', and containing binary data in hex-string format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
type: binary
- name: Add or update registry path MyCompany, with entry 'hello', and containing binary data in yaml format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
type: binary
- name: Disable keyboard layout hotkey for all users (changes existing)
win_regedit:
path: HKU:\.DEFAULT\Keyboard Layout\Toggle
name: Layout Hotkey
data: 3
type: dword
- name: Disable language hotkey for current users (adds new)
win_regedit:
path: HKCU:\Keyboard Layout\Toggle
name: Language Hotkey
data: 3
type: dword
- name: Remove registry path MyCompany (including all entries it contains)
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
- name: Remove entry 'hello' from registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
state: absent
'''
RETURN = r'''
data_changed:
description: whether this invocation changed the data in the registry value
returned: success
type: boolean
sample: False
data_type_changed:
description: whether this invocation changed the datatype of the registry value
returned: success
type: boolean
sample: True
'''
|
gpl-3.0
|
gurneyalex/OpenUpgrade
|
addons/l10n_ve/__init__.py
|
975
|
1058
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
swprojects/PyRoboToolTimer
|
wxsw/dialogs/filedialog.py
|
2
|
2182
|
import wx
class CustomFileDialog(wx.FileDialog):
    """
    A custom wx.FileDialog subclass whose GetValue() return format can be
    configured via SetValueString (full path, file name and/or directory).
    """
def __init__(self,
parent,
valueStr=0,
message=wx.FileSelectorPromptStr):
wx.FileDialog.__init__(self,
parent,
message,
defaultDir="",
defaultFile="",
wildcard=wx.FileSelectorDefaultWildcardStr,
style=wx.FD_DEFAULT_STYLE|wx.FD_FILE_MUST_EXIST,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
name=wx.FileDialogNameStr)
# self._valueStr = 0
self.SetValueString(valueStr)
# self.ShowModal()
def ShowModal(self):
super(CustomFileDialog, self).ShowModal()
return self.GetValue()
def SetValueString(self, value=0):
""" choose the GetValue parser return value """
valueStr = {0: ["fullpath"],
1: ["file","path"],
3: ["path","file"],
4: ["file"]}
self._valueStr = valueStr[value]
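        # e.g. SetValueString(1) makes GetValue() return "<filename>,<directory>",
        # while the default SetValueString(0) returns just the full path.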
def SetFocus(self):
# super(CustomFileDialog, self).SetFocus()
pass
    def GetEditorType(self):
        return "dialog"
    def GetEditorName(self):
        return "CustomFileDialog"
def GetValue(self):
"""
Returns the time h,m,s joined by separator
"""
file = self.GetFilename()
fullpath = self.GetPath()
path = fullpath[:fullpath.index(file)]
filedict = {"file":file,
"fullpath":fullpath,
"path":path,}
value = []
for s in self._valueStr:
value.append(filedict[s])
value = ",".join(value)
return value
|
gpl-3.0
|
rfguri/vimfiles
|
bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/waitress/waitress/tests/test_server.py
|
31
|
10201
|
import errno
import socket
import unittest
class TestWSGIServer(unittest.TestCase):
def _makeOne(self, application, host='127.0.0.1', port=0,
_dispatcher=None, adj=None, map=None, _start=True,
_sock=None, _server=None):
from waitress.server import create_server
return create_server(
application,
host=host,
port=port,
map=map,
_dispatcher=_dispatcher,
_start=_start,
_sock=_sock)
def _makeOneWithMap(self, adj=None, _start=True, host='127.0.0.1',
port=0, app=None):
sock = DummySock()
task_dispatcher = DummyTaskDispatcher()
map = {}
return self._makeOne(
app,
host=host,
port=port,
map=map,
_sock=sock,
_dispatcher=task_dispatcher,
_start=_start,
)
def test_ctor_start_true(self):
inst = self._makeOneWithMap(_start=True)
self.assertEqual(inst.accepting, True)
self.assertEqual(inst.socket.listened, 1024)
def test_ctor_makes_dispatcher(self):
inst = self._makeOne(None, _start=False, map={})
self.assertEqual(inst.task_dispatcher.__class__.__name__,
'ThreadedTaskDispatcher')
def test_ctor_start_false(self):
inst = self._makeOneWithMap(_start=False)
self.assertEqual(inst.accepting, False)
def test_get_server_name_empty(self):
inst = self._makeOneWithMap(_start=False)
result = inst.get_server_name('')
self.assertTrue(result)
def test_get_server_name_with_ip(self):
inst = self._makeOneWithMap(_start=False)
result = inst.get_server_name('127.0.0.1')
self.assertTrue(result)
def test_get_server_name_with_hostname(self):
inst = self._makeOneWithMap(_start=False)
result = inst.get_server_name('fred.flintstone.com')
self.assertEqual(result, 'fred.flintstone.com')
def test_get_server_name_0000(self):
inst = self._makeOneWithMap(_start=False)
result = inst.get_server_name('0.0.0.0')
self.assertEqual(result, 'localhost')
def test_run(self):
inst = self._makeOneWithMap(_start=False)
inst.asyncore = DummyAsyncore()
inst.task_dispatcher = DummyTaskDispatcher()
inst.run()
self.assertTrue(inst.task_dispatcher.was_shutdown)
def test_pull_trigger(self):
inst = self._makeOneWithMap(_start=False)
inst.trigger = DummyTrigger()
inst.pull_trigger()
self.assertEqual(inst.trigger.pulled, True)
def test_add_task(self):
task = DummyTask()
inst = self._makeOneWithMap()
inst.add_task(task)
self.assertEqual(inst.task_dispatcher.tasks, [task])
self.assertFalse(task.serviced)
def test_readable_not_accepting(self):
inst = self._makeOneWithMap()
inst.accepting = False
self.assertFalse(inst.readable())
def test_readable_maplen_gt_connection_limit(self):
inst = self._makeOneWithMap()
inst.accepting = True
inst.adj = DummyAdj
inst._map = {'a': 1, 'b': 2}
self.assertFalse(inst.readable())
def test_readable_maplen_lt_connection_limit(self):
inst = self._makeOneWithMap()
inst.accepting = True
inst.adj = DummyAdj
inst._map = {}
self.assertTrue(inst.readable())
def test_readable_maintenance_false(self):
import time
inst = self._makeOneWithMap()
then = time.time() + 1000
inst.next_channel_cleanup = then
L = []
inst.maintenance = lambda t: L.append(t)
inst.readable()
self.assertEqual(L, [])
self.assertEqual(inst.next_channel_cleanup, then)
def test_readable_maintenance_true(self):
inst = self._makeOneWithMap()
inst.next_channel_cleanup = 0
L = []
inst.maintenance = lambda t: L.append(t)
inst.readable()
self.assertEqual(len(L), 1)
self.assertNotEqual(inst.next_channel_cleanup, 0)
def test_writable(self):
inst = self._makeOneWithMap()
self.assertFalse(inst.writable())
def test_handle_read(self):
inst = self._makeOneWithMap()
self.assertEqual(inst.handle_read(), None)
def test_handle_connect(self):
inst = self._makeOneWithMap()
self.assertEqual(inst.handle_connect(), None)
def test_handle_accept_wouldblock_socket_error(self):
inst = self._makeOneWithMap()
ewouldblock = socket.error(errno.EWOULDBLOCK)
inst.socket = DummySock(toraise=ewouldblock)
inst.handle_accept()
self.assertEqual(inst.socket.accepted, False)
def test_handle_accept_other_socket_error(self):
inst = self._makeOneWithMap()
eaborted = socket.error(errno.ECONNABORTED)
inst.socket = DummySock(toraise=eaborted)
inst.adj = DummyAdj
def foo():
raise socket.error
inst.accept = foo
inst.logger = DummyLogger()
inst.handle_accept()
self.assertEqual(inst.socket.accepted, False)
self.assertEqual(len(inst.logger.logged), 1)
def test_handle_accept_noerror(self):
inst = self._makeOneWithMap()
innersock = DummySock()
inst.socket = DummySock(acceptresult=(innersock, None))
inst.adj = DummyAdj
L = []
inst.channel_class = lambda *arg, **kw: L.append(arg)
inst.handle_accept()
self.assertEqual(inst.socket.accepted, True)
self.assertEqual(innersock.opts, [('level', 'optname', 'value')])
self.assertEqual(L, [(inst, innersock, None, inst.adj)])
def test_maintenance(self):
inst = self._makeOneWithMap()
class DummyChannel(object):
requests = []
zombie = DummyChannel()
zombie.last_activity = 0
zombie.running_tasks = False
inst.active_channels[100] = zombie
inst.maintenance(10000)
self.assertEqual(zombie.will_close, True)
def test_backward_compatibility(self):
from waitress.server import WSGIServer, TcpWSGIServer
from waitress.adjustments import Adjustments
self.assertTrue(WSGIServer is TcpWSGIServer)
inst = WSGIServer(None, _start=False, port=1234)
# Ensure the adjustment was actually applied.
self.assertNotEqual(Adjustments.port, 1234)
self.assertEqual(inst.adj.port, 1234)
if hasattr(socket, 'AF_UNIX'):
class TestUnixWSGIServer(unittest.TestCase):
unix_socket = '/tmp/waitress.test.sock'
def _makeOne(self, _start=True, _sock=None):
from waitress.server import create_server
return create_server(
None,
map={},
_start=_start,
_sock=_sock,
_dispatcher=DummyTaskDispatcher(),
unix_socket=self.unix_socket,
unix_socket_perms='600'
)
def _makeDummy(self, *args, **kwargs):
sock = DummySock(*args, **kwargs)
sock.family = socket.AF_UNIX
return sock
def test_unix(self):
inst = self._makeOne(_start=False)
self.assertEqual(inst.socket.family, socket.AF_UNIX)
self.assertEqual(inst.socket.getsockname(), self.unix_socket)
def test_handle_accept(self):
# Working on the assumption that we only have to test the happy path
# for Unix domain sockets as the other paths should've been covered
# by inet sockets.
client = self._makeDummy()
listen = self._makeDummy(acceptresult=(client, None))
inst = self._makeOne(_sock=listen)
self.assertEqual(inst.accepting, True)
self.assertEqual(inst.socket.listened, 1024)
L = []
inst.channel_class = lambda *arg, **kw: L.append(arg)
inst.handle_accept()
self.assertEqual(inst.socket.accepted, True)
self.assertEqual(client.opts, [])
self.assertEqual(
L,
[(inst, client, ('localhost', None), inst.adj)]
)
class DummySock(object):
accepted = False
blocking = False
family = socket.AF_INET
def __init__(self, toraise=None, acceptresult=(None, None)):
self.toraise = toraise
self.acceptresult = acceptresult
self.bound = None
self.opts = []
def bind(self, addr):
self.bound = addr
def accept(self):
if self.toraise:
raise self.toraise
self.accepted = True
return self.acceptresult
def setblocking(self, x):
self.blocking = True
def fileno(self):
return 10
def getpeername(self):
return '127.0.0.1'
def setsockopt(self, *arg):
self.opts.append(arg)
def getsockopt(self, *arg):
return 1
def listen(self, num):
self.listened = num
def getsockname(self):
return self.bound
class DummyTaskDispatcher(object):
def __init__(self):
self.tasks = []
def add_task(self, task):
self.tasks.append(task)
def shutdown(self):
self.was_shutdown = True
class DummyTask(object):
serviced = False
start_response_called = False
wrote_header = False
status = '200 OK'
def __init__(self):
self.response_headers = {}
self.written = ''
def service(self): # pragma: no cover
self.serviced = True
class DummyAdj:
connection_limit = 1
log_socket_errors = True
socket_options = [('level', 'optname', 'value')]
cleanup_interval = 900
channel_timeout = 300
class DummyAsyncore(object):
def loop(self, timeout=30.0, use_poll=False, map=None, count=None):
raise SystemExit
class DummyTrigger(object):
def pull_trigger(self):
self.pulled = True
class DummyLogger(object):
def __init__(self):
self.logged = []
def warning(self, msg, **kw):
self.logged.append(msg)
|
mit
|
ktan2020/legacy-automation
|
win/Lib/test/test_sha.py
|
13
|
1759
|
# Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import warnings
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
DeprecationWarning)
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
# Check digest matches the expected value
obj = sha.new(data)
computed = obj.hexdigest()
self.assertTrue(computed == digest)
# Verify that the value doesn't change between two consecutive
# digest operations.
computed_again = obj.hexdigest()
self.assertTrue(computed == computed_again)
# Check hexdigest() output matches digest()'s output
digest = obj.digest()
hexd = ""
for c in digest:
hexd += '%02x' % ord(c)
self.assertTrue(computed == hexd)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_case_4(self):
self.check(chr(0xAA) * 80,
'4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
|
mit
|
40423234/2017springcd_hw
|
plugin/summary/summary.py
|
317
|
2852
|
"""
Summary
-------
This plugin allows easy, variable length summaries directly embedded into the
body of your articles.
"""
from __future__ import unicode_literals
from pelican import signals
from pelican.generators import ArticlesGenerator, StaticGenerator, PagesGenerator
def initialized(pelican):
from pelican.settings import DEFAULT_CONFIG
DEFAULT_CONFIG.setdefault('SUMMARY_BEGIN_MARKER',
'<!-- PELICAN_BEGIN_SUMMARY -->')
DEFAULT_CONFIG.setdefault('SUMMARY_END_MARKER',
'<!-- PELICAN_END_SUMMARY -->')
if pelican:
pelican.settings.setdefault('SUMMARY_BEGIN_MARKER',
'<!-- PELICAN_BEGIN_SUMMARY -->')
pelican.settings.setdefault('SUMMARY_END_MARKER',
'<!-- PELICAN_END_SUMMARY -->')
def extract_summary(instance):
# if summary is already specified, use it
# if there is no content, there's nothing to do
if hasattr(instance, '_summary'):
instance.has_summary = True
return
if not instance._content:
instance.has_summary = False
return
begin_marker = instance.settings['SUMMARY_BEGIN_MARKER']
end_marker = instance.settings['SUMMARY_END_MARKER']
content = instance._content
begin_summary = -1
end_summary = -1
if begin_marker:
begin_summary = content.find(begin_marker)
if end_marker:
end_summary = content.find(end_marker)
if begin_summary == -1 and end_summary == -1:
instance.has_summary = False
return
# skip over the begin marker, if present
if begin_summary == -1:
begin_summary = 0
else:
begin_summary = begin_summary + len(begin_marker)
if end_summary == -1:
end_summary = None
summary = content[begin_summary:end_summary]
# remove the markers from the content
if begin_summary:
content = content.replace(begin_marker, '', 1)
if end_summary:
content = content.replace(end_marker, '', 1)
instance._content = content
instance._summary = summary
instance.has_summary = True
def run_plugin(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in generator.articles:
extract_summary(article)
elif isinstance(generator, PagesGenerator):
for page in generator.pages:
extract_summary(page)
def register():
signals.initialized.connect(initialized)
try:
signals.all_generators_finalized.connect(run_plugin)
except AttributeError:
# NOTE: This results in #314 so shouldn't really be relied on
# https://github.com/getpelican/pelican-plugins/issues/314
signals.content_object_init.connect(extract_summary)
|
agpl-3.0
|
henrytao-me/openerp.positionq
|
openerp/addons/l10n_be_invoice_bba/invoice.py
|
43
|
12622
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
res.append(('bba', 'BBA Structured Communication'))
#l_logger.warning('reference_type = %s' %res )
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
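    # Worked example: '+++123/4567/89002+++' reduces to the digits '123456789002';
    # the first ten digits give base 1234567890, 1234567890 % 97 == 2, which matches
    # the trailing check digits '02', so check_bbacomm() returns True.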
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice, payment_term, partner_bank_id, company_id)
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if partner_id:
if (type == 'out_invoice'):
reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id).out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context={})['value']['reference']
res_update = {
'reference_type': reference_type or 'none',
'reference': reference,
}
result['value'].update(res_update)
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
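                        # e.g. doy='082', year='2011', seq='001' -> base 822011001,
                        # 822011001 % 97 == 21, giving reference '+++082/2011/00121+++'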
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise osv.except_osv(_('Warning!'),
_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
                        bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise osv.except_osv(_('Error!'),
_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your OpenERP support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise osv.except_osv(_('Warning!'),
_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba':
if vals.has_key('reference'):
bbacomm = vals['reference']
else:
bbacomm = inv.reference or ''
if self.check_bbacomm(bbacomm):
reference = re.sub('\D', '', bbacomm)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference': fields.char('Communication', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Communication Type',
required=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
phillipbonhomme/cmake.nvim
|
rplugin/python3/cmake/__init__.py
|
1
|
2610
|
import neovim
import json
from ..rtags import rtags
@neovim.plugin
class CMakeRTagsProject(object):
def __init__(self, vim):
self.vim = vim
if self.vim.vars.get("loaded_fzf") == 1:
self.selectionUI = "fzf"
else:
self.selectionUI = "location-list"
self.plugin_cmd_info = {
"chromatica": "ChromaticaStart",
"deoplete": "call deoplete#enable()"
}
self.util = rtags.CMakeRTagsPlugin()
    def fzf(self, source, sink) -> None:
        # Build and run an fzf command over the given source list, feeding the
        # selection to the named sink function, then drop into insert mode.
        self.vim.command("""
            call fzf#run(fzf#wrap({{
                'source': {},
                'sink': function('{}')
            }}))
            """.replace("\n", "").format(json.dumps(source), sink))
        self.vim.async_call(self.vim.feedkeys, "i")
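    # Roughly, fzf(["symA", "symB"], "MySink") runs (modulo whitespace):
    #   call fzf#run(fzf#wrap({ 'source': ["symA", "symB"], 'sink': function('MySink') }))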
@neovim.function('fzf_rtags_source')
def fzf_rtags_source(self, args):
retVal = []
cmd = []
if str(args).find("goto"):
cursor = self.vim.command('getpos(\'.\')')
cmd = self.util.cmake_cmd_info["rtags_goto"]
cmd.extend(
[self.vim.command('expand(\'%:p\')') + cursor[1] + cursor[2]])
elif str(args).find("ref"):
cursor = self.vim.command('expand("<cword>")')
cmd = self.util.cmake_cmd_info["rtags_ref"]
cmd.extend([cursor])
elif str(args).find("sym"):
cmd = self.util.cmake_cmd_info["rtags_sym"]
else:
return None
retVal = self.util.rtags_tagrun(cmd)
return retVal
@neovim.command('CMakeProjectSetup', sync=False)
def run_cmake_setup_rtags(self):
self.util.removeOldCMakeFiles()
if self.util.cmake_build_info["build_dir"].is_dir():
self.util.removeDirtyDir()
if self.util.cmake_build_info["cmake_proj"].is_file():
self.vim.command('echo "Starting CMake Project"')
self.util.run_cmake()
self.util.setup_rtags_daemon()
self.util.connect_rtags_client()
for plugin, cmd in self.plugin_cmd_info.items():
self.vim.command(cmd)
else:
self.vim.command('echo "Not a CMake Project"')
@neovim.command('CMakeProjectTeardown', sync=False)
def run_cmake_teardown_rtags(self):
self.util.shutdown_rtags_daemon()
@neovim.command('CMakeProjectSetFile', nargs='1', sync=True)
def run_rtags_set_file(self, arg):
self.util.rtags_set_file(arg)
@neovim.command('CMakeProjectUpdateBuffers', sync=False)
def run_update_rtags_buffers(self):
buffers = self.vim.buffers
self.util.update_rtags_buffers(buffers)
|
unlicense
|
amimof/ansible-websphere
|
library/ibmim.py
|
1
|
11683
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Amir Mofasser <[email protected]> (@amimof)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibmim
short_description: Manage IBM Installation Manager packages
description:
  - This module can install, uninstall and update IBM Installation Manager packages on a supported Linux distribution.
  - This module relies on 'imcl', the command-line binary installed by the IM installer. You may use this module to install Installation Manager itself.
version_added: "1.9.4"
author: Amir Mofasser (@github)
requirements:
- IBM Installation Manager
- Installation files on remote server or local directory
options:
id:
description: The ID of the package which is to be installed
aliases:
- name
ibmim:
default: /opt/IBM/InstallationManager
description: Path to installation directory of Installation Manager
dest:
description: Path to destination installation directory
im_shared:
description: Path to Installation Manager shared resources folder
repositories:
description: A list of repositories to use. May be a path, URL or both.
type: list
aliases:
- repos
preferences:
type: list
description: Specify a preference value or a comma-delimited list of preference values to be used
properties:
type: list
    description: Specify a property value or a comma-delimited list of property values to be used
state:
choices:
- present
- absent
- latest
default: present
description: Install a package with 'present'. Uninstall a package with 'absent'. Update all packages with 'latest'.
install_fixes:
choices:
- none
- recommended
- all
default: none
description: Install fixes if available in the repositories.
connect_passport_advantage:
default: false
type: bool
description: Append the PassportAdvantage repository to the repository list
log:
description: Specify a log file that records the result of Installation Manager operations.
'''
EXAMPLES = '''
---
- name: Install WebSphere Application Server Liberty v8.5
ibmim:
name: com.ibm.websphere.liberty.v85
repositories:
- http://was-repos/
- name: Uninstall WebSphere Application Server Liberty v8.5
ibmim:
name: com.ibm.websphere.liberty.v85
state: absent
- name: Update all packages
ibmim:
state: latest
repositories:
- http://was-repos/
'''
import os
import subprocess
import platform
import datetime
import shutil
import re
from ansible.module_utils.basic import AnsibleModule
class InstallationManager():
module = None
module_facts = dict(
installed = False,
version = None,
id = None,
path = None,
name = None,
stdout = None,
stderr = None
)
def __init__(self):
# Read arguments
self.module = AnsibleModule(
argument_spec = dict(
# install/uninstall/updateAll
state = dict(default='present', choices=['present', 'absent', 'latest']),
# /opt/IBM/InstallationManager
ibmim = dict(default='/opt/IBM/InstallationManager'),
# Package ID
id = dict(required=False, aliases=['name']),
# -installationDirectory
dest = dict(required=False),
# -sharedResourcesDirectory
im_shared = dict(required=False),
# -repositories
repositories = dict(required=False, type='list', aliases=['repos']),
            # -preferences
preferences = dict(required=False, type='list'),
# -properties
properties = dict(required=False, type='list'),
# -connectPassportAdvantage
connect_passport_advantage = dict(default=False, type='bool'),
# -installFixes
install_fixes = dict(default='none', choices=['none', 'recommended', 'all']),
# -log
log = dict(required=False)
),
supports_check_mode = True
)
def getItem(self, key):
"""
Returns an item at key from the global dict module_facts
"""
return self.module_facts[key]
def isProvisioned(self, dest, packageId):
"""
Checks if package is already installed at dest
:param dest: Destination installation directory of the product
:return: True if already provisioned. False if not provisioned
"""
        # If the destination dir does not exist then it's safe to assume that IM is not installed
if dest:
if not os.path.exists(dest):
return False
return self.getVersion(packageId)["installed"]
def getVersion(self, packageId):
child = subprocess.Popen(
["{0}/eclipse/tools/imcl "
" listInstalledPackages "
" -long".format(self.module.params['ibmim'])],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout_value, stderr_value = child.communicate()
# Store stdout and stderr
self.module_facts["stdout"] = stdout_value
self.module_facts["stderr"] = stderr_value
if child.returncode != 0:
self.module.fail_json(
msg="Error getting installed version of package '{0}'".format(packageId),
stdout=stdout_value
)
for line in stdout_value.split(os.linesep):
if packageId in line:
linesplit = line.split(" : ")
self.module_facts["installed"] = True
self.module_facts["path"] = linesplit[0]
self.module_facts["id"] = linesplit[1]
self.module_facts["name"] = linesplit[2]
self.module_facts["version"] = linesplit[3]
break
return self.module_facts
def install(self, module_params):
# Check mode on
if self.module.check_mode:
self.module.exit_json(msg="Package '{0}' is to be installed".format(module_params['id']))
        # Check whether the package is already installed
if self.isProvisioned(module_params['dest'], module_params['id']):
self.module.exit_json(changed=False, msg="Package '{0}' is already installed".format(module_params['id']), ansible_facts=self.module_facts)
        # Check that at least one of repositories or connect_passport_advantage is provided
if not module_params['repositories'] and not module_params['connect_passport_advantage']:
self.module.fail_json(msg="One or more repositories are required when installing packages")
cmd = ("{0}/eclipse/tools/imcl install {1} "
"-repositories {2} "
"-acceptLicense "
"-stopBlockingProcesses ").format(module_params['ibmim'], module_params['id'], ",".join(module_params['repositories']))
if module_params['dest']:
cmd = "{0} -installationDirectory {1} ".format(cmd, module_params['dest'])
if module_params['im_shared']:
cmd = "{0} -sharedResourcesDirectory {1} ".format(cmd, module_params['im_shared'])
if module_params['properties']:
cmd = "{0} -properties {1} ".format(cmd, ",".join(module_params['properties']))
if module_params['preferences']:
cmd = "{0} -preferences {1} ".format(cmd, ",".join(module_params['preferences']))
if module_params['install_fixes']:
cmd = "{0} -installFixes {1} ".format(cmd, module_params['install_fixes'])
if module_params['connect_passport_advantage']:
cmd = "{0} -connectPassportAdvantage ".format(cmd)
if module_params['log']:
cmd = "{0} -log {1} ".format(cmd, module_params['log'])
child = subprocess.Popen(
[cmd],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
self.module.fail_json(
msg="Failed installing package '{0}'".format(module_params['id']),
stdout=stdout_value,
stderr=stderr_value
)
# After install, get versionInfo so that we can show it to the user
self.getVersion(module_params['id'])
self.module.exit_json(changed=True, msg="Package '{0}' installed".format(module_params['id']), ansible_facts=self.module_facts)
def uninstall(self, module_params):
        # Check mode on
if self.module.check_mode:
self.module.exit_json(changed=False, msg="Package '{0}' is to be uninstalled".format(module_params['id']), ansible_facts=self.module_facts)
        # Check whether the package is installed
if not self.isProvisioned(module_params['dest'], module_params['id']):
self.module.exit_json(changed=False, msg="Package '{0}' is not installed".format(module_params['id']), ansible_facts=self.module_facts)
cmd = "{0}/eclipse/tools/imcl uninstall {1} ".format(module_params['ibmim'], module_params['id'])
if module_params['dest']:
cmd = "{0} -installationDirectory {1} ".format(cmd, module_params['dest'])
if module_params['preferences']:
cmd = "{0} -preferences {1} ".format(cmd, ",".join(module_params['preferences']))
if module_params['properties']:
cmd = "{0} -properties {1} ".format(cmd, ",".join(module_params['properties']))
if module_params['log']:
cmd = "{0} -log {1} ".format(cmd, module_params['log'])
child = subprocess.Popen(
[cmd],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
self.module.fail_json(msg="Failed uninstalling package '{0}'".format(module_params['id']))
        # Remove the AppServer dir forcefully so that it doesn't prevent us from reinstalling.
shutil.rmtree(module_params['dest'], ignore_errors=False, onerror=None)
self.module.exit_json(changed=True, msg="Package '{0}' uninstalled".format(module_params['id']), ansible_facts=self.module_facts)
def updateAll(self, module_params):
# Check mode on
if self.module.check_mode:
self.module.exit_json(changed=False, msg="All installed packages are to be updated".format(module_params['id']), ansible_facts=self.module_facts)
        # Check that at least one of repositories or connect_passport_advantage is provided
if not module_params['repositories'] and not module_params['connect_passport_advantage']:
self.module.fail_json(msg="One or more repositories are required when installing packages")
cmd = ("{0}/eclipse/tools/imcl updateAll "
"-acceptLicense -repositories {1}").format(module_params['ibmim'], ",".join(module_params['repositories']))
if module_params['preferences']:
cmd = "{0} -preferences {1} ".format(cmd, ",".join(module_params['preferences']))
if module_params['properties']:
cmd = "{0} -properties {1} ".format(cmd, ",".join(module_params['properties']))
if module_params['connect_passport_advantage']:
cmd = "{0} -connectPassportAdvantage ".format(cmd)
if module_params['install_fixes']:
cmd = "{0} -installFixes {1} ".format(cmd, module_params['install_fixes'])
if module_params['log']:
cmd = "{0} -log {1} ".format(cmd, module_params['log'])
child = subprocess.Popen(
[cmd],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
self.module.fail_json(msg="Failed updating packages", stdout=stdout_value, stderr=stderr_value)
self.module.exit_json(changed=True, msg="All packages updated", ansible_facts=self.module_facts)
def main(self):
# Check if paths are valid
if not os.path.exists("{0}/eclipse".format(self.module.params['ibmim'])):
self.module.fail_json(
msg="IBM Installation Manager is not installed. Install it and try again.")
# Install
if self.module.params['state'] == 'present':
self.install(self.module.params)
# Uninstall
if self.module.params['state'] == 'absent':
self.uninstall(self.module.params)
# Update everything
if self.module.params['state'] == 'latest':
self.updateAll(self.module.params)
# import module snippets
if __name__ == '__main__':
im = InstallationManager()
im.main()
|
mit
|
spadae22/odoo
|
addons/sale_journal/__init__.py
|
443
|
1067
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kzlin129/practice-typing
|
lib/python2.7/site-packages/pip/index.py
|
343
|
40403
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
        ## FIXME: this shouldn't be a global list; it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (location, parsed.scheme, ", ".join(secure_schemes),
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""
        Yields HTMLPage objects from the given locations, skipping
        locations that have errors, and queueing download/homepage links
        found on those pages for further searching
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_unverified):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug('Skipping %s because the wheel filename is invalid' % link)
return []
if wheel.name.lower() != search_name.lower():
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if not wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for binary
# wheels on linux that deals with the inherent problems of
# binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if ((
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
            # We have a link whose integrity we are sure we cannot verify,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url,
session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted source;
                # however, it is not operating under API version 2, so
                # we can't make any claims about whether it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
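# e.g. get_requirement_from_url('http://host/Foo-1.2.tar.gz#egg=Foo') -> 'Foo'
#      get_requirement_from_url('http://host/Foo-1.2.tar.gz') -> 'Foo==1.2'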
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
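# e.g. package_to_requirement('Foo-1.2') -> 'Foo==1.2'
#      package_to_requirement('Foo-dev') -> 'Foo==dev'
#      package_to_requirement('Foo') -> 'Foo'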
|
apache-2.0
|
SunyataZero/buddhist-well-being-pyqt5
|
bwb/window/date.py
|
1
|
1841
|
from PyQt5 import QtWidgets
from PyQt5 import QtCore
# from PyQt5 import QtGui
import sys
import time
class DateTimeDialog(QtWidgets.QDialog):
"""
Inspiration: Answer by lou here:
https://stackoverflow.com/questions/18196799/how-can-i-show-a-pyqt-modal-dialog-and-get-data-out-of-its-controls-once-its-clo
"""
def __init__(self, i_unix_time_it, i_parent=None):
super(DateTimeDialog, self).__init__(i_parent)
vbox = QtWidgets.QVBoxLayout(self)
self.date_time_edit = QtWidgets.QDateTimeEdit(self)
self.date_time_edit.setCalendarPopup(True)
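        # QDateTime works in milliseconds since the epoch, while this dialog's
        # API exchanges Unix time in whole seconds.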
present_qdatetime = QtCore.QDateTime()
        present_qdatetime.setMSecsSinceEpoch(int(1000 * i_unix_time_it))
self.date_time_edit.setDateTime(present_qdatetime)
vbox.addWidget(self.date_time_edit)
self.button_box = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal,
self
)
vbox.addWidget(self.button_box)
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
# -accept and reject are "slots" built into Qt
def get_unix_time(self):
datetime = self.date_time_edit.dateTime()
unix_time_it = datetime.toMSecsSinceEpoch() // 1000
return unix_time_it
@staticmethod
def get_date_time_dialog(i_unix_time_it):
dialog = DateTimeDialog(i_unix_time_it)
dialog_result = dialog.exec_()
unix_time = -1
if dialog_result == QtWidgets.QDialog.Accepted:
unix_time = dialog.get_unix_time()
return unix_time
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
result = DateTimeDialog.get_date_time_dialog(time.time())
sys.exit(app.exec_())
|
gpl-3.0
|
alphapapa/youtube-dl
|
youtube_dl/extractor/theplatform.py
|
54
|
11715
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import time
import hmac
import binascii
import hashlib
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
xpath_with_ns,
unsmuggle_url,
int_or_none,
url_basename,
float_or_none,
)
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
class ThePlatformBaseIE(InfoExtractor):
def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
meta = self._download_xml(smil_url, video_id, note=note)
try:
error_msg = next(
n.attrib['abstract']
for n in meta.findall(_x('.//smil:ref'))
if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
except StopIteration:
pass
else:
raise ExtractorError(error_msg, expected=True)
formats = self._parse_smil_formats(
meta, smil_url, video_id, namespace=default_ns,
# the parameters are from syfy.com, other sites may use others,
# they also work for nbc.com
f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
for _format in formats:
ext = determine_ext(_format['url'])
if ext == 'once':
_format['ext'] = 'mp4'
self._sort_formats(formats)
subtitles = self._parse_smil_subtitles(meta, default_ns)
return formats, subtitles
def get_metadata(self, path, video_id):
info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
info = self._download_json(info_url, video_id)
subtitles = {}
captions = info.get('captions')
if isinstance(captions, list):
for caption in captions:
lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
subtitles[lang] = [{
'ext': 'srt' if mime == 'text/srt' else 'ttml',
'url': src,
}]
return {
'title': info['title'],
'subtitles': subtitles,
'description': info['description'],
'thumbnail': info['defaultThumbnailUrl'],
'duration': int_or_none(info.get('duration'), 1000),
}
class ThePlatformIE(ThePlatformBaseIE):
_VALID_URL = r'''(?x)
(?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
(?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
|theplatform:)(?P<id>[^/\?&]+)'''
_TESTS = [{
# from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
'info_dict': {
'id': 'e9I_cZgTgIPd',
'ext': 'flv',
'title': 'Blackberry\'s big, bold Z30',
'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
'duration': 247,
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
'info_dict': {
'id': '22d_qsQ6MIRT',
'ext': 'flv',
'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
'title': 'Tesla Model S: A second step towards a cleaner motoring future',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
'info_dict': {
'id': 'yMBg9E8KFxZD',
'ext': 'mp4',
'description': 'md5:644ad9188d655b742f942bf2e06b002d',
'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
}
}, {
'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
'only_matching': True,
}, {
'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
'md5': '734f3790fb5fc4903da391beeebc4836',
'info_dict': {
'id': 'tdy_or_siri_150701',
'ext': 'mp4',
'title': 'iPhone Siri’s sassy response to a math question has people talking',
'description': 'md5:a565d1deadd5086f3331d57298ec6333',
'duration': 83.0,
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1435752600,
'upload_date': '20150701',
'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"],
},
}]
@staticmethod
def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
flags = '10' if include_qs else '00'
expiration_date = '%x' % (int(time.time()) + life)
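        # The signature appended to the URL is:
        #   flags + expiration + HMAC-SHA1(keyed with sig_key, over the binary form of
        #   flags + expiration + hex(relative path)) + the hex-encoded sig_secret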
        def str_to_hex(text):
            return binascii.b2a_hex(text.encode('ascii')).decode('ascii')
        def hex_to_str(hex_str):
            return binascii.a2b_hex(hex_str)
relative_path = url.split('http://link.theplatform.com/s/')[1].split('?')[0]
clear_text = hex_to_str(flags + expiration_date + str_to_hex(relative_path))
checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
return '%s&sig=%s' % (url, sig)
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
provider_id = mobj.group('provider_id')
video_id = mobj.group('id')
if not provider_id:
provider_id = 'dJ5BDC'
path = provider_id
if mobj.group('media'):
path += '/media'
path += '/' + video_id
qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
if 'guid' in qs_dict:
webpage = self._download_webpage(url, video_id)
scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
feed_id = None
            # The feed id is usually located in the last script.
            # There seems to be no reliable pattern for the relevant script
            # filename, so try them one by one.
for script in reversed(scripts):
feed_script = self._download_webpage(script, video_id, 'Downloading feed script')
feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None)
if feed_id is not None:
break
if feed_id is None:
raise ExtractorError('Unable to find feed id')
return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
provider_id, feed_id, qs_dict['guid'][0]))
if smuggled_data.get('force_smil_url', False):
smil_url = url
elif mobj.group('config'):
config_url = url + '&form=json'
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
config = self._download_json(config_url, video_id, 'Downloading config')
if 'releaseUrl' in config:
release_url = config['releaseUrl']
else:
release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
smil_url = release_url + '&format=SMIL&formats=MPEG4&manifest=f4m'
else:
smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path
sig = smuggled_data.get('sig')
if sig:
smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
ret = self.get_metadata(path, video_id)
combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
ret.update({
'id': video_id,
'formats': formats,
'subtitles': combined_subtitles,
})
return ret
class ThePlatformFeedIE(ThePlatformBaseIE):
_URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
_VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
_TEST = {
# From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
'md5': '22d2b84f058d3586efcd99e57d59d314',
'info_dict': {
'id': 'n_hardball_5biden_140207',
'ext': 'mp4',
'title': 'The Biden factor: will Joe run in 2016?',
'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140208',
'timestamp': 1391824260,
'duration': 467.0,
'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
provider_id = mobj.group('provider_id')
feed_id = mobj.group('feed_id')
real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
feed = self._download_json(real_url, video_id)
entry = feed['entries'][0]
formats = []
subtitles = {}
first_video_id = None
duration = None
for item in entry['media$content']:
smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M'
cur_video_id = url_basename(smil_url)
if first_video_id is None:
first_video_id = cur_video_id
duration = float_or_none(item.get('plfile$duration'))
cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
formats.extend(cur_formats)
subtitles = self._merge_subtitles(subtitles, cur_subtitles)
self._sort_formats(formats)
thumbnails = [{
'url': thumbnail['plfile$url'],
'width': int_or_none(thumbnail.get('plfile$width')),
'height': int_or_none(thumbnail.get('plfile$height')),
} for thumbnail in entry.get('media$thumbnails', [])]
timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
categories = [item['media$name'] for item in entry.get('media$categories', [])]
ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
ret.update({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': timestamp,
'categories': categories,
})
return ret
|
unlicense
|
andreatosatto90/NFD
|
.waf-tools/sphinx_build.py
|
83
|
2681
|
#!/usr/bin/env python
# encoding: utf-8
# inspired by code by Hans-Martin von Gaudecker, 2012
import os
from waflib import Node, Task, TaskGen, Errors, Logs, Build, Utils
class sphinx_build(Task.Task):
color = 'BLUE'
run_str = '${SPHINX_BUILD} -D ${VERSION} -D ${RELEASE} -q -b ${BUILDERNAME} -d ${DOCTREEDIR} ${SRCDIR} ${OUTDIR}'
def __str__(self):
env = self.env
src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs])
tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs])
if self.outputs: sep = ' -> '
else: sep = ''
        return '%s [%s]: %s%s%s\n' % (self.__class__.__name__.replace('_task', ''),
self.env['BUILDERNAME'], src_str, sep, tgt_str)
@TaskGen.extension('.py', '.rst')
def sig_hook(self, node):
node.sig=Utils.h_file(node.abspath())
@TaskGen.feature("sphinx")
@TaskGen.before_method("process_source")
def apply_sphinx(self):
"""Set up the task generator with a Sphinx instance and create a task."""
inputs = []
for i in Utils.to_list(self.source):
if not isinstance(i, Node.Node):
            node = self.path.find_node(i)
else:
node = i
if not node:
raise ValueError('[%s] file not found' % i)
inputs.append(node)
task = self.create_task('sphinx_build', inputs)
conf = self.path.find_node(self.config)
task.inputs.append(conf)
confdir = conf.parent.abspath()
buildername = getattr(self, "builder", "html")
srcdir = getattr(self, "srcdir", confdir)
outdir = self.path.find_or_declare(getattr(self, "outdir", buildername)).get_bld()
doctreedir = getattr(self, "doctreedir", os.path.join(outdir.abspath(), ".doctrees"))
task.env['BUILDERNAME'] = buildername
task.env['SRCDIR'] = srcdir
task.env['DOCTREEDIR'] = doctreedir
task.env['OUTDIR'] = outdir.abspath()
task.env['VERSION'] = "version=%s" % self.VERSION
task.env['RELEASE'] = "release=%s" % self.VERSION
import imp
confData = imp.load_source('sphinx_conf', conf.abspath())
if buildername == "man":
for i in confData.man_pages:
target = outdir.find_or_declare('%s.%d' % (i[1], i[4]))
task.outputs.append(target)
if self.install_path:
self.bld.install_files("%s/man%d/" % (self.install_path, i[4]), target)
else:
task.outputs.append(outdir)
def configure(conf):
conf.find_program('sphinx-build', var='SPHINX_BUILD', mandatory=False)
# sphinx docs
from waflib.Build import BuildContext
class sphinx(BuildContext):
cmd = "sphinx"
fun = "sphinx"
|
gpl-3.0
|
alvin319/CarnotKE
|
jyhton/lib-python/2.7/plat-irix6/GET.py
|
132
|
1025
|
# Symbols from <gl/get.h>
from warnings import warnpy3k
warnpy3k("the GET module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
BCKBUFFER = 0x1
FRNTBUFFER = 0x2
DRAWZBUFFER = 0x4
DMRGB = 0
DMSINGLE = 1
DMDOUBLE = 2
DMRGBDOUBLE = 5
HZ30 = 0
HZ60 = 1
NTSC = 2
HDTV = 3
VGA = 4
IRIS3K = 5
PR60 = 6
PAL = 9
HZ30_SG = 11
A343 = 14
STR_RECT = 15
VOF0 = 16
VOF1 = 17
VOF2 = 18
VOF3 = 19
SGI0 = 20
SGI1 = 21
SGI2 = 22
HZ72 = 23
GL_VIDEO_REG = 0x00800000
GLV_GENLOCK = 0x00000001
GLV_UNBLANK = 0x00000002
GLV_SRED = 0x00000004
GLV_SGREEN = 0x00000008
GLV_SBLUE = 0x00000010
GLV_SALPHA = 0x00000020
GLV_TTLGENLOCK = 0x00000080
GLV_TTLSYNC = GLV_TTLGENLOCK
GLV_GREENGENLOCK = 0x0000100
LEFTPLANE = 0x0001
RIGHTPLANE = 0x0002
BOTTOMPLANE = 0x0004
TOPPLANE = 0x0008
NEARPLANE = 0x0010
FARPLANE = 0x0020
## GETDEF = __GL_GET_H__
NOBUFFER = 0x0
BOTHBUFFERS = 0x3
DMINTENSITYSINGLE = 3
DMINTENSITYDOUBLE = 4
MONSPECIAL = 0x20
HZ50 = 3
MONA = 5
MONB = 6
MONC = 7
MOND = 8
MON_ALL = 12
MON_GEN_ALL = 13
CMAPMULTI = 0
CMAPONE = 1
|
apache-2.0
|
grevutiu-gabriel/sympy
|
sympy/matrices/expressions/blockmatrix.py
|
66
|
14018
|
from __future__ import print_function, division
from sympy import ask, Q
from sympy.core import Basic, Add, sympify
from sympy.core.compatibility import range
from sympy.strategies import typed, exhaust, condition, do_one, unpack
from sympy.strategies.traverse import bottom_up
from sympy.utilities import sift
from sympy.matrices.expressions.matexpr import MatrixExpr, ZeroMatrix, Identity
from sympy.matrices.expressions.matmul import MatMul
from sympy.matrices.expressions.matadd import MatAdd
from sympy.matrices.expressions.transpose import Transpose, transpose
from sympy.matrices.expressions.trace import Trace
from sympy.matrices.expressions.determinant import det, Determinant
from sympy.matrices.expressions.slice import MatrixSlice
from sympy.matrices.expressions.inverse import Inverse
from sympy.matrices import Matrix, ShapeError
class BlockMatrix(MatrixExpr):
"""A BlockMatrix is a Matrix composed of other smaller, submatrices
The submatrices are stored in a SymPy Matrix object but accessed as part of
a Matrix Expression
>>> from sympy import (MatrixSymbol, BlockMatrix, symbols,
... Identity, ZeroMatrix, block_collapse)
>>> n,m,l = symbols('n m l')
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m ,m)
>>> Z = MatrixSymbol('Z', n, m)
>>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]])
>>> print(B)
Matrix([
[X, Z],
[0, Y]])
>>> C = BlockMatrix([[Identity(n), Z]])
>>> print(C)
Matrix([[I, Z]])
>>> print(block_collapse(C*B))
Matrix([[X, Z*Y + Z]])
"""
def __new__(cls, *args):
from sympy.matrices.immutable import ImmutableMatrix
args = map(sympify, args)
mat = ImmutableMatrix(*args)
obj = Basic.__new__(cls, mat)
return obj
@property
def shape(self):
numrows = numcols = 0
M = self.blocks
for i in range(M.shape[0]):
numrows += M[i, 0].shape[0]
for i in range(M.shape[1]):
numcols += M[0, i].shape[1]
return (numrows, numcols)
@property
def blockshape(self):
return self.blocks.shape
@property
def blocks(self):
return self.args[0]
@property
def rowblocksizes(self):
return [self.blocks[i, 0].rows for i in range(self.blockshape[0])]
@property
def colblocksizes(self):
return [self.blocks[0, i].cols for i in range(self.blockshape[1])]
def structurally_equal(self, other):
return (isinstance(other, BlockMatrix)
and self.shape == other.shape
and self.blockshape == other.blockshape
and self.rowblocksizes == other.rowblocksizes
and self.colblocksizes == other.colblocksizes)
def _blockmul(self, other):
if (isinstance(other, BlockMatrix) and
self.colblocksizes == other.rowblocksizes):
return BlockMatrix(self.blocks*other.blocks)
return self * other
def _blockadd(self, other):
if (isinstance(other, BlockMatrix)
and self.structurally_equal(other)):
return BlockMatrix(self.blocks + other.blocks)
return self + other
def _eval_transpose(self):
# Flip all the individual matrices
matrices = [transpose(matrix) for matrix in self.blocks]
# Make a copy
M = Matrix(self.blockshape[0], self.blockshape[1], matrices)
# Transpose the block structure
M = M.transpose()
return BlockMatrix(M)
def _eval_trace(self):
if self.rowblocksizes == self.colblocksizes:
return Add(*[Trace(self.blocks[i, i])
for i in range(self.blockshape[0])])
raise NotImplementedError(
"Can't perform trace of irregular blockshape")
def _eval_determinant(self):
if self.blockshape == (2, 2):
[[A, B],
[C, D]] = self.blocks.tolist()
if ask(Q.invertible(A)):
return det(A)*det(D - C*A.I*B)
elif ask(Q.invertible(D)):
return det(D)*det(A - B*D.I*C)
return Determinant(self)
def transpose(self):
"""Return transpose of matrix.
Examples
========
>>> from sympy import MatrixSymbol, BlockMatrix, ZeroMatrix
>>> from sympy.abc import l, m, n
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m ,m)
>>> Z = MatrixSymbol('Z', n, m)
>>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]])
>>> B.transpose()
Matrix([
[X', 0],
[Z', Y']])
>>> _.transpose()
Matrix([
[X, Z],
[0, Y]])
"""
return self._eval_transpose()
def _entry(self, i, j):
# Find row entry
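        # Note: (i < numrows) may be a symbolic relational rather than a plain bool,
        # so it is compared against False explicitly instead of using a truth test.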
for row_block, numrows in enumerate(self.rowblocksizes):
if (i < numrows) != False:
break
else:
i -= numrows
for col_block, numcols in enumerate(self.colblocksizes):
if (j < numcols) != False:
break
else:
j -= numcols
return self.blocks[row_block, col_block][i, j]
@property
def is_Identity(self):
if self.blockshape[0] != self.blockshape[1]:
return False
for i in range(self.blockshape[0]):
for j in range(self.blockshape[1]):
if i==j and not self.blocks[i, j].is_Identity:
return False
if i!=j and not self.blocks[i, j].is_ZeroMatrix:
return False
return True
@property
def is_structurally_symmetric(self):
return self.rowblocksizes == self.colblocksizes
def equals(self, other):
if self == other:
return True
if (isinstance(other, BlockMatrix) and self.blocks == other.blocks):
return True
return super(BlockMatrix, self).equals(other)
class BlockDiagMatrix(BlockMatrix):
"""
A BlockDiagMatrix is a BlockMatrix with matrices only along the diagonal
>>> from sympy import MatrixSymbol, BlockDiagMatrix, symbols, Identity
>>> n,m,l = symbols('n m l')
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m ,m)
>>> BlockDiagMatrix(X, Y)
Matrix([
[X, 0],
[0, Y]])
"""
def __new__(cls, *mats):
return Basic.__new__(BlockDiagMatrix, *mats)
@property
def diag(self):
return self.args
@property
def blocks(self):
from sympy.matrices.immutable import ImmutableMatrix
mats = self.args
data = [[mats[i] if i == j else ZeroMatrix(mats[i].rows, mats[j].cols)
for j in range(len(mats))]
for i in range(len(mats))]
return ImmutableMatrix(data)
@property
def shape(self):
return (sum(block.rows for block in self.args),
sum(block.cols for block in self.args))
@property
def blockshape(self):
n = len(self.args)
return (n, n)
@property
def rowblocksizes(self):
return [block.rows for block in self.args]
@property
def colblocksizes(self):
return [block.cols for block in self.args]
def _eval_inverse(self, expand='ignored'):
return BlockDiagMatrix(*[mat.inverse() for mat in self.args])
def _blockmul(self, other):
if (isinstance(other, BlockDiagMatrix) and
self.colblocksizes == other.rowblocksizes):
return BlockDiagMatrix(*[a*b for a, b in zip(self.args, other.args)])
else:
return BlockMatrix._blockmul(self, other)
def _blockadd(self, other):
if (isinstance(other, BlockDiagMatrix) and
self.blockshape == other.blockshape and
self.rowblocksizes == other.rowblocksizes and
self.colblocksizes == other.colblocksizes):
return BlockDiagMatrix(*[a + b for a, b in zip(self.args, other.args)])
else:
return BlockMatrix._blockadd(self, other)
def block_collapse(expr):
"""Evaluates a block matrix expression
>>> from sympy import MatrixSymbol, BlockMatrix, symbols, \
Identity, Matrix, ZeroMatrix, block_collapse
>>> n,m,l = symbols('n m l')
>>> X = MatrixSymbol('X', n, n)
>>> Y = MatrixSymbol('Y', m ,m)
>>> Z = MatrixSymbol('Z', n, m)
>>> B = BlockMatrix([[X, Z], [ZeroMatrix(m, n), Y]])
>>> print(B)
Matrix([
[X, Z],
[0, Y]])
>>> C = BlockMatrix([[Identity(n), Z]])
>>> print(C)
Matrix([[I, Z]])
>>> print(block_collapse(C*B))
Matrix([[X, Z*Y + Z]])
"""
hasbm = lambda expr: isinstance(expr, MatrixExpr) and expr.has(BlockMatrix)
rule = exhaust(
bottom_up(exhaust(condition(hasbm, typed(
{MatAdd: do_one(bc_matadd, bc_block_plus_ident),
MatMul: do_one(bc_matmul, bc_dist),
Transpose: bc_transpose,
Inverse: bc_inverse,
BlockMatrix: do_one(bc_unpack, deblock)})))))
result = rule(expr)
try:
return result.doit()
except AttributeError:
return result
def bc_unpack(expr):
if expr.blockshape == (1, 1):
return expr.blocks[0, 0]
return expr
def bc_matadd(expr):
args = sift(expr.args, lambda M: isinstance(M, BlockMatrix))
blocks = args[True]
if not blocks:
return expr
nonblocks = args[False]
block = blocks[0]
for b in blocks[1:]:
block = block._blockadd(b)
if nonblocks:
return MatAdd(*nonblocks) + block
else:
return block
def bc_block_plus_ident(expr):
idents = [arg for arg in expr.args if arg.is_Identity]
if not idents:
return expr
blocks = [arg for arg in expr.args if isinstance(arg, BlockMatrix)]
if (blocks and all(b.structurally_equal(blocks[0]) for b in blocks)
and blocks[0].is_structurally_symmetric):
block_id = BlockDiagMatrix(*[Identity(k)
for k in blocks[0].rowblocksizes])
return MatAdd(block_id * len(idents), *blocks).doit()
return expr
def bc_dist(expr):
""" Turn a*[X, Y] into [a*X, a*Y] """
factor, mat = expr.as_coeff_mmul()
if factor != 1 and isinstance(unpack(mat), BlockMatrix):
B = unpack(mat).blocks
return BlockMatrix([[factor * B[i, j] for j in range(B.cols)]
for i in range(B.rows)])
return expr
def bc_matmul(expr):
factor, matrices = expr.as_coeff_matrices()
i = 0
while (i+1 < len(matrices)):
A, B = matrices[i:i+2]
if isinstance(A, BlockMatrix) and isinstance(B, BlockMatrix):
matrices[i] = A._blockmul(B)
matrices.pop(i+1)
elif isinstance(A, BlockMatrix):
matrices[i] = A._blockmul(BlockMatrix([[B]]))
matrices.pop(i+1)
elif isinstance(B, BlockMatrix):
matrices[i] = BlockMatrix([[A]])._blockmul(B)
matrices.pop(i+1)
else:
i+=1
return MatMul(factor, *matrices).doit()
def bc_transpose(expr):
return BlockMatrix(block_collapse(expr.arg).blocks.applyfunc(transpose).T)
def bc_inverse(expr):
expr2 = blockinverse_1x1(expr)
if expr != expr2:
return expr2
return blockinverse_2x2(Inverse(reblock_2x2(expr.arg)))
def blockinverse_1x1(expr):
if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (1, 1):
mat = Matrix([[expr.arg.blocks[0].inverse()]])
return BlockMatrix(mat)
return expr
def blockinverse_2x2(expr):
if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (2, 2):
# Cite: The Matrix Cookbook Section 9.1.3
[[A, B],
[C, D]] = expr.arg.blocks.tolist()
return BlockMatrix([[ (A - B*D.I*C).I, (-A).I*B*(D - C*A.I*B).I],
[-(D - C*A.I*B).I*C*A.I, (D - C*A.I*B).I]])
else:
return expr
def deblock(B):
""" Flatten a BlockMatrix of BlockMatrices """
if not isinstance(B, BlockMatrix) or not B.blocks.has(BlockMatrix):
return B
wrap = lambda x: x if isinstance(x, BlockMatrix) else BlockMatrix([[x]])
bb = B.blocks.applyfunc(wrap) # everything is a block
from sympy import Matrix
try:
MM = Matrix(0, sum(bb[0, i].blocks.shape[1] for i in range(bb.shape[1])), [])
for row in range(0, bb.shape[0]):
M = Matrix(bb[row, 0].blocks)
for col in range(1, bb.shape[1]):
M = M.row_join(bb[row, col].blocks)
MM = MM.col_join(M)
return BlockMatrix(MM)
except ShapeError:
return B
def reblock_2x2(B):
""" Reblock a BlockMatrix so that it has 2x2 blocks of block matrices """
if not isinstance(B, BlockMatrix) or not all(d > 2 for d in B.blocks.shape):
return B
BM = BlockMatrix # for brevity's sake
return BM([[ B.blocks[0, 0], BM(B.blocks[0, 1:])],
[BM(B.blocks[1:, 0]), BM(B.blocks[1:, 1:])]])
def bounds(sizes):
""" Convert sequence of numbers into pairs of low-high pairs
>>> from sympy.matrices.expressions.blockmatrix import bounds
>>> bounds((1, 10, 50))
[(0, 1), (1, 11), (11, 61)]
"""
low = 0
rv = []
for size in sizes:
rv.append((low, low + size))
low += size
return rv
def blockcut(expr, rowsizes, colsizes):
""" Cut a matrix expression into Blocks
>>> from sympy import ImmutableMatrix, blockcut
>>> M = ImmutableMatrix(4, 4, range(16))
>>> B = blockcut(M, (1, 3), (1, 3))
>>> type(B).__name__
'BlockMatrix'
>>> ImmutableMatrix(B.blocks[0, 1])
Matrix([[1, 2, 3]])
"""
rowbounds = bounds(rowsizes)
colbounds = bounds(colsizes)
return BlockMatrix([[MatrixSlice(expr, rowbound, colbound)
for colbound in colbounds]
for rowbound in rowbounds])
|
bsd-3-clause
|
jmcarp/django
|
tests/template_tests/filter_tests/test_linebreaksbr.py
|
331
|
1742
|
from django.template.defaultfilters import linebreaksbr
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinebreaksbrTests(SimpleTestCase):
"""
The contents in "linebreaksbr" are escaped according to the current
autoescape setting.
"""
@setup({'linebreaksbr01': '{{ a|linebreaksbr }} {{ b|linebreaksbr }}'})
def test_linebreaksbr01(self):
output = self.engine.render_to_string('linebreaksbr01', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "x&<br />y x&<br />y")
@setup({'linebreaksbr02':
'{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}'})
def test_linebreaksbr02(self):
output = self.engine.render_to_string('linebreaksbr02', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "x&<br />y x&<br />y")
class FunctionTests(SimpleTestCase):
def test_newline(self):
self.assertEqual(linebreaksbr('line 1\nline 2'), 'line 1<br />line 2')
def test_carriage(self):
self.assertEqual(linebreaksbr('line 1\rline 2'), 'line 1<br />line 2')
def test_carriage_newline(self):
self.assertEqual(linebreaksbr('line 1\r\nline 2'), 'line 1<br />line 2')
def test_non_string_input(self):
self.assertEqual(linebreaksbr(123), '123')
def test_autoescape(self):
self.assertEqual(
linebreaksbr('foo\n<a>bar</a>\nbuz'),
'foo<br /><a>bar</a><br />buz',
)
def test_autoescape_off(self):
self.assertEqual(
linebreaksbr('foo\n<a>bar</a>\nbuz', autoescape=False),
'foo<br /><a>bar</a><br />buz',
)
|
bsd-3-clause
|
hachr/node-gyp
|
gyp/pylib/gyp/mac_tool.py
|
377
|
19309
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile('[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
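      # Illustrative example (derived from IDENT_RE above; the value is hypothetical):
      # with PRODUCT_NAME="My App", ${PRODUCT_NAME:identifier} becomes "My_App"
      # and ${PRODUCT_NAME:rfc1034identifier} becomes "My-App".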
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
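    # Illustrative example (hypothetical signature): package type 'APPL' with
    # signature 'chrm' produces a PkgInfo containing "APPLchrm"; a missing or
    # malformed signature produces "APPL????".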
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line):
print >>sys.stderr, line
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
      A tuple containing two dictionaries: variable substitutions and values
      to override when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
    Checks all the installed provisioning profiles (or, if the user specified
    the PROVISIONING_PROFILE variable, only consults it) and selects the most
    specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
      Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return dict((k, self._ExpandVariables(data[k],
substitutions)) for k in data)
return data
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
mit
|
ysh329/Titanic-Machine-Learning-from-Disaster
|
Titanic/class_create_model_of_bayes.py
|
1
|
6128
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_model_of_bayes.py
# Description:
#
# Author: Shuai Yuan
# E-mail: [email protected]
# Create: 2015-12-21 21:04:53
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
import decorator_of_function
################################### PART2 CLASS && FUNCTION ###########################
class CreateBayesModel(object):
Decorator = decorator_of_function.CreateDecorator()
@Decorator.log_of_function
def __init__(self):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = 'main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateBayesModel.__name__))
try:
self.con = MySQLdb.connect(host='localhost', user='root', passwd='931209', charset='utf8')
logging.info("Success in connecting MySQL.")
except MySQLdb.Error, e:
logging.error("Fail in connecting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
@Decorator.log_of_function
def __del__(self):
try:
self.con.close()
logging.info("Success in quiting MySQL.")
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in quiting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
logging.info("END CLASS {class_name}.".format(class_name = CreateBayesModel.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateBayesModel.__name__, delta_time = self.end - self.start))
@Decorator.log_of_function
def get_data_from_database(self, database_name, passenger_table_name):
cursor = self.con.cursor()
sql_list = []
# training set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=1"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
# test set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=0"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
for sql_idx in xrange(len(sql_list)):
sql = sql_list[sql_idx]
try:
cursor.execute(sql)
if sql_idx == 0:
train_data = cursor.fetchall()
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
elif sql_idx == 1:
test_data = cursor.fetchall()
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in fetch data from MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
train_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
train_data)
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
test_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
test_data)
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
return train_data, test_data
################################### PART3 CLASS TEST ##################################
#"""
# Initial parameters
database_name = "TitanicDB"
passenger_table_name = "passenger_table"
BayesModel = CreateBayesModel()
train_data, test_data = BayesModel.get_data_from_database(database_name = database_name,\
passenger_table_name = passenger_table_name)
#"""
|
mit
|
DuraCopter/ardupilot
|
Tools/ardupilotwaf/ap_library.py
|
21
|
6948
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Waf tool for Ardupilot libraries. The function bld.ap_library() creates the
necessary task generators for creating the objects of a library for a vehicle.
That includes the common objects, which are shared among vehicles. That
function is used by bld.ap_stlib() and shouldn't need to be called otherwise.
The environment variable AP_LIBRARIES_OBJECTS_KW is a dictionary of keyword
arguments to be passed to bld.objects() during the creation of the task
generators. You can use it to pass extra arguments to that function (although
some of them will be rewritten; see the implementation for details).
This tool also checks that the headers included by the source files don't use
vehicle-dependent macros, and it fails the build if they do.
"""
import os
import re
from waflib import Errors, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import after_method, before_method, feature
from waflib.Tools import c_preproc
import ardupilotwaf as ap
UTILITY_SOURCE_EXTS = ['utility/' + glob for glob in ap.SOURCE_EXTS]
def _common_tgen_name(library):
return 'objs/%s' % library
def _vehicle_tgen_name(library, vehicle):
return 'objs/%s/%s' % (library, vehicle)
_vehicle_indexes = {}
def _vehicle_index(vehicle):
""" Used for the objects taskgens idx parameter """
if vehicle not in _vehicle_indexes:
_vehicle_indexes[vehicle] = len(_vehicle_indexes) + 1
return _vehicle_indexes[vehicle]
_vehicle_macros = ('SKETCHNAME', 'SKETCH', 'APM_BUILD_DIRECTORY',
'APM_BUILD_TYPE')
_macros_re = re.compile(r'\b(%s)\b' % '|'.join(_vehicle_macros))
def _remove_comments(s):
return c_preproc.re_cpp.sub(c_preproc.repl, s)
_depends_on_vehicle_cache = {}
def _depends_on_vehicle(bld, source_node):
path = source_node.srcpath()
if path not in _depends_on_vehicle_cache:
s = _remove_comments(source_node.read())
_depends_on_vehicle_cache[path] = _macros_re.search(s) is not None
return _depends_on_vehicle_cache[path]
@conf
def ap_library(bld, library, vehicle):
try:
common_tg = bld.get_tgen_by_name(_common_tgen_name(library))
except Errors.WafError:
common_tg = None
try:
vehicle_tg = bld.get_tgen_by_name(_vehicle_tgen_name(library, vehicle))
except Errors.WafError:
vehicle_tg = None
if common_tg and vehicle_tg:
return
if library.find('*') != -1:
# allow for wildcard patterns, used for submodules without direct waf support
library_dir = bld.srcnode.find_dir('.')
wildcard = library
else:
library_dir = bld.srcnode.find_dir('libraries/%s' % library)
wildcard = ap.SOURCE_EXTS + UTILITY_SOURCE_EXTS
if not library_dir:
bld.fatal('ap_library: %s not found' % library)
src = library_dir.ant_glob(wildcard)
if not common_tg:
kw = dict(bld.env.AP_LIBRARIES_OBJECTS_KW)
kw['features'] = kw.get('features', []) + ['ap_library_object']
kw.update(
name=_common_tgen_name(library),
source=[s for s in src if not _depends_on_vehicle(bld, s)],
idx=0,
)
bld.objects(**kw)
if not vehicle_tg:
source = [s for s in src if _depends_on_vehicle(bld, s)]
if not source:
return
kw = dict(bld.env.AP_LIBRARIES_OBJECTS_KW)
kw['features'] = kw.get('features', []) + ['ap_library_object']
kw.update(
name=_vehicle_tgen_name(library, vehicle),
source=source,
defines=ap.get_legacy_defines(vehicle),
idx=_vehicle_index(vehicle),
)
bld.objects(**kw)
@before_method('process_use')
@feature('cxxstlib')
def process_ap_libraries(self):
self.use = Utils.to_list(getattr(self, 'use', []))
libraries = Utils.to_list(getattr(self, 'ap_libraries', []))
vehicle = getattr(self, 'ap_vehicle', None)
for l in libraries:
self.use.append(_common_tgen_name(l))
if vehicle:
self.use.append(_vehicle_tgen_name(l, vehicle))
class ap_library_check_headers(Task.Task):
color = 'PINK'
before = 'cxx c'
dispatched_headers = set()
whitelist = (
'libraries/AP_Vehicle/AP_Vehicle_Type.h',
)
whitelist = tuple(os.path.join(*p.split('/')) for p in whitelist)
def run(self):
for n in self.headers:
s = _remove_comments(n.read())
if _macros_re.search(s):
raise Errors.WafError('%s: library header uses vehicle-dependent macros' % n.srcpath())
def uid(self):
try:
return self._uid
except AttributeError:
self._uid = 'check_headers-%s' % self.compiled_task.uid()
return self._uid
def signature(self):
bld = self.generator.bld
# force scan() to be called
bld.imp_sigs[self.uid()] = None
s = super(ap_library_check_headers, self).signature()
bld.ap_persistent_task_sigs[self.uid()] = s
return s
def scan(self):
r = []
self.headers = []
srcnode_path = self.generator.bld.srcnode.abspath()
# force dependency scan, if necessary
self.compiled_task.signature()
for n in self.generator.bld.node_deps[self.compiled_task.uid()]:
# using common Node methods doesn't work here
p = n.abspath()
if not p.startswith(srcnode_path):
continue
if os.path.relpath(p, srcnode_path) in self.whitelist:
continue
r.append(n)
if n not in self.dispatched_headers:
self.headers.append(n)
self.dispatched_headers.add(n)
return r, []
def __str__(self):
return str(self.compiled_task)
def keyword(self):
return 'Checking included headers'
@feature('ap_library_object')
@after_method('process_source')
def ap_library_register_for_check(self):
if not hasattr(self, 'compiled_tasks'):
return
if not self.env.ENABLE_HEADER_CHECKS:
return
for t in self.compiled_tasks:
tsk = self.create_task('ap_library_check_headers')
tsk.compiled_task = t
def configure(cfg):
cfg.env.AP_LIBRARIES_OBJECTS_KW = dict()
|
gpl-3.0
|
manassolanki/frappe
|
frappe/core/doctype/feedback_trigger/feedback_trigger.py
|
11
|
7279
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe import _
from frappe.utils import get_url
from frappe.model.document import Document
from frappe.utils.jinja import validate_template
class FeedbackTrigger(Document):
def validate(self):
frappe.cache().delete_value('feedback_triggers')
validate_template(self.subject)
validate_template(self.message)
self.validate_condition()
def on_trash(self):
frappe.cache().delete_value('feedback_triggers')
def validate_condition(self):
temp_doc = frappe.new_doc(self.document_type)
if self.condition:
try:
frappe.safe_eval(self.condition, None, get_context(temp_doc))
except:
frappe.throw(_("The condition '{0}' is invalid").format(self.condition))
def trigger_feedback_request(doc, method):
"""Trigger the feedback alert, or delete feedback requests on delete"""
def _get():
triggers = {}
if not (frappe.flags.in_migrate or frappe.flags.in_install):
for d in frappe.get_all('Feedback Trigger', dict(enabled=1), ['name', 'document_type']):
triggers[d.document_type] = d.name
return triggers
feedback_triggers = frappe.cache().get_value('feedback_triggers', _get)
if doc.doctype in feedback_triggers:
if doc.flags.in_delete:
frappe.enqueue('frappe.core.doctype.feedback_trigger.feedback_trigger.delete_feedback_request_and_feedback',
reference_doctype=doc.doctype, reference_name=doc.name, now=frappe.flags.in_test)
else:
frappe.enqueue('frappe.core.doctype.feedback_trigger.feedback_trigger.send_feedback_request',
trigger=feedback_triggers[doc.doctype], reference_doctype=doc.doctype,
reference_name=doc.name, now=frappe.flags.in_test)
@frappe.whitelist()
def send_feedback_request(reference_doctype, reference_name, trigger="Manual", details=None, is_manual=False):
""" send feedback alert """
if is_feedback_request_already_sent(reference_doctype, reference_name, is_manual=is_manual):
frappe.msgprint(_("Feedback Request is already sent to user"))
return None
details = json.loads(details) if details else \
get_feedback_request_details(reference_doctype, reference_name, trigger=trigger)
if not details:
return None
feedback_request, url = get_feedback_request_url(reference_doctype,
reference_name, details.get("recipients"), trigger)
feedback_msg = frappe.render_template("templates/emails/feedback_request_url.html", { "url": url })
# appending feedback url to message body
message = "{message}{feedback_msg}".format(
message=details.get("message"),
feedback_msg=feedback_msg
)
details.update({
"message": message,
"header": [details.get('subject'), 'blue']
})
if details:
frappe.sendmail(**details)
frappe.db.set_value("Feedback Request", feedback_request, "is_sent", 1)
@frappe.whitelist()
def get_feedback_request_details(reference_doctype, reference_name, trigger="Manual", request=None):
if not frappe.db.get_value(reference_doctype, reference_name):
# reference document is either deleted or renamed
return
elif not trigger and not request and not frappe.db.get_value("Feedback Trigger", { "document_type": reference_doctype }):
return
elif not trigger and request:
trigger = frappe.db.get_value("Feedback Request", request, "feedback_trigger")
else:
trigger = frappe.db.get_value("Feedback Trigger", { "document_type": reference_doctype })
if not trigger:
return
feedback_trigger = frappe.get_doc("Feedback Trigger", trigger)
doc = frappe.get_doc(reference_doctype, reference_name)
context = get_context(doc)
recipients = doc.get(feedback_trigger.email_fieldname, None)
if feedback_trigger.check_communication:
communications = frappe.get_all("Communication", filters={
"reference_doctype": reference_doctype,
"reference_name": reference_name,
"communication_type": "Communication",
"sent_or_received": "Sent"
}, fields=["name"])
if len(communications) < 1:
frappe.msgprint(_("At least one reply is mandatory before requesting feedback"))
return None
if recipients and (not feedback_trigger.condition or \
frappe.safe_eval(feedback_trigger.condition, None, context)):
subject = feedback_trigger.subject
context.update({ "feedback_trigger": feedback_trigger })
if "{" in subject:
subject = frappe.render_template(feedback_trigger.subject, context)
feedback_request_message = frappe.render_template(feedback_trigger.message, context)
return {
"subject": subject,
"recipients": recipients,
"reference_name":doc.name,
"reference_doctype":doc.doctype,
"message": feedback_request_message,
}
else:
frappe.msgprint(_("Feedback conditions do not match"))
return None
def get_feedback_request_url(reference_doctype, reference_name, recipients, trigger="Manual"):
""" prepare the feedback request url """
is_manual = 1 if trigger == "Manual" else 0
feedback_request = frappe.get_doc({
"is_manual": is_manual,
"feedback_trigger": trigger,
"doctype": "Feedback Request",
"reference_name": reference_name,
"reference_doctype": reference_doctype,
}).insert(ignore_permissions=True)
feedback_url = "{base_url}/feedback?reference_doctype={doctype}&reference_name={docname}&email={email_id}&key={nonce}".format(
base_url=get_url(),
doctype=reference_doctype,
docname=reference_name,
email_id=recipients,
nonce=feedback_request.key
)
return [ feedback_request.name, feedback_url ]
def is_feedback_request_already_sent(reference_doctype, reference_name, is_manual=False):
"""
	Check whether a feedback request mail has already been sent but the feedback
	has not yet been submitted, to avoid sending multiple feedback request mails.
"""
is_request_sent = False
filters = {
"is_sent": 1,
"reference_name": reference_name,
"is_manual": 1 if is_manual else 0,
"reference_doctype": reference_doctype
}
if is_manual:
filters.update({ "is_feedback_submitted": 0 })
feedback_request = frappe.get_all("Feedback Request", filters=filters, fields=["name"])
if feedback_request: is_request_sent = True
return is_request_sent
def get_enabled_feedback_trigger():
""" get mapper of all the enable feedback trigger """
triggers = frappe.get_all("Feedback Trigger", filters={"enabled": 1},
fields=["document_type", "name"], as_list=True)
triggers = { dt[0]: dt[1] for dt in triggers }
return triggers
def get_context(doc):
return { "doc": doc }
def delete_feedback_request_and_feedback(reference_doctype, reference_name):
""" delete all the feedback request and feedback communication """
if not all([reference_doctype, reference_name]):
return
feedback_requests = frappe.get_all("Feedback Request", filters={
"is_feedback_submitted": 0,
"reference_doctype": reference_doctype,
"reference_name": reference_name
})
communications = frappe.get_all("Communication", {
"communication_type": "Feedback",
"reference_doctype": reference_doctype,
"reference_name": reference_name
})
for request in feedback_requests:
frappe.delete_doc("Feedback Request", request.get("name"), ignore_permissions=True)
for communication in communications:
frappe.delete_doc("Communication", communication.get("name"), ignore_permissions=True)
|
mit
|
ctu-osgeorel/subdayprecip-design
|
openlayers2/proj4js/tools/mergejs.py
|
91
|
8240
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# or (ideally) within a class comment definition
#
# /**
# * @class
# *
# * @requires OpenLayers/Layer.js
# */
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2007 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
import glob
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
[last]
core/api.js
        [include]
        [exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
    *before* all other files (in the order listed). The files in the `last`
    section will be forced to load *after* all the other files (in the
    order listed).
    If the `include` section is non-empty, only the files matching its patterns
    (plus the `first` files) will be imported. The files listed in the `exclude`
    section will not be imported.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [line.strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip()] # Skip blank lines
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def run (sourceDirectory, outputFilename = None, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
include = False
for included in cfg.include:
if glob.fnmatch.fnmatch(filepath, included):
include = True
if include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (filepath not in cfg.exclude):
exclude = False
for excluded in cfg.exclude:
if glob.fnmatch.fnmatch(filepath, excluded):
exclude = True
if not exclude:
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
order = [] # List of filepaths to output, in a dependency satisfying order
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
print
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
|
gpl-2.0
|
weblabdeusto/weblabdeusto
|
server/src/experiments/romie/romie_blocks.py
|
3
|
3232
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Iban Eguia <[email protected]>
#
import weblab.experiment.experiment as Experiment
from voodoo.override import Override
from voodoo.log import logged
import json
import urllib2
class RoMIEBlocklyExperiment(Experiment.Experiment):
def __init__(self, coord_address, locator, cfg_manager, *args, **kwargs):
super(RoMIEBlocklyExperiment, self).__init__(*args, **kwargs)
self._cfg_manager = cfg_manager
self.read_base_config()
def read_base_config(self):
"""
Reads the base config parameters from the config file.
"""
pass
@Override(Experiment.Experiment)
@logged("info")
def do_get_api(self):
return "2"
@Override(Experiment.Experiment)
@logged("info")
def do_start_experiment(self, client_initial_data, server_initial_data):
"""
Callback run when the experiment is started.
"""
if(self._cfg_manager.get_value('debug')):
print "[RoMIE-Blockly] do_start_experiment called"
return json.dumps({"initial_configuration": client_initial_data})
@Override(Experiment.Experiment)
@logged("info")
def do_send_command_to_device(self, command):
"""
Callback run when the client sends a command to the experiment
@param command Command sent by the client, as a string.
"""
if(self._cfg_manager.get_value('debug')):
print "[RoMIE-Blockly] Command received: %s" % command
command = json.loads(command)
response = None
tag = None
if command['command'] == 'F':
response = urllib2.urlopen(self._cfg_manager.get_value('romie_server')+'f', timeout = 60).read()
if 'Tag' in response:
tag = response[5:19].replace(' ', ':')
elif command['command'] == 'L':
response = urllib2.urlopen(self._cfg_manager.get_value('romie_server')+'l', timeout = 60).read()
elif command['command'] == 'R':
response = urllib2.urlopen(self._cfg_manager.get_value('romie_server')+'r', timeout = 60).read()
elif command['command'] == 'S':
response = urllib2.urlopen(self._cfg_manager.get_value('romie_server')+'s', timeout = 60).read()
if 'ACK' in response and '0' in response:
response = False
elif 'ACK' in response and '1' in response:
response = True
else:
return "ERR"
if response is not None:
return json.dumps({"response": response, "tag": tag})
return "ERR"
@Override(Experiment.Experiment)
@logged("info")
def do_dispose(self):
"""
Callback to perform cleaning after the experiment ends.
"""
if(self._cfg_manager.get_value('debug')):
print "[RoMIE-Blockly] do_dispose called"
return "OK"
|
bsd-2-clause
|
joxeankoret/diaphora
|
pygments/util.py
|
26
|
11789
|
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
(<\?.*?\?>)?\s*
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*
(?: \s+ # optional in HTML5
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")?
)
[^>]*>
''')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>(?uism)')
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
"""Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
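# Illustrative use (not part of the original module; the lexer and its return
# value are hypothetical). Wrapping an analyse_text heuristic clamps its result
# to [0.0, 1.0] and swallows exceptions:
#
#     class MyLexer(object):
#         @make_analysator
#         def analyse_text(text):
#             return 2.5 if text.startswith('#!') else 0
#
#     MyLexer.analyse_text('#!/bin/sh')   # -> 1.0 (clamped from 2.5)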
def shebang_matches(text, regex):
"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python narrow build compatibility
def _surrogatepair(c):
# Given a unicode character code
# with length greater than 16 bits,
# return the two 16 bit surrogate pair.
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
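# Illustrative values (not part of the original module):
#     _surrogatepair(0x10000)  -> (0xd800, 0xdc00)
#     _surrogatepair(0x1F600)  -> (0xd83d, 0xde00)   # UTF-16 pair for U+1F600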
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1),
                            unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
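# Illustrative results (not part of the original module): on a wide Unicode
# build, unirange(0x10000, 0x10FFFF) simply returns the character class
# u'[\U00010000-\U0010FFFF]'; on a narrow build the same call expands into an
# alternation of surrogate-pair patterns built from the branches above.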
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
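# Illustrative output (not part of the original module):
#     format_lines('KEYWORDS', ['if', 'else'])
# produces the string
#     KEYWORDS = (
#         'if',
#         'else',
#     )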
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
class Future(object):
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
            text = text.decode(prefencoding)
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
# Python 2/3 compatibility
if sys.version_info < (3, 0):
unichr = unichr
xrange = xrange
string_types = (str, unicode)
text_type = unicode
u_prefix = 'u'
iteritems = dict.iteritems
itervalues = dict.itervalues
import StringIO
import cStringIO
# unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
BytesIO = cStringIO.StringIO
else:
unichr = chr
xrange = range
string_types = (str,)
text_type = str
u_prefix = ''
iteritems = dict.items
itervalues = dict.values
from io import StringIO, BytesIO, TextIOWrapper
class UnclosingTextIOWrapper(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
pass
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
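# Illustrative use of add_metaclass (not part of the original module; the
# metaclass and class names are hypothetical):
#
#     @add_metaclass(SomeMeta)
#     class Widget(object):
#         pass
#
# behaves like "class Widget(object, metaclass=SomeMeta)" on Python 3 and like
# setting __metaclass__ on Python 2, without writing version-specific code.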
|
agpl-3.0
|
kaixinjxq/wpt-tools
|
lint/lint.py
|
159
|
9915
|
import os
import subprocess
import re
import sys
import fnmatch
from collections import defaultdict
from .. import localpaths
from manifest.sourcefile import SourceFile
here = os.path.abspath(os.path.split(__file__)[0])
repo_root = localpaths.repo_root
def git(command, *args):
args = list(args)
proc_kwargs = {"cwd": repo_root}
command_line = ["git", command] + args
try:
return subprocess.check_output(command_line, **proc_kwargs)
except subprocess.CalledProcessError:
raise
def iter_files():
for item in git("ls-tree", "-r", "--name-only", "HEAD").split("\n"):
yield item
def check_path_length(path):
if len(path) + 1 > 150:
return [("PATH LENGTH", "/%s longer than maximum path length (%d > 150)" % (path, len(path) + 1), None)]
return []
def set_type(error_type, errors):
return [(error_type,) + error for error in errors]
def parse_whitelist_file(filename):
data = defaultdict(lambda:defaultdict(set))
with open(filename) as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
parts = [item.strip() for item in line.split(":")]
if len(parts) == 2:
parts.append(None)
else:
parts[-1] = int(parts[-1])
error_type, file_match, line_number = parts
data[file_match][error_type].add(line_number)
def inner(path, errors):
whitelisted = [False for item in xrange(len(errors))]
for file_match, whitelist_errors in data.iteritems():
if fnmatch.fnmatch(path, file_match):
for i, (error_type, msg, line) in enumerate(errors):
if "*" in whitelist_errors:
whitelisted[i] = True
elif error_type in whitelist_errors:
allowed_lines = whitelist_errors[error_type]
if None in allowed_lines or line in allowed_lines:
whitelisted[i] = True
return [item for i, item in enumerate(errors) if not whitelisted[i]]
return inner
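# Illustrative whitelist entries (not part of the original source; the paths
# are made up). Each line has the form "ERROR TYPE: file glob[: line number]":
#     TRAILING WHITESPACE: examples/*.html
#     CONSOLE: examples/debug.html: 12
# The first silences trailing-whitespace errors for every HTML file under
# examples/, the second only the CONSOLE error reported on line 12 of that file.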
_whitelist_fn = None
def whitelist_errors(path, errors):
global _whitelist_fn
if _whitelist_fn is None:
_whitelist_fn = parse_whitelist_file(os.path.join(repo_root, "lint.whitelist"))
return _whitelist_fn(path, errors)
class Regexp(object):
pattern = None
file_extensions = None
error = None
_re = None
def __init__(self):
self._re = re.compile(self.pattern)
def applies(self, path):
return (self.file_extensions is None or
os.path.splitext(path)[1] in self.file_extensions)
def search(self, line):
return self._re.search(line)
class TrailingWhitespaceRegexp(Regexp):
pattern = "[ \t\f\v]$"
error = "TRAILING WHITESPACE"
class TabsRegexp(Regexp):
pattern = "^\t"
error = "INDENT TABS"
class CRRegexp(Regexp):
pattern = "\r$"
error = "CR AT EOL"
class W3CTestOrgRegexp(Regexp):
pattern = "w3c\-test\.org"
error = "W3C-TEST.ORG"
class Webidl2Regexp(Regexp):
pattern = "webidl2\.js"
error = "WEBIDL2.JS"
class ConsoleRegexp(Regexp):
pattern = "console\.[a-zA-Z]+\s*\("
error = "CONSOLE"
file_extensions = [".html", ".htm", ".js", ".xht", ".html", ".svg"]
class PrintRegexp(Regexp):
pattern = "print(?:\s|\s*\()"
error = "PRINT STATEMENT"
file_extensions = [".py"]
regexps = [item() for item in
[TrailingWhitespaceRegexp,
TabsRegexp,
CRRegexp,
W3CTestOrgRegexp,
Webidl2Regexp,
ConsoleRegexp,
PrintRegexp]]
def check_regexp_line(path, f):
errors = []
applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
for i, line in enumerate(f):
for regexp in applicable_regexps:
if regexp.search(line):
errors.append((regexp.error, "%s line %i" % (path, i+1), i+1))
return errors
def check_parsed(path, f):
source_file = SourceFile(repo_root, path, "/")
errors = []
if source_file.name_is_non_test or source_file.name_is_manual:
return []
if source_file.markup_type is None:
return []
if source_file.root is None:
return [("PARSE-FAILED", "Unable to parse file %s" % path, None)]
if len(source_file.timeout_nodes) > 1:
errors.append(("MULTIPLE-TIMEOUT", "%s more than one meta name='timeout'" % path, None))
for timeout_node in source_file.timeout_nodes:
timeout_value = timeout_node.attrib.get("content", "").lower()
if timeout_value != "long":
errors.append(("INVALID-TIMEOUT", "%s invalid timeout value %s" % (path, timeout_value), None))
if source_file.testharness_nodes:
if len(source_file.testharness_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESS",
"%s more than one <script src='/resources/testharness.js'>" % path, None))
testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
if not testharnessreport_nodes:
errors.append(("MISSING-TESTHARNESSREPORT",
"%s missing <script src='/resources/testharnessreport.js'>" % path, None))
else:
if len(testharnessreport_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESSREPORT",
"%s more than one <script src='/resources/testharnessreport.js'>" % path, None))
for element in source_file.variant_nodes:
if "content" not in element.attrib:
errors.append(("VARIANT-MISSING",
"%s has <meta name=variant> missing 'content' attribute" % path, None))
else:
variant = element.attrib["content"]
if variant != "" and variant[0] not in ("?", "#"):
errors.append(("MALFORMED-VARIANT",
"%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path, None))
seen_elements = {"timeout": False,
"testharness": False,
"testharnessreport": False}
required_elements = [key for key, value in {"testharness": True,
"testharnessreport": len(testharnessreport_nodes) > 0,
"timeout": len(source_file.timeout_nodes) > 0}.iteritems()
if value]
for elem in source_file.root.iter():
if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
seen_elements["timeout"] = True
if seen_elements["testharness"]:
errors.append(("LATE-TIMEOUT",
"%s <meta name=timeout> seen after testharness.js script" % path, None))
elif elem == source_file.testharness_nodes[0]:
seen_elements["testharness"] = True
elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
seen_elements["testharnessreport"] = True
if not seen_elements["testharness"]:
errors.append(("EARLY-TESTHARNESSREPORT",
"%s testharnessreport.js script seen before testharness.js script" % path, None))
if all(seen_elements[name] for name in required_elements):
break
return errors
def output_errors(errors):
for error_type, error, line_number in errors:
print "%s: %s" % (error_type, error)
def output_error_count(error_count):
if not error_count:
return
by_type = " ".join("%s: %d" % item for item in error_count.iteritems())
count = sum(error_count.values())
if count == 1:
print "There was 1 error (%s)" % (by_type,)
else:
print "There were %d errors (%s)" % (count, by_type)
def main():
error_count = defaultdict(int)
last = None
def run_lint(path, fn, last, *args):
errors = whitelist_errors(path, fn(path, *args))
if errors:
last = (errors[-1][0], path)
output_errors(errors)
for error_type, error, line in errors:
error_count[error_type] += 1
return last
for path in iter_files():
abs_path = os.path.join(repo_root, path)
if not os.path.exists(path):
continue
for path_fn in path_lints:
last = run_lint(path, path_fn, last)
if not os.path.isdir(abs_path):
with open(abs_path) as f:
for file_fn in file_lints:
last = run_lint(path, file_fn, last, f)
f.seek(0)
output_error_count(error_count)
if error_count:
print
print "You must fix all errors; for details on how to fix them, see"
print "https://github.com/w3c/web-platform-tests/blob/master/docs/lint-tool.md"
print
print "However, instead of fixing a particular error, it's sometimes"
print "OK to add a line to the lint.whitelist file in the root of the"
print "web-platform-tests directory to make the lint tool ignore it."
print
print "For example, to make the lint tool ignore all '%s'" % last[0]
print "errors in the %s file," % last[1]
print "you could add the following line to the lint.whitelist file."
print
print "%s:%s" % (last[0], last[1])
return sum(error_count.itervalues())
path_lints = [check_path_length]
file_lints = [check_regexp_line, check_parsed]
if __name__ == "__main__":
error_count = main()
if error_count > 0:
sys.exit(1)
|
bsd-3-clause
|
NaturalGIS/QGIS
|
python/plugins/processing/modeler/EditModelAction.py
|
10
|
2069
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditModelAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication, QgsProcessingModelAlgorithm, QgsMessageLog
from processing.gui.ContextAction import ContextAction
from processing.modeler.ModelerDialog import ModelerDialog
from qgis.core import Qgis
from qgis.utils import iface
class EditModelAction(ContextAction):
def __init__(self):
super().__init__()
self.name = QCoreApplication.translate('EditModelAction', 'Edit Model…')
def isEnabled(self):
return isinstance(self.itemData, QgsProcessingModelAlgorithm)
def execute(self):
alg = self.itemData
ok, msg = alg.canExecute()
if not ok:
iface.messageBar().pushMessage(QCoreApplication.translate('EditModelAction', 'Cannot edit model: {}').format(msg), level=Qgis.Warning)
else:
dlg = ModelerDialog(alg)
dlg.update_model.connect(self.updateModel)
dlg.show()
def updateModel(self):
QgsApplication.processingRegistry().providerById('model').refreshAlgorithms()
|
gpl-2.0
|
iamutkarshtiwari/Browse-fiddleFeature
|
browser.py
|
1
|
27765
|
# Copyright (C) 2006, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso, Simon Schampijer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import time
import re
import logging
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import WebKit
from gi.repository import Soup
from gi.repository import GConf
from sugar3.activity import activity
from sugar3.graphics import style
from sugar3.graphics.icon import Icon
from widgets import BrowserNotebook
from palettes import ContentInvoker
from filepicker import FilePicker
import globalhistory
import downloadmanager
from pdfviewer import PDFTabPage
ZOOM_ORIGINAL = 1.0
_ZOOM_AMOUNT = 0.1
LIBRARY_PATH = '/usr/share/library-common/index.html'
_WEB_SCHEMES = ['http', 'https', 'ftp', 'file', 'javascript', 'data',
'about', 'gopher', 'mailto']
_NON_SEARCH_REGEX = re.compile('''
(^localhost(\\.[^\s]+)?(:\\d+)?(/.*)?$|
^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]$|
^::[0-9a-f:]*$| # IPv6 literals
^[0-9a-f:]+:[0-9a-f:]*$| # IPv6 literals
^[^\\.\s]+\\.[^\\.\s]+.*$| # foo.bar...
^https?://[^/\\.\s]+.*$|
^about:.*$|
^data:.*$|
^file:.*$)
''', re.VERBOSE)
DEFAULT_ERROR_PAGE = os.path.join(activity.get_bundle_path(),
'data/error_page.tmpl')
HOME_PAGE_GCONF_KEY = '/desktop/sugar/browser/home_page'
_sugar_version = None
def get_sugar_version():
global _sugar_version
if _sugar_version is None:
if 'SUGAR_VERSION' in os.environ:
version = os.environ['SUGAR_VERSION']
major, minor = version.split('.')[0:2]
# use the last stable version
_sugar_version = '%s.%s' % (major, int(minor) - int(minor) % 2)
else:
logging.error('SUGAR_VERSION env variable not found')
_sugar_version = '0.100'
return _sugar_version
class TabbedView(BrowserNotebook):
__gtype_name__ = 'TabbedView'
__gsignals__ = {
'focus-url-entry': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
}
def __init__(self):
BrowserNotebook.__init__(self)
self.props.show_border = False
self.props.scrollable = True
# Used to connect and disconnect functions when 'switch-page'
self._browser = None
self._load_status_changed_hid = None
self.connect('size-allocate', self.__size_allocate_cb)
self.connect('page-added', self.__page_added_cb)
self.connect('page-removed', self.__page_removed_cb)
self.connect_after('switch-page', self.__switch_page_cb)
self.add_tab()
self._update_closing_buttons()
self._update_tab_sizes()
def __switch_page_cb(self, tabbed_view, page, page_num):
if tabbed_view.get_n_pages():
self._connect_to_browser(tabbed_view.props.current_browser)
def _connect_to_browser(self, browser):
if self._browser is not None:
self._browser.disconnect(self._load_status_changed_hid)
self._browser = browser
self._load_status_changed_hid = self._browser.connect(
'notify::load-status', self.__load_status_changed_cb)
def normalize_or_autosearch_url(self, url):
"""Normalize the url input or return a url for search.
We use SoupURI as an indication of whether the value given in url
is not something we want to search; we only do that, though, if
the address has a web scheme, because SoupURI will consider any
string: as a valid scheme, and we will end up prepending http://
to it.
This code is borrowed from Epiphany.
url -- input string that can be normalized to an url or serve
as search
Return: a string containing a valid url
"""
def has_web_scheme(address):
if address == '':
return False
scheme, sep, after = address.partition(':')
if sep == '':
return False
return scheme in _WEB_SCHEMES
soup_uri = None
effective_url = None
if has_web_scheme(url):
try:
soup_uri = Soup.URI.new(url)
except TypeError:
pass
if soup_uri is None and not _NON_SEARCH_REGEX.match(url):
# Get the user's LANG to use as default language of
# the results
locale = os.environ.get('LANG', '')
language_location = locale.split('.', 1)[0].lower()
language = language_location.split('_')[0]
# If the string doesn't look like an URI, let's search it:
url_search = 'http://www.google.com/search?' \
'q=%(query)s&ie=UTF-8&oe=UTF-8&hl=%(language)s'
query_param = Soup.form_encode_hash({'q': url})
# [2:] here is getting rid of 'q=':
effective_url = url_search % {'query': query_param[2:],
'language': language}
else:
if has_web_scheme(url):
effective_url = url
else:
effective_url = 'http://' + url
return effective_url
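    # Illustrative behaviour (not part of the original source):
    #     normalize_or_autosearch_url('wikipedia.org')
    #         -> 'http://wikipedia.org'
    #     normalize_or_autosearch_url('sugar labs')
    #         -> a http://www.google.com/search?q=... URL for that query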
def __size_allocate_cb(self, widget, allocation):
self._update_tab_sizes()
def __page_added_cb(self, notebook, child, pagenum):
self._update_closing_buttons()
self._update_tab_sizes()
def __page_removed_cb(self, notebook, child, pagenum):
if self.get_n_pages():
self._update_closing_buttons()
self._update_tab_sizes()
def __new_tab_cb(self, browser, url):
new_browser = self.add_tab(next_to_current=True)
new_browser.load_uri(url)
new_browser.grab_focus()
def __create_web_view_cb(self, web_view, frame):
new_web_view = Browser()
new_web_view.connect('web-view-ready', self.__web_view_ready_cb)
return new_web_view
def __web_view_ready_cb(self, web_view):
"""
Handle new window requested and open it in a new tab.
        This callback is called when the WebKit.WebView requests a new
        window to open (for example a call to the Javascript function
        'window.open()' or target="_blank").
        web_view -- the new browser where the url of the
                    window.open() call will be loaded.
This object is created in the signal callback
'create-web-view'.
"""
web_view.connect('new-tab', self.__new_tab_cb)
web_view.connect('open-pdf', self.__open_pdf_in_new_tab_cb)
web_view.connect('create-web-view', self.__create_web_view_cb)
web_view.grab_focus()
self._insert_tab_next(web_view)
def __open_pdf_in_new_tab_cb(self, browser, url):
tab_page = PDFTabPage()
tab_page.browser.connect('new-tab', self.__new_tab_cb)
tab_page.browser.connect('tab-close', self.__tab_close_cb)
label = TabLabel(tab_page.browser)
label.connect('tab-close', self.__tab_close_cb, tab_page)
next_index = self.get_current_page() + 1
self.insert_page(tab_page, label, next_index)
tab_page.show()
label.show()
self.set_current_page(next_index)
tab_page.setup(url)
def __load_status_changed_cb(self, widget, param):
if self.get_window() is None:
return
status = widget.get_load_status()
if status in (WebKit.LoadStatus.PROVISIONAL,
WebKit.LoadStatus.COMMITTED,
WebKit.LoadStatus.FIRST_VISUALLY_NON_EMPTY_LAYOUT):
self.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.WATCH))
elif status in (WebKit.LoadStatus.FAILED,
WebKit.LoadStatus.FINISHED):
self.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.LEFT_PTR))
def add_tab(self, next_to_current=False):
browser = Browser()
browser.connect('new-tab', self.__new_tab_cb)
browser.connect('open-pdf', self.__open_pdf_in_new_tab_cb)
browser.connect('web-view-ready', self.__web_view_ready_cb)
browser.connect('create-web-view', self.__create_web_view_cb)
if next_to_current:
self._insert_tab_next(browser)
else:
self._append_tab(browser)
self.emit('focus-url-entry')
return browser
def _insert_tab_next(self, browser):
tab_page = TabPage(browser)
label = TabLabel(browser)
label.connect('tab-close', self.__tab_close_cb, tab_page)
next_index = self.get_current_page() + 1
self.insert_page(tab_page, label, next_index)
tab_page.show()
self.set_current_page(next_index)
def _append_tab(self, browser):
tab_page = TabPage(browser)
label = TabLabel(browser)
label.connect('tab-close', self.__tab_close_cb, tab_page)
self.append_page(tab_page, label)
tab_page.show()
self.set_current_page(-1)
def on_add_tab(self, gobject):
self.add_tab()
def close_tab(self, tab_page=None):
if self.get_n_pages() == 1:
return
if tab_page is None:
tab_page = self.get_nth_page(self.get_current_page())
if isinstance(tab_page, PDFTabPage):
if tab_page.props.browser.props.load_status < \
WebKit.LoadStatus.FINISHED:
tab_page.cancel_download()
self.remove_page(self.page_num(tab_page))
current_page = self.get_nth_page(self.get_current_page())
current_page.props.browser.grab_focus()
def __tab_close_cb(self, label, tab_page):
self.close_tab(tab_page)
def _update_tab_sizes(self):
"""Update tab widths based in the amount of tabs."""
n_pages = self.get_n_pages()
canvas_size = self.get_allocation()
allowed_size = canvas_size.width
if n_pages == 1:
# use half of the whole space
tab_expand = False
tab_new_size = int(allowed_size / 2)
elif n_pages <= 8: # ensure eight tabs
tab_expand = True # use all the space available by tabs
tab_new_size = -1
else:
# scroll the tab toolbar if there are more than 8 tabs
tab_expand = False
tab_new_size = (allowed_size / 8)
for page_idx in range(n_pages):
page = self.get_nth_page(page_idx)
label = self.get_tab_label(page)
self.child_set_property(page, 'tab-expand', tab_expand)
label.update_size(tab_new_size)
def _update_closing_buttons(self):
"""Prevent closing the last tab."""
first_page = self.get_nth_page(0)
first_label = self.get_tab_label(first_page)
if self.get_n_pages() == 1:
first_label.hide_close_button()
else:
first_label.show_close_button()
def load_homepage(self, ignore_gconf=False):
browser = self.current_browser
uri_homepage = None
if not ignore_gconf:
client = GConf.Client.get_default()
uri_homepage = client.get_string(HOME_PAGE_GCONF_KEY)
if uri_homepage is not None:
browser.load_uri(uri_homepage)
elif os.path.isfile(LIBRARY_PATH):
browser.load_uri('file://' + LIBRARY_PATH)
else:
default_page = os.path.join(activity.get_bundle_path(),
"data/index.html")
browser.load_uri('file://' + default_page)
browser.grab_focus()
def set_homepage(self):
uri = self.current_browser.get_uri()
client = GConf.Client.get_default()
client.set_string(HOME_PAGE_GCONF_KEY, uri)
def reset_homepage(self):
client = GConf.Client.get_default()
client.unset(HOME_PAGE_GCONF_KEY)
def _get_current_browser(self):
if self.get_n_pages():
return self.get_nth_page(self.get_current_page()).browser
else:
return None
current_browser = GObject.property(type=object,
getter=_get_current_browser)
def get_history(self):
tab_histories = []
for index in xrange(0, self.get_n_pages()):
tab_page = self.get_nth_page(index)
tab_histories.append(tab_page.browser.get_history())
return tab_histories
def set_history(self, tab_histories):
if tab_histories and isinstance(tab_histories[0], dict):
# Old format, no tabs
tab_histories = [tab_histories]
while self.get_n_pages():
self.remove_page(self.get_n_pages() - 1)
def is_pdf_history(tab_history):
return (len(tab_history) == 1 and
tab_history[0]['url'].lower().endswith('pdf'))
for tab_history in tab_histories:
if is_pdf_history(tab_history):
url = tab_history[0]['url']
tab_page = PDFTabPage()
tab_page.browser.connect('new-tab', self.__new_tab_cb)
tab_page.browser.connect('tab-close', self.__tab_close_cb)
label = TabLabel(tab_page.browser)
label.connect('tab-close', self.__tab_close_cb, tab_page)
self.append_page(tab_page, label)
tab_page.show()
label.show()
tab_page.setup(url, title=tab_history[0]['title'])
else:
browser = Browser()
browser.connect('new-tab', self.__new_tab_cb)
browser.connect('open-pdf', self.__open_pdf_in_new_tab_cb)
browser.connect('web-view-ready', self.__web_view_ready_cb)
browser.connect('create-web-view', self.__create_web_view_cb)
self._append_tab(browser)
browser.set_history(tab_history)
def is_current_page_pdf(self):
index = self.get_current_page()
current_page = self.get_nth_page(index)
return isinstance(current_page, PDFTabPage)
Gtk.rc_parse_string('''
style "browse-tab-close" {
xthickness = 0
ythickness = 0
}
widget "*browse-tab-close" style "browse-tab-close"''')
class TabPage(Gtk.ScrolledWindow):
__gtype_name__ = 'BrowseTabPage'
def __init__(self, browser):
GObject.GObject.__init__(self)
self._browser = browser
self.add(browser)
browser.show()
def _get_browser(self):
return self._browser
browser = GObject.property(type=object,
getter=_get_browser)
class TabLabel(Gtk.HBox):
__gtype_name__ = 'BrowseTabLabel'
__gsignals__ = {
'tab-close': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
}
def __init__(self, browser):
GObject.GObject.__init__(self)
browser.connect('notify::title', self.__title_changed_cb)
browser.connect('notify::load-status', self.__load_status_changed_cb)
self._title = _('Untitled')
self._label = Gtk.Label(label=self._title)
self._label.set_ellipsize(Pango.EllipsizeMode.END)
self._label.set_alignment(0, 0.5)
self.pack_start(self._label, True, True, 0)
self._label.show()
close_tab_icon = Icon(icon_name='browse-close-tab')
button = Gtk.Button()
button.props.relief = Gtk.ReliefStyle.NONE
button.props.focus_on_click = False
icon_box = Gtk.HBox()
icon_box.pack_start(close_tab_icon, True, False, 0)
button.add(icon_box)
button.connect('clicked', self.__button_clicked_cb)
button.set_name('browse-tab-close')
self.pack_start(button, False, True, 0)
close_tab_icon.show()
icon_box.show()
button.show()
self._close_button = button
def update_size(self, size):
self.set_size_request(size, -1)
def hide_close_button(self):
self._close_button.hide()
def show_close_button(self):
self._close_button.show()
def __button_clicked_cb(self, button):
self.emit('tab-close')
def __title_changed_cb(self, widget, param):
title = widget.props.title
if not title:
title = os.path.basename(widget.props.uri)
self._label.set_text(title)
self._title = title
def __load_status_changed_cb(self, widget, param):
status = widget.get_load_status()
if status == WebKit.LoadStatus.FAILED:
self._label.set_text(self._title)
elif WebKit.LoadStatus.PROVISIONAL <= status \
< WebKit.LoadStatus.FINISHED:
self._label.set_text(_('Loading...'))
elif status == WebKit.LoadStatus.FINISHED:
if widget.props.title is None:
self._label.set_text(_('Untitled'))
self._title = _('Untitled')
class Browser(WebKit.WebView):
__gtype_name__ = 'Browser'
__gsignals__ = {
'new-tab': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
'open-pdf': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
'security-status-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
}
CURRENT_SUGAR_VERSION = get_sugar_version()
SECURITY_STATUS_SECURE = 1
SECURITY_STATUS_INSECURE = 2
def __init__(self):
WebKit.WebView.__init__(self)
web_settings = self.get_settings()
# Add SugarLabs user agent:
identifier = ' SugarLabs/' + self.CURRENT_SUGAR_VERSION
web_settings.props.user_agent += identifier
        # Change font size based on the GtkSettings font size. The
# gtk-font-name property is a string with format '[font name]
# [font size]' like 'Sans Serif 10'.
gtk_settings = Gtk.Settings.get_default()
gtk_font_name = gtk_settings.get_property('gtk-font-name')
gtk_font_size = float(gtk_font_name.split()[-1])
web_settings.props.default_font_size = gtk_font_size * 1.2
web_settings.props.default_monospace_font_size = \
gtk_font_size * 1.2 - 2
self.set_settings(web_settings)
# Scale text and graphics:
self.set_full_content_zoom(True)
        # This property is used to set the title immediately when the
        # user presses Enter on the URL Entry
self.loading_uri = None
self.security_status = None
# Reference to the global history and callbacks to handle it:
self._global_history = globalhistory.get_global_history()
self.connect('notify::load-status', self.__load_status_changed_cb)
self.connect('notify::title', self.__title_changed_cb)
self.connect('download-requested', self.__download_requested_cb)
self.connect('mime-type-policy-decision-requested',
self.__mime_type_policy_cb)
self.connect('load-error', self.__load_error_cb)
self._inject_media_style = False
ContentInvoker(self)
try:
self.connect('run-file-chooser', self.__run_file_chooser)
except TypeError:
# Only present in WebKit1 > 1.9.3 and WebKit2
pass
def get_history(self):
"""Return the browsing history of this browser."""
back_forward_list = self.get_back_forward_list()
items_list = self._items_history_as_list(back_forward_list)
# If this is an empty tab, return an empty history:
if len(items_list) == 1 and items_list[0] is None:
return []
history = []
for item in items_list:
history.append({'url': item.get_uri(),
'title': item.get_title()})
return history
def set_history(self, history):
"""Restore the browsing history for this browser."""
back_forward_list = self.get_back_forward_list()
back_forward_list.clear()
for entry in history:
uri, title = entry['url'], entry['title']
history_item = WebKit.WebHistoryItem.new_with_data(uri, title)
back_forward_list.add_item(history_item)
def get_history_index(self):
"""Return the index of the current item in the history."""
back_forward_list = self.get_back_forward_list()
history_list = self._items_history_as_list(back_forward_list)
current_item = back_forward_list.get_current_item()
return history_list.index(current_item)
def set_history_index(self, index):
"""Go to the item in the history specified by the index."""
back_forward_list = self.get_back_forward_list()
current_item = index - back_forward_list.get_back_length()
item = back_forward_list.get_nth_item(current_item)
if item is not None:
self.go_to_back_forward_item(item)
def _items_history_as_list(self, history):
"""Return a list with the items of a WebKit.WebBackForwardList."""
back_items = []
for n in reversed(range(1, history.get_back_length() + 1)):
item = history.get_nth_item(n * -1)
back_items.append(item)
current_item = [history.get_current_item()]
forward_items = []
for n in range(1, history.get_forward_length() + 1):
item = history.get_nth_item(n)
forward_items.append(item)
all_items = back_items + current_item + forward_items
return all_items
def get_source(self, async_cb, async_err_cb):
data_source = self.get_main_frame().get_data_source()
data = data_source.get_data()
if data_source.is_loading() or data is None:
            async_err_cb()
            return
temp_path = os.path.join(activity.get_activity_root(), 'instance')
file_path = os.path.join(temp_path, '%i' % time.time())
file_handle = file(file_path, 'w')
file_handle.write(data.str)
file_handle.close()
async_cb(file_path)
def open_new_tab(self, url):
self.emit('new-tab', url)
def __run_file_chooser(self, browser, request):
picker = FilePicker(self)
chosen = picker.run()
picker.destroy()
if chosen:
request.select_files([chosen])
elif hasattr(request, 'cancel'):
# WebKit2 only
request.cancel()
return True
def __load_status_changed_cb(self, widget, param):
status = widget.get_load_status()
if status <= WebKit.LoadStatus.COMMITTED:
# Add the url to the global history or update it.
uri = self.get_uri()
self._global_history.add_page(uri)
if status == WebKit.LoadStatus.COMMITTED:
# Update the security status.
response = widget.get_main_frame().get_network_response()
message = response.get_message()
if message:
use_https, certificate, tls_errors = message.get_https_status()
if use_https:
if tls_errors == 0:
self.security_status = self.SECURITY_STATUS_SECURE
else:
self.security_status = self.SECURITY_STATUS_INSECURE
else:
self.security_status = None
self.emit('security-status-changed')
def __title_changed_cb(self, widget, param):
"""Update title in global history."""
uri = self.get_uri()
if self.props.title is not None:
title = self.props.title
if not isinstance(title, unicode):
title = unicode(title, 'utf-8')
self._global_history.set_page_title(uri, title)
def __mime_type_policy_cb(self, webview, frame, request, mimetype,
policy_decision):
"""Handle downloads and PDF files."""
if mimetype == 'application/pdf':
self.emit('open-pdf', request.get_uri())
policy_decision.ignore()
return True
elif mimetype == 'audio/x-vorbis+ogg' or mimetype == 'audio/mpeg':
self._inject_media_style = True
elif not self.can_show_mime_type(mimetype):
policy_decision.download()
return True
return False
def __download_requested_cb(self, browser, download):
downloadmanager.add_download(download, browser)
return True
def __load_error_cb(self, web_view, web_frame, uri, web_error):
"""Show Sugar's error page"""
# Don't show error page if the load was interrupted by policy
# change or the request is going to be handled by a
# plugin. For example, if a file was requested for download or
# an .ogg file is going to be played.
if web_error.code in (
WebKit.PolicyError.FRAME_LOAD_INTERRUPTED_BY_POLICY_CHANGE,
WebKit.PluginError.WILL_HANDLE_LOAD):
if self._inject_media_style:
css_style_file = open(os.path.join(activity.get_bundle_path(),
"data/media-controls.css"))
css_style = css_style_file.read().replace('\n', '')
inject_style_script = \
"var style = document.createElement('style');" \
"style.innerHTML = '%s';" \
"document.body.appendChild(style);" % css_style
web_view.execute_script(inject_style_script)
return True
data = {
'page_title': _('This web page could not be loaded'),
'title': _('This web page could not be loaded'),
'message': _('"%s" could not be loaded. Please check for '
'typing errors, and make sure you are connected '
'to the Internet.') % uri,
'btn_value': _('Try again'),
'url': uri,
}
html = open(DEFAULT_ERROR_PAGE, 'r').read() % data
web_frame.load_alternate_string(html, uri, uri)
return True
class PopupDialog(Gtk.Window):
def __init__(self):
GObject.GObject.__init__(self)
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
border = style.GRID_CELL_SIZE
self.set_default_size(Gdk.Screen.width() - border * 2,
Gdk.Screen.height() - border * 2)
self.view = WebKit.WebView()
self.view.connect('notify::visibility', self.__notify_visibility_cb)
self.add(self.view)
self.view.realize()
def __notify_visibility_cb(self, web_view, pspec):
if self.view.props.visibility:
self.view.show()
self.show()
|
gpl-2.0
|
wdzhou/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateAdjustmentWorkspaces.py
|
2
|
13020
|
# pylint: disable=invalid-name
""" SANSCreateAdjustmentWorkspaces algorithm creates workspaces for pixel adjustment
, wavelength adjustment and pixel-and-wavelength adjustment workspaces.
"""
from __future__ import (absolute_import, division, print_function)
from mantid.kernel import (Direction, PropertyManagerProperty, StringListValidator, CompositeValidator)
from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
WorkspaceUnitValidator)
from sans.common.constants import EMPTY_NAME
from sans.common.enums import (DataType, DetectorType)
from sans.common.general_functions import create_unmanaged_algorithm
from sans.state.state_base import create_deserialized_sans_state_from_property_manager
class SANSCreateAdjustmentWorkspaces(DataProcessorAlgorithm):
def category(self):
return 'SANS\\Adjust'
def summary(self):
return 'Calculates wavelength adjustment, pixel adjustment workspaces and wavelength-and-pixel ' \
'adjustment workspaces.'
def PyInit(self):
# ---------------
# INPUT
# ---------------
# State
self.declareProperty(PropertyManagerProperty('SANSState'),
doc='A property manager which fulfills the SANSState contract.')
# Input workspaces
self.declareProperty(MatrixWorkspaceProperty('TransmissionWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Input),
doc='The transmission workspace.')
self.declareProperty(MatrixWorkspaceProperty('DirectWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Input),
doc='The direct workspace.')
self.declareProperty(MatrixWorkspaceProperty('MonitorWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Input),
doc='The scatter monitor workspace. This workspace only contains monitors.')
workspace_validator = CompositeValidator()
workspace_validator.add(WorkspaceUnitValidator("Wavelength"))
self.declareProperty(MatrixWorkspaceProperty('SampleData', '',
optional=PropertyMode.Optional, direction=Direction.Input,
validator=workspace_validator),
doc='A workspace cropped to the detector to be reduced (the SAME as the input to Q1D). '
                                 'This is used to verify the solid angle. The workspace is not modified, just inspected.')
# The component
allowed_detector_types = StringListValidator([DetectorType.to_string(DetectorType.HAB),
DetectorType.to_string(DetectorType.LAB)])
self.declareProperty("Component", DetectorType.to_string(DetectorType.LAB),
validator=allowed_detector_types, direction=Direction.Input,
doc="The component of the instrument which is currently being investigated.")
# The data type
allowed_data = StringListValidator([DataType.to_string(DataType.Sample),
DataType.to_string(DataType.Can)])
self.declareProperty("DataType", DataType.to_string(DataType.Sample),
validator=allowed_data, direction=Direction.Input,
doc="The component of the instrument which is to be reduced.")
# Slice factor for monitor
self.declareProperty('SliceEventFactor', 1.0, direction=Direction.Input, doc='The slice factor for the monitor '
'normalization. This factor is the'
' one obtained from event '
'slicing.')
# ---------------
# Output
# ---------------
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceWavelengthAdjustment', '',
direction=Direction.Output),
doc='The workspace for wavelength-based adjustments.')
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspacePixelAdjustment', '',
direction=Direction.Output),
                             doc='The workspace for pixel-based adjustments.')
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceWavelengthAndPixelAdjustment', '',
direction=Direction.Output),
doc='The workspace for, both, wavelength- and pixel-based adjustments.')
def PyExec(self):
# Read the state
state_property_manager = self.getProperty("SANSState").value
state = create_deserialized_sans_state_from_property_manager(state_property_manager)
# --------------------------------------
# Get the monitor normalization workspace
# --------------------------------------
monitor_normalization_workspace = self._get_monitor_normalization_workspace(state)
# --------------------------------------
# Get the calculated transmission
# --------------------------------------
calculated_transmission_workspace, unfitted_transmission_workspace =\
self._get_calculated_transmission_workspace(state)
# --------------------------------------
# Get the wide angle correction workspace
# --------------------------------------
wave_length_and_pixel_adjustment_workspace = self._get_wide_angle_correction_workspace(state,
calculated_transmission_workspace) # noqa
# --------------------------------------------
# Get the full wavelength and pixel adjustment
# --------------------------------------------
wave_length_adjustment_workspace, \
pixel_length_adjustment_workspace = self._get_wavelength_and_pixel_adjustment_workspaces(state,
monitor_normalization_workspace, # noqa
calculated_transmission_workspace) # noqa
if wave_length_adjustment_workspace:
self.setProperty("OutputWorkspaceWavelengthAdjustment", wave_length_adjustment_workspace)
if pixel_length_adjustment_workspace:
self.setProperty("OutputWorkspacePixelAdjustment", pixel_length_adjustment_workspace)
if wave_length_and_pixel_adjustment_workspace:
self.setProperty("OutputWorkspaceWavelengthAndPixelAdjustment", wave_length_and_pixel_adjustment_workspace)
# TODO: Nice to have: Provide diagnostic output workspaces which could be output either directly to the
# ADS or let it percolate up via SANSCreateAdjustmentWorkspaces->SANSReductionCore->
# SANSSingleReduction and then add it to the ADS
def _get_wavelength_and_pixel_adjustment_workspaces(self, state,
monitor_normalization_workspace,
calculated_transmission_workspace):
component = self.getProperty("Component").value
wave_pixel_adjustment_name = "SANSCreateWavelengthAndPixelAdjustment"
serialized_state = state.property_manager
wave_pixel_adjustment_options = {"SANSState": serialized_state,
"NormalizeToMonitorWorkspace": monitor_normalization_workspace,
"OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
"OutputWorkspacePixelAdjustment": EMPTY_NAME,
"Component": component}
if calculated_transmission_workspace:
wave_pixel_adjustment_options.update({"TransmissionWorkspace": calculated_transmission_workspace})
wave_pixel_adjustment_alg = create_unmanaged_algorithm(wave_pixel_adjustment_name,
**wave_pixel_adjustment_options)
wave_pixel_adjustment_alg.execute()
wavelength_out = wave_pixel_adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
pixel_out = wave_pixel_adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
return wavelength_out, pixel_out
def _get_monitor_normalization_workspace(self, state):
"""
Gets the monitor normalization workspace via the SANSNormalizeToMonitor algorithm
:param state: a SANSState object.
:return: the normalization workspace.
"""
monitor_workspace = self.getProperty("MonitorWorkspace").value
scale_factor = self.getProperty("SliceEventFactor").value
normalize_name = "SANSNormalizeToMonitor"
serialized_state = state.property_manager
normalize_option = {"InputWorkspace": monitor_workspace,
"OutputWorkspace": EMPTY_NAME,
"SANSState": serialized_state,
"ScaleFactor": scale_factor}
normalize_alg = create_unmanaged_algorithm(normalize_name, **normalize_option)
normalize_alg.execute()
ws = normalize_alg.getProperty("OutputWorkspace").value
return ws
def _get_calculated_transmission_workspace(self, state):
"""
Creates the fitted transmission workspace.
Note that this step is not mandatory. If no transmission and direct workspaces are provided, then we
don't have to do anything here.
:param state: a SANSState object.
:return: a fitted transmission workspace and the unfitted data.
"""
transmission_workspace = self.getProperty("TransmissionWorkspace").value
direct_workspace = self.getProperty("DirectWorkspace").value
if transmission_workspace and direct_workspace:
data_type = self.getProperty("DataType").value
transmission_name = "SANSCalculateTransmission"
serialized_state = state.property_manager
transmission_options = {"TransmissionWorkspace": transmission_workspace,
"DirectWorkspace": direct_workspace,
"SANSState": serialized_state,
"DataType": data_type,
"OutputWorkspace": EMPTY_NAME,
"UnfittedData": EMPTY_NAME}
transmission_alg = create_unmanaged_algorithm(transmission_name, **transmission_options)
transmission_alg.execute()
fitted_data = transmission_alg.getProperty("OutputWorkspace").value
unfitted_data = transmission_alg.getProperty("UnfittedData").value
else:
fitted_data = None
unfitted_data = None
return fitted_data, unfitted_data
def _get_wide_angle_correction_workspace(self, state, calculated_transmission_workspace):
wide_angle_correction = state.adjustment.wide_angle_correction
sample_data = self.getProperty("SampleData").value
workspace = None
if wide_angle_correction and sample_data and calculated_transmission_workspace:
wide_angle_name = "SANSWideAngleCorrection"
wide_angle_options = {"SampleData": sample_data,
"TransmissionData": calculated_transmission_workspace,
"OutputWorkspace": EMPTY_NAME}
wide_angle_alg = create_unmanaged_algorithm(wide_angle_name, **wide_angle_options)
wide_angle_alg.execute()
workspace = wide_angle_alg.getProperty("OutputWorkspace").value
return workspace
def validateInputs(self):
errors = dict()
# Check that the input can be converted into the right state object
state_property_manager = self.getProperty("SANSState").value
try:
state = create_deserialized_sans_state_from_property_manager(state_property_manager)
state.property_manager = state_property_manager
state.validate()
except ValueError as err:
errors.update({"SANSCreateAdjustmentWorkspaces": str(err)})
return errors
# Register algorithm with Mantid
AlgorithmFactory.subscribe(SANSCreateAdjustmentWorkspaces)
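# Illustrative call (not part of the original source; a sketch assuming the
# registered algorithm is exposed through mantid.simpleapi, with hypothetical
# workspace variables). The property names are those declared in PyInit above,
# and the outputs are expected roughly in their declaration order:
#
#     from mantid.simpleapi import SANSCreateAdjustmentWorkspaces
#     wav_adj, pix_adj, wav_pix_adj = SANSCreateAdjustmentWorkspaces(
#         SANSState=state.property_manager,
#         MonitorWorkspace=monitor_ws,
#         TransmissionWorkspace=trans_ws,
#         DirectWorkspace=direct_ws,
#         Component="LAB",
#         DataType="Sample")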
|
gpl-3.0
|
AsimmHirani/ISpyPi
|
tensorflow/contrib/tensorflow-master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
|
6
|
30329
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops.rnn_cell_impl import _RNNCell as RNNCell
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
class BasicRNNCell(RNNCell):
"""The most basic RNN cell."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
with vs.variable_scope(scope or "basic_rnn_cell"):
output = self._activation(
_linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope(scope or "gru_cell"):
with vs.variable_scope("gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(
value=_linear(
[inputs, state], 2 * self._num_units, True, 1.0),
num_or_size_splits=2,
axis=1)
r, u = sigmoid(r), sigmoid(u)
with vs.variable_scope("candidate"):
c = self._activation(_linear([inputs, r * state],
self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if not c.dtype == h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None,
state_is_tuple=True, activation=tanh):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or "basic_lstm_cell"):
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
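# --- Illustrative usage sketch (not part of the original module) ------------
# Shows how state_is_tuple changes what BasicLSTMCell accepts and returns:
# with the default (True) the state is an LSTMStateTuple(c, h); with False it
# is a single [batch, 2 * num_units] tensor. All sizes are arbitrary
# illustration values.
def _example_basic_lstm_cell_states():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  inputs = array_ops.placeholder(dtypes.float32, [None, 3])
  # Tuple state (recommended): state_size == LSTMStateTuple(5, 5).
  cell = BasicLSTMCell(num_units=5)
  c = array_ops.placeholder(dtypes.float32, [None, 5])
  h = array_ops.placeholder(dtypes.float32, [None, 5])
  out_tuple, state_tuple = cell(inputs, LSTMStateTuple(c, h))
  # Concatenated state (deprecated): state_size == 2 * num_units.
  with vs.variable_scope("concat_state_example"):
    cell_flat = BasicLSTMCell(num_units=5, state_is_tuple=False)
    flat_state = array_ops.placeholder(dtypes.float32, [None, 10])
    out_flat, state_flat = cell_flat(inputs, flat_state)
  return state_tuple, state_flat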
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: Deprecated and unused.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
scope: VariableScope for the created subgraph; defaults to "lstm_cell".
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(scope or "lstm_cell",
initializer=self._initializer) as unit_scope:
if self._num_unit_shards is not None:
unit_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_unit_shards))
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope) as projection_scope:
if self._num_unit_shards is not None:
projection_scope.set_partitioner(None)
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection") as proj_scope:
if self._num_proj_shards is not None:
proj_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_proj_shards))
m = _linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
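# --- Illustrative usage sketch (not part of the original module) ------------
# Shows that num_proj changes the cell's output_size and the second element of
# its state: with num_units=6 and num_proj=3 the state is
# LSTMStateTuple(c=[batch, 6], m=[batch, 3]) and outputs are [batch, 3]. All
# sizes are arbitrary illustration values.
def _example_lstm_cell_projection():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  cell = LSTMCell(num_units=6, num_proj=3, use_peepholes=True)
  inputs = array_ops.placeholder(dtypes.float32, [None, 4])
  c = array_ops.placeholder(dtypes.float32, [None, 6])
  m = array_ops.placeholder(dtypes.float32, [None, 3])
  output, new_state = cell(inputs, LSTMStateTuple(c, m))
  # output_size reflects the projection, not the raw unit count.
  assert cell.output_size == 3 and cell.state_size == LSTMStateTuple(6, 3)
  return output, new_state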
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with vs.variable_scope(scope or "output_projection_wrapper"):
projected = _linear(output, self._output_size, True)
return projected, res_state
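# --- Illustrative sketch of the alternative mentioned in the class docstring
# above (not part of the original module): rather than projecting at every
# step through OutputProjectionWrapper, concatenate the per-step outputs,
# apply one shared projection, and split back. The helper name, the
# "shared_projection" scope and the argument shapes are illustration-only
# assumptions.
def _example_project_concatenated_outputs(step_outputs, output_size):
  """step_outputs: list of [batch, cell_output] tensors, one per time step."""
  from tensorflow.python.ops import array_ops
  stacked = array_ops.concat(step_outputs, 0)        # [T * batch, cell_output]
  with vs.variable_scope("shared_projection"):
    projected = _linear(stacked, output_size, True)  # one weight matrix in total
  return array_ops.split(projected, len(step_outputs), axis=0)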
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self, cell, num_proj, input_size=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
input_size: Deprecated and unused.
Raises:
TypeError: if cell is not an RNNCell.
"""
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
self._cell = cell
self._num_proj = num_proj
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
with vs.variable_scope(scope or "input_projection_wrapper"):
projected = _linear(inputs, self._num_proj, True)
return self._cell(projected, state)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
output, new_state = self._cell(inputs, state, scope)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
return output, new_state
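# --- Illustrative usage sketch (not part of the original module) ------------
# Shows composing wrappers around a base cell: input dropout is applied before
# the wrapped cell runs, output dropout after, and the state passes through
# untouched. The keep probabilities and sizes are arbitrary illustration
# values.
def _example_dropout_wrapped_gru():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  cell = DropoutWrapper(GRUCell(num_units=4),
                        input_keep_prob=0.9, output_keep_prob=0.5)
  inputs = array_ops.placeholder(dtypes.float32, [None, 3])
  state = array_ops.placeholder(dtypes.float32, [None, 4])
  output, new_state = cell(inputs, state)
  return output, new_state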
class ResidualWrapper(RNNCell):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
"""
self._cell = cell
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(
lambda inp, out: inp + out, inputs, outputs)
return (res_outputs, new_state)
class DeviceWrapper(RNNCell):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
"""
self._cell = cell
self._device = device
def __call__(self, inputs, state, scope=None):
"""Run the cell on specified device."""
with ops.device(self._device):
return self._cell(inputs, state, scope=scope)
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell on embedded inputs."""
with vs.variable_scope(scope or "embedding_wrapper"): # "EmbeddingWrapper"
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
if type(state) is tuple:
data_type = state[0].dtype
else:
data_type = state.dtype
embedding = vs.get_variable(
"embedding", [self._embedding_classes, self._embedding_size],
initializer=initializer,
dtype=data_type)
embedded = embedding_ops.embedding_lookup(
embedding, array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
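# --- Illustrative usage sketch (not part of the original module) ------------
# Shows wrapping a cell so that integer symbol ids are embedded before being
# fed to the inner cell. The vocabulary size, embedding width and unit count
# are arbitrary illustration values.
def _example_embedding_wrapped_cell():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  cell = EmbeddingWrapper(GRUCell(num_units=4),
                          embedding_classes=100, embedding_size=8)
  symbol_ids = array_ops.placeholder(dtypes.int32, [None, 1])
  state = array_ops.placeholder(dtypes.float32, [None, 4])
  output, new_state = cell(symbol_ids, state)
  return output, new_state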
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with vs.variable_scope(scope or "multi_rnn_cell"):
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(new_states, 1))
return cur_inp, new_states
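# --- Illustrative usage sketch (not part of the original module) ------------
# Shows stacking two cells with MultiRNNCell and state_is_tuple=True: the
# composite state is a tuple with one entry per layer, and the output comes
# from the last layer. The unit counts are arbitrary illustration values.
def _example_stacked_cells():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  stacked = MultiRNNCell([GRUCell(4), GRUCell(6)])
  inputs = array_ops.placeholder(dtypes.float32, [None, 3])
  state = (array_ops.placeholder(dtypes.float32, [None, 4]),
           array_ops.placeholder(dtypes.float32, [None, 6]))
  output, new_states = stacked(inputs, state)
  assert stacked.state_size == (4, 6) and stacked.output_size == 6
  return output, new_states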
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args, output_size, bias, bias_start=0.0):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
    ValueError: if some of the arguments have unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
"weights", [total_arg_size, output_size], dtype=dtype)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = vs.get_variable(
"biases", [output_size],
dtype=dtype,
initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
return nn_ops.bias_add(res, biases)
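# --- Illustrative sketch (not part of the original module) ------------------
# Spells out what _linear builds for a list of inputs: one weight matrix of
# shape [sum of input widths, output_size] applied to the column-wise
# concatenation of the arguments, plus an optional bias. The widths and scope
# name below are arbitrary illustration values.
def _example_linear_shapes():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  x = array_ops.placeholder(dtypes.float32, [None, 3])
  h = array_ops.placeholder(dtypes.float32, [None, 5])
  with vs.variable_scope("linear_example"):
    y = _linear([x, h], 7, True)  # weights: [3 + 5, 7], biases: [7]
  return y                        # shape [batch, 7]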
|
apache-2.0
|
amiguez/youtube-dl
|
youtube_dl/extractor/naver.py
|
91
|
3756
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
clean_html,
)
class NaverIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'
_TESTS = [{
'url': 'http://tvcast.naver.com/v/81652',
'info_dict': {
'id': '81652',
'ext': 'mp4',
'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
'upload_date': '20130903',
},
}, {
'url': 'http://tvcast.naver.com/v/395837',
'md5': '638ed4c12012c458fefcddfd01f173cd',
'info_dict': {
'id': '395837',
'ext': 'mp4',
'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
'upload_date': '20150519',
},
'skip': 'Georestricted',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
m_error = re.search(
r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
webpage)
if m_error:
raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
query_urls = compat_urllib_parse.urlencode({
'masterVid': vid,
'protocol': 'p2p',
'inKey': key,
})
info = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query,
video_id, 'Downloading video info')
urls = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls,
video_id, 'Downloading video formats info')
formats = []
for format_el in urls.findall('EncodingOptions/EncodingOption'):
domain = format_el.find('Domain').text
uri = format_el.find('uri').text
f = {
'url': compat_urlparse.urljoin(domain, uri),
'ext': 'mp4',
'width': int(format_el.find('width').text),
'height': int(format_el.find('height').text),
}
if domain.startswith('rtmp'):
# urlparse does not support custom schemes
# https://bugs.python.org/issue18828
f.update({
'url': domain + uri,
'ext': 'flv',
'rtmp_protocol': '1', # rtmpt
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': info.find('Subject').text,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'upload_date': info.find('WriteDate').text.replace('.', ''),
'view_count': int(info.find('PlayCount').text),
}
|
unlicense
|
mgit-at/ansible
|
lib/ansible/modules/network/aos/_aos_template.py
|
27
|
8372
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_template
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Template
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
    - Apstra AOS Template module lets you manage your Templates easily. You can
      create and delete Templates by name, ID or by using a JSON file. This module
      is idempotent and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Template to manage.
Only one of I(name), I(id) or I(src) can be set.
id:
description:
      - AOS Id of the Template to manage (can't be used to create a new Template).
        Only one of I(name), I(id) or I(src) can be set.
content:
description:
- Datastructure of the Template to create. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the Template (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Check if an Template exist by name"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: present
- name: "Check if an Template exist by ID"
aos_template:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: present
- name: "Delete an Template by name"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: absent
- name: "Delete an Template by id"
aos_template:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
- name: "Access Template 1/3"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: present
register: template
- name: "Save Template into a JSON file 2/3"
copy:
content: "{{ template.value | to_nice_json }}"
dest: template_saved.json
- name: "Save Template into a YAML file 2/3"
copy:
content: "{{ template.value | to_nice_yaml }}"
dest: template_saved.yaml
- name: "Load Template from File (Json)"
aos_template:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/template_saved.json') }}"
state: present
- name: "Load Template from File (yaml)"
aos_template:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/template_saved.yaml') }}"
state: present
'''
RETURNS = '''
name:
description: Name of the Template
returned: always
type: str
sample: My-Template
id:
description: AOS unique ID assigned to the Template
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import time
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def template_absent(module, aos, my_template):
margs = module.params
    # If the Template does not exist, return immediately
if my_template.exists is False:
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
value={})
# If not in check mode, delete Template
if not module.check_mode:
try:
            # need to wait 1 sec before deleting to work around a current limitation in AOS
time.sleep(1)
my_template.delete()
except:
module.fail_json(msg="An error occurred, while trying to delete the Template")
module.exit_json(changed=True,
name=my_template.name,
id=my_template.id,
value={})
def template_present(module, aos, my_template):
margs = module.params
# if content is defined, create object from Content
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.DesignTemplates, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
# if template doesn't exist already, create a new one
if my_template.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
    # if the Template already exists, just return it
module.exit_json(changed=False,
name=my_template.name,
id=my_template.id,
value=my_template.value)
#########################################################
# Main Function
#########################################################
def aos_template(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
# ----------------------------------------------------
# Find Object if available based on ID or Name
# ----------------------------------------------------
try:
my_template = find_collection_item(aos.DesignTemplates,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
# ----------------------------------------------------
# Proceed based on State value
# ----------------------------------------------------
if margs['state'] == 'absent':
template_absent(module, aos, my_template)
elif margs['state'] == 'present':
template_present(module, aos, my_template)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
id=dict(required=False),
content=dict(required=False, type="json"),
state=dict(required=False,
choices=['present', 'absent'],
default="present")
),
mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_template(module)
if __name__ == "__main__":
main()
|
gpl-3.0
|
dydek/django
|
tests/utils_tests/test_termcolors.py
|
337
|
6461
|
import unittest
from django.utils.termcolors import (
DARK_PALETTE, DEFAULT_PALETTE, LIGHT_PALETTE, NOCOLOR_PALETTE, PALETTES,
colorize, parse_color_setting,
)
class TermColorTests(unittest.TestCase):
def test_empty_string(self):
self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])
def test_simple_palette(self):
self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
self.assertEqual(parse_color_setting('nocolor'), None)
def test_fg(self):
self.assertEqual(parse_color_setting('error=green'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
def test_fg_bg(self):
self.assertEqual(parse_color_setting('error=green/blue'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
def test_fg_opts(self):
self.assertEqual(parse_color_setting('error=green,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green,bold,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink', 'bold')}))
def test_fg_bg_opts(self):
self.assertEqual(parse_color_setting('error=green/blue,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green/blue,bold,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')}))
def test_override_palette(self):
self.assertEqual(parse_color_setting('light;error=green'),
dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}))
def test_override_nocolor(self):
self.assertEqual(parse_color_setting('nocolor;error=green'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
def test_reverse_override(self):
self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])
def test_multiple_roles(self):
self.assertEqual(parse_color_setting('error=green;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'}))
def test_override_with_multiple_roles(self):
self.assertEqual(parse_color_setting('light;error=green;sql_field=blue'),
dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'}))
def test_empty_definition(self):
self.assertEqual(parse_color_setting(';'), None)
self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
self.assertEqual(parse_color_setting(';;;'), None)
def test_empty_options(self):
self.assertEqual(parse_color_setting('error=green,'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=green,,,'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=green,,blink,,'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
def test_bad_palette(self):
self.assertEqual(parse_color_setting('unknown'), None)
def test_bad_role(self):
self.assertEqual(parse_color_setting('unknown='), None)
self.assertEqual(parse_color_setting('unknown=green'), None)
self.assertEqual(parse_color_setting('unknown=green;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))
def test_bad_color(self):
self.assertEqual(parse_color_setting('error='), None)
self.assertEqual(parse_color_setting('error=;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))
self.assertEqual(parse_color_setting('error=unknown'), None)
self.assertEqual(parse_color_setting('error=unknown;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))
self.assertEqual(parse_color_setting('error=green/unknown'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=green/blue/something'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
self.assertEqual(parse_color_setting('error=green/blue/something,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))
def test_bad_option(self):
self.assertEqual(parse_color_setting('error=green,unknown'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=green,unknown,blink'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
def test_role_case(self):
self.assertEqual(parse_color_setting('ERROR=green'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('eRrOr=green'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
def test_color_case(self):
self.assertEqual(parse_color_setting('error=GREEN'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=GREEN/BLUE'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
self.assertEqual(parse_color_setting('error=gReEn'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
self.assertEqual(parse_color_setting('error=gReEn/bLuE'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
def test_opts_case(self):
self.assertEqual(parse_color_setting('error=green,BLINK'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green,bLiNk'),
dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
def test_colorize_empty_text(self):
self.assertEqual(colorize(text=None), '\x1b[m\x1b[0m')
self.assertEqual(colorize(text=''), '\x1b[m\x1b[0m')
        self.assertEqual(colorize(text=None, opts=('noreset',)), '\x1b[m')
        self.assertEqual(colorize(text='', opts=('noreset',)), '\x1b[m')
|
bsd-3-clause
|
gperciva/artifastring
|
research/make-constants/cutoff.py
|
1
|
4535
|
#!/usr/bin/env python
CUTOFF_DB_FINAL_AUDIO = 10
#STRINGS = [
# (0,0), (0,1), (0,2), (0,3), (0,4),
# (1,0), (1,1),
# (2,0), (2,1), (2,2), (2,3)
# ]
INST_FORCES = [
[1.5,1.0,1.0,0.5],
[1.75,1.0,1.0,0.5],
[2.0,0.9,0.8,0.6],
]
import sys
sys.path.append('../../build/swig')
sys.path.append('../../build/.libs')
sys.path.append('../shared')
import math
import numpy
import dsp
import artifastring_instrument
#import monowav
import midi_pos
import scipy
import pylab
HOPSIZE = artifastring_instrument.NORMAL_BUFFER_SIZE
FORCE_SIZE = HOPSIZE / artifastring_instrument.HAPTIC_DOWNSAMPLE_FACTOR
def pluck_force(violin, st, force, finger, plot=False, write=False):
#violin = artifastring_instrument.ArtifastringInstrument(inst, instnum)
#wavfile = monowav.MonoWav("artifastring-test.wav")
violin.reset()
violin.finger(st, finger)
violin.pluck(st, 0.2, force)
def hop():
buf = numpy.empty(HOPSIZE, dtype=numpy.int16)
forces = numpy.empty(FORCE_SIZE, dtype=numpy.int16)
violin.wait_samples_forces_python(buf, forces)
string_array = numpy.zeros(4*HOPSIZE, dtype=numpy.float32)
string_array_size = violin.get_string_buffer(st, string_array)
string_array = string_array[:string_array_size]
#pylab.plot(string_array)
#pylab.show()
buf_rms = numpy.sqrt(numpy.mean(numpy.array(buf,
numpy.float64)**2))
sa_rms = numpy.sqrt(numpy.mean(numpy.array(string_array,
dtype=numpy.float64)**2))
buf_ss = numpy.sum(numpy.array(buf,
numpy.float64)**2)
sa_ss = numpy.sum(numpy.array(string_array,
dtype=numpy.float64)**2)
return buf_rms, sa_rms, buf_ss, sa_ss
dh = float(HOPSIZE) / artifastring_instrument.ARTIFASTRING_INSTRUMENT_SAMPLE_RATE
BUFS = 1000
dhs = numpy.arange(0, BUFS) * dh
buf = numpy.zeros(BUFS)
sa = numpy.zeros(BUFS)
buf_sss = numpy.zeros(BUFS)
sa_sss = numpy.zeros(BUFS)
for i in range(BUFS):
buf_this, string_array_this, buf_ss, sa_ss = hop()
buf[i] = buf_this
sa[i] = string_array_this
buf_sss[i] = buf_ss
sa_sss[i] = sa_ss
buf_db = dsp.amplitude2db(buf)
sa_db = dsp.amplitude2db(sa)
cutoff_hop = 0
for i in range(BUFS):
if buf_db[i] < CUTOFF_DB_FINAL_AUDIO:
cutoff_hop = i
break
print "cutoff time:", cutoff_hop*dh
cutoff_internal_audio = sa_sss[cutoff_hop]
cutoff_internal_audio_db = sa_db[cutoff_hop]
#print "Cutoff internal audio:", cutoff_internal_audio
if write:
numpy.savetxt("instrument-%i-%.3f-db.txt" % (
st,finger),
numpy.vstack( (
dhs, buf_db
)).transpose())
numpy.savetxt("string-%i-%.3f-db.txt" % (
st,finger),
numpy.vstack( (
dhs, sa_db
)).transpose())
numpy.savetxt("cutoff-%i-%.3f.txt" % (
st,finger),
numpy.array([
[0,
cutoff_internal_audio_db],
[BUFS*dh,
cutoff_internal_audio_db]
])
)
if plot:
pylab.subplot(211)
pylab.title("Final audio")
pylab.plot(dhs, buf_db)
pylab.axhline(CUTOFF_DB_FINAL_AUDIO)
pylab.subplot(212)
pylab.title("String audio")
pylab.plot(dhs, sa_db, '.-')
pylab.axhline(cutoff_internal_audio_db)
pylab.show()
return cutoff_internal_audio
def do_string(violin, insttype, st, plot=False, write=False):
force = INST_FORCES[insttype][st]
MIDI = numpy.arange(0, 13)
POS = [ midi_pos.midi2pos(float(m)) for m in MIDI]
vals = []
#dbs = []
for finger in POS:
val = pluck_force(violin=violin,
st=st, force=force, finger=finger,
plot=plot, write=write,
)
vals.append(val)
#dbs.append(db)
val = min(vals)
#db = max(dbs)
return val
#for inst, instnum in STRINGS:
# forces = INST_FORCES[inst]
# for st, force in enumerate(forces):
# val, db = do_string(inst=inst, instnum=instnum,
# st=st, force=force,
# #plot=True
# )
# print inst, instnum, st, val
if __name__ == "__main__":
violin = artifastring_instrument.ArtifastringInstrument(0, 0)
do_string(violin, 0, 3, plot=False, write=True)
|
gpl-3.0
|
ovnicraft/odoo
|
addons/edi/models/edi.py
|
277
|
31944
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'Odoo' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
match = EXTERNAL_ID_PATTERN.match(ext_id)
assert match, \
_("'%s' is an invalid external ID") % (ext_id)
return {'module': match.group(1),
'db_uuid': match.group(2),
'id': match.group(3),
'full': match.group(0)}
def safe_unique_id(database_id, model, record_id):
"""Generate a unique string to represent a (database_uuid,model,record_id) pair
without being too long, and with a very low probability of collisions.
"""
msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
digest = hashlib.sha1(msg).digest()
# fold the sha1 20 bytes digest to 9 bytes
digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
# b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
digest = base64.urlsafe_b64encode(digest)
return '%s-%s' % (model.replace('.','_'), digest)
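# Illustrative sketch (not part of the original module): the two helpers above
# are pure string manipulation, so they can be exercised without a database.
# The identifiers below are made-up sample values.
def _example_external_id_helpers():
    # -> {'module': 'sale', 'db_uuid': 'a1b2c3d4', 'id': 'order_42',
    #     'full': 'sale:a1b2c3d4.order_42'}
    parts = split_external_id('sale:a1b2c3d4.order_42')
    # -> e.g. 'res_partner-AbCdEfGhIjkl' (a 12-char url-safe base64 suffix)
    unique = safe_unique_id('a1b2c3d4', 'res.partner', 42)
    return parts, unique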
def last_update_for(record):
"""Returns the last update timestamp for the given record,
if available, otherwise False
"""
if record._log_access:
record_log = record.get_metadata()[0]
return record_log.get('write_date') or record_log.get('create_date') or False
return False
class edi(osv.AbstractModel):
_name = 'edi.edi'
_description = 'EDI Subsystem'
def new_edi_token(self, cr, uid, record):
"""Return a new, random unique token to identify this model record,
and to be used as token when exporting it as an EDI document.
:param browse_record record: model record for which a token is needed
"""
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
return edi_token
def serialize(self, edi_documents):
"""Serialize the given EDI document structures (Python dicts holding EDI data),
using JSON serialization.
:param [dict] edi_documents: list of EDI document structures to serialize
:return: UTF-8 encoded string containing the serialized document
"""
serialized_list = json.dumps(edi_documents)
return serialized_list
def generate_edi(self, cr, uid, records, context=None):
"""Generates a final EDI document containing the EDI serialization
of the given records, which should all be instances of a Model
that has the :meth:`~.edi` mixin. The document is not saved in the
database.
:param list(browse_record) records: records to export as EDI
:return: UTF-8 encoded string containing the serialized records
"""
edi_list = []
for record in records:
record_model = record._model
edi_list += record_model.edi_export(cr, uid, [record], context=context)
return self.serialize(edi_list)
def load_edi(self, cr, uid, edi_documents, context=None):
"""Import the given EDI document structures into the system, using
:meth:`~.import_edi`.
:param edi_documents: list of Python dicts containing the deserialized
version of EDI documents
:return: list of (model, id, action) tuple containing the model and database ID
of all records that were imported in the system, plus a suggested
action definition dict for displaying each document.
"""
ir_module = self.pool.get('ir.module.module')
res = []
for edi_document in edi_documents:
module = edi_document.get('__import_module') or edi_document.get('__module')
assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
raise osv.except_osv(_('Missing Application.'),
_("The document you are trying to import requires the Odoo `%s` application. "
"You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
model = edi_document.get('__import_model') or edi_document.get('__model')
assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\
'this EDI document seems invalid or unsupported.' % (model,module)
model_obj = self.pool[model]
record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
res.append((model, record_id, record_action))
return res
def deserialize(self, edi_documents_string):
"""Return deserialized version of the given EDI Document string.
:param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
JSON-serialized EDI document(s)
:return: Python object representing the EDI document(s) (usually a list of dicts)
"""
return json.loads(edi_documents_string)
def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
"""Import a JSON serialized EDI Document string into the system, first retrieving it
from the given ``edi_url`` if provided.
        :param str|unicode edi_document: UTF-8 string or unicode containing
                                         JSON-serialized EDI Document to import.
                                         Must not be provided if ``edi_url`` is given.
:param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
may be retrieved, without authentication.
"""
if edi_url:
assert not edi_document, 'edi must not be provided if edi_url is given.'
edi_document = urllib2.urlopen(edi_url).read()
assert edi_document, 'EDI Document is empty!'
edi_documents = self.deserialize(edi_document)
return self.load_edi(cr, uid, edi_documents, context=context)
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi):
model_name = edi.get('__imported_model') or edi.get('__model') or self._name
for attribute in attributes:
assert edi.get(attribute),\
'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
context=None):
"""Generate/Retrieve unique external ID for ``record``.
Each EDI record and each relationship attribute in it is identified by a
unique external ID, which includes the database's UUID, as a way to
refer to any record within any Odoo instance, without conflict.
For Odoo records that have an existing "External ID" (i.e. an entry in
ir.model.data), the EDI unique identifier for this record will be made of
"%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
UUID MUST NOT contain a colon characters (this is guaranteed by the
UUID algorithm).
For records that have no existing ir.model.data entry, a new one will be
created during the EDI export. It is recommended that the generated external ID
contains a readable reference to the record model, plus a unique value that
hides the database ID. If ``existing_id`` is provided (because it came from
an import), it will be used instead of generating a new one.
If ``existing_module`` is provided (because it came from
an import), it will be used instead of using local values.
:param browse_record record: any browse_record needing an EDI external ID
:param string existing_id: optional existing external ID value, usually coming
from a just-imported EDI record, to be used instead
of generating a new one
:param string existing_module: optional existing module name, usually in the
format ``module:db_uuid`` and coming from a
just-imported EDI record, to be used instead
of local values
:return: the full unique External ID to use for record
"""
ir_model_data = self.pool.get('ir.model.data')
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
ext_id = record.get_external_id()[record.id]
if not ext_id:
ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
# ID is unique cross-db thanks to db_uuid (already included in existing_module)
module = existing_module or "%s:%s" % (record._original_module, db_uuid)
_logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
module, ext_id, record)
ir_model_data.create(cr, openerp.SUPERUSER_ID,
{'name': ext_id,
'model': record._name,
'module': module,
'res_id': record.id})
else:
module, ext_id = ext_id.split('.')
if not ':' in module:
# this record was not previously EDI-imported
if not module == record._original_module:
# this could happen for data records defined in a module that depends
# on the module that owns the model, e.g. purchase defines
# product.pricelist records.
_logger.debug('Mismatching module: expected %s, got %s, for %s.',
module, record._original_module, record)
# ID is unique cross-db thanks to db_uuid
module = "%s:%s" % (module, db_uuid)
return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
        the record with ID ``id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
def edi_metadata(self, cr, uid, records, context=None):
"""Return a list containing the boilerplate EDI structures for
exporting ``records`` as EDI, including
the metadata fields
The metadata fields always include::
{
'__model': 'some.model', # record model
'__module': 'module', # require module
'__id': 'module:db-uuid:model.id', # unique global external ID for the record
'__last_update': '2011-01-01 10:00:00', # last update date in UTC!
'__version': 1, # EDI spec version
'__generator' : 'Odoo', # EDI generator
'__generator_version' : [6,1,0], # server version, to check compatibility.
'__attachments_':
}
:param list(browse_record) records: records to export
:return: list of dicts containing boilerplate EDI metadata for each record,
at the corresponding index from ``records``.
"""
ir_attachment = self.pool.get('ir.attachment')
results = []
for record in records:
ext_id = self._edi_external_id(cr, uid, record, context=context)
edi_dict = {
'__id': ext_id,
'__last_update': last_update_for(record),
'__model' : record._name,
'__module' : record._original_module,
'__version': EDI_PROTOCOL_VERSION,
'__generator': EDI_GENERATOR,
'__generator_version': EDI_GENERATOR_VERSION,
}
attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
if attachment_ids:
attachments = []
for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
attachments.append({
'name' : attachment.name,
'content': attachment.datas, # already base64 encoded!
'file_name': attachment.datas_fname,
})
edi_dict.update(__attachments=attachments)
results.append(edi_dict)
return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
This is basically the same as exporting all the record using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
This is basically the same as exporting all the record using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Returns a list of dicts representing EDI documents containing the
records, and matching the given ``edi_struct``, if provided.
:param edi_struct: if provided, edi_struct should be a dictionary
with a skeleton of the fields to export.
Basic fields can have any key as value, but o2m
values should have a sample skeleton dict as value,
to act like a recursive export.
For example, for a res.partner record::
edi_struct: {
'name': True,
'company_id': True,
'address': {
'name': True,
'street': True,
}
}
Any field not specified in the edi_struct will not
be included in the exported data. Fields with no
value (False) will be omitted in the EDI struct.
If edi_struct is omitted, no fields will be exported
"""
if edi_struct is None:
edi_struct = {}
fields_to_export = edi_struct.keys()
results = []
for record in records:
edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
for field_name in fields_to_export:
field = self._fields[field_name]
value = getattr(record, field_name)
if not value and value not in ('', 0):
continue
elif field.type == 'many2one':
value = self.edi_m2o(cr, uid, value, context=context)
elif field.type == 'many2many':
value = self.edi_m2m(cr, uid, value, context=context)
elif field.type == 'one2many':
value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field_name, {}), context=context)
edi_dict[field_name] = value
results.append(edi_dict)
return results
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool[model_name]
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
"""Utility method to generate the first PDF-type report declared for the
current model with ``usage`` attribute set to ``default``.
This must be called explicitly by models that need it, usually
at the beginning of ``edi_export``, before the call to ``super()``."""
ir_actions_report = self.pool.get('ir.actions.report.xml')
matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
('report_type','=','pdf'),
('usage','=','default')])
if matching_reports:
report = ir_actions_report.browse(cr, uid, matching_reports[0])
result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
eval_context = {'time': time, 'object': record}
if not report.attachment or not eval(report.attachment, eval_context):
# no auto-saving of report as attachment, need to do it manually
result = base64.b64encode(result)
file_name = record.name_get()[0][1]
file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
file_name += ".pdf"
self.pool.get('ir.attachment').create(cr, uid,
{
'name': file_name,
'datas': result,
'datas_fname': file_name,
'res_model': self._name,
'res_id': record.id,
'type': 'binary'
},
context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
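        """Create ``ir.attachment`` records for the entries listed under the
        ``__attachments`` key of the EDI document, linking them to the record
        identified by ``record_id``."""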
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content.'
assert attachment.get('name'), 'Incorrect/Missing attachment name.'
assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
"""Returns browse_record representing object identified by the model and external_id,
or None if no record was found with this external id.
:param external_id: fully qualified external id, in the EDI form
``module:db_uuid:identifier``.
:param model: model name the record belongs to.
"""
ir_model_data = self.pool.get('ir.model.data')
# external_id is expected to have the form: ``module:db_uuid:model.random_name``
ext_id_members = split_external_id(external_id)
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
module = ext_id_members['module']
ext_id = ext_id_members['id']
modules = []
ext_db_uuid = ext_id_members['db_uuid']
if ext_db_uuid:
modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
if ext_db_uuid is None or ext_db_uuid == db_uuid:
# local records may also be registered without the db_uuid
modules.append(module)
data_ids = ir_model_data.search(cr, uid, [('model','=',model),
('name','=',ext_id),
('module','in',modules)])
if data_ids:
model = self.pool[model]
data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
if model.exists(cr, uid, [data.res_id]):
return model.browse(cr, uid, data.res_id, context=context)
# stale external-id, cleanup to allow re-import, as the corresponding record is gone
ir_model_data.unlink(cr, 1, [data_ids[0]])
def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
"""Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
given model, returning the corresponding database ID:
* First, checks if the ``external_id`` is already known, in which case the corresponding
database ID is directly returned, without doing anything else;
* If the ``external_id`` is unknown, attempts to locate an existing record
with the same ``value`` via name_search(). If found, the given external_id will
be assigned to this local record (in addition to any existing one)
* If previous steps gave no result, create a new record with the given
value in the target model, assign it the given external_id, and return
the new database ID
:param str value: display name of the record to import
:param str external_id: fully-qualified external ID of the record
:return: database id of newly-imported or pre-existing record
"""
_logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
need_new_ext_id = False
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
self._name, external_id, value)
target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
need_new_ext_id = True
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
self._name, external_id, value)
                # need_new_ext_id is needed here too, but it was already set above
model = self.pool[model]
res_id, _ = model.name_create(cr, uid, value, context=context)
target = model.browse(cr, uid, res_id, context=context)
else:
_logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
self._name, external_id, value, target.id)
if need_new_ext_id:
ext_id_members = split_external_id(external_id)
# module name is never used bare when creating ir.model.data entries, in order
            # to avoid being taken as part of the module's data, and cleaned up at the next update
module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
# create a new ir.model.data entry for this value
self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
return target.id
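    # Example call (illustrative; model, value and external id are hypothetical):
    #   partner_id = self.edi_import_relation(cr, uid, 'res.partner', 'ACME Corp',
    #                                         'base:db-uuid.partner_acme', context=context)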
def edi_import(self, cr, uid, edi, context=None):
"""Imports a dict representing an EDI document into the system.
:param dict edi: EDI document to import
:return: the database ID of the imported record
"""
assert self._name == edi.get('__import_model') or \
('__import_model' not in edi and self._name == edi.get('__model')), \
"EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
(edi.get('__model'), self._name)
        # First check whether the record is already known in the database, in which case it is ignored
ext_id_members = split_external_id(edi['__id'])
existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
if existing:
_logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
return existing.id
record_values = {}
o2m_todo = {} # o2m values are processed after their parent already exists
for field_name, field_value in edi.iteritems():
# skip metadata and empty fields
if field_name.startswith('__') or field_value is None or field_value is False:
continue
field = self._fields.get(field_name)
if not field:
_logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
continue
# skip function/related fields
if not field.store:
_logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." % (self._name, field_name))
continue
relation_model = field.comodel_name
if field.type == 'many2one':
record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
field_value[1], field_value[0],
context=context)
elif field.type == 'many2many':
record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
m2m_value[0], context=context)
for m2m_value in field_value]
elif field.type == 'one2many':
                # must wait until the parent record is imported, as the parent relationship
# is often required in o2m child records
o2m_todo[field_name] = field_value
else:
record_values[field_name] = field_value
module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
xml_id=ext_id_members['id'], context=context)
record_display, = self.name_get(cr, uid, [record_id], context=context)
# process o2m values, connecting them to their parent on-the-fly
for o2m_field, o2m_value in o2m_todo.iteritems():
field = self._fields[o2m_field]
dest_model = self.pool[field.comodel_name]
dest_field = field.inverse_name
for o2m_line in o2m_value:
# link to parent record: expects an (ext_id, name) pair
o2m_line[dest_field] = (ext_id_members['full'], record_display[1])
dest_model.edi_import(cr, uid, o2m_line, context=context)
# process the attachments, if any
self._edi_import_attachments(cr, uid, record_id, edi, context=context)
return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
tschneidereit/servo
|
tests/wpt/css-tests/css21_dev/html4/support/fonts/makegsubfonts.py
|
820
|
14309
|
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# --------
# Features
# --------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
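# addGlyphToCFF (below) appends a new CFF charstring for the given glyph name
# and registers it in the charset/CharStrings of the font being built.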
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
|
mpl-2.0
|
moylop260/odoo-dev
|
addons/mail/tests/test_message_read.py
|
44
|
14606
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
class test_mail_access_rights(TestMail):
def test_00_message_read(self):
""" Tests for message_read and expandables. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
self.mail_group.message_subscribe_users(cr, uid, [group_pigs.id], [user_raoul.id])
pigs_domain = [('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)]
# Data: create a discussion in Pigs (3 threads, with respectively 0, 4 and 4 answers)
msg_id0 = self.group_pigs.message_post(body='0', subtype='mt_comment')
msg_id1 = self.group_pigs.message_post(body='1', subtype='mt_comment')
msg_id2 = self.group_pigs.message_post(body='2', subtype='mt_comment')
msg_id3 = self.group_pigs.message_post(body='1-1', subtype='mt_comment', parent_id=msg_id1)
msg_id4 = self.group_pigs.message_post(body='2-1', subtype='mt_comment', parent_id=msg_id2)
msg_id5 = self.group_pigs.message_post(body='1-2', subtype='mt_comment', parent_id=msg_id1)
msg_id6 = self.group_pigs.message_post(body='2-2', subtype='mt_comment', parent_id=msg_id2)
msg_id7 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id8 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_id9 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id10 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_ids = [msg_id10, msg_id9, msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0]
ordered_msg_ids = [msg_id2, msg_id4, msg_id6, msg_id8, msg_id10, msg_id1, msg_id3, msg_id5, msg_id7, msg_id9, msg_id0]
# Test: raoul received notifications
raoul_notification_ids = self.mail_notification.search(cr, user_raoul.id, [('read', '=', False), ('message_id', 'in', msg_ids), ('partner_id', '=', user_raoul.partner_id.id)])
self.assertEqual(len(raoul_notification_ids), 11, 'message_post: wrong number of produced notifications')
# Test: read some specific ids
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, ids=msg_ids[2:4], domain=[('body', 'like', 'dummy')], context={'mail_read_set_read': True})
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids[2:4], read_msg_ids, 'message_read with direct ids should read only the requested ids')
        # Test: read messages of Pigs through a domain, threaded or not
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids, read_msg_ids, 'message_read flat with domain on Pigs should equal all messages of Pigs')
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(ordered_msg_ids, read_msg_ids,
'message_read threaded with domain on Pigs should equal all messages of Pigs, and sort them with newer thread first, last message last in thread')
# ----------------------------------------
# CASE1: message_read with domain, threaded
# We simulate an entire flow, using the expandables to test them
# ----------------------------------------
# Do: read last message, threaded
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# TDE TODO: test expandables order
type_list = map(lambda item: item.get('type'), read_msg_list)
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on last Pigs message should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id2, msg_id10]), set(read_msg_ids), 'message_read on the last Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in first thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on last Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id2), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id4), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id8), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id2, 'new messages expandable should have parent_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id2 (should be imposed by JS), 2 messages
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
new_msg_exp = [msg for msg in read_msg_list if msg.get('type') == 'expandable'][0]
# Test: structure content, 2 messages and 1 thread expandable
        self.assertEqual(len(read_msg_list), 3, 'message_read in Pigs thread should return 2 messages and 1 expandable')
self.assertEqual(set([msg_id6, msg_id8]), set(read_msg_ids), 'message_read in Pigs thread should return 2 more previous messages in thread')
# Do: read the last message
read_msg_list = self.mail_message.message_read(cr, uid, domain=new_msg_exp.get('domain'), limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, 1 message
self.assertEqual(len(read_msg_list), 1, 'message_read in Pigs thread should return 1 message')
self.assertEqual(set([msg_id4]), set(read_msg_ids), 'message_read in Pigs thread should return the last message in thread')
# Do: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read on last Pigs message should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
        self.assertFalse(new_threads_exp.get('parent_id'), 'new threads expandable should not have a parent_id')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on Pigs should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id1, msg_id9]), set(read_msg_ids), 'message_read on a Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in second thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id1), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id3), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id7), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id1, 'new messages expandable should have ancestor_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=200, thread_level=0, parent_id=msg_id1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
        # Test: other messages in the thread have been fetched
        self.assertEqual(set([msg_id3, msg_id5, msg_id7]), set(read_msg_ids), 'message_read in Pigs thread should fetch the remaining messages of the thread')
# Test: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'general expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 1, 'message_read on Pigs should return 1 message because everything else has been fetched')
self.assertEqual([msg_id0], read_msg_ids, 'message_read after 2 More should return only 1 last message')
# ----------------------------------------
# CASE2: message_read with domain, flat
# ----------------------------------------
# Do: read 2 lasts message, flat
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=2, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is not set, 1 expandable
self.assertEqual(len(read_msg_list), 3, 'message_read on last Pigs message should return 2 messages and 1 expandable')
self.assertEqual(set([msg_id9, msg_id10]), set(read_msg_ids), 'message_read flat on Pigs last messages should only return those messages')
self.assertFalse(read_msg_list[0].get('parent_id'), 'message_read flat should set the ancestor as False')
self.assertFalse(read_msg_list[1].get('parent_id'), 'message_read flat should set the ancestor as False')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
# Do: fetch new messages, domain from expandable
        self.assertIsNotNone(new_threads_exp, 'message_read flat on the 2 last Pigs messages should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=0 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=20, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 9, 'message_read on Pigs should return 9 messages and 0 expandable')
self.assertEqual([msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0], read_msg_ids,
                         'message_read, More on flat, should return all remaining messages')
|
agpl-3.0
|
leoliujie/odoo
|
addons/auth_signup/controllers/main.py
|
144
|
6049
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import werkzeug
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.addons.web.controllers.main import ensure_db
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AuthSignupHome(openerp.addons.web.controllers.main.Home):
@http.route()
def web_login(self, *args, **kw):
ensure_db()
response = super(AuthSignupHome, self).web_login(*args, **kw)
response.qcontext.update(self.get_auth_signup_config())
if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return http.redirect_with_hash(request.params.get('redirect'))
return response
@http.route('/web/signup', type='http', auth='public', website=True)
def web_auth_signup(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('signup_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
except (SignupError, AssertionError), e:
qcontext['error'] = _(e.message)
return request.render('auth_signup.signup', qcontext)
@http.route('/web/reset_password', type='http', auth='public', website=True)
def web_auth_reset_password(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
if qcontext.get('token'):
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
else:
login = qcontext.get('login')
assert login, "No login provided."
res_users = request.registry.get('res.users')
res_users.reset_password(request.cr, openerp.SUPERUSER_ID, login)
qcontext['message'] = _("An email has been sent with credentials to reset your password")
except SignupError:
qcontext['error'] = _("Could not reset your password")
_logger.exception('error when resetting password')
except Exception, e:
qcontext['error'] = _(e.message)
return request.render('auth_signup.reset_password', qcontext)
def get_auth_signup_config(self):
"""retrieve the module config (which features are enabled) for the login page"""
icp = request.registry.get('ir.config_parameter')
return {
'signup_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.allow_uninvited') == 'True',
'reset_password_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.reset_password') == 'True',
}
def get_auth_signup_qcontext(self):
""" Shared helper returning the rendering context for signup and reset password """
qcontext = request.params.copy()
qcontext.update(self.get_auth_signup_config())
if qcontext.get('token'):
try:
# retrieve the user info (name, login or email) corresponding to a signup token
res_partner = request.registry.get('res.partner')
token_infos = res_partner.signup_retrieve_info(request.cr, openerp.SUPERUSER_ID, qcontext.get('token'))
for k, v in token_infos.items():
qcontext.setdefault(k, v)
except:
qcontext['error'] = _("Invalid signup token")
return qcontext
def do_signup(self, qcontext):
""" Shared helper that creates a res.partner out of a token """
values = dict((key, qcontext.get(key)) for key in ('login', 'name', 'password'))
assert any([k for k in values.values()]), "The form was not properly filled in."
assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them."
values['lang'] = request.lang
self._signup_with_values(qcontext.get('token'), values)
request.cr.commit()
def _signup_with_values(self, token, values):
db, login, password = request.registry['res.users'].signup(request.cr, openerp.SUPERUSER_ID, values, token)
request.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction
uid = request.session.authenticate(db, login, password)
if not uid:
            raise SignupError(_('Authentication Failed.'))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
iheitlager/django-rest-framework
|
rest_framework/utils/model_meta.py
|
71
|
5904
|
"""
Helper function for returning the field information that is associated
with a model class. This includes returning all the forward and reverse
relationships and their associated metadata.
Usage: `get_field_info(model)` returns a `FieldInfo` instance.
"""
import inspect
from collections import namedtuple
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils import six
from rest_framework.compat import OrderedDict
FieldInfo = namedtuple('FieldResult', [
'pk', # Model field instance
'fields', # Dict of field name -> model field instance
'forward_relations', # Dict of field name -> RelationInfo
'reverse_relations', # Dict of field name -> RelationInfo
'fields_and_pk', # Shortcut for 'pk' + 'fields'
'relations' # Shortcut for 'forward_relations' + 'reverse_relations'
])
RelationInfo = namedtuple('RelationInfo', [
'model_field',
'related_model',
'to_many',
'has_through_model'
])
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
resolved_model = models.get_model(app_name, model_name)
if resolved_model is None:
msg = "Django did not return a model for {0}.{1}"
raise ImproperlyConfigured(msg.format(app_name, model_name))
return resolved_model
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
raise ValueError("{0} is not a Django model".format(obj))
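# Example (illustrative; assumes an app "blog" defining a model "Post"):
#   _resolve_model('blog.Post')  # -> the Post model class
#   _resolve_model(Post)         # -> Post, returned unchanged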
def get_field_info(model):
"""
Given a model class, returns a `FieldInfo` instance, which is a
`namedtuple`, containing metadata about the various field types on the model
including information about their relationships.
"""
opts = model._meta.concrete_model._meta
pk = _get_pk(opts)
fields = _get_fields(opts)
forward_relations = _get_forward_relationships(opts)
reverse_relations = _get_reverse_relationships(opts)
fields_and_pk = _merge_fields_and_pk(pk, fields)
relationships = _merge_relationships(forward_relations, reverse_relations)
return FieldInfo(pk, fields, forward_relations, reverse_relations,
fields_and_pk, relationships)
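# Typical usage sketch (model and field names are hypothetical):
#   info = get_field_info(BlogPost)
#   info.pk.name                      # e.g. 'id'
#   list(info.forward_relations)      # e.g. ['author', 'tags']
#   info.relations['author'].to_many  # False for a ForeignKey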
def _get_pk(opts):
pk = opts.pk
while pk.rel and pk.rel.parent_link:
# If model is a child via multi-table inheritance, use parent's pk.
pk = pk.rel.to._meta.pk
return pk
def _get_fields(opts):
fields = OrderedDict()
for field in [field for field in opts.fields if field.serialize and not field.rel]:
fields[field.name] = field
return fields
def _get_forward_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
forward_relations = OrderedDict()
for field in [field for field in opts.fields if field.serialize and field.rel]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=False,
has_through_model=False
)
# Deal with forward many-to-many relationships.
for field in [field for field in opts.many_to_many if field.serialize]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=True,
has_through_model=(
not field.rel.through._meta.auto_created
)
)
return forward_relations
def _get_reverse_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
# Note that we have a hack here to handle internal API differences for
# this internal API across Django 1.7 -> Django 1.8.
# See: https://code.djangoproject.com/ticket/24208
reverse_relations = OrderedDict()
for relation in opts.get_all_related_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=relation.field.rel.multiple,
has_through_model=False
)
# Deal with reverse many-to-many relationships.
for relation in opts.get_all_related_many_to_many_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=True,
has_through_model=(
(getattr(relation.field.rel, 'through', None) is not None) and
not relation.field.rel.through._meta.auto_created
)
)
return reverse_relations
def _merge_fields_and_pk(pk, fields):
fields_and_pk = OrderedDict()
fields_and_pk['pk'] = pk
fields_and_pk[pk.name] = pk
fields_and_pk.update(fields)
return fields_and_pk
def _merge_relationships(forward_relations, reverse_relations):
return OrderedDict(
list(forward_relations.items()) +
list(reverse_relations.items())
)
def is_abstract_model(model):
"""
Given a model class, returns a boolean True if it is abstract and False if it is not.
"""
return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
|
bsd-2-clause
|
ujenmr/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_monitor_firepass.py
|
14
|
25665
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_firepass
short_description: Manages F5 BIG-IP GTM FirePass monitors
description:
- Manages F5 BIG-IP GTM FirePass monitors.
version_added: 2.6
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: /Common/firepass_gtm
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run.
- If this parameter is not provided when creating a new monitor, then
the default value will be 30.
- This value B(must) be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then
the default value will be 90.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
probe_timeout:
description:
- Specifies the number of seconds after which the system times out the probe request
to the system.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(5).
ignore_down_response:
description:
- Specifies that the monitor allows more than one probe attempt per interval.
- When C(yes), specifies that the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies that the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
target_username:
description:
- Specifies the user name, if the monitored target requires authentication.
target_password:
description:
- Specifies the password, if the monitored target requires authentication.
update_password:
description:
- C(always) will update passwords if the C(target_password) is specified.
- C(on_create) will only set the password for newly created monitors.
default: always
choices:
- always
- on_create
cipher_list:
description:
- Specifies the list of ciphers for this monitor.
- The items in the cipher list are separated with the colon C(:) symbol.
- When creating a new monitor, if this parameter is not specified, the default
list is C(HIGH:!ADH).
max_load_average:
description:
- Specifies the number that the monitor uses to mark the Secure Access Manager
system up or down.
- The system compares the Max Load Average setting against a one-minute average
of the Secure Access Manager system load.
- When the Secure Access Manager system-load average falls within the specified
Max Load Average, the monitor marks the Secure Access Manager system up.
- When the average exceeds the setting, the monitor marks the system down.
- When creating a new monitor, if this parameter is not specified, the default
is C(12).
concurrency_limit:
description:
- Specifies the maximum percentage of licensed connections currently in use under
which the monitor marks the Secure Access Manager system up.
- As an example, a setting of 95 percent means that the monitor marks the Secure
Access Manager system up until 95 percent of licensed connections are in use.
- When the number of in-use licensed connections exceeds 95 percent, the monitor
marks the Secure Access Manager system down.
- When creating a new monitor, if this parameter is not specified, the default is C(95).
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a GTM FirePass monitor
bigip_gtm_monitor_firepass:
name: my_monitor
ip: 1.1.1.1
port: 80
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove FirePass Monitor
bigip_gtm_monitor_firepass:
name: my_monitor
state: absent
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add FirePass monitor for all addresses, port 514
bigip_gtm_monitor_firepass:
name: my_monitor
port: 514
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: firepass_gtm
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
port:
description: The new port the monitor checks the resource on.
returned: changed
type: str
sample: 8080
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
probe_timeout:
description: The new timeout in which the system will timeout the monitor probe.
returned: changed
type: int
sample: 10
cipher_list:
description: The new value for the cipher list.
returned: changed
type: str
sample: +3DES:+kEDH
max_load_average:
description: The new value for the max load average.
returned: changed
type: int
sample: 12
concurrency_limit:
description: The new value for the concurrency limit.
returned: changed
type: int
sample: 95
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'ignoreDownResponse': 'ignore_down_response',
'probeTimeout': 'probe_timeout',
'username': 'target_username',
'password': 'target_password',
'cipherlist': 'cipher_list',
'concurrencyLimit': 'concurrency_limit',
'maxLoadAverage': 'max_load_average',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'probeTimeout',
'ignoreDownResponse',
'username',
'password',
'cipherlist',
'concurrencyLimit',
'maxLoadAverage',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'probe_timeout',
'ignore_down_response',
'cipher_list',
'max_load_average',
'concurrency_limit',
]
updatables = [
'destination',
'interval',
'timeout',
'probe_timeout',
'ignore_down_response',
'ip',
'port',
'target_username',
'target_password',
'cipher_list',
'max_load_average',
'concurrency_limit',
]
class ApiParameters(Parameters):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
try:
return int(port)
except ValueError:
return port
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
if self._values['ignore_down_response'] == 'disabled':
return False
return True
class ModuleParameters(Parameters):
@property
def interval(self):
if self._values['interval'] is None:
return None
        if 1 > int(self._values['interval']) or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self): # lgtm [py/similar-function]
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def probe_timeout(self):
if self._values['probe_timeout'] is None:
return None
return int(self._values['probe_timeout'])
@property
def max_load_average(self):
if self._values['max_load_average'] is None:
return None
return int(self._values['max_load_average'])
@property
def concurrency_limit(self):
if self._values['concurrency_limit'] is None:
return None
return int(self._values['concurrency_limit'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
elif self._values['ignore_down_response'] is True:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
return int(port)
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] == 'enabled':
return True
return False
class Difference(object):
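# Computes the per-parameter difference between the desired state ('want') and the
# state read from the device ('have'); compare() dispatches to a same-named property
# when one exists, otherwise falls back to the simple inequality check in __default().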
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
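# 'interval' must stay below 'timeout'; validate using whichever side (want or
# have) supplies each value before reporting a change.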
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
@property
def target_password(self):
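# Only report a password change when update_password is 'always'; the stored
# password cannot be read back from the device for a reliable comparison.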
if self.want.target_password != self.have.target_password:
if self.want.update_password == 'always':
result = self.want.target_password
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 90})
if self.want.interval is None:
self.want.update({'interval': 30})
if self.want.probe_timeout is None:
self.want.update({'probe_timeout': 5})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.ignore_down_response is None:
self.want.update({'ignore_down_response': False})
if self.want.cipher_list is None:
self.want.update({'cipher_list': 'HIGH:!ADH'})
if self.want.max_load_average is None:
self.want.update({'max_load_average': 12})
if self.want.concurrency_limit is None:
self.want.update({'concurrency_limit': 95})
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_default_creation_values()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/firepass/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/firepass/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/firepass/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/firepass/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/firepass/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/firepass_gtm'),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
ignore_down_response=dict(type='bool'),
probe_timeout=dict(type='int'),
target_username=dict(),
target_password=dict(no_log=True),
cipher_list=dict(),
update_password=dict(
default='always',
choices=['always', 'on_create']
),
max_load_average=dict(type='int'),
concurrency_limit=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
gpl-3.0
|
vlajos/bitcoin
|
qa/rpc-tests/getblocktemplate_proposals.py
|
145
|
6328
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
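# Example of the encoding above: encodeUNum(1000) == b'\x02\xe8\x03'
# (a 2-byte push followed by 1000 as little-endian 0x03e8).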
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
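# varlenEncode emits Bitcoin's variable-length integer ("varint"),
# e.g. varlenEncode(300) == b'\xfd\x2c\x01'.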
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
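# template_to_bytes serializes a block as: 4-byte version | 32-byte prev hash (reversed) |
# 32-byte merkle root | 4-byte curtime | 4-byte bits (reversed) | 4-byte nonce |
# varint tx count | raw transactions.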
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(BitcoinTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
node.generate(1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
|
mit
|
eusi/MissionPlanerHM
|
Lib/site-packages/scipy/misc/tests/test_pilutil.py
|
53
|
1878
|
import os.path
import numpy as np
from numpy.testing import assert_, assert_equal, \
dec, decorate_methods, TestCase, run_module_suite
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
import scipy.misc.pilutil as pilutil
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10,20))
for T in np.sctypes['float'] + [float]:
# 1.1 rounds to below 1.1 for float16, 1.101 works
im1 = pilutil.imresize(im,T(1.101))
assert_equal(im1.shape,(11,22))
def test_imresize2(self):
im = np.random.random((20,30))
im2 = pilutil.imresize(im, (30,40), interp='bicubic')
assert_equal(im2.shape, (30,40))
def test_imresize3(self):
im = np.random.random((15,30))
im2 = pilutil.imresize(im, (30,60), interp='nearest')
assert_equal(im2.shape, (30,60))
def test_bytescale(self):
x = np.array([0,1,2],np.uint8)
y = np.array([0,1,2])
assert_equal(pilutil.bytescale(x),x)
assert_equal(pilutil.bytescale(y),[0,127,255])
def tst_fromimage(filename, irange):
img = pilutil.fromimage(PIL.Image.open(filename))
imin,imax = irange
assert_(img.min() >= imin)
assert_(img.max() <= imax)
@_pilskip
def test_fromimage():
''' Test generator for parametric tests '''
data = {'icon.png':(0,255),
'icon_mono.png':(0,2),
'icon_mono_flat.png':(0,1)}
for fn, irange in data.iteritems():
yield tst_fromimage, os.path.join(datapath,'data',fn), irange
decorate_methods(TestPILUtil, _pilskip)
if __name__ == "__main__":
run_module_suite()
|
gpl-3.0
|
mlperf/training_results_v0.6
|
Google/benchmarks/mask/implementations/tpu-v3-128-mask/mask_rcnn/train_low_level_runner.py
|
3
|
13676
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training with low level API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import threading
import time
from six.moves import queue as Queue
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import graph_io
import runner_utils
_INITIAL_LOSS = 1e7
_STOP = -1
_MAX_NUM_CHECKPOINT_THREADS = 1
# for spatial partition
_NUM_CORES_TO_COMPUTATION_SHAPE = {
1: [1, 1, 1],
2: [1, 1, 2],
4: [1, 2, 2],
8: [2, 2, 2],
16: [4, 2, 2],
}
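# _NUM_CORES_TO_COMPUTATION_SHAPE maps num_cores_per_replica to the 3-D computation
# shape passed to the TPU device assignment when spatial partitioning is enabled.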
class TrainLowLevelRunner(object):
"""Run Train via direct session.run calls."""
def __init__(self, tpu_cluster_resolver, params, input_partition_dims=None,
tpu_job_name=None):
tf.logging.info("TrainLowLevelRunner: constructor")
self.tpu_cluster_resolver = tpu_cluster_resolver
self.params = params
self.tpu_job_name = tpu_job_name
self.model_dir = params["model_dir"]
self.iterations_per_loop = params["iterations_per_loop"]
self.num_shards = self.params["num_shards"]
self.input_flattener = runner_utils.InputsFlattener()
self.feature_structure = {}
self.train_compile_op = None
self.train_op = None
self.infeed_queue = []
self.enqueue_ops = []
self.dataset_initializer = []
self.scaffold_fn = None
# Having two separate sessions and graphs to make the initialization faster.
self.input_sess = None
self.train_sess = None
self.input_graph = tf.Graph()
self.train_graph = None
self.session_config = tf.ConfigProto(
allow_soft_placement=True, isolate_session_state=True,
operation_timeout_in_ms=600 * 60 * 1000) # 10 hours
cluster_spec = self.tpu_cluster_resolver.cluster_spec()
if cluster_spec:
self.session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
self.tpu_init = tf.contrib.tpu.initialize_system()
self.tpu_shutdown = tf.contrib.tpu.shutdown_system()
self.init_sess = tf.Session(self.tpu_cluster_resolver.get_master(),
config=self.session_config)
self.device_topology = self.init_sess.run(self.tpu_init)
self.input_partition_dims = input_partition_dims
self.use_spatial_partition = input_partition_dims is not None
self.num_cores_per_replica = (
self.params["num_cores_per_replica"]
if self.params["num_cores_per_replica"] else 1)
if self.use_spatial_partition:
computation_shape = _NUM_CORES_TO_COMPUTATION_SHAPE[
self.num_cores_per_replica]
self.device_assignment = tpu_device_assignment.device_assignment(
topology=self.device_topology,
computation_shape=computation_shape,
num_replicas=self.num_shards)
tf.logging.info("num_cores_per_replica: %d", self.num_cores_per_replica)
tf.logging.info("computation_shape: %s", str(computation_shape))
tf.logging.info("num_replicas: %d", self.num_shards)
tf.logging.info("device_assignment.topology.device_coordinates: %s",
str(self.device_assignment.topology.device_coordinates))
tf.logging.info("device_assignment.core_assignment: %s",
str(self.device_assignment.core_assignment))
self.input_dims_flattener = runner_utils.InputDimsFlattener(
self.input_partition_dims)
else:
self.device_assignment = None
self.input_dims_flattener = None
self.queue = Queue.Queue()
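# Work queue that hands iteration counts to the background infeed thread; the
# _STOP sentinel tells that thread to exit.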
# Summary writer writes out train metrics.
self.summary_writer = tf.summary.FileWriter(self.model_dir)
self.infeed_thread = None
def shutdown(self):
"""Shut down TrainLowLevelRunner."""
tf.logging.info("TrainLowLevelRunner: shutdown")
self.queue.put(_STOP)
if self.infeed_thread:
self.infeed_thread.join()
if self.input_sess:
self.input_sess.close()
if self.train_sess:
self.train_sess.close()
self.summary_writer.close()
def _get_host(self, host_id):
if self.tpu_cluster_resolver.get_master() in ("", "local"):
return "/replica:0/task:0"
job_name = (
self.tpu_job_name or self.tpu_cluster_resolver.get_job_name() or
"tpu_worker")
return "/job:%s/task:%d" % (job_name, host_id)
def build_enqueue_ops(self, input_fn, params, num_hosts, host_id):
"""Build enqueue ops."""
tf.logging.info("TrainLowLevelRunner: build_enqueue_ops for %d", host_id)
def get_enqueue_ops_fn(host_id):
"""Generate the enqueue ops graph function."""
# TODO(b/129084726): make dataset sharding also work for TPU Estimator.
params["dataset_num_shards"] = num_hosts
params["dataset_shard_id"] = host_id
with tf.device(runner_utils.device_for_host(self._get_host(host_id))):
dataset = input_fn(params)
iterator = dataset.make_initializable_iterator()
self.dataset_initializer.append(iterator.initializer)
def enqueue_ops_fn():
"""Enqueue ops function for one host."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(self.params["replicas_per_worker"]):
with tf.control_dependencies(control_deps):
features, labels = iterator.get_next()
if self.use_spatial_partition:
self.input_dims_flattener.validate_and_flatten_input_dims(
features, labels)
flattened_inputs = self.input_flattener.flatten_features_and_labels(
features, labels)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if self.use_spatial_partition:
flattened_input_dims = (
self.input_dims_flattener.flattened_input_dims)
# pylint: disable=protected-access
infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=flattened_input_dims,
device_assignment=self.device_assignment)
self.infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(per_host_sharded_inputs)
infeed = tf.contrib.tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
self.infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=functools.partial(
runner_utils.tpu_ordinal_fn,
replicas_per_worker=self.params["replicas_per_worker"]))
return enqueue_ops_fn
with self.input_graph.as_default():
self.enqueue_ops.append(
runner_utils.wrap_computation_in_while_loop(
get_enqueue_ops_fn(host_id),
n=self.iterations_per_loop,
host_name=self._get_host(host_id)))
def initialize(self, model_fn, input_fn):
"""Build graph and do initialization for training."""
tf.logging.info("TrainLowLevelRunner: initialize method")
num_hosts = (
self.num_shards * self.num_cores_per_replica //
self.params["cores_per_worker"])
for i in range(num_hosts):
self.build_enqueue_ops(input_fn, self.params, num_hosts, i)
def infeed_thread_fn():
"""Build and infeed session.run calls in a background thread."""
self.input_sess = tf.Session(
self.tpu_cluster_resolver.get_master(),
graph=self.input_graph,
config=self.session_config)
# Initialize dataset variables
self.input_sess.run(self.dataset_initializer)
# Run infeed session.run calls
while True:
iterations = self.queue.get(block=True)
if iterations == _STOP:
return
tf.logging.info("Start to infeed %d batches", iterations)
self.input_sess.run([self.enqueue_ops])
def tpu_train_step(loss):
"""Generate the TPU graph."""
del loss
values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
features, labels = self.input_flattener.unflatten_features_and_labels(
values)
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
self.params)
loss, train_op = estimator_spec.loss, estimator_spec.train_op
self.scaffold_fn = estimator_spec.scaffold_fn
with tf.control_dependencies([train_op]):
return tf.identity(loss)
@tpu_function.on_device_training_loop
def train_loop():
return tf.contrib.tpu.repeat(self.iterations_per_loop, tpu_train_step,
[_INITIAL_LOSS])
self.train_graph = tf.Graph()
with self.train_graph.as_default():
(self.train_compile_op, self.train_op) = tpu.split_compile_and_shard(
train_loop,
inputs=[],
num_shards=self.num_shards,
outputs_from_all_shards=False,
device_assignment=self.device_assignment
)
if self.scaffold_fn:
self.scaffold_fn()
global_initializer = tf.global_variables_initializer()
local_initializer = tf.local_variables_initializer()
graph_io.write_graph(
self.input_graph.as_graph_def(add_shapes=True), self.model_dir,
"input_graph.pbtxt")
graph_io.write_graph(
self.train_graph.as_graph_def(add_shapes=True), self.model_dir,
"graph.pbtxt")
self.saver = tf.train.Saver()
# Build tpu train model session and initialize graph
self.train_sess = tf.Session(
self.tpu_cluster_resolver.get_master(),
graph=self.train_graph,
config=self.session_config)
self.train_sess.run(global_initializer)
self.train_sess.run(local_initializer)
# Compiles the train program.
self.train_sess.run([self.train_compile_op])
# Complete infeed graph generation and session.run calls
self.infeed_thread = threading.Thread(target=infeed_thread_fn)
self.infeed_thread.start()
def write_summary(self, summary_writer, graph, loss, global_step,
elapsed_time, elapsed_steps, trained_examples):
"""Write a per-epoch summary of loss, epoch time, etc."""
with graph.as_default():
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = trained_examples / elapsed_time
if summary_writer is not None:
loss_summary = tf.Summary(
value=[tf.Summary.Value(tag="loss", simple_value=loss)])
global_step_summary = tf.Summary(value=[
tf.Summary.Value(
tag="global_step/sec", simple_value=global_step_per_sec)
])
example_summary = tf.Summary(value=[
tf.Summary.Value(
tag="examples/sec", simple_value=examples_per_sec)
])
summary_writer.add_summary(loss_summary, global_step)
summary_writer.add_summary(global_step_summary, global_step)
summary_writer.add_summary(example_summary, global_step)
tf.logging.info("loss = %g, step = %d (%.3f sec)", loss, global_step,
elapsed_time)
tf.logging.info("global_step/sec: %g", global_step_per_sec)
tf.logging.info("examples/sec: %g", examples_per_sec)
def train(self):
"""Run the Train loop on the TPU device."""
train_steps = self.params["total_steps"]
num_examples_per_epoch = self.params["num_examples_per_epoch"]
tf.logging.info("TrainLowLevelRunner: train for %d steps in total",
train_steps)
if train_steps % self.iterations_per_loop != 0:
tf.logging.warning(
"train_steps %d is not divisible by iterations_per_loop %d",
train_steps, self.iterations_per_loop)
train_steps = self.iterations_per_loop * int(
math.ceil(train_steps / self.iterations_per_loop))
ckpt_saver = runner_utils.AsyncCheckpointSaver(_MAX_NUM_CHECKPOINT_THREADS,
self.saver, self.model_dir,
self.train_sess)
cur_step = 0
while cur_step < train_steps:
start = time.time()
tf.logging.info("TrainLowLevelRunner: start train step:%d", cur_step)
self.queue.put(self.iterations_per_loop)
cur_step += self.iterations_per_loop
loss = self.train_sess.run(self.train_op)
end = time.time()
# checkpoint every epoch.
ckpt_saver.checkpoint(cur_step)
self.write_summary(
summary_writer=self.summary_writer,
graph=self.train_graph,
loss=loss[0],
global_step=cur_step,
elapsed_time=end - start,
elapsed_steps=self.iterations_per_loop,
trained_examples=num_examples_per_epoch)
|
apache-2.0
|
Cl3MM/metagoofil
|
hachoir_parser/container/ogg.py
|
95
|
11864
|
#
# Ogg parser
# Author Julien Muchembled <jm AT jm10.no-ip.com>
# Created: 10 june 2006
#
from hachoir_parser import Parser
from hachoir_core.field import (Field, FieldSet, createOrphanField,
NullBits, Bit, Bits, Enum, Fragment, MissingField, ParserError,
UInt8, UInt16, UInt24, UInt32, UInt64,
RawBytes, String, PascalString32, NullBytes)
from hachoir_core.stream import FragmentedStream, InputStreamError
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.tools import humanDurationNanosec
from hachoir_core.text_handler import textHandler, hexadecimal
MAX_FILESIZE = 1000 * 1024 * 1024
class XiphInt(Field):
"""
Positive integer with variable size. Values bigger than 254 are stored as
(255, 255, ..., rest): value is the sum of all bytes.
Example: 1000 is stored as (255, 255, 255, 235), total = 255*3+235 = 1000
"""
def __init__(self, parent, name, max_size=None, description=None):
Field.__init__(self, parent, name, size=0, description=description)
value = 0
addr = self.absolute_address
while max_size is None or self._size < max_size:
byte = parent.stream.readBits(addr, 8, LITTLE_ENDIAN)
value += byte
self._size += 8
if byte != 0xff:
break
addr += 8
self.createValue = lambda: value
class Lacing(FieldSet):
def createFields(self):
size = self.size
while size:
field = XiphInt(self, 'size[]', size)
yield field
size -= field.size
def parseVorbisComment(parent):
yield PascalString32(parent, 'vendor', charset="UTF-8")
yield UInt32(parent, 'count')
for index in xrange(parent["count"].value):
yield PascalString32(parent, 'metadata[]', charset="UTF-8")
if parent.current_size != parent.size:
yield UInt8(parent, "framing_flag")
PIXEL_FORMATS = {
0: "4:2:0",
2: "4:2:2",
3: "4:4:4",
}
def formatTimeUnit(field):
return humanDurationNanosec(field.value * 100)
def parseVideoHeader(parent):
yield NullBytes(parent, "padding[]", 2)
yield String(parent, "fourcc", 4)
yield UInt32(parent, "size")
yield textHandler(UInt64(parent, "time_unit", "Frame duration"), formatTimeUnit)
yield UInt64(parent, "sample_per_unit")
yield UInt32(parent, "default_len")
yield UInt32(parent, "buffer_size")
yield UInt16(parent, "bits_per_sample")
yield NullBytes(parent, "padding[]", 2)
yield UInt32(parent, "width")
yield UInt32(parent, "height")
yield NullBytes(parent, "padding[]", 4)
def parseTheoraHeader(parent):
yield UInt8(parent, "version_major")
yield UInt8(parent, "version_minor")
yield UInt8(parent, "version_revision")
yield UInt16(parent, "width", "Width*16 in pixel")
yield UInt16(parent, "height", "Height*16 in pixel")
yield UInt24(parent, "frame_width")
yield UInt24(parent, "frame_height")
yield UInt8(parent, "offset_x")
yield UInt8(parent, "offset_y")
yield UInt32(parent, "fps_num", "Frame per second numerator")
yield UInt32(parent, "fps_den", "Frame per second denominator")
yield UInt24(parent, "aspect_ratio_num", "Aspect ratio numerator")
yield UInt24(parent, "aspect_ratio_den", "Aspect ratio denominator")
yield UInt8(parent, "color_space")
yield UInt24(parent, "target_bitrate")
yield Bits(parent, "quality", 6)
yield Bits(parent, "gp_shift", 5)
yield Enum(Bits(parent, "pixel_format", 2), PIXEL_FORMATS)
yield Bits(parent, "spare_config", 3)
def parseVorbisHeader(parent):
yield UInt32(parent, "vorbis_version")
yield UInt8(parent, "audio_channels")
yield UInt32(parent, "audio_sample_rate")
yield UInt32(parent, "bitrate_maximum")
yield UInt32(parent, "bitrate_nominal")
yield UInt32(parent, "bitrate_minimum")
yield Bits(parent, "blocksize_0", 4)
yield Bits(parent, "blocksize_1", 4)
yield UInt8(parent, "framing_flag")
class Chunk(FieldSet):
tag_info = {
"vorbis": {
3: ("comment", parseVorbisComment),
1: ("vorbis_hdr", parseVorbisHeader),
}, "theora": {
128: ("theora_hdr", parseTheoraHeader),
129: ("comment", parseVorbisComment),
}, "video\0": {
1: ("video_hdr", parseVideoHeader),
},
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if 7*8 <= self.size:
try:
self._name, self.parser = self.tag_info[self["codec"].value][self["type"].value]
if self._name == "theora_hdr":
self.endian = BIG_ENDIAN
except KeyError:
self.parser = None
else:
self.parser = None
def createFields(self):
if 7*8 <= self.size:
yield UInt8(self, 'type')
yield String(self, 'codec', 6)
if self.parser:
for field in self.parser(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
class Packets:
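# Yields the size (in bits) of each logical packet, merging segments of packets
# that continue across page boundaries (continued_packet flag).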
def __init__(self, first):
self.first = first
def __iter__(self):
fragment = self.first
size = None
while fragment is not None:
page = fragment.parent
continued_packet = page["continued_packet"].value
for segment_size in page.segment_size:
if continued_packet:
size += segment_size
continued_packet = False
else:
if size:
yield size * 8
size = segment_size
fragment = fragment.next
if size:
yield size * 8
class Segments(Fragment):
def __init__(self, parent, *args, **kw):
Fragment.__init__(self, parent, *args, **kw)
if parent['last_page'].value:
next = None
else:
next = self.createNext
self.setLinks(parent.parent.streams.setdefault(parent['serial'].value, self), next)
def _createInputStream(self, **args):
if self.first is self:
return FragmentedStream(self, packets=Packets(self), tags=[("id","ogg_stream")], **args)
return Fragment._createInputStream(self, **args)
def _getData(self):
return self
def createNext(self):
parent = self.parent
index = parent.index
parent = parent.parent
first = self.first
try:
while True:
index += 1
next = parent[index][self.name]
if next.first is first:
return next
except MissingField:
pass
def createFields(self):
for segment_size in self.parent.segment_size:
if segment_size:
yield Chunk(self, "chunk[]", size=segment_size*8)
class OggPage(FieldSet):
MAGIC = "OggS"
def __init__(self, *args):
FieldSet.__init__(self, *args)
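# Page size = 27-byte header + lacing table + sum of segment sizes (stored in bits below).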
size = 27
self.lacing_size = self['lacing_size'].value
if self.lacing_size:
size += self.lacing_size
lacing = self['lacing']
self.segment_size = [ field.value for field in lacing ]
size += sum(self.segment_size)
self._size = size * 8
def createFields(self):
yield String(self, 'capture_pattern', 4, charset="ASCII")
if self['capture_pattern'].value != self.MAGIC:
self.warning('Invalid signature. An Ogg page must start with "%s".' % self.MAGIC)
yield UInt8(self, 'stream_structure_version')
yield Bit(self, 'continued_packet')
yield Bit(self, 'first_page')
yield Bit(self, 'last_page')
yield NullBits(self, 'unused', 5)
yield UInt64(self, 'abs_granule_pos')
yield textHandler(UInt32(self, 'serial'), hexadecimal)
yield UInt32(self, 'page')
yield textHandler(UInt32(self, 'checksum'), hexadecimal)
yield UInt8(self, 'lacing_size')
if self.lacing_size:
yield Lacing(self, "lacing", size=self.lacing_size*8)
yield Segments(self, "segments", size=self._size-self._current_size)
def validate(self):
if self['capture_pattern'].value != self.MAGIC:
return "Wrong signature"
if self['stream_structure_version'].value != 0:
return "Unknown structure version (%s)" % self['stream_structure_version'].value
return ""
class OggFile(Parser):
PARSER_TAGS = {
"id": "ogg",
"category": "container",
"file_ext": ("ogg", "ogm"),
"mime": (
u"application/ogg", u"application/x-ogg",
u"audio/ogg", u"audio/x-ogg",
u"video/ogg", u"video/x-ogg",
u"video/theora", u"video/x-theora",
),
"magic": ((OggPage.MAGIC, 0),),
"subfile": "skip",
"min_size": 28*8,
"description": "Ogg multimedia container"
}
endian = LITTLE_ENDIAN
def validate(self):
magic = OggPage.MAGIC
if self.stream.readBytes(0, len(magic)) != magic:
return "Invalid magic string"
# Validate first 3 pages
for index in xrange(3):
try:
page = self[index]
except MissingField:
if self.done:
return True
return "Unable to get page #%u" % index
except (InputStreamError, ParserError):
return "Unable to create page #%u" % index
err = page.validate()
if err:
return "Invalid page #%s: %s" % (index, err)
return True
def createMimeType(self):
if "theora_hdr" in self["page[0]/segments"]:
return u"video/theora"
elif "vorbis_hdr" in self["page[0]/segments"]:
return u"audio/vorbis"
else:
return u"application/ogg"
def createDescription(self):
if "theora_hdr" in self["page[0]"]:
return u"Ogg/Theora video"
elif "vorbis_hdr" in self["page[0]"]:
return u"Ogg/Vorbis audio"
else:
return u"Ogg multimedia container"
def createFields(self):
self.streams = {}
while not self.eof:
yield OggPage(self, "page[]")
def createLastPage(self):
start = self[0].size
end = MAX_FILESIZE * 8
if True:
# FIXME: This doesn't work on all files (eg. some Ogg/Theora)
offset = self.stream.searchBytes("OggS\0\5", start, end)
if offset is None:
offset = self.stream.searchBytes("OggS\0\4", start, end)
if offset is None:
return None
return createOrphanField(self, offset, OggPage, "page")
else:
# Very slow version
page = None
while True:
offset = self.stream.searchBytes("OggS\0", start, end)
if offset is None:
break
page = createOrphanField(self, offset, OggPage, "page")
start += page.size
return page
def createContentSize(self):
page = self.createLastPage()
if page:
return page.absolute_address + page.size
else:
return None
class OggStream(Parser):
PARSER_TAGS = {
"id": "ogg_stream",
"category": "container",
"subfile": "skip",
"min_size": 7*8,
"description": "Ogg logical stream"
}
endian = LITTLE_ENDIAN
def validate(self):
return False
def createFields(self):
for size in self.stream.packets:
yield RawBytes(self, "packet[]", size//8)
|
gpl-2.0
|
Lujeni/ansible
|
lib/ansible/module_utils/network/ios/facts/vlans/vlans.py
|
12
|
5089
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The ios vlans fact class
This file collects the configuration from the device for a given
resource, parses it, and populates the facts tree based on that
configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.ios.argspec.vlans.vlans import VlansArgs
class VlansFacts(object):
""" The ios vlans fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = VlansArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for vlans
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if connection:
pass
objs = []
mtu_objs = []
remote_objs = []
final_objs = []
if not data:
data = connection.get('show vlan')
# operate on a collection of resource x
config = data.split('\n')
# Get individual vlan configs separately
vlan_info = ''
for conf in config:
if 'Name' in conf:
vlan_info = 'Name'
elif 'Type' in conf:
vlan_info = 'Type'
elif 'Remote' in conf:
vlan_info = 'Remote'
if conf and ' ' not in filter(None, conf.split('-')):
obj = self.render_config(self.generated_spec, conf, vlan_info)
if 'mtu' in obj:
mtu_objs.append(obj)
elif 'remote_span' in obj:
remote_objs = obj
elif obj:
objs.append(obj)
# Appending MTU value to the retrieved dictionary
for o, m in zip(objs, mtu_objs):
o.update(m)
final_objs.append(o)
# Appending Remote Span value to related VLAN:
if remote_objs:
if remote_objs.get('remote_span'):
for each in remote_objs.get('remote_span'):
for every in final_objs:
if each == every.get('vlan_id'):
every.update({'remote_span': True})
break
facts = {}
if final_objs:
facts['vlans'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
for cfg in params['config']:
facts['vlans'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf, vlan_info):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
if vlan_info == 'Name' and 'Name' not in conf:
conf = list(filter(None, conf.split(' ')))
config['vlan_id'] = int(conf[0])
config['name'] = conf[1]
if len(conf[2].split('/')) > 1:
if conf[2].split('/')[0] == 'sus':
config['state'] = 'suspend'
elif conf[2].split('/')[0] == 'act':
config['state'] = 'active'
config['shutdown'] = 'enabled'
else:
if conf[2] == 'suspended':
config['state'] = 'suspend'
elif conf[2] == 'active':
config['state'] = 'active'
config['shutdown'] = 'disabled'
elif vlan_info == 'Type' and 'Type' not in conf:
conf = list(filter(None, conf.split(' ')))
config['mtu'] = int(conf[3])
elif vlan_info == 'Remote':
if len(conf.split(',')) > 1 or conf.isdigit():
remote_span_vlan = []
if len(conf.split(',')) > 1:
remote_span_vlan = conf.split(',')
else:
remote_span_vlan.append(conf)
remote_span = []
for each in remote_span_vlan:
remote_span.append(int(each))
config['remote_span'] = remote_span
return utils.remove_empties(config)
|
gpl-3.0
|
aio-libs/frozenlist
|
docs/conf.py
|
1
|
10850
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# frozenlist documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 5 12:35:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import io
import os
import re
import sys
_docs_path = os.path.dirname(__file__)
_version_path = os.path.abspath(os.path.join(_docs_path,
'..', 'frozenlist', '__init__.py'))
with io.open(_version_path, 'r', encoding='latin1') as fp:
try:
_version_info = re.search(r"^__version__ = '"
r"(?P<major>\d+)"
r"\.(?P<minor>\d+)"
r"\.(?P<patch>\d+)"
r"(?P<tag>.*)?'$",
fp.read(), re.M).groupdict()
except AttributeError:
raise RuntimeError('Unable to determine version.')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
try:
import sphinxcontrib.spelling # noqa
extensions.append('sphinxcontrib.spelling')
except ImportError:
pass
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'aiohttp':
('https://docs.aiohttp.org/en/stable/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'frozenlist'
copyright = '2013-2019, frozenlist contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{major}.{minor}'.format(**_version_info)
# The full version, including alpha/beta/rc tags.
release = '{major}.{minor}.{patch}{tag}'.format(**_version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'aiohttp_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': None,
'description': 'A list-like structure which implements collections.abc.MutableSequence',
'canonical_url': 'http://frozenlist.readthedocs.org/en/stable/',
'github_user': 'aio-libs',
'github_repo': 'frozenlist',
'github_button': True,
'github_type': 'star',
'github_banner': True,
'badges': [{'image': 'https://github.com/aio-libs/frozenlist/workflows/CI/badge.svg',
'target': 'https://github.com/aio-libs/frozenlist/actions',
'height': '20',
'alt': 'Github CI status for master branch'},
{'image': 'https://codecov.io/github/aio-libs/frozenlist/coverage.svg?branch=master',
'target': 'https://codecov.io/github/aio-libs/frozenlist',
'height': '20',
'alt': 'Code coverage status'},
{'image': 'https://badge.fury.io/py/frozenlist.svg',
'target': 'https://badge.fury.io/py/frozenlist',
'height': '20',
'alt': 'Latest PyPI package version'},
{'image': 'https://img.shields.io/discourse/topics?server=https%3A%2F%2Faio-libs.discourse.group%2F',
'target': 'https://aio-libs.discourse.group/',
'height': '20',
'alt': 'Discourse group for aio-libs'},
{'image': 'https://badges.gitter.im/Join%20Chat.svg',
'target': 'https://gitter.im/aio-libs/Lobby',
'height': '20',
'alt': 'Chat on Gitter'}],
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'frozenlist-icon.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'frozenlistdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'frozenlist.tex', 'frozenlist Documentation',
'frozenlist contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'frozenlist', 'frozenlist Documentation',
['frozenlist'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'frozenlist', 'frozenlist Documentation',
'frozenlist contributors', 'frozenlist', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
apache-2.0
|
mickaelsilva/pythonscripts
|
AlleleCalling/CompareSameLocus.py
|
1
|
5474
|
import HTSeq
import argparse
import os.path
from CommonFastaFunctions import Create_Blastdb
from CommonFastaFunctions import LoadAlelleFasta
from CommonFastaFunctions import LoadAlellicProfileGeneric
from CommonFastaFunctions import WriteFasta
from CommonFastaFunctions import runBlast
from CommonFastaFunctions import runBlastParser
from Bio.Blast.Applications import NcbiblastnCommandline
import shutil
def concat_genes(listgenes, outFileName):
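# For each gene FASTA listed in 'listgenes', keep only its longest allele and
# append it to a single concatenated FASTA written to 'outFileName'.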
fp = open(listgenes, 'r')
concatGenes=''
for genefile in fp:
genefile = genefile.rstrip('\n')
biggestallename=''
biggestallelestr=''
biggestallelelen=0
gene_fp = HTSeq.FastaReader(genefile)
for contig in gene_fp:
contigLen=int(len(contig.seq))
if contigLen>biggestallelelen:
#biggestallename=contig.name+" "+contig.descr
biggestallelestr = contig.seq
#concatGenes+=">"+str(os.path.basename(genefile))+" | "+biggestallename+"\n"+biggestallelestr+"\n"
concatGenes+=">"+genefile+"\n"+biggestallelestr+"\n"
with open(outFileName, "wb") as f:
f.write(concatGenes)
def alignHasGoodMatch(alignment,geneDict,LocusID,blast_record,alreadyUsed):
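# Accepts a BLAST hit when the alignment spans 80-120% of the best-matching allele's
# length with an identity ratio above 0.8; accepted hits are copied into ./sameLocus/
# with a shared locus ID prefix and marked as already used.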
sameAllele=0
for match in alignment.hsps:
bmAlleleLen=0
bmAllele=''
for seq, alleleid in geneDict.iteritems():
if alleleid == alignment.hit_def:
bmAllele=seq
bmAlleleLen= len(bmAllele)
break
idratio=float(match.identities)/float(bmAlleleLen)
sizeratio=float(match.align_length)/float(bmAlleleLen)
if sizeratio>0.8 and sizeratio<1.2 and idratio>0.8 and alignment.hit_def not in alreadyUsed :
LocusID+=1
genename=alignment.hit_def.split("/")
genename=genename[len(genename)-1]
newpath='./sameLocus/L'+str(LocusID)+'_'+genename
shutil.copy2(alignment.hit_def, newpath)
#genename=blast_record.query.split("/")
#genename=genename[len(genename)-1]
#newpath='./sameLocus/L'+str(LocusID)+'_'+genename
#shutil.copy2(blast_record.query, newpath)
alreadyUsed.append(alignment.hit_def)
isGood=True
sameAllele+=1
return sameAllele,alignment.hit_def,LocusID,alreadyUsed
def main():
parser = argparse.ArgumentParser(description="Given two lists of gene files, creates a folder with paired files that are located on the same locus")
parser.add_argument('-i', nargs='?', type=str, help='1st list of genes files to compare', required=True)
parser.add_argument('-g', nargs='?', type=str, help='2nd list of genes files to compare', required=True)
args = parser.parse_args()
geneFiles1 = args.i
geneFiles2 = args.g
name1="concat1.fasta"
name2="concat2.fasta"
concat_genes(geneFiles1, name1)
concat_genes(geneFiles2, name2)
#orderedAlleleNames=[]
geneDict={}
gene_fp = HTSeq.FastaReader(name1)
alleleI=0
for allele in gene_fp:
#if allele.seq in geneDict:
# print "\nWARNING: this file contains a repeated allele, it should be checked. Ignoring it now!\n", geneFile
#else:
#orderedAlleleNames.append(allele.name)
geneDict[ allele.seq ] = allele.name
alleleI += 1
gene_fp = HTSeq.FastaReader(name1)
geneFile = os.path.abspath( name1 )
Gene_Blast_DB_name = Create_Blastdb( geneFile, 1 , False)
geneF = os.path.splitext( geneFile )[0]
blast_out_file = geneF + '.xml'
# list of results - the output of the function
resultsList = []
# ------------------------------ RUNNING BLAST ------------------------------ #
cline = NcbiblastnCommandline(query=name2, db=Gene_Blast_DB_name, evalue=0.001, out=blast_out_file, outfmt=5)
blast_records = runBlastParser(cline, blast_out_file, name2)
samelocus=0
alreadyUsed=[]
nomatch=0
small=0
if not os.path.exists("./sameLocus"):
os.makedirs("./sameLocus")
LocusID=0
for blast_record in blast_records:
try:
alignment=blast_record.alignments[1]
#print blast_record.query
#print alignment.num_alignments
try:
#print alleleLength, alignment.length
i=0
align=blast_record.alignments[i]
while i<len(blast_record.alignments):
if align.hit_def:
result,allelename2,LocusID,alreadyUsed=alignHasGoodMatch(align,geneDict,LocusID,blast_record,alreadyUsed)
if result>0 and allelename2:
samelocus+=result
i+=999
else:
small+=1
i+=999
alreadyUsed.append(allelename2)
elif allelename :
#alreadyUsed.append(allelename)
result,allelename,LocusID,alreadyUsed=alignHasGoodMatch(align,geneDict,LocusID,blast_record,alreadyUsed)
if result>0:
samelocus+=result
i+=999
else:
small+=1
i+=999
#alreadyUsed.append(allelename2)
else :
nomatch+=1
#print align.length, alleleleng
i+=1
except Exception as e:
print e
#print "lkjh"
pass
except:
try:
alignment=blast_record.alignments[0]
#print blast_record.query
result,allelename,LocusID,alreadyUsed=alignHasGoodMatch(alignment,geneDict,LocusID,blast_record,alreadyUsed)
if result>0 and allelename:
samelocus+=result
else :
small+=1
#alreadyUsed.append(allelename)
#alreadyUsed.append(alignment.hit_def)
except:
nomatch+=1
print "%s are within same locus, %s had no match and %s had a bigger than 0.2 ratio size difference or less than 0.8 similarity ratio" % (samelocus,nomatch, small)
os.remove(name1)
os.remove(name2)
shutil.rmtree('./blastdbs')
if __name__ == "__main__":
main()
|
gpl-2.0
|
zrhans/python
|
exemplos/Examples.lnk/bokeh/plotting/server/elements.py
|
2
|
1506
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import pandas as pd
from bokeh.plotting import *
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_server("elements")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"], text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
|
gpl-2.0
|
pratikmallya/hue
|
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/drop_test_database.py
|
35
|
5691
|
import logging
from optparse import make_option
from django.conf import settings
from django.core.management.base import CommandError, BaseCommand
from django.db.backends.creation import TEST_DATABASE_PREFIX
from six.moves import input, configparser
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false',
dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('-U', '--user', action='store',
dest='user', default=None,
help='Use another user for the database than the one defined in settings.py'),
make_option('-P', '--password', action='store',
dest='password', default=None,
help='Use another password for the database than the one defined in settings.py'),
make_option('-D', '--dbname', action='store',
dest='dbname', default=None,
help='Use another database name than the one defined in settings.py'),
make_option('-R', '--router', action='store',
dest='router', default='default',
help='Use this database router instead of the one defined in settings.py'),
)
help = "Drops test database for this project."
@signalcommand
def handle(self, *args, **options):
"""
Drop test database for this project.
"""
if args:
raise CommandError("reset_db takes no arguments")
router = options.get('router')
dbinfo = settings.DATABASES.get(router)
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
user = password = database_name = ''
if engine == 'mysql':
read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
if read_default_file:
config = configparser.ConfigParser()
config.read(read_default_file)
user = config.get('client', 'user')
password = config.get('client', 'password')
database_name = config.get('client', 'database')
user = options.get('user') or dbinfo.get('USER') or user
password = options.get('password') or dbinfo.get('PASSWORD') or password
try:
database_name = dbinfo['TEST']['NAME']
except KeyError:
database_name = None
if database_name is None:
database_name = TEST_DATABASE_PREFIX + (options.get('dbname') or dbinfo.get('NAME'))
if database_name is None or database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST')
database_port = dbinfo.get('PORT')
verbosity = int(options.get('verbosity', 1))
if options.get('interactive'):
confirm = input("""
You have requested to drop the test database.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
else:
confirm = 'yes'
if confirm != 'yes':
print("Reset cancelled.")
return
if engine in ('sqlite3', 'spatialite'):
import os
try:
logging.info("Unlinking %s database" % engine)
if os.path.isfile(database_name):
os.unlink(database_name)
except OSError:
pass
elif engine in ('mysql',):
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
logging.info('Executing: "' + drop_query + '"')
connection.query(drop_query)
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
if engine == 'postgresql':
import psycopg as Database # NOQA
elif engine in ('postgresql_psycopg2', 'postgis'):
import psycopg2 as Database # NOQA
conn_string = "dbname=template1"
if user:
conn_string += " user=%s" % user
if password:
conn_string += " password='%s'" % password
if database_host:
conn_string += " host=%s" % database_host
if database_port:
conn_string += " port=%s" % database_port
connection = Database.connect(conn_string)
connection.set_isolation_level(0)  # level 0 = autocommit, required so DROP DATABASE runs outside a transaction
cursor = connection.cursor()
drop_query = "DROP DATABASE IF EXISTS \"%s\";" % database_name
logging.info('Executing: "' + drop_query + '"')
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options.get('interactive'):
print("Reset successful.")
|
apache-2.0
|
Spiderlover/Toontown
|
toontown/ai/DistributedAprilToonsMgrAI.py
|
2
|
1360
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.task import Task
from toontown.toonbase.AprilToonsGlobals import *
class DistributedAprilToonsMgrAI(DistributedObjectAI):
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
# Define the default events we want for this year
self.events = [EventRandomDialogue,
EventRandomEffects,
EventEstateGravity,
EventGlobalGravity]
def getEvents(self):
return self.events
def isEventActive(self, eventId):
if not self.air.config.GetBool('want-april-toons', False):
# If this DO is generated but we don't want april toons, always return
# false regardless.
return False
return eventId in self.events
def requestEventsList(self):
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'requestEventsListResp', [self.getEvents()])
def toggleEvent(self, eventId):
    if eventId in self.getEvents():
        # self.events is a list, so remove the value rather than deleting by index
        self.getEvents().remove(eventId)
        self.sendUpdate('setEventActive', [eventId, False])
    else:
        self.getEvents().append(eventId)
        self.sendUpdate('setEventActive', [eventId, True])
|
mit
|
ehealthafrica-ci/kivy
|
kivy/uix/__init__.py
|
5
|
1780
|
'''
Widgets
=======
A widget is an element of a graphical user interface.
The `kivy.uix` module contains classes for creating and managing Widgets.
First read: :doc:`api-kivy.uix.widget`
- **UX widgets**: Classical user interface widgets, perfect and ready to be
assembled to create more complex widgets.
:doc:`api-kivy.uix.label`, :doc:`api-kivy.uix.button`,
:doc:`api-kivy.uix.checkbox`,
:doc:`api-kivy.uix.image`, :doc:`api-kivy.uix.slider`,
:doc:`api-kivy.uix.progressbar`, :doc:`api-kivy.uix.textinput`,
:doc:`api-kivy.uix.togglebutton`, :doc:`api-kivy.uix.switch`,
:doc:`api-kivy.uix.video`
- **Layouts**: A layout widget does no rendering but just acts as a trigger
that arranges its children in a specific way. Read more on
:doc:`api-kivy.uix.layout`.
:doc:`api-kivy.uix.gridlayout`, :doc:`api-kivy.uix.boxlayout`,
:doc:`api-kivy.uix.anchorlayout`, :doc:`api-kivy.uix.stacklayout`
- **Complex UX widgets**: Non-atomic widgets that are the result of
combining multiple classic widgets.
We call them complex because their assembly and usage are not as
generic as the classical widgets.
:doc:`api-kivy.uix.bubble`, :doc:`api-kivy.uix.dropdown`,
:doc:`api-kivy.uix.filechooser`, :doc:`api-kivy.uix.popup`,
:doc:`api-kivy.uix.spinner`,
:doc:`api-kivy.uix.listview`,
:doc:`api-kivy.uix.tabbedpanel`, :doc:`api-kivy.uix.videoplayer`,
:doc:`api-kivy.uix.vkeyboard`,
- **Behaviors widgets**: These widgets do no rendering but act on the
graphics instructions or interaction (touch) behavior.
:doc:`api-kivy.uix.scatter`, :doc:`api-kivy.uix.stencilview`
- **Screen manager**: Manages screens and transitions when switching
from one to another.
:doc:`api-kivy.uix.screenmanager`
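A minimal usage sketch (illustrative only, not part of the original documentation;
it assumes the standard ``kivy.app.App`` entry point and the widget classes listed
above)::

    from kivy.app import App
    from kivy.uix.boxlayout import BoxLayout
    from kivy.uix.button import Button
    from kivy.uix.label import Label

    class DemoApp(App):
        def build(self):
            # a layout does no rendering itself; it only arranges its children
            root = BoxLayout(orientation='vertical')
            root.add_widget(Label(text='Hello'))
            quit_button = Button(text='Quit')
            quit_button.bind(on_release=lambda *args: self.stop())
            root.add_widget(quit_button)
            return root

    if __name__ == '__main__':
        DemoApp().run()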
----
'''
|
mit
|
SlimRoms/android_external_chromium_org
|
tools/telemetry/telemetry/core/exceptions.py
|
17
|
1919
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class NativeBrowserCrashException(Exception):
def __init__(self, browser=None, msg=''):
super(NativeBrowserCrashException, self).__init__(msg)
self._browser = browser
self._msg = msg
def __str__(self):
if not self._browser:
return super(NativeBrowserCrashException, self).__str__()
divider = '*' * 80
return '%s\nStack Trace:\n%s\n\t%s\n%s' % (
super(NativeBrowserCrashException, self).__str__(), divider,
self._browser.GetStackTrace().replace('\n', '\n\t'), divider)
class TabCrashException(NativeBrowserCrashException):
"""Represents a crash of the current tab, but not the overall browser.
In this state, the tab is gone, but the underlying browser is still alive."""
def __init__(self, browser, msg='Tab crashed'):
super(TabCrashException, self).__init__(browser, msg)
class BrowserGoneException(NativeBrowserCrashException):
"""Represents a crash of the entire browser.
In this state, all bets are pretty much off."""
def __init__(self, browser, msg='Browser crashed'):
super(BrowserGoneException, self).__init__(browser, msg)
class BrowserConnectionGoneException(BrowserGoneException):
"""Represents a browser that still exists but cannot be reached."""
def __init__(self, browser, msg='Browser exists but the connection is gone'):
super(BrowserConnectionGoneException, self).__init__(browser, msg)
class ProcessGoneException(Exception):
"""Represents a process that no longer exists for an unknown reason."""
class IntentionalException(Exception):
"""Represent an exception raised by a unittest which is not printed."""
class LoginException(Exception):
pass
class EvaluateException(Exception):
pass
class ProfilingException(Exception):
pass
|
bsd-3-clause
|
juanyaw/python
|
cpython/Lib/test/test_shelve.py
|
91
|
6270
|
import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
return s.decode("latin-1")
class byteskeydict(MutableMapping):
"Mapping that supports bytes keys"
def __init__(self):
self.d = {}
def __getitem__(self, key):
return self.d[L1(key)]
def __setitem__(self, key, value):
self.d[L1(key)] = value
def __delitem__(self, key):
del self.d[L1(key)]
def __len__(self):
return len(self.d)
def iterkeys(self):
for k in self.d.keys():
yield k.encode("latin-1")
__iter__ = iterkeys
def keys(self):
return list(self.iterkeys())
def copy(self):
return byteskeydict(self.d)
class TestCase(unittest.TestCase):
fn = "shelftemp.db"
def tearDown(self):
for f in glob.glob(self.fn+"*"):
support.unlink(f)
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
s = shelve.open(self.fn, protocol=0)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_binary_file_shelf(self):
s = shelve.open(self.fn, protocol=1)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_proto2_file_shelf(self):
s = shelve.open(self.fn, protocol=2)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_in_memory_shelf(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
self.assertNotEqual(d1.items(), d2.items())
def test_mutable_entry(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_keyencoding(self):
d = {}
key = 'Pöp'
# the default keyencoding is utf-8
shelve.Shelf(d)[key] = [1]
self.assertIn(key.encode('utf-8'), d)
# but a different one can be given
shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
self.assertIn(key.encode('latin-1'), d)
# with all consequences
s = shelve.Shelf(d, keyencoding='ascii')
self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
key = 'key'
encodedkey = key.encode('utf-8')
s = shelve.Shelf(d, writeback=True)
s[key] = [1]
p1 = d[encodedkey] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d[encodedkey]
self.assertNotEqual(p1, p2) # Write creates new object in store
def test_with(self):
d1 = {}
with shelve.Shelf(d1, protocol=2, writeback=False) as s:
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf(byteskeydict(), **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
for module in dbm_iterator():
support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main()
|
bsd-3-clause
|
crisely09/horton
|
horton/grid/base.py
|
1
|
6180
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Base classes for 3D integration grids'''
import numpy as np
from horton.log import timer
from horton.grid.utils import parse_args_integrate
from horton.grid.cext import dot_multi, eval_spline_grid, \
dot_multi_moments, eval_decomposition_grid
from horton.cext import Cell
__all__ = ['IntGrid']
class IntGrid(object):
'''Base class for real-space integration grids in HORTON'''
def __init__(self, points, weights, subgrids=None):
'''
**Arguments:**
points
A numpy array with shape (npoint,3) with the Cartesian
coordinates of the grid points.
weights
The integration weights of the grid points
**Optional arguments:**
subgrids
Can be given when this grid is composed of several other
grids. The points data is shared, but the weights of the
subgrids may be different.
'''
if subgrids is not None and len(subgrids) == 0:
raise TypeError('When subgrids are given, the list may not be empty.')
self._points = points
self._weights = weights
self._subgrids = subgrids
# assign begin and end attributes to the subgrids
if subgrids is not None:
offset = 0
for sg in subgrids:
sg.begin = offset
offset += sg.size
sg.end = offset
def _get_size(self):
'''The size of the grid.'''
return self._weights.size
size = property(_get_size)
def _get_shape(self):
'''The shape of the grid.'''
return self._weights.shape
shape = property(_get_shape)
def _get_points(self):
'''The grid points.'''
return self._points
points = property(_get_points)
def _get_weights(self):
'''The grid weights.'''
return self._weights
weights = property(_get_weights)
def _get_subgrids(self):
'''A list of grid objects used to construct this grid.'''
return self._subgrids
subgrids = property(_get_subgrids)
def zeros(self):
return np.zeros(self.shape)
def integrate(self, *args, **kwargs):
'''Integrate the product of all arguments
**Arguments:**
data1, data2, ...
All arguments must be arrays with the same size as the number
of grid points. The arrays contain the functions, evaluated
at the grid points, that must be multiplied and integrated.
**Optional arguments:**
center=None
When given, multipole moments are computed with respect to
this center instead of a plain integral.
lmax=0
The maximum angular momentum to consider when computing multipole
moments
mtype=1
The type of multipole moments: 1=``cartesian``, 2=``pure``,
3=``radial``, 4=``surface``.
segments=None
This argument can be used to divide the grid in segments. When
given, it must be an array with the number of grid points in
each consecutive segment. The integration is then carried out
over each segment separately and an array of results is
returned. The sum over all elements gives back the total
integral.
'''
args, multipole_args, segments = parse_args_integrate(*args, **kwargs)
args.append(self.weights)
if multipole_args is None:
# regular integration
return dot_multi(*args, segments=segments)
else:
# computation of multipole expansion of the integrand
center, lmax, mtype = multipole_args
return dot_multi_moments(args, self.points, center, lmax, mtype, segments)
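# Hedged usage sketch (illustrative only): given an IntGrid instance ``grid`` and an
# array ``rho`` holding a function evaluated at grid.points (shape (grid.size,)):
#
#   total = grid.integrate(rho)                               # plain integral
#   moments = grid.integrate(rho, center=np.zeros(3), lmax=2) # multipole moments per segment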
@timer.with_section('Eval spher')
def eval_spline(self, cubic_spline, center, output, cell=None):
'''Evaluate a spherically symmetric function
**Arguments:**
cubic_spline
A cubic spline with the radial dependence
center
The center of the spherically symmetric function
output
The output array
**Optional arguments:**
cell
A unit cell when periodic boundary conditions are used.
'''
if cell is None:
cell = Cell(None)
eval_spline_grid(cubic_spline, center, output, self.points, cell)
@timer.with_section('Eval decomp')
def eval_decomposition(self, cubic_splines, center, output, cell=None):
'''Evaluate a spherical decomposition
**Arguments:**
cubic_splines
A list of cubic splines, where each item is a radial function
that is associated with a corresponding real spherical harmonic.
center
The center of the spherically symmetric function
output
The output array
**Optional arguments:**
cell
A unit cell when periodic boundary conditions are used.
'''
if cell is None:
cell = Cell(None)
eval_decomposition_grid(cubic_splines, center, output, self.points, cell)
|
gpl-3.0
|
qizenguf/MLC-STT
|
src/arch/x86/isa/insts/general_purpose/data_transfer/move.py
|
40
|
9122
|
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
#
# Regular moves
#
def macroop MOV_R_MI {
limm t1, imm, dataSize=asz
ld reg, seg, [1, t0, t1]
};
def macroop MOV_MI_R {
limm t1, imm, dataSize=asz
st reg, seg, [1, t0, t1]
};
def macroop MOV_R_R {
mov reg, reg, regm
};
def macroop MOV_M_R {
st reg, seg, sib, disp
};
def macroop MOV_P_R {
rdip t7
st reg, seg, riprel, disp
};
def macroop MOV_R_M {
ld reg, seg, sib, disp
};
def macroop MOV_R_P {
rdip t7
ld reg, seg, riprel, disp
};
def macroop MOV_R_I {
limm reg, imm
};
def macroop MOV_M_I {
limm t1, imm
st t1, seg, sib, disp
};
def macroop MOV_P_I {
rdip t7
limm t1, imm
st t1, seg, riprel, disp
};
#
# Sign extending moves
#
def macroop MOVSXD_R_R {
sexti reg, regm, 31
};
def macroop MOVSXD_R_M {
ld t1, seg, sib, disp, dataSize=4
sexti reg, t1, 31
};
def macroop MOVSXD_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=4
sexti reg, t1, 31
};
def macroop MOVSX_B_R_R {
mov t1, t1, regm, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_B_R_M {
ld t1, seg, sib, disp, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_B_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_W_R_R {
sexti reg, regm, 15
};
def macroop MOVSX_W_R_M {
ld reg, seg, sib, disp, dataSize=2
sexti reg, reg, 15
};
def macroop MOVSX_W_R_P {
rdip t7
ld reg, seg, riprel, disp, dataSize=2
sexti reg, reg, 15
};
#
# Zero extending moves
#
def macroop MOVZX_B_R_R {
mov t1, t1, regm, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_B_R_M {
ld t1, seg, sib, disp, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_B_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_W_R_R {
zexti reg, regm, 15
};
def macroop MOVZX_W_R_M {
ld t1, seg, sib, disp, dataSize=2
zexti reg, t1, 15
};
def macroop MOVZX_W_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
zexti reg, t1, 15
};
def macroop MOV_C_R {
.serializing
.adjust_env maxOsz
wrcr reg, regm
};
def macroop MOV_R_C {
.serializing
.adjust_env maxOsz
rdcr reg, regm
};
def macroop MOV_D_R {
.serializing
.adjust_env maxOsz
wrdr reg, regm
};
def macroop MOV_R_D {
.adjust_env maxOsz
rddr reg, regm
};
def macroop MOV_R_S {
rdsel reg, regm
};
def macroop MOV_M_S {
rdsel t1, reg
st t1, seg, sib, disp, dataSize=2
};
def macroop MOV_P_S {
rdip t7
rdsel t1, reg
st t1, seg, riprel, disp, dataSize=2
};
def macroop MOV_REAL_S_R {
zexti t2, regm, 15, dataSize=8
slli t3, t2, 4, dataSize=8
wrsel reg, regm
wrbase reg, t3, dataSize=8
};
def macroop MOV_REAL_S_M {
ld t1, seg, sib, disp, dataSize=2
zexti t2, t1, 15, dataSize=8
slli t3, t2, 4, dataSize=8
wrsel reg, t1
wrbase reg, t3, dataSize=8
};
def macroop MOV_REAL_S_P {
panic "RIP relative addressing shouldn't happen in real mode"
};
def macroop MOV_S_R {
andi t0, regm, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, regm, 0xF8, dataSize=8
andi t0, regm, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks regm, t3, dataSize=8
wrdl reg, t3, regm
wrsel reg, regm
};
def macroop MOV_S_M {
ld t1, seg, sib, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOV_S_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVSS_S_R {
andi t0, regm, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, regm, 0xF8, dataSize=8
andi t0, regm, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks regm, t3, SSCheck, dataSize=8
wrdl reg, t3, regm
wrsel reg, regm
};
def macroop MOVSS_S_M {
ld t1, seg, sib, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, SSCheck, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVSS_S_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, SSCheck, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVNTI_M_R {
st reg, seg, sib, disp
};
def macroop MOVNTI_P_R {
rdip t7
st reg, seg, riprel, disp
};
def macroop MOVD_XMM_R {
mov2fp xmml, regm, srcSize=dsz, destSize=8
lfpimm xmmh, 0
};
def macroop MOVD_XMM_M {
ldfp xmml, seg, sib, disp, dataSize=dsz
lfpimm xmmh, 0
};
def macroop MOVD_XMM_P {
rdip t7
ldfp xmml, seg, riprel, disp, dataSize=dsz
lfpimm xmmh, 0
};
def macroop MOVD_R_XMM {
mov2int reg, xmmlm, size=dsz
};
def macroop MOVD_M_XMM {
stfp xmml, seg, sib, disp, dataSize=dsz
};
def macroop MOVD_P_XMM {
rdip t7
stfp xmml, seg, riprel, disp, dataSize=dsz
};
'''
#let {{
# class MOVD(Inst):
# "GenFault ${new UnimpInstFault}"
#}};
|
bsd-3-clause
|
TathagataChakraborti/resource-conflicts
|
PLANROB-2015/seq-sat-lama/Python-2.5.2/Demo/tkinter/guido/AttrDialog.py
|
47
|
14340
|
# The options of a widget are described by the following attributes
# of the Pack and Widget dialogs:
#
# Dialog.current: {name: value}
# -- changes during Widget's lifetime
#
# Dialog.options: {name: (default, klass)}
# -- depends on widget class only
#
# Dialog.classes: {klass: (v0, v1, v2, ...) | 'boolean' | 'other'}
# -- totally static, though different between PackDialog and WidgetDialog
# (but even that could be unified)
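# For example (illustrative values only, not taken from a live widget):
#
#   Dialog.options: {'anchor': ('center', 'Anchor'), 'expand': ('no', 'Boolean')}
#   Dialog.classes: {'Anchor': (N, NE, E, SE, S, SW, W, NW, CENTER), 'Boolean': 'boolean'}
#   Dialog.current: {'anchor': 'w', 'expand': '1'}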
from Tkinter import *
from types import TupleType  # used by Dialog.addchoices below
class Option:
varclass = StringVar # May be overridden
def __init__(self, dialog, option):
self.dialog = dialog
self.option = option
self.master = dialog.top
self.default, self.klass = dialog.options[option]
self.var = self.varclass(self.master)
self.frame = Frame(self.master)
self.frame.pack(fill=X)
self.label = Label(self.frame, text=(option + ":"))
self.label.pack(side=LEFT)
self.update()
self.addoption()
def refresh(self):
self.dialog.refresh()
self.update()
def update(self):
try:
self.current = self.dialog.current[self.option]
except KeyError:
self.current = self.default
self.var.set(self.current)
def set(self, e=None): # Should be overridden
pass
class BooleanOption(Option):
varclass = BooleanVar
def addoption(self):
self.button = Checkbutton(self.frame,
text='on/off',
onvalue=1,
offvalue=0,
variable=self.var,
relief=RAISED,
borderwidth=2,
command=self.set)
self.button.pack(side=RIGHT)
class EnumOption(Option):
def addoption(self):
self.button = Menubutton(self.frame,
textvariable=self.var,
relief=RAISED, borderwidth=2)
self.button.pack(side=RIGHT)
self.menu = Menu(self.button)
self.button['menu'] = self.menu
for v in self.dialog.classes[self.klass]:
self.menu.add_radiobutton(
label=v,
variable=self.var,
value=v,
command=self.set)
class StringOption(Option):
def addoption(self):
self.entry = Entry(self.frame,
textvariable=self.var,
width=10,
relief=SUNKEN,
borderwidth=2)
self.entry.pack(side=RIGHT, fill=X, expand=1)
self.entry.bind('<Return>', self.set)
class ReadonlyOption(Option):
def addoption(self):
self.label = Label(self.frame, textvariable=self.var,
anchor=E)
self.label.pack(side=RIGHT)
class Dialog:
def __init__(self, master):
self.master = master
self.fixclasses()
self.refresh()
self.top = Toplevel(self.master)
self.top.title(self.__class__.__name__)
self.top.minsize(1, 1)
self.addchoices()
def refresh(self): pass # Must override
def fixclasses(self): pass # May override
def addchoices(self):
self.choices = {}
list = []
for k, dc in self.options.items():
list.append((k, dc))
list.sort()
for k, (d, c) in list:
try:
cl = self.classes[c]
except KeyError:
cl = 'unknown'
if type(cl) == TupleType:
cl = self.enumoption
elif cl == 'boolean':
cl = self.booleanoption
elif cl == 'readonly':
cl = self.readonlyoption
else:
cl = self.stringoption
self.choices[k] = cl(self, k)
# Must override:
options = {}
classes = {}
# May override:
booleanoption = BooleanOption
stringoption = StringOption
enumoption = EnumOption
readonlyoption = ReadonlyOption
class PackDialog(Dialog):
def __init__(self, widget):
self.widget = widget
Dialog.__init__(self, widget)
def refresh(self):
self.current = self.widget.info()
self.current['.class'] = self.widget.winfo_class()
self.current['.name'] = self.widget._w
class packoption: # Mix-in class
def set(self, e=None):
self.current = self.var.get()
try:
apply(self.dialog.widget.pack, (),
{self.option: self.current})
except TclError, msg:
print msg
self.refresh()
class booleanoption(packoption, BooleanOption): pass
class enumoption(packoption, EnumOption): pass
class stringoption(packoption, StringOption): pass
class readonlyoption(packoption, ReadonlyOption): pass
options = {
'.class': (None, 'Class'),
'.name': (None, 'Name'),
'after': (None, 'Widget'),
'anchor': ('center', 'Anchor'),
'before': (None, 'Widget'),
'expand': ('no', 'Boolean'),
'fill': ('none', 'Fill'),
'in': (None, 'Widget'),
'ipadx': (0, 'Pad'),
'ipady': (0, 'Pad'),
'padx': (0, 'Pad'),
'pady': (0, 'Pad'),
'side': ('top', 'Side'),
}
classes = {
'Anchor': (N, NE, E, SE, S, SW, W, NW, CENTER),
'Boolean': 'boolean',
'Class': 'readonly',
'Expand': 'boolean',
'Fill': (NONE, X, Y, BOTH),
'Name': 'readonly',
'Pad': 'pixel',
'Side': (TOP, RIGHT, BOTTOM, LEFT),
'Widget': 'readonly',
}
class RemotePackDialog(PackDialog):
def __init__(self, master, app, widget):
self.master = master
self.app = app
self.widget = widget
self.refresh()
self.top = Toplevel(self.master)
self.top.title(self.app + ' PackDialog')
self.top.minsize(1, 1)
self.addchoices()
def refresh(self):
try:
words = self.master.tk.splitlist(
self.master.send(self.app,
'pack',
'info',
self.widget))
except TclError, msg:
print msg
return
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
dict[key] = value
dict['.class'] = self.master.send(self.app,
'winfo',
'class',
self.widget)
dict['.name'] = self.widget
self.current = dict
class remotepackoption: # Mix-in class
def set(self, e=None):
self.current = self.var.get()
try:
self.dialog.master.send(
self.dialog.app,
'pack',
'config',
self.dialog.widget,
'-'+self.option,
self.dialog.master.tk.merge(
self.current))
except TclError, msg:
print msg
self.refresh()
class booleanoption(remotepackoption, BooleanOption): pass
class enumoption(remotepackoption, EnumOption): pass
class stringoption(remotepackoption, StringOption): pass
class readonlyoption(remotepackoption, ReadonlyOption): pass
class WidgetDialog(Dialog):
def __init__(self, widget):
self.widget = widget
self.klass = widget.winfo_class()
Dialog.__init__(self, widget)
def fixclasses(self):
if self.addclasses.has_key(self.klass):
classes = {}
for c in (self.classes,
self.addclasses[self.klass]):
for k in c.keys():
classes[k] = c[k]
self.classes = classes
def refresh(self):
self.configuration = self.widget.config()
self.update()
self.current['.class'] = self.widget.winfo_class()
self.current['.name'] = self.widget._w
def update(self):
self.current = {}
self.options = {}
for k, v in self.configuration.items():
if len(v) > 4:
self.current[k] = v[4]
self.options[k] = v[3], v[2] # default, klass
self.options['.class'] = (None, 'Class')
self.options['.name'] = (None, 'Name')
class widgetoption: # Mix-in class
def set(self, e=None):
self.current = self.var.get()
try:
self.dialog.widget[self.option] = self.current
except TclError, msg:
print msg
self.refresh()
class booleanoption(widgetoption, BooleanOption): pass
class enumoption(widgetoption, EnumOption): pass
class stringoption(widgetoption, StringOption): pass
class readonlyoption(widgetoption, ReadonlyOption): pass
# Universal classes
classes = {
'Anchor': (N, NE, E, SE, S, SW, W, NW, CENTER),
'Aspect': 'integer',
'Background': 'color',
'Bitmap': 'bitmap',
'BorderWidth': 'pixel',
'Class': 'readonly',
'CloseEnough': 'double',
'Command': 'command',
'Confine': 'boolean',
'Cursor': 'cursor',
'CursorWidth': 'pixel',
'DisabledForeground': 'color',
'ExportSelection': 'boolean',
'Font': 'font',
'Foreground': 'color',
'From': 'integer',
'Geometry': 'geometry',
'Height': 'pixel',
'InsertWidth': 'time',
'Justify': (LEFT, CENTER, RIGHT),
'Label': 'string',
'Length': 'pixel',
'MenuName': 'widget',
'Name': 'readonly',
'OffTime': 'time',
'OnTime': 'time',
'Orient': (HORIZONTAL, VERTICAL),
'Pad': 'pixel',
'Relief': (RAISED, SUNKEN, FLAT, RIDGE, GROOVE),
'RepeatDelay': 'time',
'RepeatInterval': 'time',
'ScrollCommand': 'command',
'ScrollIncrement': 'pixel',
'ScrollRegion': 'rectangle',
'ShowValue': 'boolean',
'SetGrid': 'boolean',
'Sliderforeground': 'color',
'SliderLength': 'pixel',
'Text': 'string',
'TickInterval': 'integer',
'To': 'integer',
'Underline': 'index',
'Variable': 'variable',
'Value': 'string',
'Width': 'pixel',
'Wrap': (NONE, CHAR, WORD),
}
# Classes that (may) differ per widget type
_tristate = {'State': (NORMAL, ACTIVE, DISABLED)}
_bistate = {'State': (NORMAL, DISABLED)}
addclasses = {
'Button': _tristate,
'Radiobutton': _tristate,
'Checkbutton': _tristate,
'Entry': _bistate,
'Text': _bistate,
'Menubutton': _tristate,
'Slider': _bistate,
}
class RemoteWidgetDialog(WidgetDialog):
def __init__(self, master, app, widget):
self.app = app
self.widget = widget
self.klass = master.send(self.app,
'winfo',
'class',
self.widget)
Dialog.__init__(self, master)
def refresh(self):
try:
items = self.master.tk.splitlist(
self.master.send(self.app,
self.widget,
'config'))
except TclError, msg:
print msg
return
dict = {}
for item in items:
words = self.master.tk.splitlist(item)
key = words[0][1:]
value = (key,) + words[1:]
dict[key] = value
self.configuration = dict
self.update()
self.current['.class'] = self.klass
self.current['.name'] = self.widget
class remotewidgetoption: # Mix-in class
def set(self, e=None):
self.current = self.var.get()
try:
self.dialog.master.send(
self.dialog.app,
self.dialog.widget,
'config',
'-'+self.option,
self.current)
except TclError, msg:
print msg
self.refresh()
class booleanoption(remotewidgetoption, BooleanOption): pass
class enumoption(remotewidgetoption, EnumOption): pass
class stringoption(remotewidgetoption, StringOption): pass
class readonlyoption(remotewidgetoption, ReadonlyOption): pass
def test():
import sys
root = Tk()
root.minsize(1, 1)
if sys.argv[1:]:
remotetest(root, sys.argv[1])
else:
frame = Frame(root, name='frame')
frame.pack(expand=1, fill=BOTH)
button = Button(frame, name='button', text='button')
button.pack(expand=1)
canvas = Canvas(frame, name='canvas')
canvas.pack()
fpd = PackDialog(frame)
fwd = WidgetDialog(frame)
bpd = PackDialog(button)
bwd = WidgetDialog(button)
cpd = PackDialog(canvas)
cwd = WidgetDialog(canvas)
root.mainloop()
def remotetest(root, app):
from listtree import listtree
list = listtree(root, app)
list.bind('<Any-Double-1>', opendialogs)
list.app = app # Pass it on to handler
def opendialogs(e):
import string
list = e.widget
sel = list.curselection()
for i in sel:
item = list.get(i)
widget = string.split(item)[0]
RemoteWidgetDialog(list, list.app, widget)
if widget == '.': continue
try:
RemotePackDialog(list, list.app, widget)
except TclError, msg:
print msg
test()
|
mit
|
carlmw/oscar-wager
|
django/templatetags/l10n.py
|
247
|
1845
|
from django.conf import settings
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import formats
from django.utils.encoding import force_unicode
register = Library()
def localize(value):
"""
Forces a value to be rendered as a localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_unicode(formats.localize(value, use_l10n=True))
localize.is_safe = False
def unlocalize(value):
"""
Forces a value to be rendered as a non-localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_unicode(value)
unlocalize.is_safe = False
class LocalizeNode(Node):
def __init__(self, nodelist, use_l10n):
self.nodelist = nodelist
self.use_l10n = use_l10n
def __repr__(self):
return "<LocalizeNode>"
def render(self, context):
old_setting = context.use_l10n
context.use_l10n = self.use_l10n
output = self.nodelist.render(context)
context.use_l10n = old_setting
return output
@register.tag('localize')
def localize_tag(parser, token):
"""
Forces or prevents localization of values, regardless of the value of
`settings.USE_L10N`.
Sample usage::
{% localize off %}
var pi = {{ 3.1415 }};
{% endlocalize %}
"""
use_l10n = None
bits = list(token.split_contents())
if len(bits) == 1:
use_l10n = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
else:
use_l10n = bits[1] == 'on'
nodelist = parser.parse(('endlocalize',))
parser.delete_first_token()
return LocalizeNode(nodelist, use_l10n)
register.filter(localize)
register.filter(unlocalize)
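# Template usage of the tag and filters registered above (illustrative):
#
#   {% load l10n %}
#   {% localize off %}{{ value }}{% endlocalize %}
#   {{ value|localize }}    {# force localized rendering #}
#   {{ value|unlocalize }}  {# force raw, non-localized rendering #}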
|
bsd-3-clause
|
fredericlepied/ansible
|
lib/ansible/module_utils/connection.py
|
17
|
5354
|
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import signal
import socket
import struct
import os
import uuid
from functools import partial
from ansible.module_utils.basic import get_exception
from ansible.module_utils._text import to_bytes, to_native, to_text
def send_data(s, data):
packed_len = struct.pack('!Q', len(data))
return s.sendall(packed_len + data)
def recv_data(s):
header_len = 8 # size of a packed unsigned long long
data = to_bytes("")
while len(data) < header_len:
d = s.recv(header_len - len(data))
if not d:
return None
data += d
data_len = struct.unpack('!Q', data[:header_len])[0]
data = data[header_len:]
while len(data) < data_len:
d = s.recv(data_len - len(data))
if not d:
return None
data += d
return data
def exec_command(module, command):
try:
sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sf.connect(module._socket_path)
data = "EXEC: %s" % command
send_data(sf, to_bytes(data.strip()))
rc = int(recv_data(sf), 10)
stdout = recv_data(sf)
stderr = recv_data(sf)
except socket.error:
exc = get_exception()
sf.close()
module.fail_json(msg='unable to connect to socket', err=str(exc))
sf.close()
return rc, to_native(stdout, errors='surrogate_or_strict'), to_native(stderr, errors='surrogate_or_strict')
def request_builder(method, *args, **kwargs):
reqid = str(uuid.uuid4())
req = {'jsonrpc': '2.0', 'method': method, 'id': reqid}
params = list(args) or kwargs or None
if params:
req['params'] = params
return req
class Connection:
def __init__(self, module):
self._module = module
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if name.startswith('_'):
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
return partial(self.__rpc__, name)
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
from remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
For usage refer the respective connection plugin docs.
"""
req = request_builder(name, *args, **kwargs)
reqid = req['id']
if not self._module._socket_path:
self._module.fail_json(msg='provider support not available for this host')
if not os.path.exists(self._module._socket_path):
self._module.fail_json(msg='provider socket does not exist, is the provider running?')
try:
data = self._module.jsonify(req)
rc, out, err = exec_command(self._module, data)
except socket.error:
exc = get_exception()
self._module.fail_json(msg='unable to connect to socket', err=str(exc))
try:
response = self._module.from_json(to_text(out, errors='surrogate_then_replace'))
except ValueError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
if response['id'] != reqid:
self._module.fail_json(msg='invalid id received')
if 'error' in response:
msg = response['error'].get('data') or response['error']['message']
self._module.fail_json(msg=to_text(msg, errors='surrogate_then_replace'))
return response['result']
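# Hedged usage sketch (illustrative; 'get_capabilities' stands for whatever rpc method the
# connection plugin exposes, it is not defined in this file):
#
#   conn = Connection(module)
#   result = conn.get_capabilities()   # proxied through Connection.__rpc__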
|
gpl-3.0
|
TianpeiLuke/GPy
|
GPy/likelihoods/student_t.py
|
4
|
12651
|
# Copyright (c) 2012-2014 Ricardo Andrade, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
import scipy as sp
import link_functions
from scipy import stats, integrate
from scipy.special import gammaln, gamma
from likelihood import Likelihood
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
class StudentT(Likelihood):
"""
Student T likelihood
For nomenclature see Bayesian Data Analysis 2003 p576
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\Gamma\\left(\\frac{v+1}{2}\\right)}{\\Gamma\\left(\\frac{v}{2}\\right)\\sqrt{v\\pi\\sigma^{2}}}\\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - f_{i})^{2}}{\\sigma^{2}}\\right)\\right)^{-\\frac{v+1}{2}}
"""
def __init__(self,gp_link=None, deg_free=5, sigma2=2):
if gp_link is None:
gp_link = link_functions.Identity()
super(StudentT, self).__init__(gp_link, name='Student_T')
# sigma2 is not a noise parameter, it is a squared scale.
self.sigma2 = Param('t_scale2', float(sigma2), Logexp())
self.v = Param('deg_free', float(deg_free))
self.link_parameter(self.sigma2)
self.link_parameter(self.v)
self.v.constrain_fixed()
self.log_concave = False
def parameters_changed(self):
self.variance = (self.v / float(self.v - 2)) * self.sigma2
def update_gradients(self, grads):
"""
Pull out the gradients, be careful as the order must match the order
in which the parameters are added
"""
self.sigma2.gradient = grads[0]
self.v.gradient = grads[1]
def pdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\Gamma\\left(\\frac{v+1}{2}\\right)}{\\Gamma\\left(\\frac{v}{2}\\right)\\sqrt{v\\pi\\sigma^{2}}}\\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \\lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)^{-\\frac{v+1}{2}}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
#Careful gamma(big_number) is infinity!
objective = ((np.exp(gammaln((self.v + 1)*0.5) - gammaln(self.v * 0.5))
/ (np.sqrt(self.v * np.pi * self.sigma2)))
* ((1 + (1./float(self.v))*((e**2)/float(self.sigma2)))**(-0.5*(self.v + 1)))
)
return np.prod(objective)
def logpdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = \\ln \\Gamma\\left(\\frac{v+1}{2}\\right) - \\ln \\Gamma\\left(\\frac{v}{2}\\right) - \\ln \\sqrt{v \\pi\\sigma^{2}} - \\frac{v+1}{2}\\ln \\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)
:param inv_link_f: latent variables (link(f))
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
#FIXME:
#Why does np.log(1 + (1/self.v)*((y-inv_link_f)**2)/self.sigma2) suppress the divide by zero?!
#But np.log(1 + (1/float(self.v))*((y-inv_link_f)**2)/self.sigma2) throws it correctly
#print - 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))
objective = (+ gammaln((self.v + 1) * 0.5)
- gammaln(self.v * 0.5)
- 0.5*np.log(self.sigma2 * self.v * np.pi)
- 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))
)
return np.sum(objective)
def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
"""
Gradient of the log likelihood function at y, given link(f) w.r.t link(f)
.. math::
\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{d\\lambda(f)} = \\frac{(v+1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2}v}
:param inv_link_f: latent variables (f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: gradient of likelihood evaluated at points
:rtype: Nx1 array
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
grad = ((self.v + 1) * e) / (self.v * self.sigma2 + (e**2))
return grad
def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
"""
Hessian at y, given link(f), w.r.t link(f)
i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
The hessian will be 0 unless i == j
.. math::
\\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = \\frac{(v+1)((y_{i}-\lambda(f_{i}))^{2} - \\sigma^{2}v)}{((y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2}v)^{2}}
:param inv_link_f: latent variables inv_link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
hess = ((self.v + 1)*(e**2 - self.v*self.sigma2)) / ((self.sigma2*self.v + e**2)**2)
return hess
def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
"""
Third order derivative log-likelihood function at y given link(f) w.r.t link(f)
.. math::
\\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = \\frac{2(v+1)((y_{i} - \lambda(f_{i}))^{3} - 3(y_{i} - \lambda(f_{i})) \\sigma^{2} v)}{((y_{i} - \lambda(f_{i}))^{2} + \\sigma^{2} v)^{3}}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: third derivative of likelihood evaluated at points f
:rtype: Nx1 array
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
d3lik_dlink3 = ( -(2*(self.v + 1)*(-e)*(e**2 - 3*self.v*self.sigma2)) /
((e**2 + self.sigma2*self.v)**3)
)
return d3lik_dlink3
def dlogpdf_link_dvar(self, inv_link_f, y, Y_metadata=None):
"""
Gradient of the log-likelihood function at y given f, w.r.t variance parameter (t_noise)
.. math::
\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{d\\sigma^{2}} = \\frac{v((y_{i} - \lambda(f_{i}))^{2} - \\sigma^{2})}{2\\sigma^{2}(\\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: float
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2))
return np.sum(dlogpdf_dvar)
def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None):
"""
Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)
.. math::
\\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-v(v + 1)(y_{i}-\lambda(f_{i}))}{((y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2} v)^{2}}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
dlogpdf_dlink_dvar = (self.v*(self.v+1)*(-e))/((self.sigma2*self.v + e**2)**2)
return dlogpdf_dlink_dvar
def d2logpdf_dlink2_dvar(self, inv_link_f, y, Y_metadata=None):
"""
Gradient of the hessian (d2logpdf_dlink2) w.r.t variance parameter (t_noise)
.. math::
\\frac{d}{d\\sigma^{2}}(\\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}f}) = \\frac{v(v+1)(\\sigma^{2}v - 3(y_{i} - \lambda(f_{i}))^{2})}{(\\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})^{3}}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of hessian evaluated at points f and f_j w.r.t variance parameter
:rtype: Nx1 array
"""
assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
e = y - inv_link_f
d2logpdf_dlink2_dvar = ( (self.v*(self.v+1)*(self.sigma2*self.v - 3*(e**2)))
/ ((self.sigma2*self.v + (e**2))**3)
)
return d2logpdf_dlink2_dvar
def dlogpdf_link_dtheta(self, f, y, Y_metadata=None):
dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
dlogpdf_dv = np.zeros_like(dlogpdf_dvar) #FIXME: Not done yet
return np.hstack((dlogpdf_dvar, dlogpdf_dv))
def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None):
dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
dlogpdf_dlink_dv = np.zeros_like(dlogpdf_dlink_dvar) #FIXME: Not done yet
return np.hstack((dlogpdf_dlink_dvar, dlogpdf_dlink_dv))
def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None):
d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet
return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))
def predictive_mean(self, mu, sigma, Y_metadata=None):
# transf(mu) is strictly the predictive median; it stands in for the predictive mean
# only because the link is monotonic (and the Student-t noise is symmetric).
return self.gp_link.transf(mu)
def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
if self.deg_free<=2.:
return np.empty(mu.shape)*np.nan # does not exist for degrees of freedom <= 2.
else:
return super(StudentT, self).predictive_variance(mu, variance, predictive_mean, Y_metadata)
def conditional_mean(self, gp):
return self.gp_link.transf(gp)
def conditional_variance(self, gp):
return self.deg_free/(self.deg_free - 2.)
def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
orig_shape = gp.shape
gp = gp.flatten()
#FIXME: Very slow as we are computing a new random variable per input!
#Can't get it to sample all at the same time
#student_t_samples = np.array([stats.t.rvs(self.v, self.gp_link.transf(gpj),scale=np.sqrt(self.sigma2), size=1) for gpj in gp])
dfs = np.ones_like(gp)*self.v
scales = np.ones_like(gp)*np.sqrt(self.sigma2)
student_t_samples = stats.t.rvs(dfs, loc=self.gp_link.transf(gp),
scale=scales)
return student_t_samples.reshape(orig_shape)
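# Hedged usage sketch (illustrative; how the likelihood is attached to a GP model depends
# on the GPy version and is outside this file):
#
#   lik = StudentT(deg_free=5, sigma2=2.0)
#   draws = lik.samples(np.zeros((10, 1)))   # draw observations at latent f = 0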
|
bsd-3-clause
|
rmariotti/py_cli_rpg
|
entity.py
|
1
|
1482
|
#!/usr/bin/env python3
class Entity:
def __init__(self, name = "nome", hp = 10, velocity = 10,
strength = 10, intelligence = 10, hand = None, items = None, lvl = 1 ):
from random import random
self.name = name
self.lvl = lvl
self.hp = hp
self.velocity = velocity + int(random() * 10)
self.strength = strength + int(random() * 10)
self.intelligence = intelligence + int(random() * 10)
self.hand = hand
self.items = items if items is not None else []  # avoid sharing one default list between instances
class Player(Entity):
def __init__(self):
Entity.__init__(self)
self.name = str(input("What is your name?\n"))
a = 1
while a == 1:
sure = input("Your name is: %s, are you sure?\n" % self.name)
while sure not in ["no", "yes"]:
sure = input("Your name is: %s, are you sure?\n" % self.name)
if sure == "no":
self.name = str(input("What is your name?\n"))
else: a = 2
classi = ("chemist", "developer", "engineer")
alleanze = ("empire", "rebels")
self.classe = str(input("Select your class (chemist|developer|engineer)\n"))
while self.classe not in classi:
self.classe = str(input("Choose your class (chemist|developer|engineer)\n"))
self.alleanza = str(input("Choose your faction (empire|rebels)\n"))
while self.alleanza not in alleanze:
self.alleanza = str(input("Choose your faction(empire|rebels)\n"))
|
gpl-3.0
|
di0spyr0s/pants
|
src/python/pants/backend/jvm/tasks/jvm_compile/scala/zinc_analysis_parser.py
|
7
|
2169
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from zincutils.zinc_analysis_parser import ZincAnalysisParser as UnderlyingParser
from pants.backend.jvm.tasks.jvm_compile.analysis_parser import (AnalysisParser, ParseError,
raise_on_eof)
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis import ZincAnalysis
class ZincAnalysisParser(AnalysisParser):
"""Parses a zinc analysis file.
Implemented by delegating to an underlying zincutils.ZincAnalysisParser instance.
"""
# Implement AnalysisParser properties.
empty_test_header = b'products'
current_test_header = ZincAnalysis.FORMAT_VERSION_LINE
def __init__(self):
self._underlying_parser = UnderlyingParser()
# Implement AnalysisParser methods.
def parse(self, infile):
"""Parse a ZincAnalysis instance from an open text file."""
with raise_on_eof(infile):
try:
return ZincAnalysis(self._underlying_parser.parse(infile))
except UnderlyingParser.ParseError as e:
raise ParseError(e)
def parse_products(self, infile, classes_dir):
"""An efficient parser of just the products section."""
with raise_on_eof(infile):
try:
return self._underlying_parser.parse_products(infile)
except UnderlyingParser.ParseError as e:
raise ParseError(e)
def parse_deps(self, infile, classpath_indexer, classes_dir):
with raise_on_eof(infile):
try:
return self._underlying_parser.parse_deps(infile, classes_dir)
except UnderlyingParser.ParseError as e:
raise ParseError(e)
def rebase(self, infile, outfile, pants_home_from, pants_home_to, java_home=None):
with raise_on_eof(infile):
try:
self._underlying_parser.rebase(infile, outfile, pants_home_from, pants_home_to, java_home)
except UnderlyingParser.ParseError as e:
raise ParseError(e)
|
apache-2.0
|
bguillot/OpenUpgrade
|
addons/l10n_fr/wizard/fr_report_compute_resultant.py
|
374
|
2312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
#    WARNING: This program as such is intended to be used by professional
#    programmers who take full responsibility for assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
#    Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
DNFcode/edx-platform
|
common/djangoapps/embargo/models.py
|
48
|
3847
|
"""
Models for embargoing visits to certain courses by IP address.
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration embargo --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/embargo/migrations/
"""
import ipaddr
from django.db import models
from config_models.models import ConfigurationModel
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
class EmbargoedCourse(models.Model):
"""
Enable course embargo on a course-by-course basis.
"""
objects = NoneToEmptyManager()
# The course to embargo
course_id = CourseKeyField(max_length=255, db_index=True, unique=True)
# Whether or not to embargo
embargoed = models.BooleanField(default=False)
@classmethod
def is_embargoed(cls, course_id):
"""
Returns whether or not the given course id is embargoed.
If course has not been explicitly embargoed, returns False.
"""
try:
record = cls.objects.get(course_id=course_id)
return record.embargoed
except cls.DoesNotExist:
return False
def __unicode__(self):
not_em = "Not "
if self.embargoed:
not_em = ""
# pylint: disable=no-member
return u"Course '{}' is {}Embargoed".format(self.course_id.to_deprecated_string(), not_em)
class EmbargoedState(ConfigurationModel):
"""
Register countries to be embargoed.
"""
# The countries to embargo
embargoed_countries = models.TextField(
blank=True,
help_text="A comma-separated list of country codes that fall under U.S. embargo restrictions"
)
@property
def embargoed_countries_list(self):
"""
Return a list of upper case country codes
"""
if self.embargoed_countries == '':
return []
return [country.strip().upper() for country in self.embargoed_countries.split(',')] # pylint: disable=no-member
class IPFilter(ConfigurationModel):
"""
Register specific IP addresses to explicitly block or unblock.
"""
whitelist = models.TextField(
blank=True,
help_text="A comma-separated list of IP addresses that should not fall under embargo restrictions."
)
blacklist = models.TextField(
blank=True,
help_text="A comma-separated list of IP addresses that should fall under embargo restrictions."
)
class IPFilterList(object):
"""
Represent a list of IP addresses with support of networks.
"""
def __init__(self, ips):
self.networks = [ipaddr.IPNetwork(ip) for ip in ips]
def __iter__(self):
for network in self.networks:
yield network
def __contains__(self, ip):
try:
ip = ipaddr.IPAddress(ip)
except ValueError:
return False
for network in self.networks:
if network.Contains(ip):
return True
return False
@property
def whitelist_ips(self):
"""
Return a list of valid IP addresses to whitelist
"""
if self.whitelist == '':
return []
return self.IPFilterList([addr.strip() for addr in self.whitelist.split(',')]) # pylint: disable=no-member
@property
def blacklist_ips(self):
"""
Return a list of valid IP addresses to blacklist
"""
if self.blacklist == '':
return []
return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member
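# Illustrative usage sketch (not part of the original module); the configured
# values below are hypothetical:
#
#   ip_filter = IPFilter.current()  # ConfigurationModel's current-config accessor
#   # with whitelist = "10.0.0.0/8" and blacklist = "203.0.113.5":
#   "10.1.2.3" in ip_filter.whitelist_ips      # True, matches the /8 network
#   "203.0.113.5" in ip_filter.blacklist_ips   # True, exact address match
#   "not-an-ip" in ip_filter.blacklist_ips     # False, ValueError is swallowed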
|
agpl-3.0
|
beezee/GAE-Django-base-app
|
django/template/debug.py
|
232
|
3797
|
from django.conf import settings
from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, origin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source,msg):
e = TemplateSyntaxError(msg)
e.source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'source'):
e.source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
result = node.render(context)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = node.source
raise
except Exception, e:
from sys import exc_info
wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
(e.__class__.__name__, force_unicode(e, errors='replace')))
wrapped.source = node.source
wrapped.exc_info = exc_info()
raise wrapped, None, wrapped.exc_info[2]
return result
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = localize(output, use_l10n=context.use_l10n)
output = force_unicode(output)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = self.source
raise
except UnicodeDecodeError:
return ''
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
|
bsd-3-clause
|
dr4ke616/pinky
|
pinky/core/manhole.py
|
1
|
3337
|
# -*- coding: utf8 -*-
# Copyright (c) 2013 Dedsert Ltd.
# See LICENSE for more details
"""
.. module:: manhole
:platform: Unix, Windows
    :synopsis: ManHole Administrative Python Shell
.. moduleauthor:: Adam Drakeford <[email protected]>
"""
import string
from rlcompleter import Completer
from twisted.application import internet
from twisted.cred import portal, checkers
from twisted.conch import manhole, manhole_ssh
class EnhancedColoredManhole(manhole.ColoredManhole):
def find_common(self, l):
"""
        Find the common prefix of the list items, e.g. 'ab' for ['abcd', 'abce', 'abf'].
        Requires an ordered (sorted) list.
"""
if len(l) == 1:
return l[0]
init = l[0]
for item in l[1:]:
for i, (x, y) in enumerate(zip(init, item)):
if x != y:
init = "".join(init[:i])
break
if not init:
return None
return init
def handle_TAB(self):
necessarypart = "".join(self.lineBuffer).split(' ')[-1]
completer = Completer(globals())
if completer.complete(necessarypart, 0):
matches = list(set(completer.matches)) # has multiples
if len(matches) == 1:
length = len(necessarypart)
self.lineBuffer = self.lineBuffer[:-length]
self.lineBuffer.extend(matches[0])
self.lineBufferIndex = len(self.lineBuffer)
else:
matches.sort()
commons = self.find_common(matches)
if commons:
length = len(necessarypart)
self.lineBuffer = self.lineBuffer[:-length]
self.lineBuffer.extend(commons)
self.lineBufferIndex = len(self.lineBuffer)
self.terminal.nextLine()
while matches:
matches, part = matches[4:], matches[:4]
for item in part:
self.terminal.write('%s' % item.ljust(30))
self.terminal.write('\n')
self.terminal.nextLine()
self.terminal.eraseLine()
self.terminal.cursorBackward(self.lineBufferIndex + 5)
self.terminal.write(
"%s %s" % (self.ps[self.pn], "".join(self.lineBuffer))
)
def keystrokeReceived(self, keyID, modifier):
# my terminal needed this
self.keyHandlers.update({'\b': self.handle_BACKSPACE})
m = self.keyHandlers.get(keyID)
if m is not None:
m()
elif keyID in string.printable:
self.characterReceived(keyID, False)
def get_manhole_factory(namespace, **passwords):
"""Get a Manhole Factory
"""
realm = manhole_ssh.TerminalRealm()
realm.chainedProtocolFactory.protocolFactory = (
lambda _: EnhancedColoredManhole(namespace)
)
p = portal.Portal(realm)
p.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(**passwords)
)
return manhole_ssh.ConchFactory(p)
class ManholeServer(internet.TCPServer):
def __init__(self, user, password, port):
factory = get_manhole_factory(globals(), **{user: password})
internet.TCPServer.__init__(self, int(port), factory)
|
mit
|
mhotwagner/abackend
|
abackend-env/lib/python3.5/site-packages/django/utils/timesince.py
|
109
|
2521
|
from __future__ import unicode_literals
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import ugettext, ungettext_lazy
TIMESINCE_CHUNKS = (
(60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
(60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
(60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
(60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
(60 * 60, ungettext_lazy('%d hour', '%d hours')),
(60, ungettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from
http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now(utc if is_aware(d) else None)
delta = (d - now) if reversed else (now - d)
# ignore microseconds
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(ugettext('0 minutes'))
for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
count = since // seconds
if count != 0:
break
result = avoid_wrapping(name % count)
if i + 1 < len(TIMESINCE_CHUNKS):
# Now get the second item
seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
result += ugettext(', ') + avoid_wrapping(name2 % count2)
return result
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
return timesince(d, now, reversed=True)
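# Illustrative sketch (not part of the original module) of how the chunking
# above plays out; the dates are hypothetical:
#
#   from datetime import datetime
#   timesince(datetime(2020, 1, 1), now=datetime(2021, 4, 5))
#   # -> "1 year, 3 months" (460 days: one 365-day chunk, then three 30-day
#   #    chunks; avoid_wrapping() actually renders the inner spaces as
#   #    non-breaking spaces)
#   timesince(datetime(2021, 4, 5), now=datetime(2020, 1, 1))
#   # -> "0 minutes" (d is later than now)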
|
mit
|
mcuringa/py-tutor
|
pytutor/social/migrations/0001_initial.py
|
1
|
5250
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tutor', '0004_auto_20150314_0420'),
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='FriendConnection',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='FriendRequest',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('status', models.CharField(max_length=20, blank=True)),
('sent', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='HelpRequest',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('question', models.ForeignKey(to='tutor.Question')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('msg', models.TextField()),
('sent', models.DateTimeField(auto_now=True)),
('unread', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='SocialProfile',
fields=[
('bio', models.CharField(null=True, max_length=200, blank=True)),
('public', models.BooleanField(default=False)),
('profile_pic', models.ImageField(null=True, upload_to='profile_pics', blank=True)),
('institution', models.CharField(null=True, max_length=120, blank=True)),
('city', models.CharField(null=True, max_length=120, blank=True)),
('state', models.CharField(null=True, max_length=120, blank=True)),
('country', models.CharField(null=True, max_length=120, blank=True)),
('mobile', models.CharField(null=True, max_length=120, blank=True)),
('facebook', models.CharField(null=True, max_length=120, blank=True)),
('twitter', models.CharField(null=True, max_length=120, blank=True)),
('whatsapp', models.CharField(null=True, max_length=120, blank=True)),
('skype', models.CharField(null=True, max_length=120, blank=True)),
('google', models.CharField(null=True, max_length=120, blank=True)),
('pyanywhere', models.CharField(null=True, max_length=120, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, serialize=False)),
],
),
migrations.AddField(
model_name='message',
name='msg_from',
field=models.ForeignKey(related_name='from_user', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='message',
name='msg_to',
field=models.ForeignKey(related_name='to_user', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='helprequest',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='friendrequest',
name='invited',
field=models.ForeignKey(related_name='invited', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='friendrequest',
name='sender',
field=models.ForeignKey(related_name='sender', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='friendconnection',
name='friend_a',
field=models.ForeignKey(related_name='frienda', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='friendconnection',
name='friend_b',
field=models.ForeignKey(related_name='friendb', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='RestProfile',
fields=[
],
options={
'proxy': True,
},
bases=('social.socialprofile',),
),
migrations.CreateModel(
name='PublicRestProfile',
fields=[
],
options={
'proxy': True,
},
bases=('social.restprofile',),
),
]
|
agpl-3.0
|
blueboxgroup/neutron
|
neutron/db/migration/alembic_migrations/versions/4dbe243cd84d_nsxv.py
|
3
|
7947
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsxv
Revision ID: 4dbe243cd84d
Revises: 38495dc99731
Create Date: 2015-01-05 23:22:04.501609
"""
# revision identifiers, used by Alembic.
revision = '4dbe243cd84d'
down_revision = '38495dc99731'
from alembic import op
import sqlalchemy as sa
appliance_sizes_enum = sa.Enum('compact', 'large', 'xlarge', 'quadlarge',
name='nsxv_router_bindings_appliance_size')
edge_types_enum = sa.Enum('service', 'vdr',
name='nsxv_router_bindings_edge_type')
internal_network_purpose_enum = sa.Enum('inter_edge_net',
name='nsxv_internal_networks_purpose')
internal_edge_purpose_enum = sa.Enum('inter_edge_net',
name='nsxv_internal_edges_purpose')
tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup',
name='nsxv_tz_network_bindings_binding_type')
def upgrade():
op.create_table(
'nsxv_router_bindings',
sa.Column('status', sa.String(length=16), nullable=False),
sa.Column('status_description', sa.String(length=255), nullable=True),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=True),
sa.Column('lswitch_id', sa.String(length=36), nullable=True),
sa.Column('appliance_size',
appliance_sizes_enum,
nullable=True),
sa.Column('edge_type', edge_types_enum, nullable=True),
sa.PrimaryKeyConstraint('router_id'))
op.create_table(
'nsxv_internal_networks',
sa.Column('network_purpose', internal_network_purpose_enum,
nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_purpose'))
op.create_table(
'nsxv_internal_edges',
sa.Column('ext_ip_address', sa.String(length=64), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('purpose', internal_edge_purpose_enum, nullable=True),
sa.PrimaryKeyConstraint('ext_ip_address'))
op.create_table(
'nsxv_firewall_rule_bindings',
sa.Column('rule_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('rule_vse_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('rule_id', 'edge_id'))
op.create_table(
'nsxv_edge_dhcp_static_bindings',
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('mac_address', sa.String(length=32), nullable=False),
sa.Column('binding_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('edge_id', 'mac_address'))
op.create_table(
'nsxv_edge_vnic_bindings',
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('vnic_index', sa.Integer(), nullable=False),
sa.Column('tunnel_index', sa.Integer(), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('edge_id', 'vnic_index', 'tunnel_index'))
op.create_table(
'nsxv_spoofguard_policy_network_mappings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('policy_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'nsxv_security_group_section_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('ip_section_id', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id'))
op.create_table(
'nsxv_tz_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type',
tz_binding_type_enum,
nullable=False),
sa.Column('phy_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid',
'vlan_id'))
op.create_table(
'nsxv_port_vnic_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=42), nullable=False),
sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'))
op.create_table(
'nsxv_port_index_mappings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('device_id', sa.String(length=255), nullable=False),
sa.Column('index', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id'),
sa.UniqueConstraint('device_id', 'index'))
op.create_table(
'nsxv_rule_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_rule_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id', 'nsx_rule_id'))
op.create_table(
'nsxv_router_ext_attributes',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('distributed', sa.Boolean(), nullable=False),
sa.Column('exclusive', sa.Boolean(), nullable=False),
sa.Column('service_router', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id'))
def downgrade():
op.drop_table('nsxv_router_ext_attributes')
op.drop_table('nsxv_rule_mappings')
op.drop_table('nsxv_port_index_mappings')
op.drop_table('nsxv_port_vnic_mappings')
op.drop_table('nsxv_tz_network_bindings')
op.drop_table('nsxv_security_group_section_mappings')
op.drop_table('nsxv_spoofguard_policy_network_mappings')
op.drop_table('nsxv_edge_vnic_bindings')
op.drop_table('nsxv_edge_dhcp_static_bindings')
op.drop_table('nsxv_firewall_rule_bindings')
op.drop_table('nsxv_internal_edges')
op.drop_table('nsxv_internal_networks')
op.drop_table('nsxv_router_bindings')
appliance_sizes_enum.drop(op.get_bind(), checkfirst=False)
edge_types_enum.drop(op.get_bind(), checkfirst=False)
internal_network_purpose_enum.drop(op.get_bind(), checkfirst=False)
internal_edge_purpose_enum.drop(op.get_bind(), checkfirst=False)
tz_binding_type_enum.drop(op.get_bind(), checkfirst=False)
|
apache-2.0
|
mas2df/phonograph
|
utils.py
|
1
|
1691
|
import requests
import logging
import json
from py2neo import Node, Relationship
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
spotify_api_url = "https://api.spotify.com/v1/"
artists_endpoint = "artists/"
related_artists_endpoint = "/related-artists"
def getArtistJson(artist_id):
""" Returns a json representation of the artist response for an artist_id """
request_url = spotify_api_url + artists_endpoint + artist_id
return getJsonResponse(request_url)
def getRelatedArtistJson(artist_id):
""" Returns a json representation of the related_artists response for an artist_id """
request_url = spotify_api_url + artists_endpoint + artist_id + related_artists_endpoint
return getJsonResponse(request_url)
def getJsonResponse(request_url):
logger.info("Getting json: " + request_url)
response = requests.get(request_url)
return json.loads(response.text)
def createArtistNode(graph, artist_json):
artist_node = Node(artist_json["type"])
artist_node.properties["name"] = artist_json["name"]
artist_node.properties["href"] = artist_json["href"]
artist_node.properties["external_href"] = artist_json["external_urls"]["spotify"]
artist_node.properties["spotify_id"] = artist_json["id"]
artist_node.properties["href"] = artist_json["href"]
# Insert into the db
graph.create(artist_node)
# return the node
return artist_node
def createRelatedArtistNodes(graph, artist_node, related_artist_json):
for related_artist_json in related_artist_json["artists"]:
related_artist_node = createArtistNode(graph, related_artist_json)
relationship = Relationship(artist_node, "RELATED TO", related_artist_node)
graph.create(relationship)
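# Minimal usage sketch (not part of the original module); the Neo4j URI and
# Spotify artist id below are placeholder values:
#
#   from py2neo import Graph
#   graph = Graph("http://localhost:7474/db/data/")
#   artist_json = getArtistJson("0OdUWJ0sBjDrqHygGUXeCF")
#   artist_node = createArtistNode(graph, artist_json)
#   createRelatedArtistNodes(graph, artist_node,
#                            getRelatedArtistJson("0OdUWJ0sBjDrqHygGUXeCF"))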
|
mit
|
sidartaoliveira/ansible
|
lib/ansible/plugins/action/iosxr_config.py
|
79
|
4164
|
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.iosxr import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
gpl-3.0
|
GISAElkartea/bidasoamedia
|
project/articles/migrations/0001_initial.py
|
1
|
2400
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 21:52
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations, models
import django.utils.timezone
import markitup.fields
import sorl.thumbnail.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='title')),
('description', markitup.fields.MarkupField(blank=True, help_text='populated from body if not given', no_rendered_field=True, verbose_name='description')),
('body', markitup.fields.MarkupField(no_rendered_field=True, verbose_name='body')),
('image', sorl.thumbnail.fields.ImageField(blank=True, upload_to=b'images', verbose_name='image')),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='publication date')),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from=b'title', unique=True)),
('_body_rendered', models.TextField(blank=True, editable=False)),
('_description_rendered', models.TextField(blank=True, editable=False)),
],
options={
'ordering': ('-pub_date',),
'verbose_name': 'Article',
'verbose_name_plural': 'Articles',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, verbose_name='izena')),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', unique=True)),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.AddField(
model_name='article',
name='categories',
field=models.ManyToManyField(blank=True, to='articles.Category', verbose_name='categories'),
),
]
|
agpl-3.0
|
sankhesh/VTK
|
Filters/Core/Testing/Python/dispPlot.py
|
20
|
1768
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this is a python version of the plate vibration example (converted from the original Tcl script)
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read a vtk file
#
plate = vtk.vtkPolyDataReader()
plate.SetFileName(VTK_DATA_ROOT + "/Data/plate.vtk")
plate.SetVectorsName("mode8")
warp = vtk.vtkWarpVector()
warp.SetInputConnection(plate.GetOutputPort())
warp.SetScaleFactor(0.5)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(warp.GetOutputPort())
color = vtk.vtkVectorDot()
color.SetInputConnection(normals.GetOutputPort())
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.Build()
i = 0
while i < 128:
lut.SetTableValue(i, (128.0 - i) / 128.0, (128.0 - i) / 128.0,
(128.0 - i) / 128.0, 1)
i += 1
i = 128
while i < 256:
lut.SetTableValue(i, (i - 128.0) / 128.0, (i - 128.0) / 128.0,
(i - 128.0) / 128.0, 1)
i += 1
plateMapper = vtk.vtkDataSetMapper()
plateMapper.SetInputConnection(color.GetOutputPort())
plateMapper.SetLookupTable(lut)
plateMapper.SetScalarRange(-1, 1)
plateActor = vtk.vtkActor()
plateActor.SetMapper(plateMapper)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(plateActor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)
ren1.GetActiveCamera().SetPosition(13.3991, 14.0764, 9.97787)
ren1.GetActiveCamera().SetFocalPoint(1.50437, 0.481517, 4.52992)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(-0.120861, 0.458556, -0.880408)
ren1.GetActiveCamera().SetClippingRange(12.5724, 26.8374)
# render the image
#
iren.Initialize()
#iren.Start()
|
bsd-3-clause
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/webdriver/pylib/selenium/webdriver/remote/utils.py
|
37
|
3643
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import zipfile
try:
import json
except ImportError: # < 2.6
import simplejson as json
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.common.exceptions import NoSuchElementException
LOGGER = logging.getLogger(__name__)
def format_json(json_struct):
return json.dumps(json_struct, indent=4)
def dump_json(json_struct):
return json.dumps(json_struct)
def load_json(s):
return json.loads(s)
def handle_find_element_exception(e):
if ("Unable to find" in e.response["value"]["message"] or
"Unable to locate" in e.response["value"]["message"]):
raise NoSuchElementException("Unable to locate element:")
else:
raise e
def return_value_if_exists(resp):
if resp and "value" in resp:
return resp["value"]
def get_root_parent(elem):
parent = elem.parent
while True:
try:
parent.parent
parent = parent.parent
except AttributeError:
return parent
def unzip_to_temp_dir(zip_file_name):
"""Unzip zipfile to a temporary directory.
The directory of the unzipped files is returned if success,
otherwise None is returned. """
if not zip_file_name or not os.path.exists(zip_file_name):
return None
zf = zipfile.ZipFile(zip_file_name)
if zf.testzip() is not None:
return None
# Unzip the files into a temporary directory
LOGGER.info("Extracting zipped file: %s" % zip_file_name)
tempdir = tempfile.mkdtemp()
try:
# Create directories that don't exist
for zip_name in zf.namelist():
            # The zip may have been created on any OS, so both "\" and "/"
            # separators are normalized to os.path.sep before re-creating
            # the paths locally.
name = (zip_name.replace("\\", os.path.sep).
replace("/", os.path.sep))
dest = os.path.join(tempdir, name)
if (name.endswith(os.path.sep) and not os.path.exists(dest)):
os.mkdir(dest)
LOGGER.debug("Directory %s created." % dest)
# Copy files
for zip_name in zf.namelist():
            # The zip may have been created on any OS, so both "\" and "/"
            # separators are normalized to os.path.sep before re-creating
            # the paths locally.
name = (zip_name.replace("\\", os.path.sep).
replace("/", os.path.sep))
dest = os.path.join(tempdir, name)
if not (name.endswith(os.path.sep)):
LOGGER.debug("Copying file %s......" % dest)
outfile = open(dest, 'wb')
outfile.write(zf.read(zip_name))
outfile.close()
LOGGER.debug("File %s copied." % dest)
LOGGER.info("Unzipped file can be found at %s" % tempdir)
return tempdir
except IOError, err:
LOGGER.error("Error in extracting webdriver.xpi: %s" % err)
return None
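# Illustrative usage sketch (not part of the original module); the archive path
# is a placeholder:
#
#   extracted_dir = unzip_to_temp_dir("/tmp/webdriver.xpi")
#   if extracted_dir is not None:
#       # the archive contents now live under extracted_dir; the caller is
#       # responsible for cleaning the temporary directory up afterwards
#       pass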
|
bsd-3-clause
|
ryanahall/django
|
django/contrib/auth/tokens.py
|
103
|
2667
|
from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
value = (six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp))
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
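# Illustrative usage sketch (not part of the original module); `user` stands
# for any saved User instance and the token shown is made up:
#
#   token = default_token_generator.make_token(user)   # e.g. "41f-1f2a3b4c5d6e7f8a9b0c"
#   default_token_generator.check_token(user, token)   # True until the timeout
#   # Changing the user's password (or last_login) changes the hash input, so
#   # the same token stops validating afterwards.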
|
bsd-3-clause
|
t4ngo/dragonfly
|
dragonfly/actions/action_pause.py
|
5
|
1898
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Pause action
============================================================================
"""
import time
from dragonfly.actions.action_base import DynStrActionBase
class Pause(DynStrActionBase):
"""
Pause for the given amount of time.
The *spec* constructor argument should be a *string* giving the
time to wait. It should be given in hundredths of a second. For
example, the following code will pause for 20/100s = 0.2
seconds: ::
Pause("20").execute()
The reason the *spec* must be given as a *string* is because it
can then be used in dynamic value evaluation. For example, the
following code determines the time to pause at execution time: ::
action = Pause("%(time)d")
data = {"time": 37}
action.execute(data)
"""
def _parse_spec(self, spec):
interval = float(spec) / 100
return interval
def _execute_events(self, interval):
time.sleep(interval)
return True
|
lgpl-3.0
|
calvinchengx/django-haystack
|
haystack/backends/whoosh_backend.py
|
1
|
29514
|
import logging
import os
import re
import shutil
import threading
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
from django.utils.datetime_safe import datetime
from django.utils.encoding import force_unicode
from haystack.backends import BaseSearchBackend, BaseSearchQuery, log_query, EmptyResults
from haystack.constants import ID, DJANGO_CT, DJANGO_ID
from haystack.exceptions import MissingDependency, SearchBackendError
from haystack.models import SearchResult
from haystack.utils import get_identifier
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
try:
from django.db.models.sql.query import get_proxied_model
except ImportError:
# Likely on Django 1.0
get_proxied_model = None
try:
import whoosh
except ImportError:
raise MissingDependency("The 'whoosh' backend requires the installation of 'Whoosh'. Please refer to the documentation.")
# Bubble up the correct error.
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import Schema, IDLIST, STORED, TEXT, KEYWORD, NUMERIC, BOOLEAN, DATETIME, NGRAM, NGRAMWORDS
from whoosh.fields import ID as WHOOSH_ID
from whoosh import index
from whoosh.qparser import QueryParser
from whoosh.filedb.filestore import FileStorage, RamStorage
from whoosh.searching import ResultsPage
from whoosh.spelling import SpellChecker
from whoosh.writing import AsyncWriter
# Handle minimum requirement.
if not hasattr(whoosh, '__version__') or whoosh.__version__ < (1, 8, 4):
raise MissingDependency("The 'whoosh' backend requires version 1.8.4 or greater.")
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d{3,6}Z?)?$')
BACKEND_NAME = 'whoosh'
LOCALS = threading.local()
LOCALS.RAM_STORE = None
class SearchBackend(BaseSearchBackend):
    # Words reserved by Whoosh for special use.
RESERVED_WORDS = (
'AND',
'NOT',
'OR',
'TO',
)
# Characters reserved by Whoosh for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
'\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
'[', ']', '^', '"', '~', '*', '?', ':', '.',
)
def __init__(self, site=None):
super(SearchBackend, self).__init__(site)
self.setup_complete = False
self.use_file_storage = True
self.post_limit = getattr(settings, 'HAYSTACK_WHOOSH_POST_LIMIT', 128 * 1024 * 1024)
if getattr(settings, 'HAYSTACK_WHOOSH_STORAGE', 'file') != 'file':
self.use_file_storage = False
if self.use_file_storage and not hasattr(settings, 'HAYSTACK_WHOOSH_PATH'):
raise ImproperlyConfigured('You must specify a HAYSTACK_WHOOSH_PATH in your settings.')
self.log = logging.getLogger('haystack')
def setup(self):
"""
Defers loading until needed.
"""
new_index = False
# Make sure the index is there.
if self.use_file_storage and not os.path.exists(settings.HAYSTACK_WHOOSH_PATH):
os.makedirs(settings.HAYSTACK_WHOOSH_PATH)
new_index = True
if self.use_file_storage and not os.access(settings.HAYSTACK_WHOOSH_PATH, os.W_OK):
raise IOError("The path to your Whoosh index '%s' is not writable for the current user/group." % settings.HAYSTACK_WHOOSH_PATH)
if self.use_file_storage:
self.storage = FileStorage(settings.HAYSTACK_WHOOSH_PATH)
else:
global LOCALS
if LOCALS.RAM_STORE is None:
LOCALS.RAM_STORE = RamStorage()
self.storage = LOCALS.RAM_STORE
self.content_field_name, self.schema = self.build_schema(self.site.all_searchfields())
self.parser = QueryParser(self.content_field_name, schema=self.schema)
if new_index is True:
self.index = self.storage.create_index(self.schema)
else:
try:
self.index = self.storage.open_index(schema=self.schema)
except index.EmptyIndexError:
self.index = self.storage.create_index(self.schema)
self.setup_complete = True
def build_schema(self, fields):
schema_fields = {
ID: WHOOSH_ID(stored=True, unique=True),
DJANGO_CT: WHOOSH_ID(stored=True),
DJANGO_ID: WHOOSH_ID(stored=True),
}
# Grab the number of keys that are hard-coded into Haystack.
# We'll use this to (possibly) fail slightly more gracefully later.
initial_key_count = len(schema_fields)
content_field_name = ''
for field_name, field_class in fields.items():
if field_class.is_multivalued:
if field_class.indexed is False:
schema_fields[field_class.index_fieldname] = IDLIST(stored=True, field_boost=field_class.boost)
else:
schema_fields[field_class.index_fieldname] = KEYWORD(stored=True, commas=True, scorable=True, field_boost=field_class.boost)
elif field_class.field_type in ['date', 'datetime']:
schema_fields[field_class.index_fieldname] = DATETIME(stored=field_class.stored)
elif field_class.field_type == 'integer':
schema_fields[field_class.index_fieldname] = NUMERIC(stored=field_class.stored, type=int, field_boost=field_class.boost)
elif field_class.field_type == 'float':
schema_fields[field_class.index_fieldname] = NUMERIC(stored=field_class.stored, type=float, field_boost=field_class.boost)
elif field_class.field_type == 'boolean':
# Field boost isn't supported on BOOLEAN as of 1.8.2.
schema_fields[field_class.index_fieldname] = BOOLEAN(stored=field_class.stored)
elif field_class.field_type == 'ngram':
schema_fields[field_class.index_fieldname] = NGRAM(minsize=3, maxsize=15, stored=field_class.stored, field_boost=field_class.boost)
elif field_class.field_type == 'edge_ngram':
schema_fields[field_class.index_fieldname] = NGRAMWORDS(minsize=2, maxsize=15, at='start', stored=field_class.stored, field_boost=field_class.boost)
else:
schema_fields[field_class.index_fieldname] = TEXT(stored=True, analyzer=StemmingAnalyzer(), field_boost=field_class.boost)
if field_class.document is True:
content_field_name = field_class.index_fieldname
# Fail more gracefully than relying on the backend to die if no fields
# are found.
if len(schema_fields) <= initial_key_count:
raise SearchBackendError("No fields were found in any search_indexes. Please correct this before attempting to search.")
return (content_field_name, Schema(**schema_fields))
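    # Illustrative sketch (not part of the original backend) of what
    # build_schema() yields for a hypothetical index with one document text
    # field ('text') and one datetime field ('pub_date'):
    #
    #   content_field_name, schema = backend.build_schema(index_fields)
    #   # content_field_name == 'text' (the field declared with document=True)
    #   # schema holds TEXT(stored=True, analyzer=StemmingAnalyzer()) for
    #   # 'text', DATETIME(stored=True) for 'pub_date', plus the ID, DJANGO_CT
    #   # and DJANGO_ID housekeeping fields added above.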
def update(self, index, iterable, commit=True):
if not self.setup_complete:
self.setup()
self.index = self.index.refresh()
writer = AsyncWriter(self.index)
for obj in iterable:
doc = index.full_prepare(obj)
# Really make sure it's unicode, because Whoosh won't have it any
# other way.
for key in doc:
doc[key] = self._from_python(doc[key])
try:
writer.update_document(**doc)
except Exception, e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Whoosh: %s", e)
if len(iterable) > 0:
# For now, commit no matter what, as we run into locking issues otherwise.
writer.commit()
# If spelling support is desired, add to the dictionary.
if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False) is True:
sp = SpellChecker(self.storage)
sp.add_field(self.index, self.content_field_name)
def remove(self, obj_or_string, commit=True):
if not self.setup_complete:
self.setup()
self.index = self.index.refresh()
whoosh_id = get_identifier(obj_or_string)
try:
self.index.delete_by_query(q=self.parser.parse(u'%s:"%s"' % (ID, whoosh_id)))
except Exception, e:
if not self.silently_fail:
raise
self.log.error("Failed to remove document '%s' from Whoosh: %s", whoosh_id, e)
def clear(self, models=[], commit=True):
if not self.setup_complete:
self.setup()
self.index = self.index.refresh()
try:
if not models:
self.delete_index()
else:
models_to_delete = []
for model in models:
models_to_delete.append(u"%s:%s.%s" % (DJANGO_CT, model._meta.app_label, model._meta.module_name))
self.index.delete_by_query(q=self.parser.parse(u" OR ".join(models_to_delete)))
except Exception, e:
if not self.silently_fail:
raise
self.log.error("Failed to remove document '%s' from Whoosh: %s", whoosh_id, e)
def delete_index(self):
# Per the Whoosh mailing list, if wiping out everything from the index,
# it's much more efficient to simply delete the index files.
if self.use_file_storage and os.path.exists(settings.HAYSTACK_WHOOSH_PATH):
shutil.rmtree(settings.HAYSTACK_WHOOSH_PATH)
elif not self.use_file_storage:
self.storage.clean()
# Recreate everything.
self.setup()
def optimize(self):
if not self.setup_complete:
self.setup()
self.index = self.index.refresh()
self.index.optimize()
@log_query
def search(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None, date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
limit_to_registered_models=None, result_class=None, **kwargs):
if not self.setup_complete:
self.setup()
# A zero length query should return no results.
if len(query_string) == 0:
return {
'results': [],
'hits': 0,
}
query_string = force_unicode(query_string)
# A one-character query (non-wildcard) gets nabbed by a stopwords
# filter and should yield zero results.
if len(query_string) <= 1 and query_string != u'*':
return {
'results': [],
'hits': 0,
}
reverse = False
if sort_by is not None:
# Determine if we need to reverse the results and if Whoosh can
# handle what it's being asked to sort by. Reversing is an
# all-or-nothing action, unfortunately.
sort_by_list = []
reverse_counter = 0
for order_by in sort_by:
if order_by.startswith('-'):
reverse_counter += 1
if len(sort_by) > 1 and reverse_counter > 1:
raise SearchBackendError("Whoosh does not handle more than one field and any field being ordered in reverse.")
for order_by in sort_by:
if order_by.startswith('-'):
sort_by_list.append(order_by[1:])
if len(sort_by_list) == 1:
reverse = True
else:
sort_by_list.append(order_by)
if len(sort_by_list) == 1:
reverse = False
sort_by = sort_by_list[0]
if facets is not None:
warnings.warn("Whoosh does not handle faceting.", Warning, stacklevel=2)
if date_facets is not None:
warnings.warn("Whoosh does not handle date faceting.", Warning, stacklevel=2)
if query_facets is not None:
warnings.warn("Whoosh does not handle query faceting.", Warning, stacklevel=2)
narrowed_results = None
self.index = self.index.refresh()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if limit_to_registered_models:
# Using narrow queries, limit the results to only models registered
# with the current site.
if narrow_queries is None:
narrow_queries = set()
registered_models = self.build_registered_models_list()
if len(registered_models) > 0:
narrow_queries.add(' OR '.join(['%s:%s' % (DJANGO_CT, rm) for rm in registered_models]))
narrow_searcher = None
if narrow_queries is not None:
# Potentially expensive? I don't see another way to do it in Whoosh...
narrow_searcher = self.index.searcher()
for nq in narrow_queries:
recent_narrowed_results = narrow_searcher.search(self.parser.parse(force_unicode(nq)))
if narrowed_results:
narrowed_results.filter(recent_narrowed_results)
else:
narrowed_results = recent_narrowed_results
self.index = self.index.refresh()
if self.index.doc_count():
searcher = self.index.searcher()
parsed_query = self.parser.parse(query_string)
# In the event of an invalid/stopworded query, recover gracefully.
if parsed_query is None:
return {
'results': [],
'hits': 0,
}
# Prevent against Whoosh throwing an error. Requires an end_offset
# greater than 0.
if not end_offset is None and end_offset <= 0:
end_offset = 1
raw_results = searcher.search(parsed_query, limit=end_offset, sortedby=sort_by, reverse=reverse)
# Handle the case where the results have been narrowed.
if narrowed_results:
raw_results.filter(narrowed_results)
# Determine the page.
page_num = 0
if end_offset is None:
end_offset = 1000000
if start_offset is None:
start_offset = 0
page_length = end_offset - start_offset
if page_length and page_length > 0:
page_num = start_offset / page_length
# Increment because Whoosh uses 1-based page numbers.
page_num += 1
try:
raw_page = ResultsPage(raw_results, page_num, page_length)
except ValueError:
if not self.silently_fail:
raise
return {
'results': [],
'hits': 0,
'spelling_suggestion': None,
}
results = self._process_results(raw_page, highlight=highlight, query_string=query_string, spelling_query=spelling_query, result_class=result_class)
searcher.close()
if hasattr(narrow_searcher, 'close'):
narrow_searcher.close()
return results
else:
if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False):
if spelling_query:
spelling_suggestion = self.create_spelling_suggestion(spelling_query)
else:
spelling_suggestion = self.create_spelling_suggestion(query_string)
else:
spelling_suggestion = None
return {
'results': [],
'hits': 0,
'spelling_suggestion': spelling_suggestion,
}
def more_like_this(self, model_instance, additional_query_string=None,
start_offset=0, end_offset=None,
limit_to_registered_models=None, result_class=None, **kwargs):
if not self.setup_complete:
self.setup()
# Handle deferred models.
if get_proxied_model and hasattr(model_instance, '_deferred') and model_instance._deferred:
model_klass = get_proxied_model(model_instance._meta)
else:
model_klass = type(model_instance)
index = self.site.get_index(model_klass)
field_name = index.get_content_field()
narrow_queries = set()
narrowed_results = None
self.index = self.index.refresh()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if limit_to_registered_models:
# Using narrow queries, limit the results to only models registered
# with the current site.
if narrow_queries is None:
narrow_queries = set()
registered_models = self.build_registered_models_list()
if len(registered_models) > 0:
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(registered_models)))
if additional_query_string:
narrow_queries.add(additional_query_string)
narrow_searcher = None
if narrow_queries is not None:
# Potentially expensive? I don't see another way to do it in Whoosh...
narrow_searcher = self.index.searcher()
for nq in narrow_queries:
recent_narrowed_results = narrow_searcher.search(self.parser.parse(force_unicode(nq)))
if narrowed_results:
narrowed_results.filter(recent_narrowed_results)
else:
narrowed_results = recent_narrowed_results
# Prevent against Whoosh throwing an error. Requires an end_offset
# greater than 0.
if not end_offset is None and end_offset <= 0:
end_offset = 1
# Determine the page.
page_num = 0
if end_offset is None:
end_offset = 1000000
if start_offset is None:
start_offset = 0
page_length = end_offset - start_offset
if page_length and page_length > 0:
page_num = start_offset / page_length
# Increment because Whoosh uses 1-based page numbers.
page_num += 1
self.index = self.index.refresh()
raw_results = EmptyResults()
if self.index.doc_count():
query = "%s:%s" % (ID, get_identifier(model_instance))
searcher = self.index.searcher()
parsed_query = self.parser.parse(query)
results = searcher.search(parsed_query)
if len(results):
raw_results = results[0].more_like_this(field_name, top=end_offset)
# Handle the case where the results have been narrowed.
if narrowed_results and hasattr(raw_results, 'filter'):
raw_results.filter(narrowed_results)
try:
raw_page = ResultsPage(raw_results, page_num, page_length)
except ValueError:
if not self.silently_fail:
raise
return {
'results': [],
'hits': 0,
'spelling_suggestion': None,
}
results = self._process_results(raw_page, result_class=result_class)
searcher.close()
if hasattr(narrow_searcher, 'close'):
narrow_searcher.close()
return results
def _process_results(self, raw_page, highlight=False, query_string='', spelling_query=None, result_class=None):
if not self.site:
from haystack import site
else:
site = self.site
results = []
# It's important to grab the hits first before slicing. Otherwise, this
# can cause pagination failures.
hits = len(raw_page)
if result_class is None:
result_class = SearchResult
facets = {}
spelling_suggestion = None
indexed_models = site.get_indexed_models()
for doc_offset, raw_result in enumerate(raw_page):
score = raw_page.score(doc_offset) or 0
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = get_model(app_label, model_name)
if model and model in indexed_models:
for key, value in raw_result.items():
index = site.get_index(model)
string_key = str(key)
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
# Special-cased due to the nature of KEYWORD fields.
if index.fields[string_key].is_multivalued:
if value is None or len(value) == 0:
additional_fields[string_key] = []
else:
additional_fields[string_key] = value.split(',')
else:
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = self._to_python(value)
del additional_fields[DJANGO_CT]
del additional_fields[DJANGO_ID]
if highlight:
from whoosh import analysis
from whoosh.highlight import highlight, ContextFragmenter, UppercaseFormatter
sa = analysis.StemmingAnalyzer()
terms = [term.replace('*', '') for term in query_string.split()]
additional_fields['highlighted'] = {
self.content_field_name: [highlight(additional_fields.get(self.content_field_name), terms, sa, ContextFragmenter(terms), UppercaseFormatter())],
}
result = result_class(app_label, model_name, raw_result[DJANGO_ID], score, searchsite=self.site, **additional_fields)
results.append(result)
else:
hits -= 1
if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False):
if spelling_query:
spelling_suggestion = self.create_spelling_suggestion(spelling_query)
else:
spelling_suggestion = self.create_spelling_suggestion(query_string)
return {
'results': results,
'hits': hits,
'facets': facets,
'spelling_suggestion': spelling_suggestion,
}
def create_spelling_suggestion(self, query_string):
spelling_suggestion = None
sp = SpellChecker(self.storage)
cleaned_query = force_unicode(query_string)
if not query_string:
return spelling_suggestion
# Clean the string.
for rev_word in self.RESERVED_WORDS:
cleaned_query = cleaned_query.replace(rev_word, '')
for rev_char in self.RESERVED_CHARACTERS:
cleaned_query = cleaned_query.replace(rev_char, '')
# Break it down.
query_words = cleaned_query.split()
suggested_words = []
for word in query_words:
suggestions = sp.suggest(word, number=1)
if len(suggestions) > 0:
suggested_words.append(suggestions[0])
spelling_suggestion = ' '.join(suggested_words)
return spelling_suggestion
def _from_python(self, value):
"""
Converts Python values to a string for Whoosh.
Code courtesy of pysolr.
"""
if hasattr(value, 'strftime'):
if not hasattr(value, 'hour'):
value = datetime(value.year, value.month, value.day, 0, 0, 0)
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
elif isinstance(value, (list, tuple)):
value = u','.join([force_unicode(v) for v in value])
elif isinstance(value, (int, long, float)):
# Leave it alone.
pass
else:
value = force_unicode(value)
return value
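# Illustrative conversions (a sketch; exact unicode handling depends on
# force_unicode):
#   _from_python(datetime.date(2010, 1, 31)) -> datetime(2010, 1, 31, 0, 0, 0)
#   _from_python(True)                       -> 'true'
#   _from_python(['red', 'blue'])            -> u'red,blue'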
def _to_python(self, value):
"""
Converts values from Whoosh to native Python values.
A port of the same method in pysolr, as they deal with data the same way.
"""
if value == 'true':
return True
elif value == 'false':
return False
if value and isinstance(value, basestring):
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime(date_values['year'], date_values['month'], date_values['day'], date_values['hour'], date_values['minute'], date_values['second'])
try:
# Attempt to use json to load the values.
converted_value = json.loads(value)
# Try to handle most built-in types.
if isinstance(converted_value, (list, tuple, set, dict, int, float, long, complex)):
return converted_value
except:
# If it fails (SyntaxError or its ilk) or we don't trust it,
# continue on.
pass
return value
class SearchQuery(BaseSearchQuery):
def __init__(self, site=None, backend=None):
super(SearchQuery, self).__init__(site, backend)
if backend is not None:
self.backend = backend
else:
self.backend = SearchBackend(site=site)
def _convert_datetime(self, date):
if hasattr(date, 'hour'):
return force_unicode(date.strftime('%Y%m%d%H%M%S'))
else:
return force_unicode(date.strftime('%Y%m%d000000'))
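# For example, _convert_datetime(datetime(2010, 1, 31, 12, 30, 5)) yields
# u'20100131123005', while a plain date yields u'20100131000000'.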
def clean(self, query_fragment):
"""
Provides a mechanism for sanitizing user input before presenting the
value to the backend.
Whoosh 1.X differs here in that you can no longer use a backslash
to escape reserved characters. Instead, the whole word should be
quoted.
"""
words = query_fragment.split()
cleaned_words = []
for word in words:
if word in self.backend.RESERVED_WORDS:
word = word.lower()
for char in self.backend.RESERVED_CHARACTERS:
if char in word:
word = "'%s'" % word
break
cleaned_words.append(word)
return ' '.join(cleaned_words)
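# Illustrative example (assuming 'AND' is in RESERVED_WORDS and '*' is in
# RESERVED_CHARACTERS): clean("title AND cat*") -> "title and 'cat*'".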
def build_query_fragment(self, field, filter_type, value):
result = ''
is_datetime = False
# Handle when we've got a ``ValuesListQuerySet``...
if hasattr(value, 'values_list'):
value = list(value)
if hasattr(value, 'strftime'):
is_datetime = True
if filter_type not in ('in', 'range'):
# 'in' and 'range' are special cases, as we don't want to
# convert a valid list/tuple to a string. Defer handling them
# until later...
value = self.backend._from_python(value)
# Check to see if it's a phrase for an exact match.
if isinstance(value, basestring) and ' ' in value:
value = '"%s"' % value
index_fieldname = self.backend.site.get_index_fieldname(field)
# 'content' is a special reserved word, much like 'pk' in
# Django's ORM layer. It indicates 'no special field'.
if field == 'content':
result = value
else:
filter_types = {
'exact': "%s:%s",
'gt': "%s:{%s to}",
'gte': "%s:[%s to]",
'lt': "%s:{to %s}",
'lte': "%s:[to %s]",
'startswith': "%s:%s*",
}
if filter_type == 'in':
in_options = []
for possible_value in value:
is_datetime = False
if hasattr(possible_value, 'strftime'):
is_datetime = True
pv = self.backend._from_python(possible_value)
if is_datetime is True:
pv = self._convert_datetime(pv)
in_options.append('%s:"%s"' % (index_fieldname, pv))
result = "(%s)" % " OR ".join(in_options)
elif filter_type == 'range':
start = self.backend._from_python(value[0])
end = self.backend._from_python(value[1])
if hasattr(value[0], 'strftime'):
start = self._convert_datetime(start)
if hasattr(value[1], 'strftime'):
end = self._convert_datetime(end)
return "%s:[%s to %s]" % (index_fieldname, start, end)
else:
if is_datetime is True:
value = self._convert_datetime(value)
result = filter_types[filter_type] % (index_fieldname, value)
return result
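# Illustrative fragments (hypothetical field names; assumes the index
# fieldname returned by the site matches the field name):
#   build_query_fragment('title', 'startswith', 'egg')   -> "title:egg*"
#   build_query_fragment('pub_date', 'gte', datetime(2010, 1, 31))
#                                             -> "pub_date:[20100131000000 to]"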
|
bsd-3-clause
|
villeneuvelab/vlpp
|
vlpp/utils.py
|
1
|
2522
|
# -*- coding: utf-8 -*-
import os
import json
import shlex
import shutil
import subprocess
from jinja2 import Environment, FileSystemLoader
PKG_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TPL_PATH = os.path.join(PKG_PATH, "templates")
def load_json(filename):
with open(filename, 'r') as f:
data = json.load(f)
return data
def save_json(filename, data):
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
def add_suffix(path, suffix, ext=None):
root, _ext = splitext_(os.path.basename(path))
if not suffix.startswith("_"):
suffix = "_" + suffix
if ext is not None:
return root + suffix + ext
else:
return root + suffix + _ext
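# For example, add_suffix("/data/sub-01_pet.nii.gz", "smooth") returns
# "sub-01_pet_smooth.nii.gz" (the directory part is dropped).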
def splitext_(path):
for ext in ['.nii.gz']:
if path.endswith(ext):
return path[:-len(ext)], path[-len(ext):]
return os.path.splitext(path)
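# Unlike os.path.splitext, this keeps the double extension together:
# splitext_("scan.nii.gz") -> ("scan", ".nii.gz"), not ("scan.nii", ".gz").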
def gzipd(path, copy=False):
root, ext = splitext_(path)
source = path
if os.path.islink(path) or copy:
source = root + "_copy" + ext
shutil.copy(path, source)
run_shell("gzip -d {0}".format(source))
return source.replace(".gz", "")
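# For example, gzipd("scan.nii.gz", copy=True) decompresses a copy and
# returns "scan_copy.nii"; with copy=False on a regular file the original
# is decompressed in place and "scan.nii" is returned.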
def run_shell(commandLine):
cmd = shlex.split(commandLine)
output = None  # Ensure a defined return value if Popen raises OSError.
try:
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = process.communicate()
#try:
#logger.info("\n" + output.decode("utf-8"))
#except:
#logger.info(output)
except OSError as exception:
print("Exception: {}".format(exception))
return output
def get_jinja_tpl(templatePath):
path, templateName = os.path.split(templatePath)
return Environment(
loader=FileSystemLoader(path or "./"),
trim_blocks=True,
).get_template(templateName)
def run_matlab(templatePath, tags, filename):
get_jinja_tpl(templatePath).stream(**tags).dump(filename)
print(run_shell("matlab -nodisplay < {0}".format(filename)))
def nfmap2dict(mapStr):
rsl = {}
for item in mapStr[1:-1].replace(" ", "").split(","):
it = item.split(":")
rsl[it[0]] = it[1]
return rsl
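# Parses a map string of the form "[key:value, key:value]" (presumably a
# Nextflow map), e.g. nfmap2dict("[a:1, b:2]") -> {"a": "1", "b": "2"};
# values stay strings.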
def filename2dict(filename):
rsl = {}
base, _ = splitext_(filename)
for item in base.split("_"):
it = item.split("-")
try:
rsl[it[0]] = it[1]
except IndexError:
rsl[it[0]] = None
return rsl
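# For a BIDS-like name, filename2dict("sub-01_ses-02_pet.nii.gz") returns
# {"sub": "01", "ses": "02", "pet": None}; entries without a "-" map to None.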
def warn(messages):
for line in messages:
print(line)
|
mit
|
elingg/tensorflow
|
tensorflow/python/saved_model/signature_def_utils_impl.py
|
58
|
5229
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import utils
def build_signature_def(inputs=None, outputs=None, method_name=None):
"""Utility function to build a SignatureDef protocol buffer.
Args:
inputs: Inputs of the SignatureDef defined as a proto map of string to
tensor info.
outputs: Outputs of the SignatureDef defined as a proto map of string to
tensor info.
method_name: Method name of the SignatureDef as a string.
Returns:
A SignatureDef protocol buffer constructed based on the supplied arguments.
"""
signature_def = meta_graph_pb2.SignatureDef()
if inputs is not None:
for item in inputs:
signature_def.inputs[item].CopyFrom(inputs[item])
if outputs is not None:
for item in outputs:
signature_def.outputs[item].CopyFrom(outputs[item])
if method_name is not None:
signature_def.method_name = method_name
return signature_def
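# Illustrative usage (a sketch; assumes `x_info` and `y_info` are TensorInfo
# protos created with utils.build_tensor_info):
#   sig = build_signature_def(
#       inputs={'x': x_info}, outputs={'y': y_info},
#       method_name=signature_constants.PREDICT_METHOD_NAME)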
def regression_signature_def(examples, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
predictions: `Tensor`.
Returns:
A regression-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None for regression.')
if predictions is None:
raise ValueError('predictions cannot be None for regression.')
input_tensor_info = utils.build_tensor_info(examples)
signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}
output_tensor_info = utils.build_tensor_info(predictions)
signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.REGRESS_METHOD_NAME)
return signature_def
def classification_signature_def(examples, classes, scores):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
classes: `Tensor`.
scores: `Tensor`.
Returns:
A classification-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None for classification.')
if classes is None and scores is None:
raise ValueError('classes and scores cannot both be None for '
'classification.')
input_tensor_info = utils.build_tensor_info(examples)
signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}
signature_outputs = {}
if classes is not None:
classes_tensor_info = utils.build_tensor_info(classes)
signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = (
classes_tensor_info)
if scores is not None:
scores_tensor_info = utils.build_tensor_info(scores)
signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = (
scores_tensor_info)
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.CLASSIFY_METHOD_NAME)
return signature_def
def predict_signature_def(inputs, outputs):
"""Creates prediction signature from given inputs and outputs.
Args:
inputs: dict of string to `Tensor`.
outputs: dict of string to `Tensor`.
Returns:
A prediction-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('inputs cannot be None or empty for prediction.')
if outputs is None or not outputs:
raise ValueError('outputs cannot be None or empty for prediction.')
# If there's only one input or output, we can standardize keys
if len(inputs) == 1:
(_, value), = inputs.items()
inputs = {signature_constants.PREDICT_INPUTS: value}
if len(outputs) == 1:
(_, value), = outputs.items()
outputs = {signature_constants.PREDICT_OUTPUTS: value}
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {key: utils.build_tensor_info(tensor)
for key, tensor in outputs.items()}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
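# Illustrative usage (tensor names are hypothetical):
#   sig = predict_signature_def(inputs={'images': images},
#                               outputs={'scores': scores})
# With exactly one input or output, the key is rewritten to the canonical
# signature_constants.PREDICT_INPUTS / PREDICT_OUTPUTS value.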
|
apache-2.0
|