repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
rrooij/youtube-dl | youtube_dl/extractor/mtv.py | 18 | 18984 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_xpath,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
HEADRequest,
RegexNotFoundError,
sanitized_Request,
strip_or_none,
timeconvert,
try_get,
unescapeHTML,
update_url_query,
url_basename,
xpath_text,
)
def _media_xml_tag(tag):
return '{http://search.yahoo.com/mrss/}%s' % tag
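# For example, _media_xml_tag('content') returns
# '{http://search.yahoo.com/mrss/}content', i.e. the tag name qualified with
# the MRSS namespace used throughout the feed documents.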
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
_LANG = None
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
@staticmethod
def _remove_template_parameter(url):
# Remove the templates, like &device={device}
return re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', url)
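# Illustrative example (hypothetical URL): for
# 'http://example.com/feed?uri=mgid:x&device={device}&other=1'
# the substitution above yields
# 'http://example.com/feed?uri=mgid:x&other=1'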
def _get_feed_url(self, uri):
return self._FEED_URL
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
if thumb_node is None:
return None
return thumb_node.get('url') or thumb_node.text or None
def _extract_mobile_video_formats(self, mtvn_id):
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
req = sanitized_Request(webpage_url)
# Otherwise we get a webpage that would execute some javascript
req.add_header('User-Agent', 'curl/7')
webpage = self._download_webpage(req, mtvn_id,
'Downloading mobile page')
metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
req = HEADRequest(metrics_url)
response = self._request_webpage(req, mtvn_id, 'Resolving url')
url = response.geturl()
# Transform the url to get the best quality:
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id, video_id):
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
return self._extract_mobile_video_formats(mtvn_id)
raise ExtractorError('This video is not available from your country.',
expected=True)
formats = []
for rendition in mdoc.findall('.//rendition'):
if rendition.get('method') == 'hls':
hls_url = rendition.find('./src').text
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
# fms
try:
_, _, ext = rendition.attrib['type'].partition('/')
rtmp_video_url = rendition.find('./src').text
if 'error_not_available.swf' in rtmp_video_url:
raise ExtractorError(
'%s said: video is not available' % self.IE_NAME,
expected=True)
if rtmp_video_url.endswith('siteunavail.png'):
continue
formats.extend([{
'ext': 'flv' if rtmp_video_url.startswith('rtmp') else ext,
'url': rtmp_video_url,
'format_id': '-'.join(filter(None, [
'rtmp' if rtmp_video_url.startswith('rtmp') else None,
rendition.get('bitrate')])),
'width': int(rendition.get('width')),
'height': int(rendition.get('height')),
}])
except (KeyError, TypeError):
raise ExtractorError('Invalid rendition field.')
if formats:
self._sort_formats(formats)
return formats
def _extract_subtitles(self, mdoc, mtvn_id):
subtitles = {}
for transcript in mdoc.findall('.//transcript'):
if transcript.get('kind') != 'captions':
continue
lang = transcript.get('srclang')
for typographic in transcript.findall('./typographic'):
sub_src = typographic.get('src')
if not sub_src:
continue
ext = typographic.get('format')
if ext == 'cea-608':
ext = 'scc'
subtitles.setdefault(lang, []).append({
'url': compat_str(sub_src),
'ext': ext
})
return subtitles
def _get_video_info(self, itemdoc, use_hls=True):
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
content_el = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content')))
mediagen_url = self._remove_template_parameter(content_el.attrib['url'])
mediagen_url = mediagen_url.replace('device={device}', '')
if 'acceptMethods' not in mediagen_url:
mediagen_url += '&' if '?' in mediagen_url else '?'
mediagen_url += 'acceptMethods='
mediagen_url += 'hls' if use_hls else 'fms'
mediagen_doc = self._download_xml(
mediagen_url, video_id, 'Downloading video urls', fatal=False)
if mediagen_doc is False:
return None
item = mediagen_doc.find('./video/item')
if item is not None and item.get('type') == 'text':
message = '%s returned error: ' % self.IE_NAME
if item.get('code') is not None:
message += '%s - ' % item.get('code')
message += item.text
raise ExtractorError(message, expected=True)
description = strip_or_none(xpath_text(itemdoc, 'description'))
timestamp = timeconvert(xpath_text(itemdoc, 'pubDate'))
title_el = None
if title_el is None:
title_el = find_xpath_attr(
itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:video_title')
if title_el is None:
title_el = itemdoc.find(compat_xpath('.//{http://search.yahoo.com/mrss/}title'))
if title_el is None:
title_el = itemdoc.find(compat_xpath('.//title'))
if title_el.text is None:
title_el = None
title = title_el.text
if title is None:
raise ExtractorError('Could not find video title')
title = title.strip()
# This is a short id that's used in the webpage urls
mtvn_id = None
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:id')
if mtvn_id_node is not None:
mtvn_id = mtvn_id_node.text
formats = self._extract_video_formats(mediagen_doc, mtvn_id, video_id)
# Some parts of the complete video may be missing (e.g. missing Act 3 in
# http://www.southpark.de/alle-episoden/s14e01-sexual-healing)
if not formats:
return None
self._sort_formats(formats)
return {
'title': title,
'formats': formats,
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
'duration': float_or_none(content_el.attrib.get('duration')),
'timestamp': timestamp,
}
def _get_feed_query(self, uri):
data = {'uri': uri}
if self._LANG:
data['lang'] = self._LANG
return data
def _get_videos_info(self, uri, use_hls=True):
video_id = self._id_from_uri(uri)
feed_url = self._get_feed_url(uri)
info_url = update_url_query(feed_url, self._get_feed_query(uri))
return self._get_videos_info_from_url(info_url, video_id, use_hls)
def _get_videos_info_from_url(self, url, video_id, use_hls=True):
idoc = self._download_xml(
url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
title = xpath_text(idoc, './channel/title')
description = xpath_text(idoc, './channel/description')
entries = []
for item in idoc.findall('.//item'):
info = self._get_video_info(item, use_hls)
if info:
entries.append(info)
return self.playlist_result(
entries, playlist_title=title, playlist_description=description)
def _extract_triforce_mgid(self, webpage, data_zone=None, video_id=None):
triforce_feed = self._parse_json(self._search_regex(
r'triforceManifestFeed\s*=\s*({.+?})\s*;\s*\n', webpage,
'triforce feed', default='{}'), video_id, fatal=False)
data_zone = self._search_regex(
r'data-zone=(["\'])(?P<zone>.+?_lc_promo.*?)\1', webpage,
'data zone', default=data_zone, group='zone')
feed_url = try_get(
triforce_feed, lambda x: x['manifest']['zones'][data_zone]['feed'],
compat_str)
if not feed_url:
return
feed = self._download_json(feed_url, video_id, fatal=False)
if not feed:
return
return try_get(feed, lambda x: x['result']['data']['id'], compat_str)
def _extract_mgid(self, webpage):
try:
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
# or http://media.mtvnservices.com/{mgid}
og_url = self._og_search_video_url(webpage)
mgid = url_basename(og_url)
if mgid.endswith('.swf'):
mgid = mgid[:-4]
except RegexNotFoundError:
mgid = None
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject\.embedSWF\(".*?(mgid:.*?)"'],
webpage, 'mgid', default=None)
if not mgid:
sm4_embed = self._html_search_meta(
'sm4:video:embed', webpage, 'sm4 embed', default='')
mgid = self._search_regex(
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid', default=None)
if not mgid:
mgid = self._extract_triforce_mgid(webpage)
return mgid
def _real_extract(self, url):
title = url_basename(url)
webpage = self._download_webpage(url, title)
mgid = self._extract_mgid(webpage)
videos_info = self._get_videos_info(mgid)
return videos_info
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
IE_NAME = 'mtvservices:embedded'
_VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
_TEST = {
# From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
'info_dict': {
'id': '1043906',
'ext': 'mp4',
'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
'timestamp': 1400126400,
'upload_date': '20140515',
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
if mobj:
return mobj.group('url')
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
config = self._download_json(
'http://media.mtvnservices.com/pmt/e1/access/index.html?uri=%s&configtype=edge' % uri, video_id)
return self._remove_template_parameter(config['feedWithQueryParams'])
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mgid = mobj.group('mgid')
return self._get_videos_info(mgid)
class MTVIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv'
_VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.mtv.com/feeds/mrss/'
_TESTS = [{
'url': 'http://www.mtv.com/video-clips/vl8qof/unlocking-the-truth-trailer',
'md5': '1edbcdf1e7628e414a8c5dcebca3d32b',
'info_dict': {
'id': '5e14040d-18a4-47c4-a582-43ff602de88e',
'ext': 'mp4',
'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer',
'description': '"Unlocking the Truth" premieres August 17th at 11/10c.',
'timestamp': 1468846800,
'upload_date': '20160718',
},
}, {
'url': 'http://www.mtv.com/full-episodes/94tujl/unlocking-the-truth-gates-of-hell-season-1-ep-101',
'only_matching': True,
}, {
'url': 'http://www.mtv.com/episodes/g8xu7q/teen-mom-2-breaking-the-wall-season-7-ep-713',
'only_matching': True,
}]
class MTV81IE(InfoExtractor):
IE_NAME = 'mtv81'
_VALID_URL = r'https?://(?:www\.)?mtv81\.com/videos/(?P<id>[^/?#.]+)'
_TEST = {
'url': 'http://www.mtv81.com/videos/artist-to-watch/the-godfather-of-japanese-hip-hop-segment-1/',
'md5': '1edbcdf1e7628e414a8c5dcebca3d32b',
'info_dict': {
'id': '5e14040d-18a4-47c4-a582-43ff602de88e',
'ext': 'mp4',
'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer',
'description': '"Unlocking the Truth" premieres August 17th at 11/10c.',
'timestamp': 1468846800,
'upload_date': '20160718',
},
}
def _extract_mgid(self, webpage):
return self._search_regex(
r'getTheVideo\((["\'])(?P<id>mgid:.+?)\1', webpage,
'mgid', group='id')
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
mgid = self._extract_mgid(webpage)
return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid)
class MTVVideoIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv:video'
_VALID_URL = r'''(?x)^https?://
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''
_FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'
_TESTS = [
{
'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
'info_dict': {
'id': '853555',
'ext': 'mp4',
'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
'timestamp': 1352610000,
'upload_date': '20121111',
},
},
]
def _get_thumbnail_url(self, uri, itemdoc):
return 'http://mtv.mtvnimages.com/uri/' + uri
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
uri = mobj.groupdict().get('mgid')
if uri is None:
webpage = self._download_webpage(url, video_id)
# Some videos come from Vevo.com
m_vevo = re.search(
r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
if m_vevo:
vevo_id = m_vevo.group(1)
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
return self._get_videos_info(uri)
class MTVDEIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv.de'
_VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
'info_dict': {
'id': 'music_video-a50bc5f0b3aa4b3190aa',
'ext': 'flv',
'title': 'MusicVideo_cro-traum',
'description': 'Cro - Traum',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Blocked at Travis CI',
}, {
# mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
'info_dict': {
'id': 'local_playlist-f5ae778b9832cc837189',
'ext': 'flv',
'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Blocked at Travis CI',
}, {
'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
'info_dict': {
'id': 'local_playlist-4e760566473c4c8c5344',
'ext': 'mp4',
'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
'description': 'MTV Movies Supercut',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Das Video kann zur Zeit nicht abgespielt werden.',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
playlist = self._parse_json(
self._search_regex(
r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
video_id)
def _mrss_url(item):
return item['mrss'] + item.get('mrssvars', '')
# news pages contain single video in playlist with different id
if len(playlist) == 1:
return self._get_videos_info_from_url(_mrss_url(playlist[0]), video_id)
for item in playlist:
item_id = item.get('id')
if item_id and compat_str(item_id) == video_id:
return self._get_videos_info_from_url(_mrss_url(item), video_id)
| unlicense |
gangadharkadam/saloon_frappe_install | frappe/model/naming.py | 17 | 5881 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
def set_new_name(doc):
"""Sets the `name`` property for the document based on various rules.
1. If amened doc, set suffix.
3. If `autoname` method is declared, then call it.
4. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
2. If `name` is already defined, use that name
5. If no rule defined, use hash.
#### Note:
:param doc: Document to be named."""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif hasattr(doc, "autoname"):
doc.run_method("autoname")
elif autoname:
if autoname.startswith('field:'):
fieldname = autoname[6:]
doc.name = (doc.get(fieldname) or "").strip()
if not doc.name:
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
raise Exception, 'Name is required'
if autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif "#" in autoname:
doc.name = make_autoname(autoname)
elif autoname=='Prompt':
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via Prompt"))
if not doc.name:
doc.name = make_autoname('hash', doc.doctype)
doc.name = validate_name(doc.doctype, doc.name)
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+'.#####')
def make_autoname(key, doctype=''):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key=="hash":
return frappe.generate_hash(doctype)[:10]
if not "#" in key:
key = key + ".#####"
elif not "." in key:
frappe.throw(_("Invalid naming series (. missing)") + (_(" for {0}").format(doctype) if doctype else ""))
n = ''
l = key.split('.')
series_set = False
today = now_datetime()
for e in l:
en = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
en = getseries(n, digits, doctype)
series_set = True
elif e=='YY':
en = today.strftime('%y')
elif e=='MM':
en = today.strftime('%m')
elif e=='DD':
en = today.strftime("%d")
elif e=='YYYY':
en = today.strftime('%Y')
else: en = e
n+=en
return n
def getseries(key, digits, doctype=''):
# series created ?
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name: return 'No Name Specified for %s' % doctype
if name.startswith('New '+doctype):
frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
if case=='Title Case': name = name.title()
if case=='UPPER CASE': name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name!="DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
return name
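# Illustrative behaviour of _set_amended_name (hypothetical document names):
# if doc.amended_from is 'INV-0001' and that document was not itself amended,
# the new name becomes 'INV-0001-1'; if doc.amended_from is 'INV-0001-1'
# (itself an amendment), the trailing counter is bumped and the name becomes
# 'INV-0001-2'.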
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(doc.amended_from.split('-')[:-1]) # except the last hyphen
doc.name = am_prefix + '-' + str(am_id)
return doc.name
def append_number_if_name_exists(doc):
if frappe.db.exists(doc.doctype, doc.name):
last = frappe.db.sql("""select name from `tab{}`
where name regexp '{}-[[:digit:]]+'
order by length(name) desc, name desc limit 1""".format(doc.doctype, doc.name))
if last:
count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
else:
count = "1"
doc.name = "{0}-{1}".format(doc.name, count)
def de_duplicate(doctype, name):
original_name = name
count = 0
while True:
if frappe.db.exists(doctype, name):
count += 1
name = "{0}-{1}".format(original_name, count)
else:
break
return name
| mit |
alshedivat/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_and_repeat_dataset_serialization_test.py | 21 | 1562 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShuffleAndRepeatDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ShuffleAndRepeatSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_ds(self, seed):
return dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=5, seed=seed))
def testCore(self):
self.run_core_tests(lambda: self._build_ds(10), lambda: self._build_ds(20),
100)
if __name__ == "__main__":
test.main()
| apache-2.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/past/translation/__init__.py | 61 | 18459 | # -*- coding: utf-8 -*-
"""
past.translation
==================
The ``past.translation`` package provides an import hook for Python 3 which
transparently runs ``futurize`` fixers over Python 2 code on import to convert
print statements into functions, etc.
It is intended to assist users in migrating to Python 3.x even if some
dependencies still only support Python 2.x.
Usage
-----
Once your Py2 package is installed in the usual module search path, the import
hook is invoked as follows:
>>> from past import autotranslate
>>> autotranslate('mypackagename')
Or:
>>> autotranslate(['mypackage1', 'mypackage2'])
You can unregister the hook using::
>>> from past.translation import remove_hooks
>>> remove_hooks()
Author: Ed Schofield.
Inspired by and based on ``uprefix`` by Vinay M. Sajip.
"""
import imp
import logging
import marshal
import os
import sys
import copy
from lib2to3.pgen2.parse import ParseError
from lib2to3.refactor import RefactoringTool
from libfuturize import fixes
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
myfixes = (list(fixes.libfuturize_fix_names_stage1) +
list(fixes.lib2to3_fix_names_stage1) +
list(fixes.libfuturize_fix_names_stage2) +
list(fixes.lib2to3_fix_names_stage2))
# We detect whether the code is Py2 or Py3 by applying certain lib2to3 fixers
# to it. If the diff is empty, it's Python 3 code.
py2_detect_fixers = [
# From stage 1:
'lib2to3.fixes.fix_apply',
# 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2
'lib2to3.fixes.fix_except',
'lib2to3.fixes.fix_execfile',
'lib2to3.fixes.fix_exitfunc',
'lib2to3.fixes.fix_funcattrs',
'lib2to3.fixes.fix_filter',
'lib2to3.fixes.fix_has_key',
'lib2to3.fixes.fix_idioms',
'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import)
'lib2to3.fixes.fix_intern',
'lib2to3.fixes.fix_isinstance',
'lib2to3.fixes.fix_methodattrs',
'lib2to3.fixes.fix_ne',
'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755
'lib2to3.fixes.fix_paren',
'lib2to3.fixes.fix_print',
'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions
'lib2to3.fixes.fix_renames',
'lib2to3.fixes.fix_reduce',
# 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support
'lib2to3.fixes.fix_repr',
'lib2to3.fixes.fix_standarderror',
'lib2to3.fixes.fix_sys_exc',
'lib2to3.fixes.fix_throw',
'lib2to3.fixes.fix_tuple_params',
'lib2to3.fixes.fix_types',
'lib2to3.fixes.fix_ws_comma',
'lib2to3.fixes.fix_xreadlines',
# From stage 2:
'lib2to3.fixes.fix_basestring',
# 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this.
# 'lib2to3.fixes.fix_callable', # not needed in Py3.2+
# 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc.
'lib2to3.fixes.fix_exec',
# 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports
'lib2to3.fixes.fix_getcwdu',
# 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library
# 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm)
# 'lib2to3.fixes.fix_input',
# 'lib2to3.fixes.fix_itertools',
# 'lib2to3.fixes.fix_itertools_imports',
'lib2to3.fixes.fix_long',
# 'lib2to3.fixes.fix_map',
# 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
'lib2to3.fixes.fix_next',
'lib2to3.fixes.fix_nonzero', # TODO: add a decorator for mapping __bool__ to __nonzero__
# 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
'lib2to3.fixes.fix_raw_input',
# 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
# 'lib2to3.fixes.fix_urllib',
'lib2to3.fixes.fix_xrange',
# 'lib2to3.fixes.fix_zip',
]
class RTs:
"""
A namespace for the refactoring tools. This avoids creating these at
the module level, which slows down the module import. (See issue #117).
There are two possible grammars: with or without the print statement.
Hence we have two possible refactoring tool implementations.
"""
_rt = None
_rtp = None
_rt_py2_detect = None
_rtp_py2_detect = None
@staticmethod
def setup():
"""
Call this before using the refactoring tools to create them on demand
if needed.
"""
if None in [RTs._rt, RTs._rtp]:
RTs._rt = RefactoringTool(myfixes)
RTs._rtp = RefactoringTool(myfixes, {'print_function': True})
@staticmethod
def setup_detect_python2():
"""
Call this before using the refactoring tools to create them on demand
if needed.
"""
if None in [RTs._rt_py2_detect, RTs._rtp_py2_detect]:
RTs._rt_py2_detect = RefactoringTool(py2_detect_fixers)
RTs._rtp_py2_detect = RefactoringTool(py2_detect_fixers,
{'print_function': True})
# We need to find a prefix for the standard library, as we don't want to
# process any files there (they will already be Python 3).
#
# The following method is used by Sanjay Vinip in uprefix. This fails for
# ``conda`` environments:
# # In a non-pythonv virtualenv, sys.real_prefix points to the installed Python.
# # In a pythonv venv, sys.base_prefix points to the installed Python.
# # Outside a virtual environment, sys.prefix points to the installed Python.
# if hasattr(sys, 'real_prefix'):
# _syslibprefix = sys.real_prefix
# else:
# _syslibprefix = getattr(sys, 'base_prefix', sys.prefix)
# Instead, we use the portion of the path common to both the stdlib modules
# ``math`` and ``urllib``.
def splitall(path):
"""
Split a path into all components. From Python Cookbook.
"""
allparts = []
while True:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def common_substring(s1, s2):
"""
Returns the longest common substring to the two strings, starting from the
left.
"""
chunks = []
path1 = splitall(s1)
path2 = splitall(s2)
for (dir1, dir2) in zip(path1, path2):
if dir1 != dir2:
break
chunks.append(dir1)
return os.path.join(*chunks)
# _stdlibprefix = common_substring(math.__file__, urllib.__file__)
def detect_python2(source, pathname):
"""
Returns a bool indicating whether we think the code is Py2
"""
RTs.setup_detect_python2()
try:
tree = RTs._rt_py2_detect.refactor_string(source, pathname)
except ParseError as e:
if e.msg != 'bad input' or e.value != '=':
raise
tree = RTs._rtp.refactor_string(source, pathname)
if source != str(tree)[:-1]: # remove added newline
# The above fixers made changes, so we conclude it's Python 2 code
logger.debug('Detected Python 2 code: {0}'.format(pathname))
with open('/tmp/original_code.py', 'w') as f:
f.write('### Original code (detected as py2): %s\n%s' %
(pathname, source))
with open('/tmp/py2_detection_code.py', 'w') as f:
f.write('### Code after running py3 detection (from %s)\n%s' %
(pathname, str(tree)[:-1]))
return True
else:
logger.debug('Detected Python 3 code: {0}'.format(pathname))
with open('/tmp/original_code.py', 'w') as f:
f.write('### Original code (detected as py3): %s\n%s' %
(pathname, source))
try:
os.remove('/tmp/futurize_code.py')
except OSError:
pass
return False
class Py2Fixer(object):
"""
An import hook class that uses lib2to3 for source-to-source translation of
Py2 code to Py3.
"""
# See the comments on :class:future.standard_library.RenameImport.
# We add this attribute here so remove_hooks() and install_hooks() can
# unambiguously detect whether the import hook is installed:
PY2FIXER = True
def __init__(self):
self.found = None
self.base_exclude_paths = ['future', 'past']
self.exclude_paths = copy.copy(self.base_exclude_paths)
self.include_paths = []
def include(self, paths):
"""
Pass in a sequence of module names such as 'plotrique.plotting' that,
if present at the leftmost side of the full package name, would
specify the module to be transformed from Py2 to Py3.
"""
self.include_paths += paths
def exclude(self, paths):
"""
Pass in a sequence of strings such as 'mymodule' that, if
present at the leftmost side of the full package name, would cause
the module not to undergo any source transformation.
"""
self.exclude_paths += paths
def find_module(self, fullname, path=None):
logger.debug('Running find_module: {0}...'.format(fullname))
if '.' in fullname:
parent, child = fullname.rsplit('.', 1)
if path is None:
loader = self.find_module(parent, path)
mod = loader.load_module(parent)
path = mod.__path__
fullname = child
# Perhaps we should try using the new importlib functionality in Python
# 3.3: something like this?
# thing = importlib.machinery.PathFinder.find_module(fullname, path)
try:
self.found = imp.find_module(fullname, path)
except Exception as e:
logger.debug('Py2Fixer could not find {0}'.format(fullname))
logger.debug('Exception was: {0}'.format(e))
return None
self.kind = self.found[-1][-1]
if self.kind == imp.PKG_DIRECTORY:
self.pathname = os.path.join(self.found[1], '__init__.py')
elif self.kind == imp.PY_SOURCE:
self.pathname = self.found[1]
return self
def transform(self, source):
# This implementation uses lib2to3,
# you can override and use something else
# if that's better for you
# lib2to3 likes a newline at the end
RTs.setup()
source += '\n'
try:
tree = RTs._rt.refactor_string(source, self.pathname)
except ParseError as e:
if e.msg != 'bad input' or e.value != '=':
raise
tree = RTs._rtp.refactor_string(source, self.pathname)
# could optimise a bit for only doing str(tree) if
# getattr(tree, 'was_changed', False) returns True
return str(tree)[:-1] # remove added newline
def load_module(self, fullname):
logger.debug('Running load_module for {0}...'.format(fullname))
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
if self.kind in (imp.PY_COMPILED, imp.C_EXTENSION, imp.C_BUILTIN,
imp.PY_FROZEN):
convert = False
# elif (self.pathname.startswith(_stdlibprefix)
# and 'site-packages' not in self.pathname):
# # We assume it's a stdlib package in this case. Is this too brittle?
# # Please file a bug report at https://github.com/PythonCharmers/python-future
# # if so.
# convert = False
# in theory, other paths could be configured to be excluded here too
elif any([fullname.startswith(path) for path in self.exclude_paths]):
convert = False
elif any([fullname.startswith(path) for path in self.include_paths]):
convert = True
else:
convert = False
if not convert:
logger.debug('Excluded {0} from translation'.format(fullname))
mod = imp.load_module(fullname, *self.found)
else:
logger.debug('Autoconverting {0} ...'.format(fullname))
mod = imp.new_module(fullname)
sys.modules[fullname] = mod
# required by PEP 302
mod.__file__ = self.pathname
mod.__name__ = fullname
mod.__loader__ = self
# This:
# mod.__package__ = '.'.join(fullname.split('.')[:-1])
# seems to result in "SystemError: Parent module '' not loaded,
# cannot perform relative import" for a package's __init__.py
# file. We use the approach below. Another option to try is the
# minimal load_module pattern from the PEP 302 text instead.
# Is the test in the next line more or less robust than the
# following one? Presumably less ...
# ispkg = self.pathname.endswith('__init__.py')
if self.kind == imp.PKG_DIRECTORY:
mod.__path__ = [ os.path.dirname(self.pathname) ]
mod.__package__ = fullname
else:
#else, regular module
mod.__path__ = []
mod.__package__ = fullname.rpartition('.')[0]
try:
cachename = imp.cache_from_source(self.pathname)
if not os.path.exists(cachename):
update_cache = True
else:
sourcetime = os.stat(self.pathname).st_mtime
cachetime = os.stat(cachename).st_mtime
update_cache = cachetime < sourcetime
# # Force update_cache to work around a problem with it being treated as Py3 code???
# update_cache = True
if not update_cache:
with open(cachename, 'rb') as f:
data = f.read()
try:
code = marshal.loads(data)
except Exception:
# pyc could be corrupt. Regenerate it
update_cache = True
if update_cache:
if self.found[0]:
source = self.found[0].read()
elif self.kind == imp.PKG_DIRECTORY:
with open(self.pathname) as f:
source = f.read()
if detect_python2(source, self.pathname):
source = self.transform(source)
with open('/tmp/futurized_code.py', 'w') as f:
f.write('### Futurized code (from %s)\n%s' %
(self.pathname, source))
code = compile(source, self.pathname, 'exec')
dirname = os.path.dirname(cachename)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
with open(cachename, 'wb') as f:
data = marshal.dumps(code)
f.write(data)
except Exception: # could be write-protected
pass
exec(code, mod.__dict__)
except Exception as e:
# must remove module from sys.modules
del sys.modules[fullname]
raise # keep it simple
if self.found[0]:
self.found[0].close()
return mod
_hook = Py2Fixer()
def install_hooks(include_paths=(), exclude_paths=()):
if isinstance(include_paths, str):
include_paths = (include_paths,)
if isinstance(exclude_paths, str):
exclude_paths = (exclude_paths,)
assert len(include_paths) + len(exclude_paths) > 0, 'Pass at least one argument'
_hook.include(include_paths)
_hook.exclude(exclude_paths)
# _hook.debug = debug
enable = sys.version_info[0] >= 3 # enabled for all 3.x
if enable and _hook not in sys.meta_path:
sys.meta_path.insert(0, _hook) # insert at beginning. This could be made a parameter
# We could return the hook when there are ways of configuring it
#return _hook
def remove_hooks():
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def detect_hooks():
"""
Returns True if the import hooks are installed, False if not.
"""
return _hook in sys.meta_path
# present = any([hasattr(hook, 'PY2FIXER') for hook in sys.meta_path])
# return present
class hooks(object):
"""
Acts as a context manager. Use like this:
>>> from past import translation
>>> with translation.hooks():
... import mypy2module
>>> import requests # py2/3 compatible anyway
>>> # etc.
"""
def __enter__(self):
self.hooks_were_installed = detect_hooks()
install_hooks()
return self
def __exit__(self, *args):
if not self.hooks_were_installed:
remove_hooks()
class suspend_hooks(object):
"""
Acts as a context manager. Use like this:
>>> from past import translation
>>> translation.install_hooks()
>>> import http.client
>>> # ...
>>> with translation.suspend_hooks():
>>> import requests # or others that support Py2/3
If the hooks were disabled before the context, they are not installed when
the context is left.
"""
def __enter__(self):
self.hooks_were_installed = detect_hooks()
remove_hooks()
return self
def __exit__(self, *args):
if self.hooks_were_installed:
install_hooks()
| apache-2.0 |
Alir3z4/django-crequest | crequest/middleware.py | 3 | 1411 | import threading
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class CrequestMiddleware(MiddlewareMixin):
"""
Provides storage for the "current" request object, so that code anywhere
in your project can access it, without it having to be passed to that code
from the view.
"""
_requests = {}
def process_request(self, request):
"""
Store the current request.
"""
self.__class__.set_request(request)
def process_response(self, request, response):
"""
Delete the current request to avoid leaking memory.
"""
self.__class__.del_request()
return response
@classmethod
def get_request(cls, default=None):
"""
Retrieve the request object for the current thread, or the optionally
provided default if there is no current request.
"""
return cls._requests.get(threading.current_thread(), default)
@classmethod
def set_request(cls, request):
"""
Save the given request into storage for the current thread.
"""
cls._requests[threading.current_thread()] = request
@classmethod
def del_request(cls):
"""
Delete the request that was stored for the current thread.
"""
cls._requests.pop(threading.current_thread(), None)
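# Usage sketch (illustrative, not prescriptive): once the middleware is
# enabled, any code running inside a request/response cycle can do e.g.
#
#   from crequest.middleware import CrequestMiddleware
#   request = CrequestMiddleware.get_request()
#   if request is not None:
#       user = getattr(request, 'user', None)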
| bsd-3-clause |
foss-transportationmodeling/rettina-server | .env/local/lib/python2.7/encodings/iso8859_1.py | 593 | 13432 | """ Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
40223110/2015cd_midterm- | static/Brython3.1.1-20150328-091302/Lib/subprocess.py | 728 | 67282 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired will
be raised if a timeout was specified and expired.
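For example (a sketch assuming the POSIX 'false' utility, which always
exits with status 1):
try:
    subprocess.check_call(["false"])
except subprocess.CalledProcessError as e:
    print("command failed with exit status", e.returncode)   # prints 1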
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
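A sketch (assuming a POSIX echo binary) showing that shell metacharacters
reach the child verbatim when no shell is involved:
>>> subprocess.check_output(["echo", "$HOME; rm -rf /"], universal_newlines=True)
'$HOME; rm -rf /\n'
The argument is printed literally; nothing is expanded or executed.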
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be bytes to be sent to
the child process (or a string if universal_newlines is true), or
None, if no data should be sent to the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
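A minimal sketch (assuming a POSIX grep) of a full send/receive round trip:
p = subprocess.Popen(["grep", "o"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate(input=b"foo\nbar\n")
# out == b'foo\n'; err is None because stderr was not redirected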
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is a file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
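For example (a POSIX-only sketch, using sleep as a stand-in long-running
child):
p = subprocess.Popen(["sleep", "60"])
p.terminate()     # sends SIGTERM
p.wait()
# p.returncode is now -15, i.e. -signal.SIGTERM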
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: The original functions (the "a" side above) fail (more or less)
silently if the executed program cannot be found; the subprocess
replacements (the "b" side) raise an OSError exception instead.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import traceback
import gc
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import _posixsubprocess
_create_pipe = _posixsubprocess.cloexec_pipe
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'hash_randomization': 'R',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
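# Illustrative expectation (not part of the original module): if the current
# interpreter was started as "python -O -W ignore", this helper should return
# ['-O', '-Wignore'].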
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
If universal_newlines=True is passed, the return value will be a
string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
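# Illustrative expectation of the quoting rules above (what this
# implementation should produce, not captured output):
#
#   list2cmdline(["a b c", "d", 'e"f'])  ->  "a b c" d e\"f
#
# "a b c" is quoted because it contains a space, and the embedded double
# quote in 'e"f' is backslash-escaped (rule 3).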
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
with os.popen('{ ' + cmd + '; } 2>&1', 'r') as pipe:
try:
text = pipe.read()
sts = pipe.close()
except:
process = pipe._proc
process.kill()
process.wait()
raise
if sts is None:
sts = 0
if text[-1:] == '\n':
text = text[:-1]
return sts, text
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
#fix me brython syntax error
#if mswindows:
# if p2cwrite != -1:
# p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
# if c2pread != -1:
# c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
# if errread != -1:
# errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except EnvironmentError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except EnvironmentError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_winapi.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.base_exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_winapi.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _winapi.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = _create_pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = _create_pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = _create_pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = _create_pipe()
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'RuntimeError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
RuntimeError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
return self.returncode
def _try_wait(self, wait_flags):
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to return
# 0 even without WNOHANG in odd situations. issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input, endtime,
orig_timeout)
else:
stdout, stderr = self._communicate_with_select(input, endtime,
orig_timeout)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def _communicate_with_poll(self, input, endtime, orig_timeout):
stdout = None # Return
stderr = None # Return
if not self._communication_started:
self._fd2file = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
self._fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
self._fd2file[fd].close()
self._fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fd2output = {}
if self.stdout:
self._fd2output[self.stdout.fileno()] = []
if self.stderr:
self._fd2output[self.stderr.fileno()] = []
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
stdout = self._fd2output[self.stdout.fileno()]
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
stderr = self._fd2output[self.stderr.fileno()]
self._save_input(input)
while self._fd2file:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
ready = poller.poll(timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if self._input_offset >= len(self._input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
self._fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input, endtime, orig_timeout):
if not self._communication_started:
self._read_set = []
self._write_set = []
if self.stdin and input:
self._write_set.append(self.stdin)
if self.stdout:
self._read_set.append(self.stdout)
if self.stderr:
self._read_set.append(self.stderr)
self._save_input(input)
stdout = None # Return
stderr = None # Return
if self.stdout:
if not self._communication_started:
self._stdout_buff = []
stdout = self._stdout_buff
if self.stderr:
if not self._communication_started:
self._stderr_buff = []
stderr = self._stderr_buff
while self._read_set or self._write_set:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
(rlist, wlist, xlist) = \
select.select(self._read_set, self._write_set, [],
timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# According to the docs, returning three empty lists indicates
# that the timeout expired.
if not (rlist or wlist or xlist):
raise TimeoutExpired(self.args, orig_timeout)
# We also check what time it is ourselves for good measure.
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
if self.stdin in wlist:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
self._write_set.remove(self.stdin)
else:
raise
else:
self._input_offset += bytes_written
if self._input_offset >= len(self._input):
self.stdin.close()
self._write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if not data:
self.stdout.close()
self._read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if not data:
self.stderr.close()
self._read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
| gpl-3.0 |
KiranJKurian/XScheduler | venv/lib/python2.7/site-packages/oauth2client/service_account.py | 52 | 5038 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service account credentials class.
This credentials class is implemented on top of the rsa library.
"""
import base64
import time
from pyasn1.codec.ber import decoder
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client._helpers import _json_encode
from oauth2client._helpers import _to_bytes
from oauth2client._helpers import _urlsafe_b64encode
from oauth2client import util
from oauth2client.client import AssertionCredentials
class _ServiceAccountCredentials(AssertionCredentials):
"""Class representing a service account (signed JWT) credential."""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
def __init__(self, service_account_id, service_account_email,
private_key_id, private_key_pkcs8_text, scopes,
user_agent=None, token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI, **kwargs):
super(_ServiceAccountCredentials, self).__init__(
None, user_agent=user_agent, token_uri=token_uri,
revoke_uri=revoke_uri)
self._service_account_id = service_account_id
self._service_account_email = service_account_email
self._private_key_id = private_key_id
self._private_key = _get_private_key(private_key_pkcs8_text)
self._private_key_pkcs8_text = private_key_pkcs8_text
self._scopes = util.scopes_to_string(scopes)
self._user_agent = user_agent
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._kwargs = kwargs
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
header = {
'alg': 'RS256',
'typ': 'JWT',
'kid': self._private_key_id
}
now = int(time.time())
payload = {
'aud': self._token_uri,
'scope': self._scopes,
'iat': now,
'exp': now + _ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self._service_account_email
}
payload.update(self._kwargs)
first_segment = _urlsafe_b64encode(_json_encode(header))
second_segment = _urlsafe_b64encode(_json_encode(payload))
assertion_input = first_segment + b'.' + second_segment
# Sign the assertion.
rsa_bytes = rsa.pkcs1.sign(assertion_input, self._private_key,
'SHA-256')
signature = base64.urlsafe_b64encode(rsa_bytes).rstrip(b'=')
return assertion_input + b'.' + signature
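# The value returned above is a standard JWT: two base64url-encoded JSON
# segments (header and claim set) plus the RS256 signature over them,
# joined with '.' separators.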
def sign_blob(self, blob):
# Ensure that it is bytes
blob = _to_bytes(blob, encoding='utf-8')
return (self._private_key_id,
rsa.pkcs1.sign(blob, self._private_key, 'SHA-256'))
@property
def service_account_email(self):
return self._service_account_email
@property
def serialization_data(self):
return {
'type': 'service_account',
'client_id': self._service_account_id,
'client_email': self._service_account_email,
'private_key_id': self._private_key_id,
'private_key': self._private_key_pkcs8_text
}
def create_scoped_required(self):
return not self._scopes
def create_scoped(self, scopes):
return _ServiceAccountCredentials(self._service_account_id,
self._service_account_email,
self._private_key_id,
self._private_key_pkcs8_text,
scopes,
user_agent=self._user_agent,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def _get_private_key(private_key_pkcs8_text):
"""Get an RSA private key object from a pkcs8 representation."""
private_key_pkcs8_text = _to_bytes(private_key_pkcs8_text)
der = rsa.pem.load_pem(private_key_pkcs8_text, 'PRIVATE KEY')
asn1_private_key, _ = decoder.decode(der, asn1Spec=PrivateKeyInfo())
return rsa.PrivateKey.load_pkcs1(
asn1_private_key.getComponentByName('privateKey').asOctets(),
format='DER')
| mit |
slank/ansible | contrib/vault/vault-keyring.py | 35 | 2367 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
#
# Script to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OSes native keyring application
#
# This script requires the ``keyring`` python module
#
# Add a [vault] section to your ansible.cfg file;
# the only option is 'username'. Example:
#
# [vault]
# username = 'ansible_vault'
#
# Additionally, it would be a good idea to configure vault_password_file in
# ansible.cfg
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password: python /path/to/vault-keyring.py set
#
# If you choose to not configure the path to vault_password_file in ansible.cfg
# your ansible-playbook command may look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
import sys
import getpass
import keyring
import ConfigParser
import ansible.constants as C
def main():
(parser,config_path) = C.load_config_file()
try:
username = parser.get('vault', 'username')
except ConfigParser.NoSectionError:
sys.stderr.write('No [vault] section configured in config file: %s\n' % config_path)
sys.exit(1)
if len(sys.argv) == 2 and sys.argv[1] == 'set':
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password('ansible', username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
sys.stdout.write('%s\n' % keyring.get_password('ansible', username))
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
benthomasson/pyre | pyre/pyre_node.py | 3 | 21338 | import zmq
import uuid
import logging
import struct
import socket
import sys   # needed by start() for sys.exit() on bind failure
import time
from .zactor import ZActor
from .zbeacon import ZBeacon
from .zre_msg import ZreMsg
from .pyre_peer import PyrePeer
from .pyre_group import PyreGroup
BEACON_VERSION = 1
ZRE_DISCOVERY_PORT = 5670
REAP_INTERVAL = 1.0 # Once per second
logger = logging.getLogger(__name__)
class PyreNode(object):
def __init__(self, ctx, pipe, outbox, *args, **kwargs):
self._ctx = ctx #... until we use zbeacon actor
self._pipe = pipe # We send command replies and signals to the pipe
# Pipe back to application
self.outbox = outbox # Outbox back to application
self._terminated = False # API shut us down
self._verbose = False # Log all traffic (logging module?)
self.beacon_port = ZRE_DISCOVERY_PORT # Beacon port number
self.interval = 0 # Beacon interval 0=default
self.beacon = None # Beacon actor
self.beacon_socket = None # Beacon socket for polling
self.poller = zmq.Poller() # Socket poller
self.identity = uuid.uuid4() # Our UUID as object
self.bound = False
self.inbox = ctx.socket(zmq.ROUTER) # Our inbox socket (ROUTER)
try:
self.inbox.setsockopt(zmq.ROUTER_HANDOVER, 1)
except AttributeError as e:
logging.warning("can't set ROUTER_HANDOVER, needs zmq version >=4.1 but installed is {0}".format(zmq.zmq_version()))
self.poller.register(self._pipe, zmq.POLLIN)
self.name = str(self.identity)[:6] # Our public name (default=first 6 uuid chars)
self.endpoint = "" # Our public endpoint
self.port = 0 # Our inbox port, if any
self.status = 0 # Our own change counter
self.peers = {} # Hash of known peers, fast lookup
self.peer_groups = {} # Groups that our peers are in
self.own_groups = {} # Groups that we are in
self.headers = {} # Our header values
# TODO: gossip stuff
#self.start()
self.run()
# def __del__(self):
# destroy beacon
def start(self):
# TODO: If application didn't bind explicitly, we grab an ephemeral port
# on all available network interfaces. This is orthogonal to
# beaconing, since we can connect to other peers and they will
# gossip our endpoint to others.
if self.beacon_port:
# Start beacon discovery
self.beacon = ZActor(self._ctx, ZBeacon)
if self._verbose:
self.beacon.send_unicode("VERBOSE")
# Our hostname is provided by zbeacon
self.beacon.send_unicode("CONFIGURE", zmq.SNDMORE)
self.beacon.send(struct.pack("I", self.beacon_port))
hostname = self.beacon.recv_unicode()
#if self.interval:
# self.beacon.set_interval(self.interval)
# Our hostname is provided by zbeacon
self.port = self.inbox.bind_to_random_port("tcp://*")
if self.port < 0:
# Die on bad interface or port exhaustion
logging.critical("Random port assignment for incoming messages failed. Exiting.")
sys.exit(-1)
else:
self.bound = True
self.endpoint = "tcp://%s:%d" %(hostname, self.port)
# Set broadcast/listen beacon
transmit = struct.pack('cccb16sH', b'Z', b'R', b'E',
BEACON_VERSION, self.identity.bytes,
socket.htons(self.port))
self.beacon.send_unicode("PUBLISH", zmq.SNDMORE)
self.beacon.send(transmit)
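# Beacon layout as packed above (22 bytes): the ASCII tag 'Z','R','E',
# a 1-byte protocol version, the node's 16-byte UUID, and the 2-byte
# mailbox port in network byte order.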
# construct the header filter (to discard none zre messages)
filter = struct.pack("ccc", b'Z', b'R', b'E')
self.beacon.send_unicode("SUBSCRIBE",zmq.SNDMORE)
self.beacon.send(filter)
self.beacon_socket = self.beacon.resolve()
self.poller.register(self.beacon_socket, zmq.POLLIN)
#else:
# TODO: gossip stuff
# Start polling on inbox
self.poller.register(self.inbox, zmq.POLLIN)
#logger.debug("Node identity: {0}".format(self.identity))
def stop(self):
logger.debug("Pyre node: stopping beacon")
if self.beacon:
stop_transmit = struct.pack('cccb16sH', b'Z',b'R',b'E',
BEACON_VERSION, self.identity.bytes,
socket.htons(0))
self.beacon.send_unicode("PUBLISH", zmq.SNDMORE)
self.beacon.send(stop_transmit)
# Give time for beacon to go out
time.sleep(0.001)
self.poller.unregister(self.beacon_socket)
self.beacon.destroy()
self.beacon = None
self.beacon_socket = None
self.beacon_port = 0
if self.bound:
# Stop polling on inbox
self.poller.unregister(self.inbox)
self.outbox.send_unicode("STOP", zmq.SNDMORE)
self.outbox.send(self.identity.bytes, zmq.SNDMORE)
self.outbox.send_unicode(self.name)
def bind(self, endpoint):
logger.warning("Not implemented")
# Send message to all peers
def send_peer(self, peer, msg):
peer.send(msg)
# TODO: log_item, dump
# Here we handle the different control messages from the front-end
def recv_api(self):
request = self._pipe.recv_multipart()
command = request.pop(0).decode('UTF-8')
if command == "UUID":
self._pipe.send(self.identity.bytes)
elif command == "NAME":
self._pipe.send_unicode(self.name)
elif command == "SET NAME":
self.name = request.pop(0).decode('UTF-8')
elif command == "SET HEADER":
header_name = request.pop(0).decode('UTF-8')
header_value = request.pop(0).decode('UTF-8')
self.headers.update({header_name: header_value})
elif command == "SET VERBOSE":
self._verbose = True
elif command == "SET PORT":
self.beacon_port = int(request.pop(0))
elif command == "SET INTERVAL":
self.interval = int(request.pop(0))
#elif command == "SET ENDPOINT":
# TODO: gossip start and endpoint setting
# TODO: GOSSIP BIND, GOSSIP CONNECT
#elif command == "BIND":
# # TODO: Needs a wait-signal
# endpoint = request.pop(0).decode('UTF-8')
# self.bind(endpoint)
#elif command == "CONNECT":
# # TODO: Needs a wait-signal
# endpoint = request.pop(0).decode('UTF-8')
# self.connect(endpoint)
elif command == "START":
# zsock_signal (self->pipe, zyre_node_start (self));
self.start()
self._pipe.signal()
elif command == "STOP":
# zsock_signal (self->pipe, zyre_node_stop (self));
self.stop()
self._pipe.signal()
elif command == "WHISPER":
# Get peer to send message to
peer_id = uuid.UUID(bytes=request.pop(0))
# Send frame on out to peer's mailbox, drop message
# if peer doesn't exist (may have been destroyed)
if self.peers.get(peer_id):
msg = ZreMsg(ZreMsg.WHISPER)
msg.set_address(peer_id)
msg.content = request
self.peers[peer_id].send(msg)
elif command == "SHOUT":
# Get group to send message to
grpname = request.pop(0).decode('UTF-8')
msg = ZreMsg(ZreMsg.SHOUT)
msg.set_group(grpname)
msg.content = request.pop(0)
if self.peer_groups.get(grpname):
self.peer_groups[grpname].send(msg)
else:
logger.warning("Group {0} not found.".format(grpname))
elif command == "JOIN":
grpname = request.pop(0).decode('UTF-8')
grp = self.own_groups.get(grpname)
if not grp:
# Only send if we're not already in group
grp = PyreGroup(grpname)
self.own_groups[grpname] = grp
msg = ZreMsg(ZreMsg.JOIN)
msg.set_group(grpname)
self.status += 1
msg.set_status(self.status)
for peer in self.peers.values():
peer.send(msg)
logger.debug("Node is joining group {0}".format(grpname))
elif command == "LEAVE":
grpname = request.pop(0).decode('UTF-8')
grp = self.own_groups.get(grpname)
if grp:
# Only send if we're actually in group
msg = ZreMsg(ZreMsg.LEAVE)
msg.set_group(grpname)
self.status += 1
msg.set_status(self.status)
for peer in self.peers.values():
peer.send(msg)
self.own_groups.pop(grpname)
logger.debug("Node is leaving group {0}".format(grpname))
elif command == "PEERS":
self._pipe.send_pyobj(list(self.peers.keys()))
elif command == "PEER NAME":
id = uuid.UUID(bytes=request.pop(0))
peer = self.peers.get(id)
if peer:
self._pipe.send_unicode("%s" %peer.get_name())
else:
self._pipe.send_unicode("")
elif command == "PEER ENDPOINT":
id = uuid.UUID(bytes=request.pop(0))
peer = self.peers.get(id)
if peer:
self._pipe.send_unicode("%s" %peer.get_endpoint())
else:
self._pipe.send_unicode("")
elif command == "PEER HEADER":
id = uuid.UUID(bytes=request.pop(0))
key = request.pop(0).decode('UTF-8')
peer = self.peers.get(id)
if not peer:
self._pipe.send_unicode("")
else:
self._pipe.send_unicode(peer.get_header(key))
elif command == "PEER GROUPS":
self._pipe.send_pyobj(list(self.peer_groups.keys()))
elif command == "OWN GROUPS":
self._pipe.send_pyobj(list(self.own_groups.keys()))
elif command == "DUMP":
# TODO: zyre_node_dump (self);
pass
elif command == "$TERM":
# this is often not printed if program terminates
logger.debug("Pyre node: shutting down")
self._terminated = True
else:
logger.warning("Unkown Node API command: {0}".format(command))
def purge_peer(self, peer, endpoint):
if (peer.get_endpoint() == endpoint):
peer.disconnect()
logger.debug("Purge peer: {0}{1}".format(peer,endpoint))
# Find or create peer via its UUID string
def require_peer(self, identity, endpoint):
p = self.peers.get(identity)
if not p:
# Purge any previous peer on same endpoint
for peer_id, peer in self.peers.copy().items():
self.purge_peer(peer, endpoint)
p = PyrePeer(self._ctx, identity)
self.peers[identity] = p
p.set_origin(self.name);
# TODO: this could be handy, to set verbosity on a specific peer
#zyre_peer_set_verbose (peer, self->verbose);
p.connect(self.identity, endpoint)
# Handshake discovery by sending HELLO as first message
m = ZreMsg(ZreMsg.HELLO)
m.set_endpoint(self.endpoint)
m.set_groups(self.own_groups.keys())
m.set_status(self.status)
m.set_name(self.name)
m.set_headers(self.headers)
p.send(m)
return p
# Remove peer from group, if it's a member
def delete_peer(self, peer, group):
group.leave(peer)
# Remove a peer from our data structures
def remove_peer(self, peer):
# Tell the calling application the peer has gone
self.outbox.send_unicode("EXIT", zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name())
logger.debug("({0}) EXIT name={1}".format(peer, peer.get_endpoint()))
# Remove peer from any groups we've got it in
for grp in self.peer_groups.values():
self.delete_peer(peer, grp)
# To destroy peer, we remove from peers hash table (dict)
self.peers.pop(peer.get_identity())
# Find or create group via its name
def require_peer_group(self, groupname):
grp = self.peer_groups.get(groupname)
if not grp:
            # PyreGroup's ``peers`` argument has a mutable default that would be
            # shared between groups, so explicitly pass a fresh empty dict here
grp = PyreGroup(groupname, peers={})
self.peer_groups[groupname] = grp
return grp
def join_peer_group(self, peer, groupname):
grp = self.require_peer_group(groupname)
grp.join(peer)
# Now tell the caller about the peer joined group
self.outbox.send_unicode("JOIN", flags=zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, flags=zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name(), flags=zmq.SNDMORE)
self.outbox.send_unicode(groupname)
logger.debug("({0}) JOIN name={1} group={2}".format(self.name, peer.get_name(), groupname))
return grp
def leave_peer_group(self, peer, groupname):
        # Tell the caller the peer has left the group
self.outbox.send_unicode("LEAVE", flags=zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, flags=zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name(), flags=zmq.SNDMORE)
self.outbox.send_unicode(groupname)
# Now remove the peer from the group
grp = self.require_peer_group(groupname)
grp.leave(peer)
logger.debug("({0}) LEAVE name={1} group={2}".format(self.name, peer.get_name(), groupname))
# Here we handle messages coming from other peers
def recv_peer(self):
zmsg = ZreMsg()
zmsg.recv(self.inbox)
#msgs = self.inbox.recv_multipart()
# Router socket tells us the identity of this peer
# First frame is sender identity
id = zmsg.get_address()
# On HELLO we may create the peer if it's unknown
# On other commands the peer must already exist
peer = self.peers.get(id)
if zmsg.id == ZreMsg.HELLO:
if (peer):
# remove fake peers
if peer.get_ready():
self.remove_peer(peer)
elif peer.endpoint == self.endpoint:
# We ignore HELLO, if peer has same endpoint as current node
return
peer = self.require_peer(id, zmsg.get_endpoint())
peer.set_ready(True)
# Ignore command if peer isn't ready
if not peer or not peer.get_ready():
logger.warning("Peer {0} isn't ready".format(peer))
return
if peer.messages_lost(zmsg):
logger.warning("{0} messages lost from {1}".format(self.identity, peer.identity))
self.remove_peer(peer)
return
# Now process each command
if zmsg.id == ZreMsg.HELLO:
# Store properties from HELLO command into peer
peer.set_name(zmsg.get_name())
peer.set_headers(zmsg.get_headers())
# Now tell the caller about the peer
self.outbox.send_unicode("ENTER", flags=zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, flags=zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name(), flags=zmq.SNDMORE)
self.outbox.send_json(peer.get_headers(),flags=zmq.SNDMORE)
self.outbox.send_unicode(peer.get_endpoint())
logger.debug("({0}) ENTER name={1} endpoint={2}".format(self.name, peer.get_name(), peer.get_endpoint()))
# Join peer to listed groups
for grp in zmsg.get_groups():
self.join_peer_group(peer, grp)
# Now take peer's status from HELLO, after joining groups
peer.set_status(zmsg.get_status())
elif zmsg.id == ZreMsg.WHISPER:
# Pass up to caller API as WHISPER event
self.outbox.send_unicode("WHISPER", zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name(), zmq.SNDMORE)
self.outbox.send(zmsg.content)
elif zmsg.id == ZreMsg.SHOUT:
            # Pass up to caller API as SHOUT event
self.outbox.send_unicode("SHOUT", zmq.SNDMORE)
self.outbox.send(peer.get_identity().bytes, zmq.SNDMORE)
self.outbox.send_unicode(peer.get_name(), zmq.SNDMORE)
self.outbox.send_unicode(zmsg.get_group(), zmq.SNDMORE)
self.outbox.send(zmsg.content)
elif zmsg.id == ZreMsg.PING:
peer.send(ZreMsg(id=ZreMsg.PING_OK))
elif zmsg.id == ZreMsg.JOIN:
self.join_peer_group(peer, zmsg.get_group())
assert(zmsg.get_status() == peer.get_status())
elif zmsg.id == ZreMsg.LEAVE:
#self.leave_peer_group(zmsg.get_group())
self.leave_peer_group(peer, zmsg.get_group())
assert(zmsg.get_status() == peer.get_status())
# Activity from peer resets peer timers
peer.refresh()
def recv_beacon(self):
# Get IP address and beacon of peer
try:
ipaddress, frame = self.beacon_socket.recv_multipart()
except ValueError:
return
beacon = struct.unpack('cccb16sH', frame)
# Ignore anything that isn't a valid beacon
if beacon[3] != BEACON_VERSION:
logger.warning("Invalid ZRE Beacon version: {0}".format(beacon[3]))
return
peer_id = uuid.UUID(bytes=beacon[4])
#print("peerId: %s", peer_id)
port = socket.ntohs(beacon[5])
# if we receive a beacon with port 0 this means the peer exited
if port:
endpoint = "tcp://%s:%d" %(ipaddress.decode('UTF-8'), port)
peer = self.require_peer(peer_id, endpoint)
peer.refresh()
else:
# Zero port means peer is going away; remove it if
# we had any knowledge of it already
peer = self.peers.get(peer_id)
# remove the peer (delete)
if peer:
logger.debug("Received 0 port beacon, removing peer {0}".format(peer))
self.remove_peer(peer)
else:
logger.warning(self.peers)
logger.warning("We don't know peer id {0}".format(peer_id))
        # TODO: Handle gossip data
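    # Illustrative sketch of the beacon frame layout implied by the
    # struct.unpack('cccb16sH', ...) call above: three signature characters,
    # a one-byte version, a 16-byte UUID and a network-order port. The
    # 'Z', 'R', 'E' signature, version value and port number below are
    # assumptions taken from the ZRE convention, not from this file:
    #
    #   >>> import struct, socket, uuid
    #   >>> frame = struct.pack('cccb16sH', b'Z', b'R', b'E', 1,
    #   ...                     uuid.uuid4().bytes, socket.htons(5670))
    #   >>> len(frame)
    #   22
    #
    # A port of 0 in such a frame is the "peer is leaving" signal handled above.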
# We do this once a second:
# - if peer has gone quiet, send TCP ping
# - if peer has disappeared, expire it
def ping_peer(self, peer_id):
peer = self.peers.get(peer_id)
if time.time() > peer.expired_at:
logger.debug("({0}) peer expired name={1} endpoint={2}".format(self.name, peer.get_name(), peer.get_endpoint()))
self.remove_peer(peer)
elif time.time() > peer.evasive_at:
# If peer is being evasive, force a TCP ping.
# TODO: do this only once for a peer in this state;
# it would be nicer to use a proper state machine
# for peer management.
logger.debug("({0}) peer seems dead/slow name={1} endpoint={2}".format(self.name, peer.get_name(), peer.get_endpoint()))
msg = ZreMsg(ZreMsg.PING)
peer.send(msg)
# --------------------------------------------------------------------------
# This is the actor that runs a single node; it uses one thread, creates
# a zyre_node object at start and destroys that when finishing.
def run(self):
# Signal actor successfully initialized
self._pipe.signal()
reap_at = time.time() + REAP_INTERVAL
while not self._terminated:
timeout = reap_at - time.time()
if timeout < 0:
timeout = 0
items = dict(self.poller.poll(timeout * 1000))
if self._pipe in items and items[self._pipe] == zmq.POLLIN:
self.recv_api()
if self.inbox in items and items[self.inbox] == zmq.POLLIN:
self.recv_peer()
if self.beacon_socket in items and items[self.beacon_socket] == zmq.POLLIN:
self.recv_beacon()
if time.time() >= reap_at:
reap_at = time.time() + REAP_INTERVAL
# Ping all peers and reap any expired ones
for peer_id in self.peers.copy().keys():
self.ping_peer(peer_id)
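    # Rough sketch (assumption -- the sending wrapper is not part of this
    # excerpt) of the multipart messages the other end of self._pipe is
    # expected to send, matching how the command dispatcher above pops frames:
    #
    #   pipe.send_multipart([b"START"])                            # then wait for signal()
    #   pipe.send_multipart([b"JOIN", b"CHAT"])                    # join group "CHAT"
    #   pipe.send_multipart([b"SHOUT", b"CHAT", b"hello"])         # group name + one content frame
    #   pipe.send_multipart([b"WHISPER", peer_uuid.bytes, b"hi"])  # 16-byte UUID + payload frames
    #   pipe.send_multipart([b"$TERM"])                            # ask the node thread to exit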
| lgpl-3.0 |
loneknightpy/spark | examples/src/main/python/ml/decision_tree_classification_example.py | 123 | 3003 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DecisionTreeClassificationExample")\
.getOrCreate()
# $example on$
# Load the data stored in LIBSVM format as a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
treeModel = model.stages[2]
# summary only
print(treeModel)
# $example off$
spark.stop()
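    # To run this example from a Spark checkout (illustrative invocation):
    #   bin/spark-submit examples/src/main/python/ml/decision_tree_classification_example.py
    # It expects data/mllib/sample_libsvm_data.txt to be reachable from the
    # working directory, as loaded above.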
| apache-2.0 |
memtoko/django | django/utils/formats.py | 291 | 8379 | import datetime
import decimal
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, datetime_safe, numberformat, six
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import (
check_for_language, get_language, to_locale,
)
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
'DATETIME_INPUT_FORMATS': [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
],
}
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
"""
Does the heavy lifting of finding format modules.
"""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, six.string_types):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + '.%s')
format_locations.append('django.conf.locale.%s')
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('%s.formats' % (location % loc))
except ImportError:
pass
def get_format_modules(lang=None, reverse=False):
"""
Returns a list of the format modules found
"""
if lang is None:
lang = get_language()
modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH)))
if reverse:
return list(reversed(modules))
return modules
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, returns the format for the current
language (locale), defaults to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
format_type = force_str(format_type)
if use_l10n or (use_l10n is None and settings.USE_L10N):
if lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
cached = _format_cache[cache_key]
if cached is not None:
return cached
else:
# Return the general setting by default
return getattr(settings, format_type)
except KeyError:
for module in get_format_modules(lang):
try:
val = getattr(module, format_type)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
if isinstance(val, tuple):
val = list(val)
val.append(iso_input)
_format_cache[cache_key] = val
return val
except AttributeError:
pass
_format_cache[cache_key] = None
return getattr(settings, format_type)
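# Illustrative sketch of get_format() behaviour (actual values depend on
# settings.USE_L10N, the active language and any FORMAT_MODULE_PATH modules):
#
#   >>> get_format('DATE_FORMAT')        # falls back to settings.DATE_FORMAT
#   'N j, Y'
#   >>> '%Y-%m-%d' in get_format('DATE_INPUT_FORMATS')
#   True    # the ISO input format is already present or gets appended above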
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
"""
Formats a datetime.date or datetime.datetime object using a
localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))
def time_format(value, format=None, use_l10n=None):
"""
Formats a datetime.time object using a localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Formats a numeric value using localization settings
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
force_grouping=force_grouping
)
def localize(value, use_l10n=None):
"""
Checks if value is a localizable type (date, number...) and returns it
formatted as a string using current locale format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, bool):
return mark_safe(six.text_type(value))
elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
else:
return value
def localize_input(value, default=None):
"""
Checks if an input value is a localizable type and returns it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, (decimal.Decimal, float) + six.integer_types):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.time):
format = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitizes a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if settings.USE_L10N and isinstance(value, six.string_types):
parts = []
decimal_separator = get_format('DECIMAL_SEPARATOR')
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
thousand_sep = get_format('THOUSAND_SEPARATOR')
if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3:
# Special case where we suspect a dot meant decimal separator (see #22171)
pass
else:
for replacement in {
thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
value = value.replace(replacement, '')
parts.append(value)
value = '.'.join(reversed(parts))
return value
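# Illustrative example (assumed locale): with USE_L10N and USE_THOUSAND_SEPARATOR
# enabled and a format module where DECIMAL_SEPARATOR is ',' and
# THOUSAND_SEPARATOR is '.', sanitize_separators('1.234,56') returns '1234.56',
# turning localized user input back into a machine-readable number string.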
| bsd-3-clause |
CollabQ/CollabQ | vendor/django/forms/fields.py | 3 | 36775 | """
Field classes.
"""
import copy
import datetime
import os
import re
import time
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Python 2.3 fallbacks
try:
from decimal import Decimal, DecimalException
except ImportError:
from django.utils._decimal import Decimal, DecimalException
try:
set
except NameError:
from sets import Set as set
import django.core.exceptions
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from util import ErrorList, ValidationError
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, FileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget
from django.core.files.uploadedfile import SimpleUploadedFile as UploadedFile
__all__ = (
'Field', 'CharField', 'IntegerField',
'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
    'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField'
)
# These values, if given to to_python(), will trigger the self.required check.
EMPTY_VALUES = (None, '')
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
        # show_hidden_initial -- Boolean that specifies whether a hidden widget
        #               with the initial value should be rendered after the widget.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
def set_class_error_messages(messages, klass):
for base_class in klass.__bases__:
set_class_error_messages(messages, base_class)
messages.update(getattr(klass, 'default_error_messages', {}))
messages = {}
set_class_error_messages(messages, self.__class__)
messages.update(error_messages or {})
self.error_messages = messages
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
if self.required and value in EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
return value
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
class CharField(Field):
default_error_messages = {
'max_length': _(u'Ensure this value has at most %(max)d characters (it has %(length)d).'),
'min_length': _(u'Ensure this value has at least %(min)d characters (it has %(length)d).'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
def clean(self, value):
"Validates max_length and min_length. Returns a Unicode object."
super(CharField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = smart_unicode(value)
value_length = len(value)
if self.max_length is not None and value_length > self.max_length:
raise ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
if self.min_length is not None and value_length < self.min_length:
raise ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})
return value
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %s.'),
'min_value': _(u'Ensure this value is greater than or equal to %s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
super(IntegerField, self).__init__(*args, **kwargs)
def clean(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
super(IntegerField, self).clean(value)
if value in EMPTY_VALUES:
return None
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
if self.max_value is not None and value > self.max_value:
raise ValidationError(self.error_messages['max_value'] % self.max_value)
if self.min_value is not None and value < self.min_value:
raise ValidationError(self.error_messages['min_value'] % self.min_value)
return value
class FloatField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %s.'),
'min_value': _(u'Ensure this value is greater than or equal to %s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
Field.__init__(self, *args, **kwargs)
def clean(self, value):
"""
Validates that float() can be called on the input. Returns a float.
Returns None for empty values.
"""
super(FloatField, self).clean(value)
if not self.required and value in EMPTY_VALUES:
return None
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
if self.max_value is not None and value > self.max_value:
raise ValidationError(self.error_messages['max_value'] % self.max_value)
if self.min_value is not None and value < self.min_value:
raise ValidationError(self.error_messages['min_value'] % self.min_value)
return value
class DecimalField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %s.'),
'min_value': _(u'Ensure this value is greater than or equal to %s.'),
'max_digits': _('Ensure that there are no more than %s digits in total.'),
'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, *args, **kwargs)
def clean(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
super(DecimalField, self).clean(value)
if not self.required and value in EMPTY_VALUES:
return None
value = smart_str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_value is not None and value > self.max_value:
raise ValidationError(self.error_messages['max_value'] % self.max_value)
if self.min_value is not None and value < self.min_value:
raise ValidationError(self.error_messages['min_value'] % self.min_value)
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
return value
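# Worked example (illustrative) of the digit bookkeeping in DecimalField.clean():
# Decimal('0.045').as_tuple() gives sign 0, digittuple (4, 5) and exponent -3,
# so decimals = 3 exceeds len(digittuple) = 2 and digits is bumped to 3, giving
# whole_digits = 0 -- the zeros around the decimal point are not counted, which
# is what allows max_digits == decimal_places, as noted in the comment above.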
DEFAULT_DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
class DateField(Field):
widget = DateInput
default_error_messages = {
'invalid': _(u'Enter a valid date.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats or DEFAULT_DATE_INPUT_FORMATS
def clean(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
super(DateField, self).clean(value)
if value in EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
for format in self.input_formats:
try:
return datetime.date(*time.strptime(value, format)[:3])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
DEFAULT_TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
class TimeField(Field):
widget = TimeInput
default_error_messages = {
'invalid': _(u'Enter a valid time.')
}
def __init__(self, input_formats=None, *args, **kwargs):
super(TimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats or DEFAULT_TIME_INPUT_FORMATS
def clean(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
super(TimeField, self).clean(value)
if value in EMPTY_VALUES:
return None
if isinstance(value, datetime.time):
return value
for format in self.input_formats:
try:
return datetime.time(*time.strptime(value, format)[3:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
DEFAULT_DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
class DateTimeField(Field):
widget = DateTimeInput
default_error_messages = {
'invalid': _(u'Enter a valid date/time.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats or DEFAULT_DATETIME_INPUT_FORMATS
def clean(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
super(DateTimeField, self).clean(value)
if value in EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
value = '%s %s' % tuple(value)
for format in self.input_formats:
try:
return datetime.datetime(*time.strptime(value, format)[:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
if isinstance(regex, basestring):
regex = re.compile(regex)
self.regex = regex
def clean(self, value):
"""
Validates that the input matches the regular expression. Returns a
Unicode object.
"""
value = super(RegexField, self).clean(value)
if value == u'':
return value
if not self.regex.search(value):
raise ValidationError(self.error_messages['invalid'])
return value
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9]+(?:-*[A-Z0-9]+)*\.)+[A-Z]{2,6}$', re.IGNORECASE) # domain
class EmailField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid e-mail address.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
RegexField.__init__(self, email_re, max_length, min_length, *args,
**kwargs)
try:
from django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class FileField(Field):
widget = FileInput
default_error_messages = {
'invalid': _(u"No file was submitted. Check the encoding type on the form."),
'missing': _(u"No file was submitted."),
'empty': _(u"The submitted file is empty."),
'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
super(FileField, self).__init__(*args, **kwargs)
def clean(self, data, initial=None):
super(FileField, self).clean(initial or data)
if not self.required and data in EMPTY_VALUES:
return None
elif not data and initial:
return initial
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not file_size:
raise ValidationError(self.error_messages['empty'])
return data
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def clean(self, data, initial=None):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).clean(data, initial)
if f is None:
return None
elif not data and initial:
return initial
from PIL import Image
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(file)
trial_image.verify()
except ImportError:
# Under PyPy, it is possible to import PIL. However, the underlying
# _imaging C module isn't available, so an ImportError will be
# raised. Catch and re-raise.
raise
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
url_re = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9]+(?:-*[A-Z0-9]+)*\.)+[A-Z]{2,6}|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|/\S+)$', re.IGNORECASE)
class URLField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid URL.'),
'invalid_link': _(u'This URL appears to be a broken link.'),
}
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(URLField, self).__init__(url_re, max_length, min_length, *args,
**kwargs)
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def clean(self, value):
# If no URL scheme given, assume http://
if value and '://' not in value:
value = u'http://%s' % value
# If no URL path given, assume /
if value and not urlparse.urlsplit(value)[2]:
value += '/'
value = super(URLField, self).clean(value)
if value == u'':
return value
if self.verify_exists:
import urllib2
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
try:
req = urllib2.Request(value, None, headers)
u = urllib2.urlopen(req)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise ValidationError(self.error_messages['invalid_link'])
return value
class BooleanField(Field):
widget = CheckboxInput
def clean(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if value in ('False', '0'):
value = False
else:
value = bool(value)
super(BooleanField, self).clean(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def clean(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
        is what a RadioField will submit. Unlike the BooleanField, we need to
explicitly check for True, because we are not using the bool() function
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
super(ChoiceField, self).__init__(required, widget, label, initial,
help_text, *args, **kwargs)
self.choices = choices
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def clean(self, value):
"""
Validates that the input is in self.choices.
"""
value = super(ChoiceField, self).clean(value)
if value in EMPTY_VALUES:
value = u''
value = smart_unicode(value)
if value == u'':
return value
if not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for k, v in self.choices:
if type(v) in (tuple, list):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_unicode(k2):
return True
else:
if value == smart_unicode(k):
return True
return False
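# Illustrative example of the optgroup handling in valid_value() above:
#   choices = [
#       ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),  # optgroup: value is a sequence
#       ('unknown', 'Unknown'),                         # plain choice
#   ]
# With these choices, valid_value(u'cd') is True because the inner (key, label)
# pairs of the group are searched, while valid_value(u'Audio') is False.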
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def clean(self, value):
"""
Validate that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).clean(value)
if value == self.empty_value or value in EMPTY_VALUES:
return self.empty_value
# Hack alert: This field is purpose-made to use with Field.to_python as
# a coercion function so that ModelForms with choices work. However,
# Django's Field.to_python raises
# django.core.exceptions.ValidationError, which is a *different*
# exception than django.forms.util.ValidationError. So we need to catch
# both.
try:
value = self.coerce(value)
except (ValueError, TypeError, django.core.exceptions.ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _(u'Enter a list of values.'),
}
def clean(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
elif not self.required and not value:
return []
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
new_value = [smart_unicode(val) for val in value]
# Validate that each value in the value list is in self.choices.
for val in new_value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
return new_value
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _(u'Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in EMPTY_VALUES]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
return self.compress(clean_data)
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, required=True,
widget=None, label=None, initial=None, help_text=None,
*args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in os.walk(self.path):
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in os.listdir(self.path):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _(u'Enter a valid date.'),
'invalid_time': _(u'Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
fields = (
DateField(input_formats=input_date_formats, error_messages={'invalid': errors['invalid_date']}),
TimeField(input_formats=input_time_formats, error_messages={'invalid': errors['invalid_time']}),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_time'])
return datetime.datetime.combine(*data_list)
return None
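# Illustrative example: compress() merges the two cleaned values with
# datetime.datetime.combine, so a valid ['2006-10-25', '14:30'] submission
# becomes datetime.datetime(2006, 10, 25, 14, 30).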
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
class IPAddressField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid IPv4 address.'),
}
def __init__(self, *args, **kwargs):
super(IPAddressField, self).__init__(ipv4_re, *args, **kwargs)
slug_re = re.compile(r'^[-\w]+$')
class SlugField(RegexField):
default_error_messages = {
'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
u" underscores or hyphens."),
}
def __init__(self, *args, **kwargs):
super(SlugField, self).__init__(slug_re, *args, **kwargs)
| apache-2.0 |
soarpenguin/ansible | lib/ansible/modules/network/sros/sros_command.py | 8 | 7153 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: sros_command
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Run commands on remote devices running Nokia SR OS
description:
- Sends arbitrary commands to an SR OS node and returns the results
read from the device. This module includes an argument that will
cause the module to wait for a specific condition before returning
or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(sros_config) to configure SR OS devices.
extends_documentation_fragment: sros
options:
commands:
description:
- List of commands to send to the remote SR OS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
---
tasks:
- name: run show version on remote devices
sros_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains sros
sros_command:
commands: show version
wait_for: result[0] contains sros
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
sros_command:
commands:
- show version
- show port detail
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
sros_command:
commands:
- show version
- show port detail
wait_for:
- result[0] contains TiMOS-B-14.0.R4
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.six import string_types
from ansible.module_utils.sros import run_commands, sros_argument_spec, check_args
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='sros_command does not support running config mode '
'commands. Please use sros_config instead'
)
return commands
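# Rough sketch (assumption, not checked against every Ansible release): for a
# plain task argument such as
#     commands: [ "show version" ]
# parse_commands() yields entries shaped like
#     {'command': 'show version', 'prompt': None, 'answer': None}
# so an expected prompt/answer pair can be attached when a command is interactive.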
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(sros_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
sam-m888/gramps | windows/nonAIO/nsis/gcheck.py | 13 | 1485 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Steve Hall
# Copyright (C) 2008 Stephen George
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
fn = sys.argv[1]
f = open(fn,"w")
f.write('[tests]\n')
try:
from gi.repository import Gtk
f.write('gtk=yes\n')
f.write('gtkver=%d.%d.%d\n' % Gtk.gtk_version)
f.write('pygtk=yes\n')
f.write('pygtkver=%d.%d.%d\n' % Gtk.pygtk_version)
except ImportError:
f.write('gtk=no\n')
f.write('gtkver=no\n')
f.write('pygtk=no\n')
f.write('pygtkver=no\n')
try:
import cairo
f.write('pycairo=yes\n')
#f.write('pycairover=%s\n' % cairo.version_info)
f.write('pycairover=%s\n' % str(cairo.version_info) )
except ImportError:
f.write('pycairo=no\n')
f.write('pycairover=no\n')
f.close()
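# When all probes succeed, the script leaves behind a small ini-style report;
# the values below are illustrative only:
#   [tests]
#   gtk=yes
#   gtkver=3.24.0
#   pygtk=yes
#   pygtkver=2.24.0
#   pycairo=yes
#   pycairover=(1, 8, 10)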
| gpl-2.0 |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/numpy/lib/arraypad.py | 48 | 52230 | """
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['pad']
###############################################################################
# Private utility functions.
def _arange_ndarray(arr, shape, axis, reverse=False):
"""
Create an ndarray of `shape` with increments along specified `axis`
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
shape : tuple of ints
Shape of desired array. Should be equivalent to `arr.shape` except
`shape[axis]` which may have any positive value.
axis : int
Axis to increment along.
reverse : bool
If False, increment in a positive fashion from 1 to `shape[axis]`,
inclusive. If True, the bounds are the same but the order reversed.
Returns
-------
padarr : ndarray
Output array sized to pad `arr` along `axis`, with linear range from
1 to `shape[axis]` along specified `axis`.
Notes
-----
The range is deliberately 1-indexed for this specific use case. Think of
this algorithm as broadcasting `np.arange` to a single `axis` of an
arbitrarily shaped ndarray.
"""
initshape = tuple(1 if i != axis else shape[axis]
for (i, x) in enumerate(arr.shape))
if not reverse:
padarr = np.arange(1, shape[axis] + 1)
else:
padarr = np.arange(shape[axis], 0, -1)
padarr = padarr.reshape(initshape)
for i, dim in enumerate(shape):
if padarr.shape[i] != dim:
padarr = padarr.repeat(dim, axis=i)
return padarr
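# --- Illustrative sketch (not part of the original module) ---
# The values below are hypothetical and only show the broadcasting idea
# described in the docstring above: for arr of shape (2, 3), shape=(2, 5)
# and axis=1, _arange_ndarray(arr, (2, 5), 1) returns
#   [[1, 2, 3, 4, 5],
#    [1, 2, 3, 4, 5]]
# i.e. np.arange(1, 6) laid out along axis 1 and repeated along axis 0.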
def _round_ifneeded(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
Parameters
----------
arr : ndarray
Input array.
dtype : dtype
The dtype of the destination array.
"""
if np.issubdtype(dtype, np.integer):
arr.round(out=arr)
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` prepended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
axis=axis)
else:
return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
arr), axis=axis)
def _append_const(arr, pad_amt, val, axis=-1):
"""
Append constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` appended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
axis=axis)
else:
return np.concatenate(
(arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
def _prepend_edge(arr, pad_amt, axis=-1):
"""
Prepend `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
        Output array, extended by `pad_amt` edge values prepended along `axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_edge(arr, pad_amt, axis=-1):
"""
Append `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
        Output array, extended by `pad_amt` edge values appended along
        `axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
"""
Prepend linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
end : scalar
        Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
"""
Append linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
end : scalar
        Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
edge_slice = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` maximum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
        Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the maximum of the first `num` values along
`axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
max_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_max(arr, pad_amt, num, axis=-1):
"""
Pad one `axis` of `arr` with the maximum of the last `num` elements.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the maximum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
max_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
max_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the mean of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
mean_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
arr), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
        appended region is the mean of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
mean_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
mean_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the median of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
med_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the median of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
med_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
med_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the minimum of the first `num` values along
`axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
min_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
"""
    Append `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the minimum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
min_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
min_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by reflection.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of reflection; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with reflected
values from the original array.
Notes
-----
This algorithm does not pad with repetition, i.e. the edges are not
repeated in the reflection. For that behavior, use `mode='symmetric'`.
    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of
    the original shape violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk1 = arr[ref_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
ref_chunk1 = ref_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
ref_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk2 = arr[ref_slice][rev_idx]
if pad_amt[1] == 1:
ref_chunk2 = ref_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
def _pad_sym(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by symmetry.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of symmetry; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with symmetric
values from the original array.
Notes
-----
This algorithm DOES pad with repetition, i.e. the edges are repeated.
For padding without repeated edges, use `mode='reflect'`.
    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of
    the original shape violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
sym_chunk1 = arr[sym_slice][rev_idx]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
sym_chunk1 = sym_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1]
end = arr.shape[axis]
sym_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
sym_chunk2 = arr[sym_slice][rev_idx]
if pad_amt[1] == 1:
sym_chunk2 = sym_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
def _pad_wrap(arr, pad_amt, axis=-1):
"""
Pad `axis` of `arr` via wrapping.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded with wrapped
        values from the opposite end of `axis`.
Notes
-----
This method of padding is also known as 'tile' or 'tiling'.
    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of
    the original shape violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
start = arr.shape[axis] - pad_amt[0]
end = arr.shape[axis]
wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
wrap_chunk1 = arr[wrap_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
for (i, x) in enumerate(arr.shape))
wrap_chunk2 = arr[wrap_slice]
if pad_amt[1] == 1:
wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
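# --- Illustrative sketch (not part of the original module) ---
# A hypothetical call showing the tiling behaviour documented above:
#   _pad_wrap(np.array([1, 2, 3, 4, 5]), (2, 3), axis=0)
# takes [4, 5] from the far end and [1, 2, 3] from the near end, giving
#   [4, 5, 1, 2, 3, 4, 5, 1, 2, 3]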
def _normalize_shape(ndarray, shape, cast_to_int=True):
"""
Private function which does some checks and normalizes the possibly
much simpler representations of 'pad_width', 'stat_length',
'constant_values', 'end_values'.
Parameters
----------
    ndarray : ndarray
Input ndarray
shape : {sequence, array_like, float, int}, optional
The width of padding (pad_width), the number of elements on the
edge of the narray used for statistics (stat_length), the constant
value(s) to use when filling padded regions (constant_values), or the
endpoint target(s) for linear ramps (end_values).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis where `N` is rank of `narray`.
((before, after),) yields same before and after constants for each
axis.
(constant,) or val is a shortcut for before = after = constant for
all axes.
cast_to_int : bool, optional
Controls if values in ``shape`` will be rounded and cast to int
before being returned.
Returns
-------
normalized_shape : tuple of tuples
val => ((val, val), (val, val), ...)
[[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
((val1, val2), (val3, val4), ...) => no change
[[val1, val2], ] => ((val1, val2), (val1, val2), ...)
((val1, val2), ) => ((val1, val2), (val1, val2), ...)
[[val , ], ] => ((val, val), (val, val), ...)
((val , ), ) => ((val, val), (val, val), ...)
"""
ndims = ndarray.ndim
# Shortcut shape=None
if shape is None:
return ((None, None), ) * ndims
# Convert any input `info` to a NumPy array
arr = np.asarray(shape)
# Switch based on what input looks like
if arr.ndim <= 1:
if arr.shape == () or arr.shape == (1,):
# Single scalar input
# Create new array of ones, multiply by the scalar
arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr
elif arr.shape == (2,):
# Apply padding (before, after) each axis
# Create new axis 0, repeat along it for every axis
arr = arr[np.newaxis, :].repeat(ndims, axis=0)
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
elif arr.ndim == 2:
if arr.shape[1] == 1 and arr.shape[0] == ndims:
# Padded before and after by the same amount
arr = arr.repeat(2, axis=1)
elif arr.shape[0] == ndims:
# Input correctly formatted, pass it on as `arr`
arr = shape
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
# Cast if necessary
if cast_to_int is True:
arr = np.round(arr).astype(int)
# Convert list of lists to tuple of tuples
return tuple(tuple(axis) for axis in arr.tolist())
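# --- Illustrative sketch (not part of the original module) ---
# Hypothetical inputs showing the normalization rules listed in the
# docstring above, for a rank-2 array `a`:
#   _normalize_shape(a, 3)                  -> ((3, 3), (3, 3))
#   _normalize_shape(a, (1, 2))             -> ((1, 2), (1, 2))
#   _normalize_shape(a, ((1, 2), (3, 4)))   -> ((1, 2), (3, 4))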
def _validate_lengths(narray, number_elements):
"""
Private function which does some checks and reformats pad_width and
stat_length using _normalize_shape.
Parameters
----------
narray : ndarray
Input ndarray
number_elements : {sequence, int}, optional
The width of padding (pad_width) or the number of elements on the edge
of the narray used for statistics (stat_length).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for all
axes.
Returns
-------
_validate_lengths : tuple of tuples
int => ((int, int), (int, int), ...)
[[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
((int1, int2), (int3, int4), ...) => no change
[[int1, int2], ] => ((int1, int2), (int1, int2), ...)
((int1, int2), ) => ((int1, int2), (int1, int2), ...)
[[int , ], ] => ((int, int), (int, int), ...)
((int , ), ) => ((int, int), (int, int), ...)
"""
normshp = _normalize_shape(narray, number_elements)
for i in normshp:
chk = [1 if x is None else x for x in i]
chk = [1 if x >= 0 else -1 for x in chk]
if (chk[0] < 0) or (chk[1] < 0):
fmt = "%s cannot contain negative values."
raise ValueError(fmt % (number_elements,))
return normshp
###############################################################################
# Public functions
def pad(array, pad_width, mode=None, **kwargs):
"""
Pads an array.
Parameters
----------
array : array_like of rank N
Input array
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function
One of the following string values or a user supplied function.
'constant'
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the
array edge value.
'maximum'
Pads with the maximum value of all or part of the
vector along each axis.
'mean'
Pads with the mean value of all or part of the
vector along each axis.
'median'
Pads with the median value of all or part of the
vector along each axis.
'minimum'
Pads with the minimum value of all or part of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
<function>
Padding function, see Notes.
stat_length : sequence or int, optional
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
((before_1, after_1), ... (before_N, after_N)) unique statistic
lengths for each axis.
((before, after),) yields same before and after statistic lengths
for each axis.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : sequence or int, optional
Used in 'constant'. The values to set the padded values for each
axis.
((before_1, after_1), ... (before_N, after_N)) unique pad constants
for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for
all axes.
Default is 0.
end_values : sequence or int, optional
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
((before_1, after_1), ... (before_N, after_N)) unique end values
for each axis.
((before, after),) yields same before and after end values for each
axis.
(constant,) or int is a shortcut for before = after = end value for
all axes.
Default is 0.
reflect_type : {'even', 'odd'}, optional
Used in 'reflect', and 'symmetric'. The 'even' style is the
default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Notes
-----
.. versionadded:: 1.7.0
For an array with rank greater than 1, some of the padding of later
axes is calculated from padding of previous axes. This is easiest to
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
The padding function, if used, should return a rank 1 array equal in
length to the vector argument with padded values replaced. It has the
following signature::
padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
where
vector : ndarray
A rank 1 array already padded with zeros. Padded values are
vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
values padded at the beginning of vector where
iaxis_pad_width[1] represents the number of values padded at
the end of vector.
iaxis : int
The axis currently being calculated.
kwargs : misc
Any keyword arguments the function requires.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))
array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])
>>> np.lib.pad(a, (2, 3), 'edge')
array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])
>>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
>>> np.lib.pad(a, (2,), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> np.lib.pad(a, (2,), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> np.lib.pad(a, (2,), 'median')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = [[1, 2], [3, 4]]
>>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')
array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
>>> np.lib.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')
array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
>>> def padwithtens(vector, pad_width, iaxis, kwargs):
... vector[:pad_width[0]] = 10
... vector[-pad_width[1]:] = 10
... return vector
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
>>> np.lib.pad(a, 2, padwithtens)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
narray = np.array(array)
pad_width = _validate_lengths(narray, pad_width)
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
kwdefaults = {
'stat_length': None,
'constant_values': 0,
'end_values': 0,
'reflect_type': 'even',
}
if isinstance(mode, str):
# Make sure have allowed kwargs appropriate for mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[mode]))
# Set kwarg defaults
for kw in allowedkwargs[mode]:
kwargs.setdefault(kw, kwdefaults[kw])
# Need to only normalize particular keywords.
for i in kwargs:
if i == 'stat_length':
kwargs[i] = _validate_lengths(narray, kwargs[i])
if i in ['end_values', 'constant_values']:
kwargs[i] = _normalize_shape(narray, kwargs[i],
cast_to_int=False)
elif mode is None:
raise ValueError('Keyword "mode" must be a function or one of %s.' %
(list(allowedkwargs.keys()),))
else:
# Drop back to old, slower np.apply_along_axis mode for user-supplied
# vector function
function = mode
# Create a new padded array
rank = list(range(len(narray.shape)))
total_dim_increase = [np.sum(pad_width[i]) for i in rank]
offset_slices = [slice(pad_width[i][0],
pad_width[i][0] + narray.shape[i])
for i in rank]
new_shape = np.array(narray.shape) + total_dim_increase
newmat = np.zeros(new_shape, narray.dtype)
# Insert the original array into the padded array
newmat[offset_slices] = narray
# This is the core of pad ...
for iaxis in rank:
np.apply_along_axis(function,
iaxis,
newmat,
pad_width[iaxis],
iaxis,
kwargs)
return newmat
# If we get here, use new padding method
newmat = narray.copy()
# API preserved, but completely new algorithm which pads by building the
# entire block to pad before/after `arr` with in one step, for each axis.
if mode == 'constant':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['constant_values'])):
newmat = _prepend_const(newmat, pad_before, before_val, axis)
newmat = _append_const(newmat, pad_after, after_val, axis)
elif mode == 'edge':
for axis, (pad_before, pad_after) in enumerate(pad_width):
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
elif mode == 'linear_ramp':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['end_values'])):
newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
newmat = _append_ramp(newmat, pad_after, after_val, axis)
elif mode == 'maximum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
newmat = _append_max(newmat, pad_after, chunk_after, axis)
elif mode == 'mean':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
newmat = _append_mean(newmat, pad_after, chunk_after, axis)
elif mode == 'median':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
newmat = _append_med(newmat, pad_after, chunk_after, axis)
elif mode == 'minimum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
newmat = _append_min(newmat, pad_after, chunk_after, axis)
elif mode == 'reflect':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
if ((pad_before > 0) or
(pad_after > 0)) and newmat.shape[axis] == 1:
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
continue
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis] - 1
while ((pad_before > safe_pad) or (pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_ref(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)
elif mode == 'symmetric':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_sym(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
elif mode == 'wrap':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
return newmat
| mit |
wevoice/wesub | apps/webdriver_testing/check_api/test_subtitles.py | 5 | 7543 | import os
import time
import codecs
import json
from rest_framework.test import APILiveServerTestCase, APIClient
from videos.models import *
from utils.factories import *
from webdriver_testing.webdriver_base import WebdriverTestCase
from webdriver_testing.pages.site_pages import video_language_page
from webdriver_testing.pages.site_pages import video_page
from webdriver_testing import data_helpers
from webdriver_testing.pages.site_pages import editor_page
class TestCaseSubtitles(APILiveServerTestCase, WebdriverTestCase):
"""TestSuite for site video searches. """
NEW_BROWSER_PER_TEST_CASE = False
@classmethod
def setUpClass(cls):
super(TestCaseSubtitles, cls).setUpClass()
cls.user = UserFactory()
#Create some test data and set subtitle data dir
cls.subs_data_dir = os.path.join(os.getcwd(), 'apps',
'webdriver_testing', 'subtitle_data')
cls.video_pg = video_page.VideoPage(cls)
cls.editor_pg = editor_page.EditorPage(cls)
cls.video_language_pg = video_language_page.VideoLanguagePage(cls)
def _get (self, url):
self.client.force_authenticate(self.user)
response = self.client.get(url)
response.render()
r = (json.loads(response.content))
return r
def _post(self, url, data=None):
self.client.force_authenticate(self.user)
response = self.client.post(url, data)
status = response.status_code
response.render()
r = (json.loads(response.content))
return r, status
def _post_subs(self, url, data=None):
self.client.force_authenticate(self.user)
response = self.client.post(url, json.dumps(data),
content_type="application/json; charset=utf-8")
status = response.status_code
response.render()
r = (json.loads(response.content))
return r, status
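    # --- Illustrative sketch (not part of the original tests) ---
    # The subtitle POST body passed to _post_subs is JSON of roughly this
    # (hypothetical) shape; the srt content string below is made up:
    #   {"subtitles": "1\n00:00:00,000 --> 00:00:01,000\nHello\n",
    #    "sub_format": "srt"}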
def test_add_language(self):
"""Set a language as complete via the api
"""
video = VideoFactory()
url = '/api/videos/%s/languages/' % video.video_id
data = {'language_code': 'en',
}
r, status = self._post(url, data)
self.assertEqual(201, status)
def test_add_original_language(self):
"""Set a language as original via the api
"""
video = VideoFactory()
url = '/api/videos/%s/languages/' % video.video_id
#Create the language for the test video
data = {'language_code': 'fr',
'is_primary_audio_language': True,
'subtitles_complete': False
}
r, status = self._post(url, data)
self.assertEqual(201, status)
self.video_pg.open_video_page(video.video_id)
self.video_pg.add_subtitles()
self.assertTrue(self.video_pg.is_text_present("th", "This video is in French"))
def test_upload_untimed_subtitles(self):
"""Upload untimed subtitles via api """
#Create the language for the test video
video = VideoFactory()
url = '/api/videos/%s/languages/' % video.video_id
data = {'language_code': 'en',
'is_original': True
}
r, status = self._post(url, data)
url = '/api/videos/%s/languages/en/subtitles/' % video.video_id
subtitles = open(os.path.join(self.subs_data_dir, 'Untimed_text.srt'))
data = { 'subtitles': subtitles.read(),
'sub_format': 'srt',
}
r, status = self._post_subs(url, data)
self.video_pg.open_video_page(video.video_id)
self.video_language_pg.open_video_lang_page(video.video_id, 'en')
verification_file = os.path.join(self.subs_data_dir,'Untimed_lines.txt')
expected_list = [line.strip() for line in codecs.open(
verification_file, encoding='utf-8')]
displayed_list = self.video_language_pg.displayed_lines()
self.assertEqual(expected_list, displayed_list)
self.video_language_pg.log_in(self.user.username, 'password')
self.video_language_pg.open_video_lang_page(
video.video_id, 'en')
self.video_language_pg.edit_subtitles()
editor_sub_list = self.editor_pg.working_text()
#Verify uploaded subs are displayed and editable
self.assertLess(0, len(editor_sub_list))
typed_line = "I'd like to be"
self.editor_pg.edit_sub_line(typed_line, 1)
self.editor_pg.save('Exit')
self.video_language_pg.open_video_lang_page(
video.video_id, 'en')
displayed_list = self.video_language_pg.displayed_lines()
#Verify the edited text is in the sub list
self.assertIn(typed_line, displayed_list[0])
        #Verify the original unedited text is still present in the sub list.
self.assertEqual(expected_list[-1], displayed_list[-1])
def test_formats_and_langs(self):
"""Upload subs via api.
"""
errors = []
video = VideoFactory()
testdata = {
"srt": "en",
"ssa": "hu",
'sbv': 'zh-cn',
'dfxp': 'sv',
'txt': 'en-gb',
'vtt': 'es-mx'
}
for sub_format, lc in testdata.iteritems():
#Post the language
url = '/api/videos/%s/languages/' % video.video_id
data = {'language_code': lc,
}
try:
r, status = self._post(url, data)
self.logger.info(status)
self.assertEqual(201, status)
except Exception as e:
errors.append('failed adding language code: {0} error: {1}'.format(lc, e))
#Post the subtitles
try:
url = '/api/videos/{0}/languages/{1}/subtitles/'.format(video.video_id, lc)
subfile = os.path.join(self.subs_data_dir, 'Timed_text.{0}.{1}'.format(lc, sub_format))
self.logger.info(subfile)
#subtitles = codecs.open(subfile, encoding='utf-8')
subtitles = open(subfile)
data = { "subtitles": subtitles.read(),
"sub_format": sub_format,
}
r, status = self._post_subs(url, data)
self.assertEqual(201, status)
except Exception as e:
errors.append('failed adding format: {0}, error {1}'.format(sub_format,e))
self.assertEqual(errors, [])
def test_false_subtitles(self):
"""Return error when 'false' passed for subtitles'
"""
video = VideoFactory()
url = '/api/videos/%s/languages/' % video.video_id
data = {'language_code': 'en', }
r, status = self._post(url, data)
url = '/api/videos/{0}/languages/en/subtitles/'.format(video.video_id)
data = { "subtitles": False,
"sub_format": 'json',
}
r, status = self._post_subs(url, data)
self.logger.info(r)
self.assertEqual({u'subtitles': [u'Invalid subtitle data']}, r)
def test_invalid_videoid(self):
"""Return error when video id is None'
"""
video = VideoFactory()
url = '/api/videos/None/languages/en/subtitles/'
r = self._get(url)
self.logger.info(r)
self.assertEqual({u'detail': u'Not found'}, r)
| agpl-3.0 |
davipeterlini/routeflow_ha | pox/pox/lib/revent/revent.py | 25 | 19848 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
#TODO:
#-----
# decorator for adding event classes to a class?
# make mixin-able to existing classes
# make mixin-able to existing objects
"""
Revent is an event system wherein objects become a composition of data,
methods, and now events. It fits with the publish/subscribe communication
pattern.
Events themselves are generally instances of some subclass of the Event
class. In fact, they can be arbitrary values of any sort, though
subclasses of Event get special handling (and support for values of other
sorts may eventually be removed).
To subscribe to an event, you create a callback function and register it
with the source. For example:
def bar_handler(self, event):
print("bar!", event)
pox.core.addListener(UpEvent, bar_handler)
Often (especially if you are going to listen to multiple events from a
single source), it is easier to inherit from EventMixin just so that you
can use the listenTo() method. For example:
class Sink (EventMixin):
def __init__(self):
# Listen to events sourced by pox.core
pox.core.addListeners(self)
self.listenTo(pox.core)
def _handle_ComponentRegistered (self, event):
# The name of this method has a special meaning to addListeners().
# If a method name starts with _handle_ and ends with the name of
# an event that the source publishes, the method is automatically
# registered as an event handler.
#
# This method will now be called whenever pox.core triggers a
# ComponentRegistered event.
# Most event handlers are passed an event object as a parameter (though
# individual Event classes can override this behavior by altering their
# _invoke() method).
component = event.component
name = event.name
print("I see you,", name, "!")
Event sources can also use the EventMixin library:
class Source (EventMixin):
# Defining this variable tells the revent library what kind of events
# this source can raise.
_eventMixin_events = set([ComponentRegistered])
def __init__ (self):
foo()
def foo (self):
# We can raise events as follows:
component = "fake_pox_component"
self.raiseEvent(ComponentRegistered(component))
# In the above invocation, the argument is an instance of
# ComponentRegistered (which is a subclass of Event). The following is
# functionally equivalent, but has the nice property that
# ComponentRegistered is never instantiated if there are no listeners.
#self.raiseEvent(ComponentRegistered, component)
# In both cases, "component" is passed to the __init__ method for the
# ComponentRegistered class.
# The above method invocation will raise an exception if an event
      # handler raises an exception. To protect yourself from exceptions in
# handlers, see raiseEventNoErrors().
"""
from __future__ import print_function
import operator
# weakrefs are used for some event handlers so that just having an event
# handler set will not keep the source (publisher) alive.
import weakref
_nextEventID = 0
def _generateEventID ():
"""
Generates an event ID
This is (at present) mostly so that an event can later be removed.
Note that this function is not threadsafe.
"""
global _nextEventID
_nextEventID += 1
return _nextEventID
def EventReturn (halt = False, remove = False):
"""
Event handlers can return special values. You can craft these with this
function.
If halt is True, further handlers will not be called for this particular
event.
If remove is True, the handler will be removed (i.e. unsubscribed) and
will not be called anymore.
Shortcut names are also available. You can also simply do:
return EventHalt
return EventRemove
return HaltAndRemove
"""
return (halt, remove)
EventContinue = EventReturn(halt=False, remove=False)
# Event handlers can return this to stop further handling of this event
EventHalt = EventReturn(halt=True)
# A handler can return this if it wants to remove itself (unsubscribe)
EventRemove = EventReturn(remove=True)
# A handler can return this if it wants to both stop further processing
# and unsubscribe
EventHaltAndRemove = EventReturn(remove=True, halt=True)
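# --- Illustrative sketch (not part of the original module) ---
# A hypothetical handler using the sentinels above: it handles the first
# event it receives, stops further handlers, and unsubscribes itself.
#
#   def _handle_once (event):
#     do_something(event)        # hypothetical work
#     return EventHaltAndRemove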
class Event (object):
"""
Superclass for events
"""
def __init__ (self):
self.halt = False
self.source = None
def _invoke (self, handler, *args, **kw):
return handler(self, *args, **kw)
def handleEventException (source, event, args, kw, exc_info):
"""
Called when an exception is raised by an event handler when the event
was raised by raiseEventNoErrors().
You can replace this method if you'd like to replace the default handling
(printing an error message an a traceback) with your own (for example if
you are using a logging system and would like to use that). You can also
replace it with None to have events fail silently.
"source" is the object sourcing the event. "event" is the event that was
being raised when the exception occurred. "args" and "kw" were the args
and kwargs passed to raiseEventNoErrors. "exc_info" is the exception
info as returned by sys.exc_info()).
"""
try:
c = source
t = event
if hasattr(c, "__class__"): c = c.__class__.__name__
if isinstance(t, Event): t = t.__class__.__name__
elif issubclass(t, Event): t = t.__name__
except:
pass
import sys
sys.stderr.write("Exception while handling %s!%s...\n" % (c,t))
import traceback
traceback.print_exception(*exc_info)
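# --- Illustrative sketch (not part of the original module) ---
# As the docstring above notes, a project can swap in its own hook, for
# example to route handler exceptions through the standard logging module
# (the names below are hypothetical):
#
#   import logging
#   def _log_event_exception (source, event, args, kw, exc_info):
#     logging.getLogger("revent").error("error in %s handler", event,
#                                       exc_info=exc_info)
#   # then rebind the module-level hook:
#   # revent.handleEventException = _log_event_exception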
class EventMixin (object):
"""
Mixin for classes that want to source events
"""
# _eventMixin_events contains the set of events that the subclassing
# object will raise.
# You can't raise events that aren't in this set -- unless you set this
# to True in which all events are acceptable.
_eventMixin_events = set()
def _eventMixin_addEvents (self, events):
for e in events:
self._eventMixin_addEvent(e)
def _eventMixin_addEvent (self, eventType):
self._eventMixin_init()
assert self._eventMixin_events is not True
if False:
pass
#if self._eventMixin_events == True:
# # Do nothing, all events already accepted!
# # print warning?
# return
elif self._eventMixin_events == None:
self._eventMixin_events = set()
self._eventMixin_events.add(eventType)
def __init__ (self):
self._eventMixin_init()
def _eventMixin_init (self):
if not hasattr(self, "_eventMixin_events"):
setattr(self, "_eventMixin_events", True)
if not hasattr(self, "_eventMixin_handlers"):
setattr(self, "_eventMixin_handlers", {})
def raiseEventNoErrors (self, event, *args, **kw):
"""
Raise an event, catching exceptions thrown by the handler.
    If exceptions are caught, the global handleEventException() is called.
Also see raiseEvent()
"""
#TODO: this should really keep subsequent events executing and print
# the specific handler that failed...
try:
return self.raiseEvent(event, *args, **kw)
except:
if handleEventException is not None:
import sys
handleEventException(self, event, args, kw, sys.exc_info())
return None
def raiseEvent (self, event, *args, **kw):
"""
Raises an event.
If "event" is an Event type, it will be initialized with args and kw,
but only if there are actually listeners.
Returns the event object, unless it was never created (because there
were no listeners) in which case returns None.
"""
self._eventMixin_init()
classCall = False
if isinstance(event, Event):
eventType = event.__class__
classCall = True
if event.source is None: event.source = self
elif issubclass(event, Event):
# Check for early-out
if event not in self._eventMixin_handlers:
return None
if len(self._eventMixin_handlers[event]) == 0:
return None
classCall = True
eventType = event
event = eventType(*args, **kw)
args = ()
kw = {}
if event.source is None:
event.source = self
#print("raise",event,eventType)
if (self._eventMixin_events is not True
and eventType not in self._eventMixin_events):
raise RuntimeError("Event %s not defined on object of type %s"
% (eventType, type(self)))
# Create a copy so that it can be modified freely during event
# processing. It might make sense to change this.
handlers = self._eventMixin_handlers.get(eventType, [])
for (priority, handler, once, eid) in handlers:
if classCall:
rv = event._invoke(handler, *args, **kw)
else:
rv = handler(event, *args, **kw)
if once: self.removeListener(eid)
if rv is None: continue
if rv is False:
self.removeListener(eid)
if rv is True:
if classCall: event.halt = True
break
if type(rv) == tuple:
if len(rv) >= 2 and rv[1] == True:
self.removeListener(eid)
if len(rv) >= 1 and rv[0]:
if classCall: event.halt = True
break
if len(rv) == 0:
if classCall: event.halt = True
break
#if classCall and hasattr(event, "halt") and event.halt:
if classCall and event.halt:
break
return event
def removeListeners (self, listeners):
altered = False
for l in listeners:
if self.removeListener(l): altered = True
return altered
def _eventMixin_get_listener_count (self):
"""
Returns the number of listeners.
"""
return sum((len(x) for x in self._eventMixin_handlers.itervalues()))
def removeListener (self, handlerOrEID, eventType=None):
"""
handlerOrEID : a reference to a handler object, an event ID (EID)
identifying the event type, or (eventType, EID) pair
eventType : the type of event to remove the listener(s) for
"""
#TODO: This method could use an elegant refactoring.
#print("Remove listener", handlerOrEID)
self._eventMixin_init()
handler = handlerOrEID
altered = False
if type(handler) == tuple:
# It's a type/eid pair
if eventType == None: eventType = handler[0]
handlers = self._eventMixin_handlers[eventType]
l = len(handlers)
self._eventMixin_handlers[eventType] = [x for x in handlers
if x[3] != handler[1]]
altered = altered or l != len(self._eventMixin_handlers[eventType])
elif type(handler) == int:
# It's an EID
if eventType == None:
for event in self._eventMixin_handlers:
handlers = self._eventMixin_handlers[event]
l = len(handlers)
self._eventMixin_handlers[event] = [x for x in handlers
if x[3] != handler]
altered = altered or l != len(self._eventMixin_handlers[event])
else:
        handlers = self._eventMixin_handlers[eventType]
        l = len(handlers)
self._eventMixin_handlers[eventType] = [x for x in handlers
if x[3] != handler]
        altered = altered or l != len(self._eventMixin_handlers[eventType])
else:
if eventType == None:
for event in self._eventMixin_handlers:
handlers = self._eventMixin_handlers[event]
l = len(handlers)
self._eventMixin_handlers[event] = [x for x in handlers
if x[1] != handler]
altered = altered or l != len(self._eventMixin_handlers[event])
else:
handlers = self._eventMixin_handlers[eventType]
l = len(handlers)
self._eventMixin_handlers[eventType] = [x for x in handlers
if x[1] != handler]
altered = altered or l != len(self._eventMixin_handlers[eventType])
return altered
def addListenerByName (self, *args, **kw):
"""
Add a listener by name. An eventType argument must be present, which is
used as the name. A handler argument must also be present.
Also see addListener().
"""
kw['byName'] = True
return self.addListener(*args,**kw)
def addListener (self, eventType, handler, once=False, weak=False,
priority=None, byName=False):
"""
Add an event handler for an event triggered by this object (subscribe).
eventType : event class object (e.g. ConnectionUp). If byName is True,
should be a string (e.g. "ConnectionUp")
handler : function/method to be invoked when event is raised
once : if True, this handler is removed after being fired once
weak : If handler is a method on object A, then listening to an event
on object B will normally make B have a reference to A, so A
can not be released until after B is released or the listener
is removed.
If weak is True, there is no relationship between the lifetimes
of the publisher and subscriber.
priority : The order in which to call event handlers if there are
multiple for an event type. Should probably be an integer,
where higher means to call it earlier. Do not specify if
you don't care.
byName : True if eventType is a string name, else an Event subclass
Raises an exception unless eventType is in the source's
_eventMixin_events set (or, alternately, _eventMixin_events must
be True).
The return value can be used for removing the listener.
"""
self._eventMixin_init()
if (self._eventMixin_events is not True
and eventType not in self._eventMixin_events):
# eventType wasn't found
fail = True
if byName:
# if we were supposed to find the event by name, see if one of the
# event names matches
for e in self._eventMixin_events:
if issubclass(e, Event):
if e.__name__ == eventType:
eventType = e
fail = False
break
if fail:
raise RuntimeError("Event %s not defined on object of type %s"
% (eventType, type(self)))
if eventType not in self._eventMixin_handlers:
# if no handlers are already registered, initialize
handlers = self._eventMixin_handlers[eventType] = []
self._eventMixin_handlers[eventType] = handlers
else:
handlers = self._eventMixin_handlers[eventType]
eid = _generateEventID()
if weak: handler = CallProxy(self, handler, (eventType, eid))
entry = (priority, handler, once, eid)
handlers.append(entry)
if priority is not None:
# If priority is specified, sort the event handlers
handlers.sort(reverse = True, key = operator.itemgetter(0))
return (eventType,eid)
def listenTo (self, source, *args, **kv):
"""
Automatically subscribe to events on source.
This method tries to bind all _handle_ methods on self to events
on source. Kind of the opposite of addListeners().
See also: addListeners(), autoBindEvents()
"""
return autoBindEvents(self, source, *args, **kv)
def addListeners (self, sink, prefix='', weak=False, priority=None):
"""
Automatically subscribe sink to our events.
Tries to bind all _handle_ methods on sink to events that this object
raises. Kind of the opposite of listenTo().
See also: listenTo(), autoBindEvents()
"""
return autoBindEvents(sink, self, prefix, weak, priority)
def clearHandlers(self):
"""
Remove all handlers from this object
"""
self._eventMixin_handlers = {}
def autoBindEvents (sink, source, prefix='', weak=False, priority=None):
"""
Automatically set up listeners on sink for events raised by source.
Often you have a "sink" object that is interested in multiple events
raised by some other "source" object. This method makes setting that
up easy.
You name handler methods on the sink object in a special way. For
  example, let's say you have an object mySource which raises events of
types FooEvent and BarEvent. You have an object mySink which wants to
listen to these events. To do so, it names its handler methods
"_handle_FooEvent" and "_handle_BarEvent". It can then simply call
autoBindEvents(mySink, mySource), and the handlers are set up.
You can also set a prefix which changes how the handlers are to be named.
For example, autoBindEvents(mySink, mySource, "source1") would use a
handler named "_handle_source1_FooEvent".
"weak" has the same meaning as with addListener().
Returns the added listener IDs (so that you can remove them later).
"""
if len(prefix) > 0 and prefix[0] != '_': prefix = '_' + prefix
if hasattr(source, '_eventMixin_events') is False:
# If source does not declare that it raises any events, do nothing
print("Warning: source class %s doesn't specify any events!" % (
source.__class__.__name__,))
return []
events = {}
for e in source._eventMixin_events:
if type(e) == str:
events[e] = e
else:
events[e.__name__] = e
listeners = []
# for each method in sink
for m in dir(sink):
# get the method object
a = getattr(sink, m)
if callable(a):
# if it has the revent prefix signature,
if m.startswith("_handle" + prefix + "_"):
event = m[8+len(prefix):]
# and it is one of the events our source triggers
if event in events:
# append the listener
listeners.append(source.addListener(events[event], a, weak=weak,
priority=priority))
#print("autoBind: ",source,m,"to",sink)
elif len(prefix) > 0 and "_" not in event:
print("Warning: %s found in %s, but %s not raised by %s" %
(m, sink.__class__.__name__, event,
source.__class__.__name__))
return listeners
class CallProxy (object):
"""
Internal use.
Custom proxy wrapper for /weak reference/ event handlers. When the
publisher or subscriber objects are lost, this cleans up by removing
the listener entry in the publisher object.
"""
def __init__ (self, source, handler, removeData):
"""
source : Event source (publisher)
handler : A "weak handler" callback
removeData : The identifier used for removal of the handler
"""
self.source = weakref.ref(source, self._forgetMe)
self.obj = weakref.ref(handler.im_self, self._forgetMe)
self.method = handler.im_func
self.removeData = removeData
self.name = str(handler)
def _forgetMe (self, o):
# o is the weak reference object; we don't use it
#print("Forgetting",self.removeData,self.method)
source = self.source()
if source is not None:
source.removeListener(self.removeData)
self.obj = None
def __call__ (self, *args, **kw):
#print("weak call")
if self.obj is None: return
o = self.obj()
if o is not None:
return self.method(o, *args, **kw)
print("callProxy object is gone!")
raise RuntimeError("callProxy object is gone!")
def __str__ (self):
return "<CallProxy for " + self.name + ">"
| apache-2.0 |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/etc/apps/headphones/headphones/transmission.py | 7 | 5994 | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger, request
import time
import json
import base64
import urlparse
import headphones
# This is just a simple script to send torrents to transmission. The
# intention is to turn this into a class where we can check the state
# of the download, set the download dir, etc.
# TODO: Store the session id so we don't need to make 2 calls
# Store torrent id so we can check up on it
def addTorrent(link, data=None):
method = 'torrent-add'
if link.endswith('.torrent') or data:
if data:
metainfo = str(base64.b64encode(data))
else:
with open(link, 'rb') as f:
metainfo = str(base64.b64encode(f.read()))
arguments = {'metainfo': metainfo, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
else:
arguments = {'filename': link, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
response = torrentAction(method, arguments)
if not response:
return False
if response['result'] == 'success':
if 'torrent-added' in response['arguments']:
retid = response['arguments']['torrent-added']['hashString']
elif 'torrent-duplicate' in response['arguments']:
retid = response['arguments']['torrent-duplicate']['hashString']
else:
retid = False
logger.info(u"Torrent sent to Transmission successfully")
return retid
else:
logger.info('Transmission returned status %s' % response['result'])
return False
def getTorrentFolder(torrentid):
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['name', 'percentDone']}
response = torrentAction(method, arguments)
percentdone = response['arguments']['torrents'][0]['percentDone']
torrent_folder_name = response['arguments']['torrents'][0]['name']
tries = 1
while percentdone == 0 and tries < 10:
tries += 1
time.sleep(5)
response = torrentAction(method, arguments)
percentdone = response['arguments']['torrents'][0]['percentDone']
torrent_folder_name = response['arguments']['torrents'][0]['name']
return torrent_folder_name
def setSeedRatio(torrentid, ratio):
method = 'torrent-set'
if ratio != 0:
arguments = {'seedRatioLimit': ratio, 'seedRatioMode': 1, 'ids': torrentid}
else:
arguments = {'seedRatioMode': 2, 'ids': torrentid}
response = torrentAction(method, arguments)
if not response:
return False
def removeTorrent(torrentid, remove_data=False):
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['isFinished', 'name']}
response = torrentAction(method, arguments)
if not response:
return False
try:
finished = response['arguments']['torrents'][0]['isFinished']
name = response['arguments']['torrents'][0]['name']
if finished:
logger.info('%s has finished seeding, removing torrent and data' % name)
method = 'torrent-remove'
if remove_data:
arguments = {'delete-local-data': True, 'ids': torrentid}
else:
arguments = {'ids': torrentid}
response = torrentAction(method, arguments)
return True
else:
logger.info('%s has not finished seeding yet, torrent will not be removed, will try again on next run' % name)
except:
return False
return False
def torrentAction(method, arguments):
host = headphones.CONFIG.TRANSMISSION_HOST
username = headphones.CONFIG.TRANSMISSION_USERNAME
password = headphones.CONFIG.TRANSMISSION_PASSWORD
if not host.startswith('http'):
host = 'http://' + host
if host.endswith('/'):
host = host[:-1]
# Fix the URL. We assume that the user does not point to the RPC endpoint,
# so add it if it is missing.
parts = list(urlparse.urlparse(host))
if not parts[0] in ("http", "https"):
parts[0] = "http"
if not parts[2].endswith("/rpc"):
parts[2] += "/transmission/rpc"
host = urlparse.urlunparse(parts)
# Retrieve session id
auth = (username, password) if username and password else None
response = request.request_response(host, auth=auth,
whitelist_status_code=[401, 409])
if response is None:
logger.error("Error gettings Transmission session ID")
return
# Parse response
if response.status_code == 401:
if auth:
logger.error("Username and/or password not accepted by " \
"Transmission")
else:
logger.error("Transmission authorization required")
return
elif response.status_code == 409:
session_id = response.headers['x-transmission-session-id']
if not session_id:
logger.error("Expected a Session ID from Transmission")
return
# Prepare next request
headers = {'x-transmission-session-id': session_id}
data = {'method': method, 'arguments': arguments}
response = request.request_json(host, method="POST", data=json.dumps(data),
headers=headers, auth=auth)
print response
if not response:
logger.error("Error sending torrent to Transmission")
return
return response
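# ---------------------------------------------------------------------------
# Illustrative call sequence (editorial addition, not part of the original
# module). This is only a sketch: it assumes headphones.CONFIG already
# provides TRANSMISSION_HOST/USERNAME/PASSWORD and DOWNLOAD_TORRENT_DIR, and
# the torrent URL is a placeholder.
#
#   torrent_id = addTorrent('http://example.com/some.torrent')
#   if torrent_id:
#       folder = getTorrentFolder(torrent_id)          # polls until download starts
#       setSeedRatio(torrent_id, 1.0)                  # stop seeding at ratio 1.0
#       removeTorrent(torrent_id, remove_data=False)   # removed only once finished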
| gpl-2.0 |
trishnaguha/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg_role.py | 52 | 7909 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
type: bool
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: str
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
return status
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
if not agm_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()
| gpl-3.0 |
carolinux/QGIS | scripts/context_help_id.py | 48 | 2262 | #!/usr/bin/python
"""
/***************************************************************************
context_help_id.py
-------------------
begin : 2009-11-16
copyright : (C) 2009 by Gary E.Sherman
email : gsherman at mrcc.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script generates a unique context id for use in the QGIS
context sensitive help system. It uses the SHA1 hash for the class name
and converts the first 12 characters to a unique integer.
To create a context id, pass the name of the QGIS class on the command line.
Example:
./context_help_id.py QgsAbout
This script requires Python 2.5 or higher (hashlib was introduced at 2.5).
NOTE: Due to a change in the way context ids are generated, ids
generated by the old method (Java hashCode function) will be different than
the id generated by the new method for the same class.
"""
import hashlib
import sys
# check to see if a class name was specified and if so, create the context id
if len(sys.argv) > 1:
hash = hashlib.sha1()
# set the hash to the name passed on the command line
hash.update(sys.argv[1])
# generate the context id by converting the first 12 characters of the hash
# to decimal
context_id = int(hash.hexdigest()[:12],16)
# print the result
print context_id
else:
# if no class name was specified, give a bit of help
print "To generate a context sensitive help id, specify the QGIS class name on the command line"
| gpl-2.0 |
zhaishaomin/LDS-prefetcher-research | gem5_src/arch/arm/ArmPMU.py | 39 | 5706 | # -*- mode:python -*-
# Copyright (c) 2009-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Matt Horsnell
# Andreas Sandberg
from m5.defines import buildEnv
from m5.SimObject import SimObject
from m5.params import *
from m5.params import isNullPointer
from m5.proxy import *
class ArmPMU(SimObject):
type = 'ArmPMU'
cxx_class = 'ArmISA::PMU'
cxx_header = 'arch/arm/pmu.hh'
@classmethod
def export_methods(cls, code):
code('''
void addEventProbe(unsigned int id,
SimObject *obj, const char *name);
''')
# To prevent cycles in the configuration hierarchy, we don't keep
# a list of supported events as a configuration param. Instead, we
# keep them in a local list and register them using the
# addEventProbe interface when other SimObjects register their
# probe listeners.
_deferred_event_types = []
# Override the normal SimObject::regProbeListeners method and
# register deferred event handlers.
def regProbeListeners(self):
for event_id, obj, name in self._deferred_event_types:
self.getCCObject().addEventProbe(event_id, obj.getCCObject(), name)
self.getCCObject().regProbeListeners()
def addEventProbe(self, event_id, obj, *args):
"""Add a probe-based event to the PMU if obj is not None."""
if obj is None:
return
for name in args:
self._deferred_event_types.append((event_id, obj, name))
def addArchEvents(self,
cpu=None,
itb=None, dtb=None,
icache=None, dcache=None,
l2cache=None):
"""Add architected events to the PMU.
This method can be called multiple times with only a subset of
the keyword arguments set. This enables event registration in
configuration scripts to happen closer to the instantiation of
the instrumented objects (e.g., the memory system) instead of
a central point.
CPU events should also be registered once per CPU that is
sharing the PMU (e.g., when switching between CPU models).
"""
bpred = cpu.branchPred if cpu and not isNullPointer(cpu.branchPred) \
else None
# 0x01: L1I_CACHE_REFILL
self.addEventProbe(0x02, itb, "Refills")
# 0x03: L2D_CACHE_REFILL
# 0x04: L1D_CACHE
self.addEventProbe(0x05, dtb, "Refills")
self.addEventProbe(0x06, cpu, "RetiredLoads")
self.addEventProbe(0x07, cpu, "RetiredStores")
self.addEventProbe(0x08, cpu, "RetiredInsts")
# 0x09: EXC_TAKEN
# 0x0A: EXC_RETURN
# 0x0B: CID_WRITE_RETIRED
self.addEventProbe(0x0C, cpu, "RetiredBranches")
# 0x0D: BR_IMMED_RETIRED
# 0x0E: BR_RETURN_RETIRED
        # 0x0F: UNALIGNED_LDST_RETIRED
self.addEventProbe(0x10, bpred, "Misses")
self.addEventProbe(0x11, cpu, "Cycles")
self.addEventProbe(0x12, bpred, "Branches")
self.addEventProbe(0x13, cpu, "RetiredLoads", "RetiredStores")
# 0x14: L1I_CACHE
# 0x15: L1D_CACHE_WB
# 0x16: L2D_CACHE
# 0x17: L2D_CACHE_REFILL
# 0x18: L2D_CACHE_WB
# 0x19: BUS_ACCESS
# 0x1A: MEMORY_ERROR
# 0x1B: INST_SPEC
# 0x1C: TTBR_WRITE_RETIRED
# 0x1D: BUS_CYCLES
# 0x1E: CHAIN
# 0x1F: L1D_CACHE_ALLOCATE
# 0x20: L2D_CACHE_ALLOCATE
platform = Param.Platform(Parent.any, "Platform this device is part of.")
eventCounters = Param.Int(31, "Number of supported PMU counters")
pmuInterrupt = Param.Int(68, "PMU GIC interrupt number")
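# -----------------------------------------------------------------------
# Illustrative configuration sketch (editorial addition, not part of the
# original file). In a gem5 Python configuration script the PMU above is
# instantiated and pointed at the objects it should observe; the cpu/itb/dtb
# names and the exact attachment point are assumptions that depend on the
# CPU model and gem5 version in use.
#
# pmu = ArmPMU(eventCounters=31, pmuInterrupt=68)
# pmu.addArchEvents(cpu=cpu, itb=cpu.itb, dtb=cpu.dtb)
# # The deferred probe events registered by addArchEvents() are hooked up
# # later, when regProbeListeners() runs during simulation setup.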
| apache-2.0 |
bikash/kaggleCompetition | microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/unique_gram.py | 2 | 1850 | from csv import DictReader
from datetime import datetime
import pickle
import heapq
import sys
# load data
def load_label(path, label):
result = []
for row in DictReader(open(path)):
if int(row['Class']) == label:
result.append((row['Id']))
return result
# generate grams dictionary for one file
def grams_dict(f_name, N=4):
path = "train/%s.bytes"%f_name
one_list = []
with open(path, 'rb') as f:
for line in f:
one_list += line.rstrip().split(" ")[1:]
grams_string = [''.join(one_list[i:i+N]) for i in xrange(len(one_list)-N+1)]
tree = dict()
for gram in grams_string:
if gram not in tree:
tree[gram] = 1
return tree
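# Worked example (editorial addition): for a .bytes line such as
# "00401000 4D 5A 90 00 03", the leading address token is dropped and the
# remaining hex bytes form overlapping 4-grams:
#   ['4D', '5A', '90', '00', '03'] -> '4D5A9000', '5A900003'
# Every distinct gram is stored with value 1, so the dictionary records
# per-file presence rather than per-file counts.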
# add up ngram dictionaries
def reduce_dict(f_labels):
result = dict()
for f_name in f_labels:
d = grams_dict(f_name)
for k,v in d.iteritems():
if k in result:
result[k] += v
else:
result[k] = v
del d
#print "this class has %i keys"%len(result)
#pickle.dump(result, open('gram/ngram_%i'%label,'wb'))
return result
# heap to get the top 100,000 features.
def Heap_top(dictionary, label, num = 100000):
heap = [(0,'tmp')]* num # initialize the heap
root = heap[0]
for ngram,count in dictionary.iteritems():
if count > root[0]:
root = heapq.heapreplace(heap, (count, ngram))
pickle.dump(heap, open('gram/ngram_%i_top%i'%(label,num),'wb'))
if __name__ == '__main__':
start = datetime.now()
#for label in range(1,10): # take too much memory
label = int(sys.argv[1])
print "Gathering 4 grams, Class %i out of 9..."%label
f_labels = load_label('trainLabels.csv', label)
Heap_top(reduce_dict(f_labels),label)
#print datetime.now() - start | apache-2.0 |
davidyezsetz/kuma | vendor/packages/ipython/IPython/kernel/map.py | 7 | 3762 | # encoding: utf-8
"""Classes used in scattering and gathering sequences.
Scattering consists of partitioning a sequence and sending the various
pieces to individual nodes in a cluster.
"""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import types
from IPython.genutils import flatten as genutil_flatten
#-------------------------------------------------------------------------------
# Figure out which array packages are present and their array types
#-------------------------------------------------------------------------------
arrayModules = []
try:
import Numeric
except ImportError:
pass
else:
arrayModules.append({'module':Numeric, 'type':Numeric.arraytype})
try:
import numpy
except ImportError:
pass
else:
arrayModules.append({'module':numpy, 'type':numpy.ndarray})
try:
import numarray
except ImportError:
pass
else:
arrayModules.append({'module':numarray,
'type':numarray.numarraycore.NumArray})
class Map:
"""A class for partitioning a sequence using a map."""
def getPartition(self, seq, p, q):
"""Returns the pth partition of q partitions of seq."""
# Test for error conditions here
if p<0 or p>=q:
print "No partition exists."
return
remainder = len(seq)%q
basesize = len(seq)/q
hi = []
lo = []
for n in range(q):
if n < remainder:
lo.append(n * (basesize + 1))
hi.append(lo[-1] + basesize + 1)
else:
lo.append(n*basesize + remainder)
hi.append(lo[-1] + basesize)
result = seq[lo[p]:hi[p]]
return result
def joinPartitions(self, listOfPartitions):
return self.concatenate(listOfPartitions)
def concatenate(self, listOfPartitions):
testObject = listOfPartitions[0]
# First see if we have a known array type
for m in arrayModules:
#print m
if isinstance(testObject, m['type']):
return m['module'].concatenate(listOfPartitions)
# Next try for Python sequence types
if isinstance(testObject, (types.ListType, types.TupleType)):
return genutil_flatten(listOfPartitions)
# If we have scalars, just return listOfPartitions
return listOfPartitions
class RoundRobinMap(Map):
"""Partitions a sequence in a roun robin fashion.
This currently does not work!
"""
def getPartition(self, seq, p, q):
return seq[p:len(seq):q]
#result = []
#for i in range(p,len(seq),q):
# result.append(seq[i])
#return result
def joinPartitions(self, listOfPartitions):
#lengths = [len(x) for x in listOfPartitions]
#maxPartitionLength = len(listOfPartitions[0])
#numberOfPartitions = len(listOfPartitions)
#concat = self.concatenate(listOfPartitions)
#totalLength = len(concat)
#result = []
#for i in range(maxPartitionLength):
# result.append(concat[i:totalLength:maxPartitionLength])
return self.concatenate(listOfPartitions)
dists = {'b':Map}
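# ---------------------------------------------------------------------------
# Worked example (editorial addition, not part of the original module).
# Map.getPartition() splits a sequence into q contiguous chunks, giving the
# first len(seq) % q chunks one extra element:
#
#   m = Map()
#   seq = range(7)                    # [0, 1, 2, 3, 4, 5, 6]
#   m.getPartition(seq, 0, 3)         # -> [0, 1, 2]
#   m.getPartition(seq, 1, 3)         # -> [3, 4]
#   m.getPartition(seq, 2, 3)         # -> [5, 6]
#   m.joinPartitions([m.getPartition(seq, p, 3) for p in range(3)])
#   # -> [0, 1, 2, 3, 4, 5, 6]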
| mpl-2.0 |
cloudsigma/cloud-init | tests/unittests/test_data.py | 6 | 14823 | """Tests for handling of userdata within cloud init."""
import StringIO
import gzip
import logging
import os
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from cloudinit import handlers
from cloudinit import helpers as c_helpers
from cloudinit import log
from cloudinit.settings import (PER_INSTANCE)
from cloudinit import sources
from cloudinit import stages
from cloudinit import util
INSTANCE_ID = "i-testing"
from tests.unittests import helpers
class FakeDataSource(sources.DataSource):
def __init__(self, userdata=None, vendordata=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
# FIXME: these tests shouldn't be checking log output??
# Weirddddd...
class TestConsumeUserData(helpers.FilesystemMockingTestCase):
def setUp(self):
helpers.FilesystemMockingTestCase.setUp(self)
self._log = None
self._log_file = None
self._log_handler = None
def tearDown(self):
helpers.FilesystemMockingTestCase.tearDown(self)
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
def _patchIn(self, root):
self.restore()
self.patchOS(root)
self.patchUtils(root)
def capture_log(self, lvl=logging.DEBUG):
log_file = StringIO.StringIO()
self._log_handler = logging.StreamHandler(log_file)
self._log_handler.setLevel(lvl)
self._log = log.getLogger()
self._log.addHandler(self._log_handler)
return log_file
def test_simple_jsonp(self):
blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
'''
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
new_root = self.makeDir()
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(2, len(cc))
self.assertEquals('qux', cc['baz'])
self.assertEquals('qux2', cc['bar'])
def test_simple_jsonp_vendor_and_user(self):
# test that user-data wins over vendor
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertEquals('quxC', cfg['foo'])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" },
{ "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertNotIn('foo', cfg)
def test_mixed_cloud_config(self):
blob_cc = '''
#cloud-config
a: b
c: d
'''
message_cc = MIMEBase("text", "cloud-config")
message_cc.set_payload(blob_cc)
blob_jp = '''
#cloud-config-jsonp
[
{ "op": "replace", "path": "/a", "value": "c" },
{ "op": "remove", "path": "/c" }
]
'''
message_jp = MIMEBase('text', "cloud-config-jsonp")
message_jp.set_payload(blob_jp)
message = MIMEMultipart()
message.attach(message_cc)
message.attach(message_jp)
ci = stages.Init()
ci.datasource = FakeDataSource(str(message))
new_root = self.makeDir()
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(1, len(cc))
self.assertEquals('c', cc['a'])
def test_vendor_user_yaml_cloud_config(self):
vendor_blob = '''
#cloud-config
a: b
name: vendor
run:
- x
- y
'''
user_blob = '''
#cloud-config
a: c
vendor_data:
enabled: True
prefix: /bin/true
name: user
run:
- z
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('c', cfg['a'])
self.assertEquals('user', cfg['name'])
self.assertNotIn('x', cfg['run'])
self.assertNotIn('y', cfg['run'])
self.assertIn('z', cfg['run'])
def test_vendordata_script(self):
vendor_blob = '''
#!/bin/bash
echo "test"
'''
user_blob = '''
#cloud-config
vendor_data:
enabled: True
prefix: /bin/true
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
_cfg = mods.cfg
vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
self.assertTrue(os.path.exists(vendor_script_fns))
def test_merging_cloud_config(self):
blob = '''
#cloud-config
a: b
e: f
run:
- b
- c
'''
message1 = MIMEBase("text", "cloud-config")
message1.set_payload(blob)
blob2 = '''
#cloud-config
a: e
e: g
run:
- stuff
- morestuff
'''
message2 = MIMEBase("text", "cloud-config")
message2['X-Merge-Type'] = ('dict(recurse_array,'
'recurse_str)+list(append)+str(append)')
message2.set_payload(blob2)
blob3 = '''
#cloud-config
e:
- 1
- 2
- 3
p: 1
'''
message3 = MIMEBase("text", "cloud-config")
message3.set_payload(blob3)
messages = [message1, message2, message3]
paths = c_helpers.Paths({}, ds=FakeDataSource(''))
cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
new_root = self.makeDir()
self.patchUtils(new_root)
self.patchOS(new_root)
cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
None)
for i, m in enumerate(messages):
headers = dict(m)
fn = "part-%s" % (i + 1)
payload = m.get_payload(decode=True)
cloud_cfg.handle_part(None, headers['Content-Type'],
fn, payload, None, headers)
cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
None)
contents = util.load_file(paths.get_ipath('cloud_config'))
contents = util.load_yaml(contents)
self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
self.assertEquals(contents['a'], 'be')
self.assertEquals(contents['e'], [1, 2, 3])
self.assertEquals(contents['p'], 1)
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning."""
ci = stages.Init()
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
def test_mime_gzip_compressed(self):
"""Tests that individual message gzip encoding works."""
def gzip_part(text):
contents = StringIO.StringIO()
f = gzip.GzipFile(fileobj=contents, mode='w')
f.write(str(text))
f.flush()
f.close()
return MIMEApplication(contents.getvalue(), 'gzip')
base_content1 = '''
#cloud-config
a: 2
'''
base_content2 = '''
#cloud-config
b: 3
c: 4
'''
message = MIMEMultipart('test')
message.attach(gzip_part(base_content1))
message.attach(gzip_part(base_content2))
ci = stages.Init()
ci.datasource = FakeDataSource(str(message))
new_root = self.makeDir()
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
self.assertEquals(3, len(contents))
self.assertEquals(2, contents['a'])
self.assertEquals(3, contents['b'])
self.assertEquals(4, contents['c'])
def test_mime_text_plain(self):
"""Mime message of type text/plain is ignored but shows warning."""
ci = stages.Init()
message = MIMEBase("text", "plain")
message.set_payload("Just text")
ci.datasource = FakeDataSource(message.as_string())
mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
def test_shellscript(self):
"""Raw text starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
ci.datasource = FakeDataSource(script)
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
mock_write(outpath, script, 0700)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
"""Mime message of type text/x-shellscript is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "x-shellscript")
message.set_payload(script)
ci.datasource = FakeDataSource(message.as_string())
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
mock_write(outpath, script, 0700)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
"""Mime type text/plain starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "plain")
message.set_payload(script)
ci.datasource = FakeDataSource(message.as_string())
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
mock_write(outpath, script, 0700)
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
| gpl-3.0 |
areski/cdr-stats | cdr_stats/cdr/utils/encoder.py | 3 | 1240 | # -*- coding: utf-8 -*-
#
# This code is borrowed from django-cms
# https://raw.githubusercontent.com/divio/django-cms/develop/cms/utils/encoder.py
# Licensed as https://github.com/divio/django-cms/blob/develop/LICENSE
#
from django.utils.html import conditional_escape
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.six import iteritems
class SafeJSONEncoder(DjangoJSONEncoder):
def _recursive_escape(self, o, esc=conditional_escape):
if isinstance(o, dict):
return type(o)((esc(k), self._recursive_escape(v)) for (k, v) in iteritems(o))
if isinstance(o, (list, tuple)):
return type(o)(self._recursive_escape(v) for v in o)
if type(o) is bool:
return o
try:
return type(o)(esc(o))
except (ValueError, TypeError):
return self.default(o)
def encode(self, o):
value = self._recursive_escape(o)
return super(SafeJSONEncoder, self).encode(value)
def default(self, o):
if isinstance(o, Promise):
return force_text(o)
return super(SafeJSONEncoder, self).default(o)
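# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition, not part of the original module).
# The encoder HTML-escapes dict keys and string values before serialising,
# which is why django-cms uses it for JSON that ends up inside templates.
# A minimal sketch, output shown approximately:
#
#   import json
#   json.dumps({'title': '<script>alert(1)</script>'}, cls=SafeJSONEncoder)
#   # -> '{"title": "&lt;script&gt;alert(1)&lt;/script&gt;"}'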
| mpl-2.0 |
spradeepv/dive-into-python | hackerrank/domain/python/numpy/transpose_flatten.py | 1 | 1198 | """
Problem Statement
Transpose
We can generate the transposition of an array using the tool numpy.transpose.
It will not affect the original array, but it will create a new array.
import numpy
my_array = numpy.array([[1,2,3],
[4,5,6]])
print numpy.transpose(my_array)
#Output
[[1 4]
[2 5]
[3 6]]
Flatten
The tool flatten creates a copy of the input array flattened to one dimension.
import numpy
my_array = numpy.array([[1,2,3],
[4,5,6]])
print my_array.flatten()
#Output
[1 2 3 4 5 6]
Task
You are given an NXM integer array matrix with space separated elements (N =
rows and M = columns).
Your task is to print the transpose and flatten results.
Input Format
The first line contains the space separated values of N and M.
The next N lines contains the space separated elements of M columns.
Output Format
First, print the transpose array and then print the flatten.
Sample Input
2 2
1 2
3 4
Sample Output
[[1 3]
[2 4]]
[1 2 3 4]
"""
import numpy as np
n, m = map(int, raw_input().split())
l = []
for _ in range(n):
l.append(map(int, raw_input().split()))
array = np.array(l)
print np.transpose(array)
print array.flatten()
| mit |
gwq5210/litlib | thirdparty/sources/boost_1_60_0/tools/build/test/default_toolset.py | 51 | 7777 | #!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the expected default toolset is used when no toolset is explicitly
# specified on the command line or used from code via the using rule. Test that
# the default toolset is correctly used just like any other explicitly used
# toolset (e.g. toolset prerequisites, properties conditioned on toolset
# related features, etc.).
#
# Note that we need to ignore regular site/user/test configuration files to
# avoid them marking any toolsets not under our control as used.
import BoostBuild
# Line displayed by Boost Build when using the default toolset.
configuring_default_toolset_message = \
'warning: Configuring default toolset "%s".'
###############################################################################
#
# test_conditions_on_default_toolset()
# ------------------------------------
#
###############################################################################
def test_conditions_on_default_toolset():
"""Test that toolset and toolset subfeature conditioned properties get
applied correctly when the toolset is selected by default. Implicitly tests
that we can use the set-default-toolset rule to set the default toolset to
be used by Boost Build.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False)
toolset_name = "myCustomTestToolset"
toolset_version = "v"
toolset_version_unused = "v_unused"
message_loaded = "Toolset '%s' loaded." % toolset_name
    message_initialized = "Toolset '%s' initialized." % toolset_name
# Custom toolset.
t.write(toolset_name + ".jam", """
import feature ;
ECHO "%(message_loaded)s" ;
feature.extend toolset : %(toolset_name)s ;
feature.subfeature toolset %(toolset_name)s : version : %(toolset_version)s %(toolset_version_unused)s ;
rule init ( version ) { ECHO "%(message_initialized)s" ; }
""" % {'message_loaded' : message_loaded ,
'message_initialized' : message_initialized,
'toolset_name' : toolset_name ,
'toolset_version' : toolset_version ,
'toolset_version_unused': toolset_version_unused})
# Main Boost Build project script.
t.write("jamroot.jam", """
import build-system ;
import errors ;
import feature ;
import notfile ;
build-system.set-default-toolset %(toolset_name)s : %(toolset_version)s ;
feature.feature description : : free incidental ;
# We use a rule instead of an action to avoid problems with action output not
# getting piped to stdout by the testing system.
rule buildRule ( names : targets ? : properties * )
{
local descriptions = [ feature.get-values description : $(properties) ] ;
ECHO "descriptions:" /$(descriptions)/ ;
local toolset = [ feature.get-values toolset : $(properties) ] ;
ECHO "toolset:" /$(toolset)/ ;
local toolset-version = [ feature.get-values "toolset-$(toolset):version" : $(properties) ] ;
ECHO "toolset-version:" /$(toolset-version)/ ;
}
notfile testTarget
: @buildRule
:
:
<description>stand-alone
<toolset>%(toolset_name)s:<description>toolset
<toolset>%(toolset_name)s-%(toolset_version)s:<description>toolset-version
<toolset>%(toolset_name)s-%(toolset_version_unused)s:<description>toolset-version-unused ;
""" % {'toolset_name' : toolset_name ,
'toolset_version' : toolset_version,
'toolset_version_unused': toolset_version_unused})
t.run_build_system()
t.expect_output_lines(configuring_default_toolset_message % toolset_name)
t.expect_output_lines(message_loaded)
t.expect_output_lines(message_initialized)
t.expect_output_lines("descriptions: /stand-alone/ /toolset/ "
"/toolset-version/")
t.expect_output_lines("toolset: /%s/" % toolset_name)
t.expect_output_lines("toolset-version: /%s/" % toolset_version)
t.cleanup()
###############################################################################
#
# test_default_toolset_on_os()
# ----------------------------
#
###############################################################################
def test_default_toolset_on_os( os, expected_toolset ):
"""Test that the given toolset is used as the default toolset on the given
os. Uses hardcoded knowledge of how Boost Build decides on which host OS it
is currently running. Note that we must not do much after tricking Boost
Build into believing it has a specific host OS as this might mess up other
important internal Boost Build state.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False)
t.write("jamroot.jam", "modules.poke os : .name : %s ;" % os)
# We need to tell the test system to ignore stderr output as attempting to
# load missing toolsets might cause random failures with which we are not
# concerned in this test.
t.run_build_system(stderr=None)
t.expect_output_lines(configuring_default_toolset_message %
expected_toolset)
t.cleanup()
###############################################################################
#
# test_default_toolset_requirements()
# -----------------------------------
#
###############################################################################
def test_default_toolset_requirements():
"""Test that default toolset's requirements get applied correctly.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False,
ignore_toolset_requirements=False)
toolset_name = "customTestToolsetWithRequirements"
# Custom toolset.
t.write(toolset_name + ".jam", """
import feature ;
import toolset ;
feature.extend toolset : %(toolset_name)s ;
toolset.add-requirements <description>toolset-requirement ;
rule init ( ) { }
""" % {'toolset_name': toolset_name})
# Main Boost Build project script.
t.write("jamroot.jam", """
import build-system ;
import errors ;
import feature ;
import notfile ;
build-system.set-default-toolset %(toolset_name)s ;
feature.feature description : : free incidental ;
# We use a rule instead of an action to avoid problems with action output not
# getting piped to stdout by the testing system.
rule buildRule ( names : targets ? : properties * )
{
local descriptions = [ feature.get-values description : $(properties) ] ;
ECHO "descriptions:" /$(descriptions)/ ;
local toolset = [ feature.get-values toolset : $(properties) ] ;
ECHO "toolset:" /$(toolset)/ ;
}
notfile testTarget
: @buildRule
:
:
<description>target-requirement
<description>toolset-requirement:<description>conditioned-requirement
<description>unrelated-condition:<description>unrelated-description ;
""" % {'toolset_name': toolset_name})
t.run_build_system()
t.expect_output_lines(configuring_default_toolset_message % toolset_name)
t.expect_output_lines("descriptions: /conditioned-requirement/ "
"/target-requirement/ /toolset-requirement/")
t.expect_output_lines("toolset: /%s/" % toolset_name)
t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
test_default_toolset_on_os("NT" , "msvc")
test_default_toolset_on_os("LINUX" , "gcc" )
test_default_toolset_on_os("CYGWIN" , "gcc" )
test_default_toolset_on_os("SomeOtherOS", "gcc" )
test_default_toolset_requirements()
test_conditions_on_default_toolset()
| gpl-3.0 |
Mitali-Sodhi/CodeLingo | Dataset/python/routetable.py | 29 | 3683 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Route Table
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class RouteTable(TaggedEC2Object):
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
self.vpc_id = None
self.routes = []
self.associations = []
def __repr__(self):
return 'RouteTable:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(RouteTable, self).startElement(name, attrs, connection)
if result is not None:
# Parent found an interested element, just return it
return result
if name == 'routeSet':
self.routes = ResultSet([('item', Route)])
return self.routes
elif name == 'associationSet':
self.associations = ResultSet([('item', RouteAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'routeTableId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class Route(object):
def __init__(self, connection=None):
self.destination_cidr_block = None
self.gateway_id = None
self.instance_id = None
self.state = None
def __repr__(self):
return 'Route:%s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'gatewayId':
self.gateway_id = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'state':
self.state = value
class RouteAssociation(object):
def __init__(self, connection=None):
self.id = None
self.route_table_id = None
self.subnet_id = None
self.main = False
def __repr__(self):
return 'RouteAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'routeTableAssociationId':
self.id = value
elif name == 'routeTableId':
self.route_table_id = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'main':
self.main = value == 'true'
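# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition, not part of the original module).
# These classes are filled in by boto's SAX-style XML handlers when route
# tables are described over a VPC connection; a rough sketch, where the
# method and attribute names outside this file are assumptions:
#
#   import boto.vpc
#   conn = boto.vpc.connect_to_region('us-east-1')
#   for table in conn.get_all_route_tables():
#       print table.id, table.vpc_id
#       for route in table.routes:
#           print ' ', route.destination_cidr_block, route.gateway_id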
| mit |
haibuo1981/cuda-convnet2 | python_util/data.py | 180 | 7803 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
from numpy.random import randn, rand, random_integers
import os
import re  # needed for DataProvider.BATCH_REGEX below
from threading import Thread
from util import *
BATCH_META_FILE = "batches.meta"
class DataLoaderThread(Thread):
def __init__(self, path, tgt):
Thread.__init__(self)
self.path = path
self.tgt = tgt
def run(self):
self.tgt += [unpickle(self.path)]
class DataProvider:
    BATCH_REGEX = re.compile(r'^data_batch_(\d+)(\.\d+)?$')
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        if batch_range is None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.batch_meta = self.get_batch_meta(data_dir)
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
def get_next_batch(self):
if self.data_dic is None or len(self.batch_range) > 1:
self.data_dic = self.get_batch(self.curr_batchnum)
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
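        # epoch/batchnum captured above are the values that were current before
        # advance_batch() moved the internal pointer, so callers receive the
        # batch that was actually loaded.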
return epoch, batchnum, self.data_dic
def get_batch(self, batch_num):
fname = self.get_data_file_name(batch_num)
if os.path.isdir(fname): # batch in sub-batches
sub_batches = sorted(os.listdir(fname), key=alphanum_key)
#print sub_batches
num_sub_batches = len(sub_batches)
tgts = [[] for i in xrange(num_sub_batches)]
threads = [DataLoaderThread(os.path.join(fname, s), tgt) for (s, tgt) in zip(sub_batches, tgts)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return [t[0] for t in tgts]
return unpickle(self.get_data_file_name(batch_num))
def get_data_dims(self,idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
def advance_batch(self):
self.batch_idx = self.get_next_batch_idx()
self.curr_batchnum = self.batch_range[self.batch_idx]
if self.batch_idx == 0: # we wrapped
self.curr_epoch += 1
def get_next_batch_idx(self):
return (self.batch_idx + 1) % len(self.batch_range)
def get_next_batch_num(self):
return self.batch_range[self.get_next_batch_idx()]
# get filename of current batch
def get_data_file_name(self, batchnum=None):
if batchnum is None:
batchnum = self.curr_batchnum
return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)
@classmethod
def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
# why the fuck can't i reference DataProvider in the original definition?
#cls.dp_classes['default'] = DataProvider
type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
if type.startswith("dummy-"):
name = "-".join(type.split('-')[:-1]) + "-n"
if name not in dp_types:
raise DataProviderException("No such data provider: %s" % type)
_class = dp_classes[name]
dims = int(type.split('-')[-1])
return _class(dims)
elif type in dp_types:
_class = dp_classes[type]
return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
raise DataProviderException("No such data provider: %s" % type)
@classmethod
def register_data_provider(cls, name, desc, _class):
if name in dp_types:
raise DataProviderException("Data provider %s already registered" % name)
dp_types[name] = desc
dp_classes[name] = _class
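    # Illustrative usage sketch (hypothetical; CustomDataProvider is an assumed
    # subclass, not defined in this file):
    #   DataProvider.register_data_provider('custom', 'My provider', CustomDataProvider)
    #   dp = DataProvider.get_instance(data_dir, type='custom')
    #   epoch, batchnum, data = dp.get_next_batch()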
@staticmethod
def get_batch_meta(data_dir):
return unpickle(os.path.join(data_dir, BATCH_META_FILE))
@staticmethod
def get_batch_filenames(srcdir):
return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)
@staticmethod
def get_batch_nums(srcdir):
names = DataProvider.get_batch_filenames(srcdir)
return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))
@staticmethod
def get_num_batches(srcdir):
return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
def __init__(self, data_dim):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx = 0
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
data = rand(512, self.get_data_dims()).astype(n.single)
return self.curr_epoch, self.curr_batchnum, {'data':data}
class LabeledDataProvider(DataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
def get_num_classes(self):
return len(self.batch_meta['label_names'])
class LabeledDummyDataProvider(DummyDataProvider):
def __init__(self, data_dim, num_classes=10, num_cases=7):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim,
'label_names': [str(x) for x in range(num_classes)],
'data_in_rows':True}
self.num_cases = num_cases
self.num_classes = num_classes
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx=0
self.data = None
def get_num_classes(self):
return self.num_classes
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
if self.data is None:
data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
self.data, self.labels = data, labels
else:
data, labels = self.data, self.labels
# print data.shape, labels.shape
return self.curr_epoch, self.curr_batchnum, [data.T, labels.T ]
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
"dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
"dummy-labeled-n": LabeledDummyDataProvider}
class DataProviderException(Exception):
pass
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_asof.py | 7 | 4718 | # coding=utf-8
import nose
import numpy as np
from pandas import (offsets, Series, notnull,
isnull, date_range, Timestamp)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAsof(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_basic(self):
# array or list or dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
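        # (Note: Series.asof returns the most recent non-NaN value at or before
        #  each requested timestamp, which is why dates inside the NaN-filled
        #  window above resolve to ts[lb].)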
def test_scalar(self):
N = 30
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.arange(N), index=rng)
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
result = ts.asof(ts.index[3])
self.assertEqual(result, ts[3])
# no as of value
d = ts.index[0] - offsets.BDay()
self.assertTrue(np.isnan(ts.asof(d)))
def test_with_nan(self):
# basic asof test
rng = date_range('1/1/2000', '1/2/2000', freq='4h')
s = Series(np.arange(len(rng)), index=rng)
r = s.resample('2h').mean()
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
ts[5:10] = np.nan
ts[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
self.assertEqual(ts.asof(ts.index[3]), ts[3])
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
self.assertTrue(isnull(ts.asof(d)))
def test_errors(self):
s = Series([1, 2, 3],
index=[Timestamp('20130101'),
Timestamp('20130103'),
Timestamp('20130102')])
# non-monotonic
self.assertFalse(s.index.is_monotonic)
with self.assertRaises(ValueError):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
s = Series(np.random.randn(N), index=rng)
with self.assertRaises(ValueError):
s.asof(s.index[0], subset='foo')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
olegpshenichniy/Booktype | lib/booktype/apps/importer/views.py | 5 | 7974 | # -*- coding: utf-8 -*-
import json
import datetime
import logging
from ebooklib import epub
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.views.generic import UpdateView
from django.views.generic.edit import FormView
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from braces.views import JSONResponseMixin
from booki.editor.models import Book, Chapter
from sputnik.utils import LazyEncoder
from booktype.apps.core.views import SecurityMixin
from booktype.importer.delegate import Delegate
from booktype.importer.notifier import CollectNotifier
from booktype.importer import utils as importer_utils
from booktype.importer.docx.utils import get_importer_class
from booktype.utils import config
from booktype.utils.book import check_book_availability, create_book
from booktype.utils.misc import booktype_slugify, get_file_extension, has_book_limit
from booktype.utils.security import BookSecurity
from .forms import UploadBookForm, UploadDocxFileForm
logger = logging.getLogger("booktype.importer")
class ImporterView(JSONResponseMixin, SecurityMixin, FormView):
form_class = UploadBookForm
def check_permissions(self, request, *args, **kwargs):
# TODO: this should be moved to parent SecurityMixin class
if request.user.is_superuser:
return
if not self.security.has_perm('account.can_upload_book'):
raise PermissionDenied
        # if admin-only import is enabled, deny regular users permission to upload books
if config.get_configuration('ADMIN_IMPORT_BOOKS'):
raise PermissionDenied
# check if user can import more books
if has_book_limit(request.user):
raise PermissionDenied
def get_default_title(self, temp_file, ext):
book_title = _('Imported Book %(date)s') % dict(date=datetime.date.today())
if ext == 'epub':
epub_book = epub.read_epub(temp_file)
try:
dc_key = epub.NAMESPACES['DC']
book_title = epub_book.metadata[dc_key]['title'][0][0]
except Exception:
pass
return book_title
def form_valid(self, form):
logger.debug('ImporterView::form_valid')
book_file = form.cleaned_data.get('book_file')
ext = get_file_extension(book_file.name)
logger.debug('ImporterView::Importing file extension is "{}".'.format(ext.encode('utf8')))
default_book_title = self.get_default_title(book_file, ext)
book_title = form.cleaned_data.get('book_title', default_book_title)
logger.debug('ImporterView::book_title="{}" default_book_title="{}".'.format(
book_title.encode('utf8'), default_book_title.encode('utf8')))
# in case book title in form is empty string
if len(book_title) == 0:
book_title = default_book_title
if not check_book_availability(book_title):
registered = Book.objects.filter(title__startswith=book_title).count()
book_title = '%s %s' % (book_title, registered)
logger.debug('ImporterView::Checking book availability: "{}".'.format(book_title.encode('utf8')))
book_url = booktype_slugify(book_title)
book = create_book(self.request.user, book_title, book_url=book_url)
logger.debug('ImporterView::Book created with url title="{}".'.format(book_url))
        # set the hidden flag on the book if requested in the upload form
book.hidden = form.cleaned_data.get('hidden')
book.save()
notifier = CollectNotifier()
delegate = Delegate()
response = {}
try:
book_importer = importer_utils.get_importer_module(ext)
except KeyError:
logger.error('ImporterView::No importer for this extension')
response_data = dict(errors=[ugettext('Extension not supported!')])
return self.render_json_response(response_data)
try:
book_importer(
book_file, book, notifier=notifier, delegate=delegate)
logger.debug('ImporterView::Book imported.')
response['url'] = reverse('reader:infopage', args=[book.url_title])
except Exception as e:
logger.error('ImporterView::Some kind of error while importing book.')
logger.exception(e)
notifier.errors.append(str(e))
response['infos'] = notifier.infos
response['warnings'] = notifier.warnings
response['errors'] = notifier.errors
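        # (Note: the JSON response always carries the "infos"/"warnings"/"errors"
        #  lists collected by CollectNotifier; "url" is only present when the
        #  import succeeded.)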
return self.render_json_response(response)
def form_invalid(self, form):
response_data = {
'errors': [ugettext('Something went wrong!')],
'infos': [],
'warnings': []
}
return self.render_json_response(response_data)
class ImportToChapter(JSONResponseMixin, SecurityMixin, UpdateView):
"""
    Importer view to load content from a given docx file (and, in the near future,
    an epub file) into a single existing chapter.
    This view redirects to the chapter edit screen if the docx import succeeds, or
    back to the screen the user is currently on if it fails.
"""
# TODO: Implement importing epub files into existent chapters
model = Book
form_class = UploadDocxFileForm
slug_field = 'url_title'
slug_url_kwarg = 'bookid'
context_object_name = 'book'
SECURITY_BRIDGE = BookSecurity
json_encoder_class = LazyEncoder
def check_permissions(self, request, *args, **kwargs):
if request.user.is_superuser:
return
if not self.security.has_perm('edit.import_to_chapter'):
raise PermissionDenied
def get_object(self, queryset=None):
book = super(ImportToChapter, self).get_object(queryset)
self.chapter = get_object_or_404(
Chapter, book=book, pk=self.kwargs['chapter'])
return book
def get_form_kwargs(self):
"""Just override to avoid sending `instance` argument passed to form"""
kwargs = super(ImportToChapter, self).get_form_kwargs()
del kwargs['instance']
return kwargs
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
chapter = self.chapter
book = self.object
chapter_file = form.cleaned_data.get('chapter_file')
process_mode = form.cleaned_data.get('import_mode')
        # these are used to collect informational messages during the import process
notifier, delegate = CollectNotifier(), Delegate()
response = {}
# allow getting custom importer class if any
docx = get_importer_class()(
book, chapter, notifier=notifier, delegate=delegate, user=self.request.user)
try:
docx.import_file(chapter_file, **{'process_mode': process_mode})
response['url'] = self.get_success_url()
response['new_content'] = chapter.content
except Exception as e:
logger.error('ImporterToChapter::Unexpected error while importing file')
logger.exception(e)
notifier.errors.append(str(e))
response['infos'] = notifier.infos
response['warnings'] = notifier.warnings
response['errors'] = notifier.errors
response_data = json.dumps(response, cls=LazyEncoder)
return HttpResponse(response_data, content_type="application/json")
def form_invalid(self, form):
# NOTE: perhaps send back validation errors
response_data = {
'infos': [], 'warnings': [],
'errors': [ugettext('Something went wrong!')],
}
return self.render_json_response(response_data)
def get_success_url(self):
return '{}#edit/{}'.format(
reverse('edit:editor', args=[self.object.url_title]), self.chapter.pk)
| agpl-3.0 |
Plantain/sms-mailinglist | lib/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
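# (Descriptive note: wrap_ord lets byte-iteration code behave identically on
#  Python 2, where indexing a str yields 1-character strings, and on Python 3,
#  where indexing bytes already yields ints.)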
| apache-2.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_4/django/contrib/gis/geos/tests/test_geos_mutation.py | 198 | 5424 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.contrib.gis.geos import *
from django.contrib.gis.geos.error import GEOSIndexError
from django.utils import unittest
def getItem(o,i): return o[i]
def delItem(o,i): del o[i]
def setItem(o,i,v): o[i] = v
def api_get_distance(x): return x.distance(Point(-200,-200))
def api_get_buffer(x): return x.buffer(10)
def api_get_geom_typeid(x): return x.geom_typeid
def api_get_num_coords(x): return x.num_coords
def api_get_centroid(x): return x.centroid
def api_get_empty(x): return x.empty
def api_get_valid(x): return x.valid
def api_get_simple(x): return x.simple
def api_get_ring(x): return x.ring
def api_get_boundary(x): return x.boundary
def api_get_convex_hull(x): return x.convex_hull
def api_get_extent(x): return x.extent
def api_get_area(x): return x.area
def api_get_length(x): return x.length
geos_function_tests = [ val for name, val in vars().items()
if hasattr(val, '__call__')
and name.startswith('api_get_') ]
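# (Descriptive note: the comprehension above collects every api_get_* helper
#  defined in this module, so the mutation tests below can check that a mutated
#  geometry and a freshly constructed equivalent agree on every read-only call.)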
class GEOSMutationTest(unittest.TestCase):
"""
    Tests Pythonic mutability of the Python GEOS geometry wrappers:
    get/set/delitem on a slice, plus the normal list methods.
"""
def test00_GEOSIndexException(self):
'Testing Geometry GEOSIndexError'
p = Point(1,2)
for i in range(-2,2): p._checkindex(i)
self.assertRaises(GEOSIndexError, p._checkindex, 2)
self.assertRaises(GEOSIndexError, p._checkindex, -3)
def test01_PointMutations(self):
'Testing Point mutations'
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
# _set_single
p._set_single(0,100)
self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')
# _set_list
p._set_list(2,(50,3141))
self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')
def test02_PointExceptions(self):
'Testing Point exceptions'
self.assertRaises(TypeError, Point, range(1))
self.assertRaises(TypeError, Point, range(4))
def test03_PointApi(self):
'Testing Point API'
q = Point(4,5,3)
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
p[0:2] = [4,5]
for f in geos_function_tests:
self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
def test04_LineStringMutations(self):
'Testing LineString mutations'
for ls in (LineString((1,0),(4,1),(6,-1)),
fromstr('LINESTRING (1 0,4 1,6 -1)')):
self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')
# _set_single
ls._set_single(0,(-50,25))
self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')
# _set_list
ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')
lsa = LineString(ls.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
def test05_Polygon(self):
'Testing Polygon mutations'
for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
((5,4),(6,4),(6,3),(5,4))),
fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
self.assertEqual(pg._get_single_external(0),
LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
'Polygon _get_single_external(0)')
self.assertEqual(pg._get_single_external(1),
LinearRing((5,4),(6,4),(6,3),(5,4)),
'Polygon _get_single_external(1)')
# _set_list
pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
((4,2),(5,2),(5,3),(4,2))))
self.assertEqual(pg.coords,
(((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
'Polygon _set_list')
lsa = Polygon(*pg.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
def test06_Collection(self):
'Testing Collection mutations'
for mp in (MultiPoint(*map(Point,((3,4),(-1,2),(5,-4),(2,8)))),
fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')
mp._set_list(3, map(Point,((5,5),(3,-2),(8,1))))
self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')
lsa = MultiPoint(*map(Point,((5,5),(3,-2),(8,1))))
for f in geos_function_tests:
self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSMutationTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == '__main__':
run()
| mit |
huanpc/IoT-1 | gui/controller/.venv/lib/python3.5/site-packages/jet/dashboard/views.py | 2 | 8007 | from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_POST, require_GET
from jet.dashboard.forms import UpdateDashboardModulesForm, AddUserDashboardModuleForm, \
UpdateDashboardModuleCollapseForm, RemoveDashboardModuleForm, ResetDashboardForm
from jet.dashboard.models import UserDashboardModule
from jet.utils import JsonResponse, get_app_list, SuccessMessageMixin
from django.views.generic import UpdateView
from django.utils.translation import ugettext_lazy as _
class UpdateDashboardModuleView(SuccessMessageMixin, UpdateView):
model = UserDashboardModule
fields = ('title',)
template_name = 'jet.dashboard/update_module.html'
success_message = _('Widget was successfully updated')
object = None
module = None
def has_permission(self, request):
return request.user.is_active and request.user.is_staff
def get_success_url(self):
if self.object.app_label:
return reverse('admin:app_list', kwargs={'app_label': self.object.app_label})
else:
return reverse('admin:index')
def get_module(self):
object = self.object if getattr(self, 'object', None) is not None else self.get_object()
return object.load_module()
def get_settings_form_kwargs(self):
kwargs = {
'initial': self.module.settings
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_settings_form(self):
if self.module.settings_form:
form = self.module.settings_form(**self.get_settings_form_kwargs())
if hasattr(form, 'set_module'):
form.set_module(self.module)
return form
def get_children_formset_kwargs(self):
kwargs = {
'initial': self.module.children,
'prefix': 'children',
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_children_formset(self):
if self.module.child_form:
return formset_factory(self.module.child_form, can_delete=True, extra=1)(**self.get_children_formset_kwargs())
def clean_children_data(self, children):
children = list(filter(
lambda item: isinstance(item, dict) and item and item.get('DELETE') is not True,
children
))
for item in children:
item.pop('DELETE')
return children
def get_current_app(self):
app_list = get_app_list({'request': self.request})
for app in app_list:
if app.get('app_label', app.get('name')) == self.object.app_label:
return app
def get_context_data(self, **kwargs):
data = super(UpdateDashboardModuleView, self).get_context_data(**kwargs)
data['title'] = _('Change')
data['module'] = self.module
data['settings_form'] = self.get_settings_form()
data['children_formset'] = self.get_children_formset()
data['child_name'] = self.module.child_name if self.module.child_name else _('Items')
data['child_name_plural'] = self.module.child_name_plural if self.module.child_name_plural else _('Items')
data['app'] = self.get_current_app()
return data
def dispatch(self, request, *args, **kwargs):
if not self.has_permission(request):
index_path = reverse('admin:index')
return HttpResponseRedirect(index_path)
self.object = self.get_object()
self.module = self.get_module()(model=self.object)
return super(UpdateDashboardModuleView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
settings_form = self.get_settings_form()
children_formset = self.get_children_formset()
data = request.POST.copy()
if settings_form:
if settings_form.is_valid():
settings = settings_form.cleaned_data
data['settings'] = self.module.dump_settings(settings)
else:
return self.form_invalid(self.get_form(self.get_form_class()))
if children_formset:
if children_formset.is_valid():
self.module.children = self.clean_children_data(children_formset.cleaned_data)
data['children'] = self.module.dump_children()
else:
return self.form_invalid(self.get_form(self.get_form_class()))
request.POST = data
return super(UpdateDashboardModuleView, self).post(request, *args, **kwargs)
def form_valid(self, form):
if 'settings' in form.data:
form.instance.settings = form.data['settings']
if 'children' in form.data:
form.instance.children = form.data['children']
return super(UpdateDashboardModuleView, self).form_valid(form)
@require_POST
def update_dashboard_modules_view(request):
result = {'error': False}
form = UpdateDashboardModulesForm(request, request.POST)
if form.is_valid():
form.save()
else:
result['error'] = True
return JsonResponse(result)
@require_POST
def add_user_dashboard_module_view(request):
result = {'error': False}
form = AddUserDashboardModuleForm(request, request.POST)
if form.is_valid():
module = form.save()
result['id'] = module.pk
messages.success(request, _('Widget has been successfully added'))
if module.app_label:
result['success_url'] = reverse('admin:app_list', kwargs={'app_label': module.app_label})
else:
result['success_url'] = reverse('admin:index')
else:
result['error'] = True
return JsonResponse(result)
@require_POST
def update_dashboard_module_collapse_view(request):
result = {'error': False}
try:
instance = UserDashboardModule.objects.get(pk=request.POST.get('id'))
form = UpdateDashboardModuleCollapseForm(request, request.POST, instance=instance)
if form.is_valid():
module = form.save()
result['collapsed'] = module.collapsed
else:
result['error'] = True
except UserDashboardModule.DoesNotExist:
result['error'] = True
return JsonResponse(result)
@require_POST
def remove_dashboard_module_view(request):
result = {'error': False}
try:
instance = UserDashboardModule.objects.get(pk=request.POST.get('id'))
form = RemoveDashboardModuleForm(request, request.POST, instance=instance)
if form.is_valid():
form.save()
else:
result['error'] = True
except UserDashboardModule.DoesNotExist:
result['error'] = True
return JsonResponse(result)
@require_GET
def load_dashboard_module_view(request, pk):
result = {'error': False}
try:
if not request.user.is_authenticated() or not request.user.is_staff:
raise ValidationError('error')
instance = UserDashboardModule.objects.get(pk=pk, user=request.user.pk)
module_cls = instance.load_module()
module = module_cls(model=instance, context={'request': request})
result['html'] = module.render()
except (ValidationError, UserDashboardModule.DoesNotExist):
result['error'] = True
return JsonResponse(result)
@require_POST
def reset_dashboard_view(request):
result = {'error': False}
form = ResetDashboardForm(request, request.POST)
if form.is_valid():
form.save()
else:
result['error'] = True
return JsonResponse(result)
| mit |
nishigori/boto | boto/datapipeline/exceptions.py | 235 | 1471 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class PipelineDeletedException(JSONResponseError):
pass
class InvalidRequestException(JSONResponseError):
pass
class TaskNotFoundException(JSONResponseError):
pass
class PipelineNotFoundException(JSONResponseError):
pass
class InternalServiceError(JSONResponseError):
pass
| mit |
project-ncl/pnc-cli | pnc_cli/swagger_client/apis/repositoryconfigurations_api.py | 2 | 28759 | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RepositoryconfigurationsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
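        # (Note: when no api_client is passed in, the instance falls back to
        #  Configuration().api_client, creating that shared ApiClient on first use.)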
def create_new(self, **kwargs):
"""
Creates a new Repository Configuration. NOTE: it does not create the repository in the scm server, it only creates an entry in PNC database. Useful when you already have internal and external scm repositories created.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RepositoryConfigurationRest body:
:return: RepositoryConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_new_with_http_info(**kwargs)
else:
(data) = self.create_new_with_http_info(**kwargs)
return data
def create_new_with_http_info(self, **kwargs):
"""
Creates a new Repository Configuration. NOTE: it does not create the repository in the scm server, it only creates an entry in PNC database. Useful when you already have internal and external scm repositories created.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RepositoryConfigurationRest body:
:return: RepositoryConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_new" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryConfigurationSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all(self, **kwargs):
"""
Gets all Repository Configurations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_with_http_info(**kwargs)
else:
(data) = self.get_all_with_http_info(**kwargs)
return data
def get_all_with_http_info(self, **kwargs):
"""
Gets all Repository Configurations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryConfigurationPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_specific(self, id, **kwargs):
"""
Gets a specific Repository Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Repository Configuration id (required)
:return: RepositoryConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_specific_with_http_info(id, **kwargs)
else:
(data) = self.get_specific_with_http_info(id, **kwargs)
return data
def get_specific_with_http_info(self, id, **kwargs):
"""
Gets a specific Repository Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Repository Configuration id (required)
:return: RepositoryConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_specific" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_specific`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryConfigurationSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def match(self, search, **kwargs):
"""
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.match_with_http_info(search, **kwargs)
else:
(data) = self.match_with_http_info(search, **kwargs)
return data
def match_with_http_info(self, search, **kwargs):
"""
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match_with_http_info(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['search', 'page_index', 'page_size', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method match" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'search' is set
if ('search' not in params) or (params['search'] is None):
raise ValueError("Missing the required parameter `search` when calling `match`")
collection_formats = {}
path_params = {}
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'search' in params:
query_params.append(('search', params['search']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations/match-by-scm-url', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryConfigurationPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, search, **kwargs):
"""
Search for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. The matching is done using LIKE.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url part to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.search_with_http_info(search, **kwargs)
else:
(data) = self.search_with_http_info(search, **kwargs)
return data
def search_with_http_info(self, search, **kwargs):
"""
Search for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. The matching is done using LIKE.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search_with_http_info(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url part to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['search', 'page_index', 'page_size', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'search' is set
if ('search' not in params) or (params['search'] is None):
raise ValueError("Missing the required parameter `search` when calling `search`")
collection_formats = {}
path_params = {}
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'search' in params:
query_params.append(('search', params['search']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations/search-by-scm-url', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryConfigurationPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, id, **kwargs):
"""
Updates an existing Repository Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Repository Configuration id (required)
:param RepositoryConfigurationRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(id, **kwargs)
else:
(data) = self.update_with_http_info(id, **kwargs)
return data
def update_with_http_info(self, id, **kwargs):
"""
Updates an existing Repository Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Repository Configuration id (required)
:param RepositoryConfigurationRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/repository-configurations/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| apache-2.0 |
grimmjow8/ansible | lib/ansible/modules/system/selinux_permissive.py | 48 | 4345 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <[email protected]>
# inspired by code of github.com/dandiker/
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: selinux_permissive
short_description: Change permissive domain in SELinux policy
description:
- Add and remove domain from the list of permissive domain.
version_added: "2.0"
options:
domain:
description:
- "the domain that will be added or removed from the list of permissive domains"
required: true
permissive:
description:
- "indicate if the domain should or should not be set as permissive"
required: true
choices: [ 'True', 'False' ]
no_reload:
description:
- "automatically reload the policy after a change"
- "default is set to 'false' as that's what most people would want after changing one domain"
- "Note that this doesn't work on older version of the library (example EL 6), the module will silently ignore it in this case"
required: false
default: False
choices: [ 'True', 'False' ]
store:
description:
- "name of the SELinux policy store to use"
required: false
default: null
notes:
    - Requires a sufficiently recent version of SELinux (i.e. EL 6 or newer)
requirements: [ policycoreutils-python ]
author: Michael Scherer <[email protected]>
'''
EXAMPLES = '''
- selinux_permissive:
name: httpd_t
permissive: true
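# Additional illustrative example (uses only the options documented above):
# set the domain back to enforcing without reloading the policy.
- selinux_permissive:
    name: httpd_t
    permissive: false
    no_reload: true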
'''
HAVE_SEOBJECT = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
pass
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(aliases=['name'], required=True),
store=dict(required=False, default=''),
permissive=dict(type='bool', required=True),
no_reload=dict(type='bool', required=False, default=False),
),
supports_check_mode=True
)
# global vars
changed = False
store = module.params['store']
permissive = module.params['permissive']
domain = module.params['domain']
no_reload = module.params['no_reload']
if not HAVE_SEOBJECT:
module.fail_json(changed=False, msg="policycoreutils-python required for this module")
try:
permissive_domains = seobject.permissiveRecords(store)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
# not supported on EL 6
if 'set_reload' in dir(permissive_domains):
permissive_domains.set_reload(not no_reload)
try:
all_domains = permissive_domains.get_all()
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
if permissive:
if domain not in all_domains:
if not module.check_mode:
try:
permissive_domains.add(domain)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
else:
if domain in all_domains:
if not module.check_mode:
try:
permissive_domains.delete(domain)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
module.exit_json(changed=changed, store=store,
permissive=permissive, domain=domain)
if __name__ == '__main__':
main()
| gpl-3.0 |
christophlsa/odoo | addons/account_asset/__init__.py | 446 | 1135 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset
import account_asset_invoice
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
smi96/django-blog_website | lib/python2.7/site-packages/django/test/signals.py | 240 | 5928 | import os
import threading
import time
import warnings
from django.core.signals import setting_changed
from django.db import connections, router
from django.db.utils import ConnectionRouter
from django.dispatch import Signal, receiver
from django.utils import timezone
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'}
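# Illustrative sketch (not part of Django): a project-level receiver written in
# the same style as the handlers below; MY_CACHE_SETTING and my_cache are
# hypothetical names, kept in comments so this module is unchanged at runtime.
#
# @receiver(setting_changed)
# def clear_my_cache(**kwargs):
#     if kwargs['setting'] == 'MY_CACHE_SETTING':
#         my_cache.clear()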
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
if kwargs['setting'] == 'CACHES':
from django.core.cache import caches
caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
if kwargs['setting'] == 'INSTALLED_APPS':
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
# Rebuild get_app_template_dirs cache.
from django.template.utils import get_app_template_dirs
get_app_template_dirs.cache_clear()
# Rebuild translations cache.
from django.utils.translation import trans_real
trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
if kwargs['setting'] == 'TIME_ZONE':
# Reset process time zone
if hasattr(time, 'tzset'):
if kwargs['value']:
os.environ['TZ'] = kwargs['value']
else:
os.environ.pop('TZ', None)
time.tzset()
# Reset local time zone cache
timezone.get_default_timezone.cache_clear()
# Reset the database connections' time zone
if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}:
for conn in connections.all():
try:
del conn.timezone
except AttributeError:
pass
try:
del conn.timezone_name
except AttributeError:
pass
tz_sql = conn.ops.set_time_zone_sql()
if tz_sql:
with conn.cursor() as cursor:
cursor.execute(tz_sql, [conn.timezone_name])
@receiver(setting_changed)
def clear_routers_cache(**kwargs):
if kwargs['setting'] == 'DATABASE_ROUTERS':
router.routers = ConnectionRouter().routers
@receiver(setting_changed)
def reset_template_engines(**kwargs):
if kwargs['setting'] in {
'TEMPLATES',
'TEMPLATE_DIRS',
'ALLOWED_INCLUDE_ROOTS',
'TEMPLATE_CONTEXT_PROCESSORS',
'TEMPLATE_DEBUG',
'TEMPLATE_LOADERS',
'TEMPLATE_STRING_IF_INVALID',
'DEBUG',
'FILE_CHARSET',
'INSTALLED_APPS',
}:
from django.template import engines
try:
del engines.templates
except AttributeError:
pass
engines._templates = None
engines._engines = {}
from django.template.engine import Engine
Engine.get_default.cache_clear()
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
if kwargs['setting'] == 'SERIALIZATION_MODULES':
from django.core import serializers
serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._default = None
trans_real._active = threading.local()
if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._translations = {}
trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
file_storage_settings = {
'DEFAULT_FILE_STORAGE',
'FILE_UPLOAD_DIRECTORY_PERMISSIONS',
'FILE_UPLOAD_PERMISSIONS',
'MEDIA_ROOT',
'MEDIA_URL',
}
if kwargs['setting'] in file_storage_settings:
from django.core.files.storage import default_storage
default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(**kwargs):
if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS:
# Considering the current implementation of the signals framework,
# stacklevel=5 shows the line containing the override_settings call.
warnings.warn("Overriding setting %s can lead to unexpected behavior."
% kwargs['setting'], stacklevel=5)
@receiver(setting_changed)
def root_urlconf_changed(**kwargs):
if kwargs['setting'] == 'ROOT_URLCONF':
from django.core.urlresolvers import clear_url_caches, set_urlconf
clear_url_caches()
set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_STORAGE',
'STATIC_ROOT',
'STATIC_URL',
}:
from django.contrib.staticfiles.storage import staticfiles_storage
staticfiles_storage._wrapped = empty
@receiver(setting_changed)
def static_finders_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_DIRS',
'STATIC_ROOT',
}:
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
@receiver(setting_changed)
def auth_password_validators_changed(**kwargs):
if kwargs['setting'] == 'AUTH_PASSWORD_VALIDATORS':
from django.contrib.auth.password_validation import get_default_password_validators
get_default_password_validators.cache_clear()
| mit |
Oliver2213/NVDAYoutube-dl | addon/globalPlugins/nvdaYoutubeDL/youtube_dl/extractor/varzesh3.py | 120 | 1696 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class Varzesh3IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?video\.varzesh3\.com/(?:[^/]+/)+(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://video.varzesh3.com/germany/bundesliga/5-%D9%88%D8%A7%DA%A9%D9%86%D8%B4-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AF%D8%B1%D9%88%D8%A7%D8%B2%D9%87%E2%80%8C%D8%A8%D8%A7%D9%86%D8%A7%D9%86%D8%9B%D9%87%D9%81%D8%AA%D9%87-26-%D8%A8%D9%88%D9%86%D8%AF%D8%B3/',
'md5': '2a933874cb7dce4366075281eb49e855',
'info_dict': {
'id': '76337',
'ext': 'mp4',
'title': '۵ واکنش برتر دروازهبانان؛هفته ۲۶ بوندسلیگا',
'description': 'فصل ۲۰۱۵-۲۰۱۴',
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_url = self._search_regex(
r'<source[^>]+src="([^"]+)"', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<div class="matn">(.+?)</div>',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
video_id = self._search_regex(
r"<link[^>]+rel='(?:canonical|shortlink)'[^>]+href='/\?p=([^']+)'",
webpage, display_id, default=display_id)
return {
'url': video_url,
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| gpl-2.0 |
tonybaloney/st2 | st2actions/tests/unit/policies/test_base.py | 6 | 6189 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from st2tests import config as test_config
test_config.parse_args()
import st2common
from st2actions.notifier import notifier
from st2actions import scheduler
from st2common.bootstrap.policiesregistrar import register_policy_types
from st2common.models.db.action import LiveActionDB
from st2common.persistence.policy import Policy
from st2common.services import action as action_service
from st2tests.base import DbTestCase
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import FixturesLoader
__all__ = [
'SchedulerPoliciesTest',
'NotifierPoliciesTest'
]
PACK = 'generic'
TEST_FIXTURES_1 = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policies': [
'policy_4.yaml',
]
}
TEST_FIXTURES_2 = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policies': [
'policy_1.yaml',
]
}
class SchedulerPoliciesTest(CleanDbTestCase):
@classmethod
def setUpClass(cls):
DbTestCase.setUpClass()
super(SchedulerPoliciesTest, cls).setUpClass()
def setUp(self):
super(SchedulerPoliciesTest, self).setUp()
# Register common policy types
register_policy_types(st2common)
loader = FixturesLoader()
models = loader.save_fixtures_to_db(fixtures_pack=PACK,
fixtures_dict=TEST_FIXTURES_2)
# Policy with "post_run" application
self.policy_db = models['policies']['policy_1.yaml']
@mock.patch('st2actions.scheduler.policies')
def test_disabled_policy_not_applied_on_pre_run(self, mock_policies):
scheduler_worker = scheduler.get_scheduler()
##########
# First test a scenario where policy is enabled
##########
self.assertTrue(self.policy_db.enabled)
        # Pre run hasn't been called yet, call count should be 0
self.assertEqual(mock_policies.get_driver.call_count, 0)
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
scheduler_worker._apply_pre_run_policies(liveaction_db=live_action_db)
        # One policy has been applied, so the call count should be 1
self.assertEqual(mock_policies.get_driver.call_count, 1)
##########
        # Now a scenario with a disabled policy
##########
mock_policies.get_driver.call_count = 0
self.policy_db.enabled = False
self.policy_db = Policy.add_or_update(self.policy_db)
self.assertFalse(self.policy_db.enabled)
self.assertEqual(mock_policies.get_driver.call_count, 0)
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
scheduler_worker._apply_pre_run_policies(liveaction_db=live_action_db)
        # Policy is disabled, so call_count should stay the same as before since
        # no policies have been applied
self.assertEqual(mock_policies.get_driver.call_count, 0)
class NotifierPoliciesTest(CleanDbTestCase):
@classmethod
def setUpClass(cls):
DbTestCase.setUpClass()
super(NotifierPoliciesTest, cls).setUpClass()
def setUp(self):
super(NotifierPoliciesTest, self).setUp()
# Register common policy types
register_policy_types(st2common)
loader = FixturesLoader()
models = loader.save_fixtures_to_db(fixtures_pack=PACK,
fixtures_dict=TEST_FIXTURES_1)
# Policy with "post_run" application
self.policy_db = models['policies']['policy_4.yaml']
@mock.patch('st2actions.notifier.notifier.policies')
def test_disabled_policy_not_applied_on_post_run(self, mock_policies):
notifier_worker = notifier.get_notifier()
##########
# First test a scenario where policy is enabled
##########
self.assertTrue(self.policy_db.enabled)
# Post run hasn't been called yet, call count should be 0
self.assertEqual(mock_policies.get_driver.call_count, 0)
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
notifier_worker._apply_post_run_policies(liveaction_db=live_action_db)
        # One policy has been applied, so the call count should be 1
self.assertEqual(mock_policies.get_driver.call_count, 1)
##########
        # Now a scenario with a disabled policy
##########
mock_policies.get_driver.call_count = 0
self.policy_db.enabled = False
self.policy_db = Policy.add_or_update(self.policy_db)
self.assertFalse(self.policy_db.enabled)
self.assertEqual(mock_policies.get_driver.call_count, 0)
liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
live_action_db, execution_db = action_service.request(liveaction)
notifier_worker._apply_post_run_policies(liveaction_db=live_action_db)
        # Policy is disabled, so call_count should stay the same as before since
        # no policies have been applied
self.assertEqual(mock_policies.get_driver.call_count, 0)
| apache-2.0 |
svn2github/vbox | src/VBox/ValidationKit/testmanager/webui/wuireport.py | 4 | 5986 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager WUI - Reports.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiContentBase;
from testmanager.webui.wuihlpgraph import WuiHlpGraphDataTable, WuiHlpBarGraph;
from testmanager.core.report import ReportModelBase;
class WuiReportBase(WuiContentBase):
"""
Base class for the reports.
"""
def __init__(self, oModel, dParams, fSubReport = False, fnDPrint = None, oDisp = None):
WuiContentBase.__init__(self, fnDPrint = fnDPrint, oDisp = oDisp);
self._oModel = oModel;
self._dParams = dParams;
self._fSubReport = fSubReport;
self._sTitle = None;
def generateNavigator(self, sWhere):
"""
Generates the navigator (manipulate _dParams).
Returns HTML.
"""
assert sWhere == 'top' or sWhere == 'bottom';
return '';
def generateReportBody(self):
"""
This is overridden by the child class to generate the report.
Returns HTML.
"""
return '<h3>Must override generateReportBody!</h3>';
def show(self):
"""
Generate the report.
Returns (sTitle, HTML).
"""
sTitle = self._sTitle if self._sTitle is not None else type(self).__name__;
sReport = self.generateReportBody();
if not self._fSubReport:
sReport = self.generateNavigator('top') + sReport + self.generateNavigator('bottom');
sTitle = self._oModel.sSubject + ' - ' + sTitle; ## @todo add subject to title in a proper way!
return (sTitle, sReport);
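# A minimal sketch of a concrete report, kept in comments only to illustrate
# the contract described above (WuiReportExample is hypothetical; the real
# subclasses follow below):
#
# class WuiReportExample(WuiReportBase):
#     def generateReportBody(self):
#         self._sTitle = 'Example report';
#         return '<p>report body as HTML</p>';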
class WuiReportSuccessRate(WuiReportBase):
"""
Generates a report displaying the success rate over time.
"""
def generateReportBody(self):
self._sTitle = 'Success rate';
adPeriods = self._oModel.getSuccessRates();
sReport = '';
oTable = WuiHlpGraphDataTable('Period', [ 'Succeeded', 'Skipped', 'Failed' ]);
#for i in range(len(adPeriods) - 1, -1, -1):
for i in range(len(adPeriods)):
dStatuses = adPeriods[i];
cSuccess = dStatuses[ReportModelBase.ksTestStatus_Success] + dStatuses[ReportModelBase.ksTestStatus_Skipped];
cTotal = cSuccess + dStatuses[ReportModelBase.ksTestStatus_Failure];
sPeriod = self._oModel.getPeriodDesc(i);
if cTotal > 0:
iPctSuccess = dStatuses[ReportModelBase.ksTestStatus_Success] * 100 / cTotal;
iPctSkipped = dStatuses[ReportModelBase.ksTestStatus_Skipped] * 100 / cTotal;
iPctFailure = dStatuses[ReportModelBase.ksTestStatus_Failure] * 100 / cTotal;
oTable.addRow(sPeriod, [ iPctSuccess, iPctSkipped, iPctFailure ],
[ '%s%% (%d)' % (iPctSuccess, dStatuses[ReportModelBase.ksTestStatus_Success]),
'%s%% (%d)' % (iPctSkipped, dStatuses[ReportModelBase.ksTestStatus_Skipped]),
'%s%% (%d)' % (iPctFailure, dStatuses[ReportModelBase.ksTestStatus_Failure]), ]);
else:
oTable.addRow(sPeriod, [ 0, 0, 0 ], [ '0%', '0%', '0%' ]);
cTotalNow = adPeriods[0][ReportModelBase.ksTestStatus_Success];
cTotalNow += adPeriods[0][ReportModelBase.ksTestStatus_Skipped];
cSuccessNow = cTotalNow;
cTotalNow += adPeriods[0][ReportModelBase.ksTestStatus_Failure];
sReport += '<p>Current success rate: ';
if cTotalNow > 0:
sReport += '%s%% (thereof %s%% skipped)</p>\n' \
% ( cSuccessNow * 100 / cTotalNow, adPeriods[0][ReportModelBase.ksTestStatus_Skipped] * 100 / cTotalNow);
else:
sReport += 'N/A</p>\n'
oGraph = WuiHlpBarGraph('success-rate', oTable, self._oDisp);
oGraph.setRangeMax(100);
sReport += oGraph.renderGraph();
return sReport;
class WuiReportFailureReasons(WuiReportBase):
"""
Generates a report displaying the failure reasons over time.
"""
def generateReportBody(self):
# Mockup.
        self._sTitle = 'Failure reasons';
return '<p>Graph showing COUNT(idFailureReason) grouped by time period.</p>' \
'<p>New reasons per period, tracked down to build revision.</p>' \
'<p>Show graph content in table form.</p>';
class WuiReportSummary(WuiReportBase):
"""
Summary report.
"""
def generateReportBody(self):
self._sTitle = 'Summary';
sHtml = '<p>This will display several reports and listings useful to get an overview of %s (id=%s).</p>' \
% (self._oModel.sSubject, self._oModel.aidSubjects,);
oSuccessRate = WuiReportSuccessRate(self._oModel, self._dParams, fSubReport = True,
fnDPrint = self._fnDPrint, oDisp = self._oDisp);
sHtml += oSuccessRate.show()[1];
return sHtml;
| gpl-2.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/site_compare/command_line.py | 179 | 27397 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse a command line, retrieving a command and its arguments.
Supports the concept of command line commands, each with its own set
of arguments. Supports dependent arguments and mutually exclusive arguments.
Basically, a better optparse. I took heed of epg's WHINE() in gvn.cmdline
and dumped optparse in favor of something better.
"""
import os.path
import re
import string
import sys
import textwrap
import types
def IsString(var):
"""Little helper function to see if a variable is a string."""
return type(var) in types.StringTypes
class ParseError(Exception):
"""Encapsulates errors from parsing, string arg is description."""
pass
class Command(object):
"""Implements a single command."""
def __init__(self, names, helptext, validator=None, impl=None):
"""Initializes Command from names and helptext, plus optional callables.
Args:
names: command name, or list of synonyms
helptext: brief string description of the command
validator: callable for custom argument validation
Should raise ParseError if it wants
impl: callable to be invoked when command is called
"""
self.names = names
self.validator = validator
self.helptext = helptext
self.impl = impl
self.args = []
self.required_groups = []
self.arg_dict = {}
self.positional_args = []
self.cmdline = None
class Argument(object):
"""Encapsulates an argument to a command."""
VALID_TYPES = ['string', 'readfile', 'int', 'flag', 'coords']
TYPES_WITH_VALUES = ['string', 'readfile', 'int', 'coords']
def __init__(self, names, helptext, type, metaname,
required, default, positional):
"""Command-line argument to a command.
Args:
names: argument name, or list of synonyms
helptext: brief description of the argument
type: type of the argument. Valid values include:
string - a string
readfile - a file which must exist and be available
for reading
int - an integer
flag - an optional flag (bool)
coords - (x,y) where x and y are ints
metaname: Name to display for value in help, inferred if not
specified
required: True if argument must be specified
default: Default value if not specified
positional: Argument specified by location, not name
Raises:
ValueError: the argument name is invalid for some reason
"""
if type not in Command.Argument.VALID_TYPES:
raise ValueError("Invalid type: %r" % type)
if required and default is not None:
raise ValueError("required and default are mutually exclusive")
if required and type == 'flag':
raise ValueError("A required flag? Give me a break.")
if metaname and type not in Command.Argument.TYPES_WITH_VALUES:
raise ValueError("Type %r can't have a metaname" % type)
# If no metaname is provided, infer it: use the alphabetical characters
# of the last provided name
if not metaname and type in Command.Argument.TYPES_WITH_VALUES:
metaname = (
names[-1].lstrip(string.punctuation + string.whitespace).upper())
self.names = names
self.helptext = helptext
self.type = type
self.required = required
self.default = default
self.positional = positional
self.metaname = metaname
self.mutex = [] # arguments that are mutually exclusive with
# this one
self.depends = [] # arguments that must be present for this
# one to be valid
self.present = False # has this argument been specified?
def AddDependency(self, arg):
"""Makes this argument dependent on another argument.
Args:
arg: name of the argument this one depends on
"""
if arg not in self.depends:
self.depends.append(arg)
def AddMutualExclusion(self, arg):
"""Makes this argument invalid if another is specified.
Args:
arg: name of the mutually exclusive argument.
"""
if arg not in self.mutex:
self.mutex.append(arg)
def GetUsageString(self):
"""Returns a brief string describing the argument's usage."""
if not self.positional:
string = self.names[0]
if self.type in Command.Argument.TYPES_WITH_VALUES:
string += "="+self.metaname
else:
string = self.metaname
if not self.required:
string = "["+string+"]"
return string
def GetNames(self):
"""Returns a string containing a list of the arg's names."""
if self.positional:
return self.metaname
else:
return ", ".join(self.names)
def GetHelpString(self, width=80, indent=5, names_width=20, gutter=2):
"""Returns a help string including help for all the arguments."""
names = [" "*indent + line +" "*(names_width-len(line)) for line in
textwrap.wrap(self.GetNames(), names_width)]
helpstring = textwrap.wrap(self.helptext, width-indent-names_width-gutter)
if len(names) < len(helpstring):
names += [" "*(indent+names_width)]*(len(helpstring)-len(names))
if len(helpstring) < len(names):
helpstring += [""]*(len(names)-len(helpstring))
return "\n".join([name_line + " "*gutter + help_line for
name_line, help_line in zip(names, helpstring)])
def __repr__(self):
if self.present:
string = '= %r' % self.value
else:
string = "(absent)"
return "Argument %s '%s'%s" % (self.type, self.names[0], string)
# end of nested class Argument
def AddArgument(self, names, helptext, type="string", metaname=None,
required=False, default=None, positional=False):
"""Command-line argument to a command.
Args:
names: argument name, or list of synonyms
helptext: brief description of the argument
type: type of the argument
      metaname: Name to display for value in help, inferred if not specified
required: True if argument must be specified
default: Default value if not specified
positional: Argument specified by location, not name
Raises:
ValueError: the argument already exists or is invalid
Returns:
The newly-created argument
"""
if IsString(names): names = [names]
names = [name.lower() for name in names]
for name in names:
if name in self.arg_dict:
raise ValueError("%s is already an argument"%name)
if (positional and required and
[arg for arg in self.args if arg.positional] and
not [arg for arg in self.args if arg.positional][-1].required):
raise ValueError(
"A required positional argument may not follow an optional one.")
arg = Command.Argument(names, helptext, type, metaname,
required, default, positional)
self.args.append(arg)
for name in names:
self.arg_dict[name] = arg
return arg
def GetArgument(self, name):
"""Return an argument from a name."""
return self.arg_dict[name.lower()]
def AddMutualExclusion(self, args):
"""Specifies that a list of arguments are mutually exclusive."""
if len(args) < 2:
raise ValueError("At least two arguments must be specified.")
args = [arg.lower() for arg in args]
for index in xrange(len(args)-1):
for index2 in xrange(index+1, len(args)):
self.arg_dict[args[index]].AddMutualExclusion(self.arg_dict[args[index2]])
def AddDependency(self, dependent, depends_on):
"""Specifies that one argument may only be present if another is.
Args:
dependent: the name of the dependent argument
depends_on: the name of the argument on which it depends
"""
self.arg_dict[dependent.lower()].AddDependency(
self.arg_dict[depends_on.lower()])
def AddMutualDependency(self, args):
"""Specifies that a list of arguments are all mutually dependent."""
if len(args) < 2:
raise ValueError("At least two arguments must be specified.")
args = [arg.lower() for arg in args]
for (arg1, arg2) in [(arg1, arg2) for arg1 in args for arg2 in args]:
if arg1 == arg2: continue
self.arg_dict[arg1].AddDependency(self.arg_dict[arg2])
def AddRequiredGroup(self, args):
"""Specifies that at least one of the named arguments must be present."""
if len(args) < 2:
raise ValueError("At least two arguments must be in a required group.")
args = [self.arg_dict[arg.lower()] for arg in args]
self.required_groups.append(args)
def ParseArguments(self):
"""Given a command line, parse and validate the arguments."""
# reset all the arguments before we parse
for arg in self.args:
arg.present = False
arg.value = None
self.parse_errors = []
# look for arguments remaining on the command line
while len(self.cmdline.rargs):
try:
self.ParseNextArgument()
except ParseError, e:
self.parse_errors.append(e.args[0])
# after all the arguments are parsed, check for problems
for arg in self.args:
if not arg.present and arg.required:
self.parse_errors.append("'%s': required parameter was missing"
% arg.names[0])
if not arg.present and arg.default:
arg.present = True
arg.value = arg.default
if arg.present:
for mutex in arg.mutex:
if mutex.present:
self.parse_errors.append(
"'%s', '%s': arguments are mutually exclusive" %
(arg.argstr, mutex.argstr))
for depend in arg.depends:
if not depend.present:
self.parse_errors.append("'%s': '%s' must be specified as well" %
(arg.argstr, depend.names[0]))
# check for required groups
for group in self.required_groups:
if not [arg for arg in group if arg.present]:
self.parse_errors.append("%s: at least one must be present" %
(", ".join(["'%s'" % arg.names[-1] for arg in group])))
# if we have any validators, invoke them
if not self.parse_errors and self.validator:
try:
self.validator(self)
except ParseError, e:
self.parse_errors.append(e.args[0])
# Helper methods so you can treat the command like a dict
def __getitem__(self, key):
arg = self.arg_dict[key.lower()]
if arg.type == 'flag':
return arg.present
else:
return arg.value
def __iter__(self):
return [arg for arg in self.args if arg.present].__iter__()
def ArgumentPresent(self, key):
"""Tests if an argument exists and has been specified."""
return key.lower() in self.arg_dict and self.arg_dict[key.lower()].present
def __contains__(self, key):
return self.ArgumentPresent(key)
def ParseNextArgument(self):
"""Find the next argument in the command line and parse it."""
arg = None
value = None
argstr = self.cmdline.rargs.pop(0)
# First check: is this a literal argument?
if argstr.lower() in self.arg_dict:
arg = self.arg_dict[argstr.lower()]
if arg.type in Command.Argument.TYPES_WITH_VALUES:
if len(self.cmdline.rargs):
value = self.cmdline.rargs.pop(0)
# Second check: is this of the form "arg=val" or "arg:val"?
if arg is None:
delimiter_pos = -1
for delimiter in [':', '=']:
pos = argstr.find(delimiter)
if pos >= 0:
if delimiter_pos < 0 or pos < delimiter_pos:
delimiter_pos = pos
if delimiter_pos >= 0:
testarg = argstr[:delimiter_pos]
testval = argstr[delimiter_pos+1:]
if testarg.lower() in self.arg_dict:
arg = self.arg_dict[testarg.lower()]
argstr = testarg
value = testval
# Third check: does this begin an argument?
if arg is None:
for key in self.arg_dict.iterkeys():
if (len(key) < len(argstr) and
self.arg_dict[key].type in Command.Argument.TYPES_WITH_VALUES and
argstr[:len(key)].lower() == key):
value = argstr[len(key):]
argstr = argstr[:len(key)]
arg = self.arg_dict[argstr]
# Fourth check: do we have any positional arguments available?
if arg is None:
for positional_arg in [
testarg for testarg in self.args if testarg.positional]:
if not positional_arg.present:
arg = positional_arg
value = argstr
argstr = positional_arg.names[0]
break
# Push the retrieved argument/value onto the largs stack
if argstr: self.cmdline.largs.append(argstr)
if value: self.cmdline.largs.append(value)
# If we've made it this far and haven't found an arg, give up
if arg is None:
raise ParseError("Unknown argument: '%s'" % argstr)
# Convert the value, if necessary
if arg.type in Command.Argument.TYPES_WITH_VALUES and value is None:
raise ParseError("Argument '%s' requires a value" % argstr)
if value is not None:
value = self.StringToValue(value, arg.type, argstr)
arg.argstr = argstr
arg.value = value
arg.present = True
# end method ParseNextArgument
def StringToValue(self, value, type, argstr):
"""Convert a string from the command line to a value type."""
try:
if type == 'string':
pass # leave it be
elif type == 'int':
try:
value = int(value)
except ValueError:
raise ParseError
elif type == 'readfile':
if not os.path.isfile(value):
raise ParseError("'%s': '%s' does not exist" % (argstr, value))
elif type == 'coords':
try:
value = [int(val) for val in
re.match("\(\s*(\d+)\s*\,\s*(\d+)\s*\)\s*\Z", value).
groups()]
except AttributeError:
raise ParseError
else:
raise ValueError("Unknown type: '%s'" % type)
except ParseError, e:
# The bare exception is raised in the generic case; more specific errors
# will arrive with arguments and should just be reraised
if not e.args:
e = ParseError("'%s': unable to convert '%s' to type '%s'" %
(argstr, value, type))
raise e
return value
def SortArgs(self):
"""Returns a method that can be passed to sort() to sort arguments."""
def ArgSorter(arg1, arg2):
"""Helper for sorting arguments in the usage string.
Positional arguments come first, then required arguments,
then optional arguments. Pylint demands this trivial function
have both Args: and Returns: sections, sigh.
Args:
arg1: the first argument to compare
arg2: the second argument to compare
Returns:
-1 if arg1 should be sorted first, +1 if it should be sorted second,
and 0 if arg1 and arg2 have the same sort level.
"""
return ((arg2.positional-arg1.positional)*2 +
(arg2.required-arg1.required))
return ArgSorter
def GetUsageString(self, width=80, name=None):
"""Gets a string describing how the command is used."""
if name is None: name = self.names[0]
initial_indent = "Usage: %s %s " % (self.cmdline.prog, name)
subsequent_indent = " " * len(initial_indent)
sorted_args = self.args[:]
sorted_args.sort(self.SortArgs())
return textwrap.fill(
" ".join([arg.GetUsageString() for arg in sorted_args]), width,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
def GetHelpString(self, width=80):
"""Returns a list of help strings for all this command's arguments."""
sorted_args = self.args[:]
sorted_args.sort(self.SortArgs())
return "\n".join([arg.GetHelpString(width) for arg in sorted_args])
# end class Command
class CommandLine(object):
"""Parse a command line, extracting a command and its arguments."""
def __init__(self):
self.commands = []
self.cmd_dict = {}
# Add the help command to the parser
help_cmd = self.AddCommand(["help", "--help", "-?", "-h"],
"Displays help text for a command",
ValidateHelpCommand,
DoHelpCommand)
help_cmd.AddArgument(
"command", "Command to retrieve help for", positional=True)
help_cmd.AddArgument(
"--width", "Width of the output", type='int', default=80)
self.Exit = sys.exit # override this if you don't want the script to halt
# on error or on display of help
self.out = sys.stdout # override these if you want to redirect
self.err = sys.stderr # output or error messages
def AddCommand(self, names, helptext, validator=None, impl=None):
"""Add a new command to the parser.
Args:
names: command name, or list of synonyms
helptext: brief string description of the command
validator: method to validate a command's arguments
impl: callable to be invoked when command is called
Raises:
ValueError: raised if command already added
Returns:
The new command
"""
if IsString(names): names = [names]
for name in names:
if name in self.cmd_dict:
raise ValueError("%s is already a command"%name)
cmd = Command(names, helptext, validator, impl)
cmd.cmdline = self
self.commands.append(cmd)
for name in names:
self.cmd_dict[name.lower()] = cmd
return cmd
def GetUsageString(self):
"""Returns simple usage instructions."""
return "Type '%s help' for usage." % self.prog
def ParseCommandLine(self, argv=None, prog=None, execute=True):
"""Does the work of parsing a command line.
Args:
argv: list of arguments, defaults to sys.args[1:]
prog: name of the command, defaults to the base name of the script
execute: if false, just parse, don't invoke the 'impl' member
Returns:
The command that was executed
"""
if argv is None: argv = sys.argv[1:]
if prog is None: prog = os.path.basename(sys.argv[0]).split('.')[0]
# Store off our parameters, we may need them someday
self.argv = argv
self.prog = prog
# We shouldn't be invoked without arguments, that's just lame
if not len(argv):
self.out.writelines(self.GetUsageString())
self.Exit()
return None # in case the client overrides Exit
# Is it a valid command?
self.command_string = argv[0].lower()
if not self.command_string in self.cmd_dict:
self.err.write("Unknown command: '%s'\n\n" % self.command_string)
self.out.write(self.GetUsageString())
self.Exit()
return None # in case the client overrides Exit
self.command = self.cmd_dict[self.command_string]
# "rargs" = remaining (unparsed) arguments
# "largs" = already parsed, "left" of the read head
self.rargs = argv[1:]
self.largs = []
# let the command object do the parsing
self.command.ParseArguments()
if self.command.parse_errors:
# there were errors, output the usage string and exit
self.err.write(self.command.GetUsageString()+"\n\n")
self.err.write("\n".join(self.command.parse_errors))
self.err.write("\n\n")
self.Exit()
elif execute and self.command.impl:
self.command.impl(self.command)
return self.command
def __getitem__(self, key):
return self.cmd_dict[key]
def __iter__(self):
return self.cmd_dict.__iter__()
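# Illustrative usage sketch (hypothetical "fetch" command; kept as comments so
# the module still imports cleanly -- main() below is the real self-test):
#
#   cmdline = CommandLine()
#   fetch = cmdline.AddCommand("fetch", "download a resource")
#   fetch.AddArgument("url", "resource to fetch", positional=True, required=True)
#   fetch.AddArgument("--retries", "retry count", type='int', default=3)
#   cmdline.ParseCommandLine(["fetch", "http://example.com", "--retries=5"])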
def ValidateHelpCommand(command):
"""Checks to make sure an argument to 'help' is a valid command."""
if 'command' in command and command['command'] not in command.cmdline:
raise ParseError("'%s': unknown command" % command['command'])
def DoHelpCommand(command):
"""Executed when the command is 'help'."""
out = command.cmdline.out
width = command['--width']
if 'command' not in command:
out.write(command.GetUsageString())
out.write("\n\n")
indent = 5
gutter = 2
command_width = (
max([len(cmd.names[0]) for cmd in command.cmdline.commands]) + gutter)
for cmd in command.cmdline.commands:
cmd_name = cmd.names[0]
initial_indent = (" "*indent + cmd_name + " "*
(command_width+gutter-len(cmd_name)))
subsequent_indent = " "*(indent+command_width+gutter)
out.write(textwrap.fill(cmd.helptext, width,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent))
out.write("\n")
out.write("\n")
else:
help_cmd = command.cmdline[command['command']]
out.write(textwrap.fill(help_cmd.helptext, width))
out.write("\n\n")
out.write(help_cmd.GetUsageString(width=width))
out.write("\n\n")
out.write(help_cmd.GetHelpString(width=width))
out.write("\n")
command.cmdline.Exit()
def main():
# If we're invoked rather than imported, run some tests
cmdline = CommandLine()
# Since we're testing, override Exit()
def TestExit():
pass
cmdline.Exit = TestExit
# Actually, while we're at it, let's override error output too
cmdline.err = open(os.path.devnull, "w")
test = cmdline.AddCommand(["test", "testa", "testb"], "test command")
test.AddArgument(["-i", "--int", "--integer", "--optint", "--optionalint"],
"optional integer parameter", type='int')
test.AddArgument("--reqint", "required integer parameter", type='int',
required=True)
test.AddArgument("pos1", "required positional argument", positional=True,
required=True)
test.AddArgument("pos2", "optional positional argument", positional=True)
test.AddArgument("pos3", "another optional positional arg",
positional=True)
# mutually dependent arguments
test.AddArgument("--mutdep1", "mutually dependent parameter 1")
test.AddArgument("--mutdep2", "mutually dependent parameter 2")
test.AddArgument("--mutdep3", "mutually dependent parameter 3")
test.AddMutualDependency(["--mutdep1", "--mutdep2", "--mutdep3"])
# mutually exclusive arguments
test.AddArgument("--mutex1", "mutually exclusive parameter 1")
test.AddArgument("--mutex2", "mutually exclusive parameter 2")
test.AddArgument("--mutex3", "mutually exclusive parameter 3")
test.AddMutualExclusion(["--mutex1", "--mutex2", "--mutex3"])
# dependent argument
test.AddArgument("--dependent", "dependent argument")
test.AddDependency("--dependent", "--int")
# other argument types
test.AddArgument("--file", "filename argument", type='readfile')
test.AddArgument("--coords", "coordinate argument", type='coords')
test.AddArgument("--flag", "flag argument", type='flag')
test.AddArgument("--req1", "part of a required group", type='flag')
test.AddArgument("--req2", "part 2 of a required group", type='flag')
test.AddRequiredGroup(["--req1", "--req2"])
# a few failure cases
exception_cases = """
test.AddArgument("failpos", "can't have req'd pos arg after opt",
positional=True, required=True)
+++
test.AddArgument("--int", "this argument already exists")
+++
test.AddDependency("--int", "--doesntexist")
+++
test.AddMutualDependency(["--doesntexist", "--mutdep2"])
+++
test.AddMutualExclusion(["--doesntexist", "--mutex2"])
+++
test.AddArgument("--reqflag", "required flag", required=True, type='flag')
+++
test.AddRequiredGroup(["--req1", "--doesntexist"])
"""
for exception_case in exception_cases.split("+++"):
try:
exception_case = exception_case.strip()
exec exception_case # yes, I'm using exec, it's just for a test.
except ValueError:
# this is expected
pass
except KeyError:
# ...and so is this
pass
else:
print ("FAILURE: expected an exception for '%s'"
" and didn't get it" % exception_case)
# Let's do some parsing! first, the minimal success line:
MIN = "test --reqint 123 param1 --req1 "
# tuples of (command line, expected error count)
test_lines = [
("test --int 3 foo --req1", 1), # missing required named parameter
("test --reqint 3 --req1", 1), # missing required positional parameter
(MIN, 0), # success!
("test param1 --reqint 123 --req1", 0), # success, order shouldn't matter
("test param1 --reqint 123 --req2", 0), # success, any of required group ok
(MIN+"param2", 0), # another positional parameter is okay
(MIN+"param2 param3", 0), # and so are three
(MIN+"param2 param3 param4", 1), # but four are just too many
(MIN+"--int", 1), # where's the value?
(MIN+"--int 456", 0), # this is fine
(MIN+"--int456", 0), # as is this
(MIN+"--int:456", 0), # and this
(MIN+"--int=456", 0), # and this
(MIN+"--file c:\\windows\\system32\\kernel32.dll", 0), # yup
(MIN+"--file c:\\thisdoesntexist", 1), # nope
(MIN+"--mutdep1 a", 2), # no!
(MIN+"--mutdep2 b", 2), # also no!
(MIN+"--mutdep3 c", 2), # dream on!
(MIN+"--mutdep1 a --mutdep2 b", 2), # almost!
(MIN+"--mutdep1 a --mutdep2 b --mutdep3 c", 0), # yes
(MIN+"--mutex1 a", 0), # yes
(MIN+"--mutex2 b", 0), # yes
(MIN+"--mutex3 c", 0), # fine
(MIN+"--mutex1 a --mutex2 b", 1), # not fine
(MIN+"--mutex1 a --mutex2 b --mutex3 c", 3), # even worse
(MIN+"--dependent 1", 1), # no
(MIN+"--dependent 1 --int 2", 0), # ok
(MIN+"--int abc", 1), # bad type
(MIN+"--coords abc", 1), # also bad
(MIN+"--coords (abc)", 1), # getting warmer
(MIN+"--coords (abc,def)", 1), # missing something
(MIN+"--coords (123)", 1), # ooh, so close
(MIN+"--coords (123,def)", 1), # just a little farther
(MIN+"--coords (123,456)", 0), # finally!
("test --int 123 --reqint=456 foo bar --coords(42,88) baz --req1", 0)
]
badtests = 0
for (test, expected_failures) in test_lines:
cmdline.ParseCommandLine([x.strip() for x in test.strip().split(" ")])
if not len(cmdline.command.parse_errors) == expected_failures:
print "FAILED:\n issued: '%s'\n expected: %d\n received: %d\n\n" % (
test, expected_failures, len(cmdline.command.parse_errors))
badtests += 1
print "%d failed out of %d tests" % (badtests, len(test_lines))
cmdline.ParseCommandLine(["help", "test"])
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
buaakq/codezero | tools/cml2-tools/autoconfigure.py | 7 | 32845 | #!/usr/bin/env python
#
# linux/scripts/autoconfigure.py : Automagical Kernel Configuration.
#
# Copyright (C) 2000-2002 Eric S. Raymond <[email protected]>
# This is free software, see GNU General Public License 2 for details.
#
# This script tries to autoconfigure the Linux kernel, detecting the
# hardware (devices, ...) and software (protocols, filesystems, ...).
# It uses soft detection: no direct IO access to unknown devices, thus
# it is always safe to run this script and it never hangs, but it cannot
# detect all hardware (mainly misses some very old hardware). You don't
# need root, but you will need a CML2 rulebase handy.
#
# Most of the smarts in this script is in the file of probe rules
# maintained by Giacomo Catenazzi and brought in by execfile.
import sys, getopt, os, glob, commands, re
import cml, cmlsystem
from cml import y, m, n # For use in the autoprobe rules
lang = {
"COMPLETE":"Configuration complete.",
"COMPLEMENT":"* Computing complement sets",
"DERIVED":"Symbol %s is derived and cannot be set.",
"DONE":"Done",
"EFFECTS":"Side effects:",
"NOCMDLINE":"%s is the wrong type to be set from the command line",
"OPTUNKNOWN":"autoconfigure: unknown option.\n",
"ROOTFS":"* %s will be hard-compiled in for the root filesystem\n",
"ROOTHW":"* %s will be hard-compiled in to run the root device\n",
"ROOTLOOK":"# Looking for your root filesystem...\n",
"ROOTWARN":"** Warning: I could not identify the " \
"bus type of your root drive!\n",
"SETFAIL" : "%s failed while %s was being set to %s\n",
"SYMUNKNOWN":"cmlconfigure: unknown symbol %s\n",
"TURNOFF":"# Turning off unprobed device symbols",
"UNAME":"Can't determine ARCH, uname failed.",
}
class ConfigFile:
"Object that represents a generated configuration."
def __init__(self, myconfiguration, hardcompile, debuglevel=0):
# Prepare an output object to accept the configuration file
self.hardcompile = hardcompile
self.myconfiguration = myconfiguration
myconfiguration.debug = debuglevel
self.modified = {}
self.emitted = {}
if debuglevel:
sys.stderr.write("* Debug level %d" % debuglevel)
# 'found' sets the value 'y/m' (driver detected)
# 'found_y' sets the value 'y' (driver detected, forces built-in)
# 'found_m' sets the value 'm' (driver detected, build as module)
# 'found_n' sets the value 'n' (driver not needed)
#
# The priority is: y > m > n > 'other'
def found(self, symbol, val=None, label=None):
if type(symbol) == type(""):
symbol = self.myconfiguration.dictionary.get(symbol)
# Ignore obsolete symbols
if not symbol:
return
# Ignore attempts to set derived symbols. Some autoprobes
# do this because they were composed in ignorance of the rulebase.
elif symbol.is_derived():
return
# If no value specified, play some tricks.
if val == None:
if symbol.type=="bool" or (self.hardcompile and symbol.type=="trit"):
val = cml.y
elif symbol.type == "trit":
val = cml.m
elif symbol.is_numeric():
val = 0
elif symbol.type == "string":
val = ""
if not self.modified.has_key(symbol) or symbol.eval() < val:
self.myconfiguration.set_symbol(symbol, val)
self.modified[symbol] = 1
(ok, effects, violations) = self.myconfiguration.set_symbol(symbol, val)
if ok:
if label:
symbol.setprop(label)
else:
for violation in violations:
sys.stderr.write(lang["SETFAIL"] % (`violation`, symbol.name, val))
def found_y(self, var, label=None): self.found(var, cml.y, label)
def found_m(self, var, label=None): self.found(var, cml.m, label)
def found_n(self, var, label=None): self.found(var, cml.n, label)
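    # Illustrative aside (not from the original source): per the priority
    # comment above, a probe that reports y for a symbol is meant to win over
    # a later m or n report for the same symbol.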
def yak(self, symbol):
if not self.emitted.has_key(symbol):
try:
entry = self.myconfiguration.dictionary[symbol]
if entry.prompt:
sys.stderr.write("* " + symbol + ": " + entry.prompt + "\n")
self.emitted[symbol] = 1
except KeyError:
sys.stderr.write("! Obsolete symbol: " + symbol + "\n")
def complement(self, symbol, value, baton, label):
"Force a complement set to a specified value."
symbol = self.myconfiguration.dictionary[symbol]
if not symbol.eval():
return
for driver in self.myconfiguration.dictionary.values():
if baton: baton.twirl()
if driver.is_symbol() and driver.is_logical() \
and self.myconfiguration.is_visible(driver) \
and driver.setcount == 0 \
and symbol.ancestor_of(driver):
set_to = value
if driver.type == "bool" and value == cml.m:
set_to = cml.y
self.found(driver.name, set_to, label)
def force_dependents_modular(self, symbol, legend):
"Force all trit-valued dependents of a symbol to be modular."
net_ethernet = self.myconfiguration.dictionary[symbol]
for driver in self.myconfiguration.dictionary.values():
if driver.is_symbol() and driver.type == "trit" \
and driver.eval() == cml.y \
and self.myconfiguration.is_visible(driver) \
and net_ethernet.ancestor_of(driver):
driver.setprop(legend)
self.found(driver, cml.m)
def enabled(self, symbol):
"Is a given symbol enabled?"
return self.myconfiguration.dictionary[symbol]
# Now define classes for probing and reporting the system state
class PCIDevice:
"Identification data for a device on the PCI bus."
def __init__(self, procdata):
"Initialize PCI device ID data based on what's in a /proc entry."
procdata = map(ord, procdata)
self.vendor = "%02x%02x" % (procdata[1], procdata[0])
self.device = "%02x%02x" % (procdata[3], procdata[2])
if procdata[14]:
self.subvendor = None
self.subdevice = None
else:
self.subvendor = "%02x%02x" % (procdata[45], procdata[44])
self.subdevice = "%02x%02x" % (procdata[47], procdata[46])
self.revision = "%02x" % procdata[8]
self.deviceclass = "%02x%02x" % (procdata[11], procdata[10])
self.interface = "%02x" % procdata[9]
# Here is the digest format:
# "pci: xxxx,yyyy,zz:Class:aabb,cc" or
        # "pci: xxxx,yyyy,ssss,rrrr,zz:Class:aabb,cc"
# where: xxxx,yyyy: the vendor and device id
# ssss,rrrr: the sub-vendor and sub-device id
# zz: revision
# aabb,cc: Device Class, Interface
self.digest = self.vendor + "," + self.device
if self.subvendor:
self.digest += "," + self.subvendor + "," + self.subdevice
self.digest += ",%s;Class:%s,%s\n" % (self.revision,self.deviceclass,self.interface)
def __repr__(self):
return "pci: " + self.digest
class PCIScanner:
"Encapsulate the PCI hardware registry state."
def __init__(self):
"Unpack data from the PCI hardware registry."
self.devices = []
for f in glob.glob("/proc/bus/pci/??/*"):
dfp = open(f)
self.devices.append(PCIDevice(dfp.read()))
dfp.close()
def search(self, pattern):
"Search for a device match by prefix in the digest."
pattern = re.compile(pattern, re.I)
return not not filter(lambda x, p=pattern: p.search(x.digest), self.devices)
def __repr__(self):
return "".join(map(repr, self.devices))
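# Illustrative usage sketch (comments only; 8086 is the Intel PCI vendor ID,
# while the real probe rules below pass CML symbol names instead of printing):
#
#   pci = PCIScanner()
#   if pci.search("^8086,"):    # digest starts with "vendor,device"
#       print "found an Intel PCI device"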
class FieldParser:
"Parse entire lines, or a given field, out of a file or command output."
def __init__(self, sources):
self.items = []
for item in sources:
if type(item) == type(()):
file = item[0]
field = item[1]
else:
file = item
field = None
try:
if file[0] == '/':
ifp = open(file, "r")
lines = ifp.readlines()
ifp.close()
else:
(status, output) = commands.getstatusoutput(file)
if status:
raise IOError
lines = output.split("\n")
except IOError:
continue
# No field specified, capture entire line
if not field:
self.items += lines
# Numeric (1-origin) field index, capture that
# space-separated field.
elif type(field) == type(0):
for line in lines:
fields = line.split()
if len(fields) >= field and fields[field-1] not in self.items:
self.items.append(fields[field-1])
# Regexp specified, collect group 1
else:
for line in lines:
lookfor = re.compile(field)
match = lookfor.search(line)
if match:
res = match.group(1)
if res not in self.items:
self.items.append(res)
def find(self, str, ind=0):
"Is given string or regexp pattern found in the file?"
match = re.compile(str)
result = filter(lambda x: x, map(lambda x, ma=match: ma.search(x), self.items))
if result:
result = result[ind]
if result.groups():
result = ",".join(result.groups())
return result
def __repr__(self):
return `self.items`
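# Illustrative usage sketch (comments only, mirroring parsers that
# autoconfigure() really builds further down):
#
#   devices = FieldParser((("/proc/devices", "[0-9]+ (.*)"),))  # regexp: keep group 1
#   mounts = FieldParser((("/proc/mounts", 3),))                # keep 3rd whitespace field
#   if devices.find(r"\bpty\b"):
#       pass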
#
# Main sequence begins here
#
def get_arch():
# Get the architecture (taken from top-level Unix makefile).
(error, ARCH) = commands.getstatusoutput('uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/')
if error:
sys.stderr.write(lang["UNAME"])
raise SystemExit, 1
# A platform symbol has to be set, otherwise many assignments will fail
ARCHSYMBOL = re.compile("i.86").sub("x86", ARCH)
ARCHSYMBOL = ARCHSYMBOL.replace("superh", "sh")
ARCHSYMBOL = ARCHSYMBOL.replace("sparc32", "sparc")
ARCHSYMBOL = ARCHSYMBOL.replace("sparc64", "sparc")
ARCHSYMBOL = ARCHSYMBOL.upper()
return(ARCH, ARCHSYMBOL)
# We can't assume 2.1 nested scopes, so refer shared stuff to global level.
config = cpu = cpu_id = pci = isapnp = mca = usbp = usbc = usbi = None
fs = devices = m_devices = misc = net = ide = dmesg = None
modules = cpu_latch = None
fsmap = {}
reliable = {}
def autoconfigure(configuration, hardcompile, debuglevel):
global config, cpu, cpu_id, pci, isapnp, mca, usbp, usbc, usbi, fs
global devices, m_devices, misc, net, ide, dmesg, modules, cpu_latch
global fsmap, reliable
configuration.interactive = 0 # Don't deduce from visibility.
config = ConfigFile(configuration, hardcompile, debuglevel)
#
# Here is where we query the system state.
#
(ARCH, ARCHSYMBOL) = get_arch()
config.found_y(ARCHSYMBOL)
config.yak(ARCHSYMBOL)
# Get the processor type
cpu = FieldParser(("/proc/cpuinfo",))
if ARCHSYMBOL == 'SPARC':
processors = int(cpu.find("^ncpus active.*: *([0-9]*)"))
vendor = cpu.find("^cpu.*: *(.*)")
cpufam = cpu.find("^type.*: *([-A-Za-z0-9_]*)")
mod = cpu.find("^fpu.*: *(.*)")
name = cpu.find("^MMU Type.*: *(.*)")
else:
processors = int(cpu.find("^processor.*: *([0-9]*)", -1)) + 1
vendor = cpu.find("^vendor_id.*: *([-A-Za-z0-9_]*)")
cpufam = cpu.find("^cpu family.*: *([-A-Za-z0-9_]*)")
mod = cpu.find("^model.*: *([-A-Za-z0-9_]*)")
name = cpu.find("^model name.*: *(.*)")
cpu_id = vendor + ":" + cpufam + ":" + mod + ":" + name
cpu_latch = 0
# Now query for features
pci = PCIScanner()
isapnp = FieldParser((("/proc/bus/isapnp/devices", 2),))
mca = FieldParser(("/proc/mca/pos",))
usbp = FieldParser((("/proc/bus/usb/devices", "^P:.*Vendor=([A-Fa-f0-9]*)\s.*ProdID=\([A-Fa-f0-9]*\)"),))
usbc = FieldParser((("/proc/bus/usb/devices", "^D:.*Cls=([A-Fa-f0-9]*)[^A-Fa-f0-9].*Sub=([A-Fa-f0-9]*)[^A-Fa-f0-9].*Prot=([A-Fa-f0-9]*)"),))
usbi = FieldParser((("/proc/bus/usb/devices", "^I:.*Cls=([A-Fa-f0-9]*)[^A-Fa-f0-9].*Sub=([A-Fa-f0-9]*)[^A-Fa-f0-9].*Prot=([A-Fa-f0-9]*)"),))
fs = FieldParser((("/proc/mounts",3),
("/etc/mtab", 3),
("/etc/fstab", 3)))
devices = FieldParser((("/proc/devices", "[0-9]+ (.*)"),))
m_devices = FieldParser((("/proc/misc", "[0-9]+ (.*)"),))
misc = FieldParser(("/proc/iomem", "/proc/ioports", "/proc/dma", "/proc/interrupts"))
net = FieldParser((("/proc/net/sockstat","^([A-Z0-9]*): inuse [1-9]"),))
ide = FieldParser(glob.glob('/proc/ide/hd?/media'))
dmesg = FieldParser(("/var/log/dmesg", "dmesg"))
modules = FieldParser((("/proc/modules", 1),))
#
# Tests that won't fit in the rulesfile format
#
# Source: linux/i386/kernel/setup.c
if dmesg.find("Use a PAE"):
config.found_y("HIGHMEM64G")
elif dmesg.find("Use a HIGHMEM"):
config.found_y("HIGHMEM4G") ##Source: linux/i386/kernel/setup.c
else:
highmem = dmesg.find("([0-9]*)MB HIGHMEM avail.")
if not highmem:
config.found_y("NOHIGHMEM")
elif int(highmem) > 3072:
config.found_y("HIGHMEM64G")
else:
config.found_y("HIGHMEM4G")
# SMP? This test is reliable.
if processors == 0:
processors = len(filter(lambda x: x.find('processor') > -1, cpu.items))
if processors > 1:
config.found_y("SMP")
config.yak("SMP")
fsmap = {}
reliable = {}
#
# Here are the function calls used by the rules file
#
TRUE = 1
FALSE = 0
PRESENT = 1
ABSENT = 0
def DEBUG(str):
sys.stderr.write("# " + str + "\n")
# Following three tests are reliable -- that is, if PCI or PNP
# tests fail we know the feature is *not* there.
def PCI(prefix, symbol):
global pci, config
reliable[symbol] = "PCI"
if pci.search("^" + prefix):
config.yak(symbol)
config.found(symbol, None, "PCI")
def PCI_CLASS(match, symbol):
global pci, config
reliable[symbol] = "PCI_CLASS"
if pci.search("Class:" + match):
config.yak(symbol)
config.found(symbol, None, "PCI_CLASS")
def PNP(match, symbol):
global isapnp, config
reliable[symbol] = "PNP"
if isapnp.find(match):
config.yak(symbol)
config.found(symbol, None, "PNP")
def MCA(match, symbol):
global mca, config
reliable[symbol] = "MCA"
# FIXME: Not certain I've got the byte order right here
if mca.find(": " + match[2:] + " " + match[:2]):
config.yak(symbol)
config.found(symbol, None, "MCA")
# USB tests reliably detect connected devices, but the bus is hot-plug.
def USBP(match, symbol):
global usbp, config
if usbp.find(match):
config.yak(symbol)
config.found(symbol, None, "USBP")
def USBC(match, symbol):
global usbc, config
if usbc.find(match):
config.yak(symbol)
config.found(symbol, None, "USBC")
def USBI(match, symbol):
global usbi, config
if usbi.find(match):
config.yak(symbol)
config.found(symbol, None, "USBI")
# Remaining tests rely on prior kernel configuration.
def FS(match, symbol):
global fs, fsmap, config
if fs.find(r"\b" + match + r"\b"):
config.yak(symbol)
config.found(symbol, None, "FS")
# Also, build the map of file system types to symbols.
fsmap[match] = symbol
def DEV(match, symbol):
global devices, config
if devices.find(r"\b" + match + r"\b"):
config.yak(symbol)
config.found(symbol, None, "DEV")
def DEVM(match, symbol):
global m_devices, config
if m_devices.find(r"\b" + match + r"\b"):
config.yak(symbol)
config.found(symbol, None, "DEV_M")
def CONS(match, symbol):
global dmesg, config
if dmesg.find("^Console: .* " + match + " "):
config.yak(symbol)
config.found(symbol, None, "CONS")
def DMESG(match, symbol, truthval=TRUE):
global dmesg, config
if dmesg.find(match):
if truthval:
config.found(symbol, None, "DMESG")
config.yak(symbol)
else:
config.found_n(symbol, "DMESG")
def NET(match, symbol):
global net, config
if net.find(match):
config.yak(symbol)
config.found(symbol, None, "NET")
def IDE(match, symbol):
global ide, config
if ide.find(match):
config.yak(symbol)
config.found(symbol, None, "IDE")
def REQ(match, symbol):
global misc, config
if misc.find(match):
config.yak(symbol)
config.found(symbol, None, "REQ")
def CPUTYPE(match, symbol):
global cpu_latch, config
if not cpu_latch and re.search(match, cpu_id):
config.found_y(symbol, "CPUTYPE")
config.yak(symbol)
cpu_latch = 1
def CPUINFO(match, symbol, present=PRESENT, truthval=cml.y):
global cpu, config
if (not not cpu.find(match)) == present:
config.found(symbol, truthval, "CPUINFO")
if truthval:
config.yak(symbol)
def EXISTS(procfile, symbol):
global config
if os.path.exists(procfile):
config.found(symbol, None, "EXISTS")
config.yak(symbol)
else:
config.found(symbol, n, "EXISTS")
def MODULE(name, symbol):
global modules, config
if modules.find(r"\b" + name + r"\b"):
config.found(symbol, None, "MODULES")
config.yak(symbol)
def GREP(pattern, file, symbol):
global config
try:
fp = open(file)
except IOError:
return
if re.compile(pattern).search(fp.read()):
config.found(symbol, None, "GREP")
config.yak(symbol)
fp.close()
def LINKTO(file, pattern, symbol):
global config
if not os.path.exists(file):
return
file = os.readlink(file)
if re.compile(pattern).search(file):
config.found(symbol, None, "LINKTO")
config.yak(symbol)
# Use this to avoid conflicts
def PRIORITY(symbols, cnf=configuration):
global config
legend = "PRIORITY" + `symbols`
dict = cnf.dictionary
symbols = map(lambda x, d=dict: d[x], symbols)
for i in range(len(symbols) - 1):
if cml.evaluate(symbols[i]):
for j in range(i+1, len(symbols)):
cnf.set_symbol(symbols[j], n)
symbols[j].setprop(legend)
break
########################################################################
##
## Section Command Version Status
## ------------------------------------------------------------------
## /proc features EXISTS 2.5.2-pre7 Partial
########################################################################
## Section: System Features
## KernelOutput: /proc/*, /dev/*
## Detect system features based on existence of /proc and /dev/* files
DEBUG("autoconfigure.rules: EXISTS")
## These tests are unreliable; they depend on the current kernel config.
EXISTS("/proc/sysvipc", 'SYSVIPC')
EXISTS("/proc/sys", 'SYSCTL')
EXISTS("/proc/scsi/ide-scsi", 'BLK_DEV_IDESCSI')
EXISTS("/proc/scsi/imm", 'SCSI_IMM')
EXISTS("/proc/scsi/ppa", 'SCSI_PPA')
EXISTS("/dev/.devfsd", 'DEVFS_FS')
# Giacomo does not have these yet.
EXISTS("/proc/sys/net/khttpd", 'KHTTPD')
EXISTS("/proc/sys/kernel/acct", 'BSD_PROCESS_ACCT')
# This one is reliable, according to the MCA port documentation.
EXISTS("/proc/mca", 'MCA')
# This one is reliable too
EXISTS("/proc/bus/isapnp/devices", 'ISAPNP')
# Test the new probe function.
GREP("scsi0", "/proc/scsi/scsi", 'SCSI')
# These can be bogus because the file or directory in question
# is empty, or consists of a banner string that does not describe
# an actual device. We need to do more analysis here.
# EXISTS("/proc/bus/pci", 'PCI')
# EXISTS("/proc/bus/usb", 'USB')
# EXISTS("/proc/net", 'NET')
# EXISTS("/proc/scsi", 'SCSI')
# These look tempting, but they're no good unless we're on a pure
# devfs system, without support for old devices, where devices
# only exist when they're needed.
# EXISTS("/dev/agpgart", 'AGP')
# EXISTS("/dev/floppy", 'BLK_DEV_FD')
# EXISTS("/dev/fd0", 'BLK_DEV_FD')
########################################################################
## Section: Mice
## Detect the mouse type by looking at what's behind the /dev/mouse link.
## These are probes for 2.4 with the old input core
LINKTO("/dev/mouse", "psaux", 'PSMOUSE')
LINKTO("/dev/mouse", "ttyS", 'SERIAL')
LINKTO("/dev/mouse", "logibm", 'LOGIBUSMOUSE')
LINKTO("/dev/mouse", "inportbm", 'MS_BUSMOUSE')
LINKTO("/dev/mouse", "atibm", 'ATIXL_BUSMOUSE')
## These are probes for 2.5 with the new input core
LINKTO("/dev/mouse", "psaux", 'MOUSE_PS2')
LINKTO("/dev/mouse", "ttyS", 'MOUSE_SERIAL')
LINKTO("/dev/mouse", "logibm", 'MOUSE_LOGIBM')
LINKTO("/dev/mouse", "inportbm", 'MOUSE_INPORT')
LINKTO("/dev/mouse", "atibm", 'MOUSE_ATIXL')
########################################################################
## Section: IDE devices
## KernelOutput: /proc/ide/hd?/media
## Detect IDE devices based on contents of /proc files
## These tests are unreliable; they depend on the current kernel config.
IDE('disk', 'BLK_DEV_IDEDISK')
IDE('cdrom', 'BLK_DEV_IDECD')
IDE('tape', 'BLK_DEV_IDETAPE')
IDE('floppy', 'BLK_DEV_FLOPPY')
EXISTS("/dev/ide/ide0", 'BLK_DEV_IDE')
EXISTS("/dev/ide/ide1", 'BLK_DEV_IDE')
EXISTS('/proc/ide/piix', 'PIIX_TUNING')
########################################################################
# Miscellaneous tests that replace Giacomo's ad-hoc ones.
DEV('pty', 'UNIX98_PTYS')
REQ('SMBus', 'I2C')
REQ('ATI.*Mach64', 'FB_ATY')
#FS(r'xfs', 'XFS_FS')
########################################################################
# This is a near complete set of MCA probes for hardware supported under
# Linux, according to MCA maintainer David Weinehall. The exception is
# the IBMTR card, which cannot be probed reliably.
if config.enabled("MCA"):
MCA("ddff", 'BLK_DEV_PS2')
MCA("df9f", 'BLK_DEV_PS2')
MCA("628b", 'EEXPRESS')
MCA("627[cd]", 'EL3')
MCA("62db", 'EL3')
MCA("62f6", 'EL3')
MCA("62f7", 'EL3')
MCA("6042", 'ELMC')
MCA("0041", 'ELMC_II')
MCA("8ef5", 'ELMC_II')
MCA("61c[89]", 'ULTRAMCA')
MCA("6fc[012]", 'ULTRAMCA')
MCA("efd[45]", 'ULTRAMCA')
MCA("efe5", 'ULTRAMCA')
MCA("641[036]", 'AT1700')
MCA("6def", 'DEPCA')
MCA("6afd", 'SKMC')
MCA("6be9", 'SKMC')
MCA("6354", 'NE2_MCA')
MCA("7154", 'NE2_MCA')
MCA("56ea", 'NE2_MCA')
MCA("ffe0", 'IBMLANA')
MCA("8ef[8cdef]", 'SCSI_IBMMCA')
MCA("5137", 'SCSI_FD_MCS')
MCA("60e9", 'SCSI_FD_MCS')
MCA("6127", 'SCSI_FD_MCS')
MCA("0092", 'SCSI_NCR_D700')
MCA("7f4c", 'SCSI_MCA_53C9X')
MCA("0f1f", 'SCSI_AHA_1542')
MCA("002d", 'MADGEMC')
MCA("6ec6", 'SMCTR')
MCA("62f3", 'SOUND_SB')
MCA("7113", 'SOUND_SB')
########################################################################
## This requires Paul Gortmaker's EISA ID patch.
REQ("EISA", "EISA") # Someday, IOPORTS()
########################################################################
## The rest of the table is read in from Giacomo Catenazzi's rulesfile.
execfile(rulesfile)
# If it has a reliable test, but was not found by any test, switch it off.
# We do things in this order to avoid losing on symbols that are only set
# to n by PNP and PCI tests.
baton = cml.Baton(lang["TURNOFF"])
for symbol in configuration.dictionary.values():
baton.twirl()
if symbol.is_symbol() and configuration.saveable(symbol) \
and reliable.has_key(symbol.name) and not cml.evaluate(symbol):
config.found(symbol.name, n, reliable[symbol.name])
baton.end()
########################################################################
## Resolve conflicts.
PRIORITY(("SCSI_SYM53C8XX_2", "SCSI_SYM53C8XX", \
"SCSI_NCR53C8XX", "SCSI_GENERIC_NCR5380"))
PRIORITY(("DE2104X", "TULIP"))
## End of probe logic.
##
########################################################################
# More tests that don't fit the rulesfile format
# Filesystem, bus, and controller for root cannot be modules.
sys.stderr.write(lang["ROOTLOOK"])
fstab_to_bus_map = {
r"^/dev/sd" : ("SCSI",),
r"^/dev/hd" : ("IDE",),
r"\bnfs\b" : ("NFS_FS", "NFS_ROOT", "NET"),
}
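# Illustrative sketch (not part of the original probe logic): for a typical
# /etc/mtab line such as
#
#     /dev/sda1 / ext3 rw 0 0
#
# the r"^/dev/sd" pattern above selects the SCSI bus symbols, and the fsmap
# table consulted below would add the matching filesystem symbol, so the
# root-device loop that follows forces both to y.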
ifp = open("/etc/mtab", "r")
while 1:
line = ifp.readline()
if not line:
break
fields = line.split()
mountpoint = fields[1]
fstype = fields[2]
if mountpoint == "/":
# Figure out the drive type of the root partition.
rootsymbols = []
for (pattern, symbols) in fstab_to_bus_map.items():
if re.compile(pattern).search(line):
rootsymbols = list(symbols)
if fsmap.has_key(fstype):
rootsymbols.append(fsmap[fstype])
if not rootsymbols:
sys.stderr.write(lang["ROOTWARN"])
break
# We should have a list of `buses' now...
for roottype in rootsymbols:
# First we have to force the bus the drive is on to y.
config.found(roottype, y, "Root filesystem")
sys.stderr.write(lang["ROOTFS"] % roottype)
# Then force all bootable hardware previously set modular and
# dependent on this bus to y.
bus = configuration.dictionary[roottype]
for symbol in configuration.dictionary.values():
if cml.evaluate(symbol) == m \
and symbol.hasprop("BOOTABLE") \
and bus.ancestor_of(symbol):
config.found(symbol.name, y, "Root filesystem")
sys.stderr.write(lang["ROOTHW"] % symbol.name)
ifp.close()
# PTY devices
ptycount = dmesg.find('pty: ([0-9]*) Unix98 ptys')
if ptycount:
config.found("UNIX98_PTY_COUNT", int(ptycount))
# Helper functions.
def grepcmd(pattern, cmd):
"Test for PATTERN in the output of COMMAND."
(status, output) = commands.getstatusoutput(cmd)
return status == 0 and re.compile(pattern).search(output)
# Apply those sanity checks
# Handle a subtle gotcha: if there are multiple NICs, they must be modular.
if grepcmd("eth[1-3]", "/sbin/ifconfig -a"):
config.force_dependents_modular("NET_ETHERNET",
"Multiple NICs must be modular")
# Now freeze complement sets. With any luck, this will reduce the
# set of drivers the user actually has to specify to zero.
#
# Giacomo writes:
# "BTW I have done some test with USB, and it seems that you can
# hotplug USB devices, also with hardcored drivers, and the driver
# is initialized only at the hotplug event.
# (This mean that USB devices can be set also to 'y', without
# losing functionality.
# This is not true for other 'hotplug' devices. I.e. my
# parport ZIP will be loaded only at boot time (hardcoded) or
# at modules loading (module)."
#
# So far I have not done anything about this.
if not hardcompile:
b = cml.Baton(lang["COMPLEMENT"])
config.complement("HOTPLUG_PCI",cml.m, b, "PCI_HOTPLUG is a hot-plug bus")
config.complement("USB", cml.m, b, "USB is a hot-plug bus")
config.complement("PCMCIA", cml.m, b, "PCMCIA is a hot-plug bus")
config.complement("IEEE1394", cml.m, b, "IEEE1394 ia a hot-plug bus")
b.end(lang["DONE"])
DEBUG(lang["COMPLETE"])
def process_define(myconfiguration, val, freeze):
"Process a -d=xxx or -D=xxx option."
parts = val.split("=")
sym = parts[0]
if myconfiguration.dictionary.has_key(sym):
sym = myconfiguration.dictionary[sym]
else:
myconfiguration.errout.write(lang["SYMUNKNOWN"] % (`sym`,))
sys.exit(1)
if sym.is_derived():
myconfiguration.debug_emit(1, lang["DERIVED"] % (`sym`,))
sys.exit(1)
elif sym.is_logical():
if len(parts) == 1:
val = 'y'
elif parts[1] == 'y':
val = 'y'
elif parts[1] == 'm':
myconfiguration.trits_enabled = 1
val = 'm'
elif parts[1] == 'n':
val = 'n'
elif len(parts) == 1:
print lang["NOCMDLINE"] % (`sym`,)
sys.exit(1)
else:
val = parts[1]
(ok, effects, violation) = myconfiguration.set_symbol(sym,
myconfiguration.value_from_string(sym, val),
freeze)
if effects:
sys.stderr.write(lang["EFFECTS"] + "\n")
sys.stderr.write("\n".join(effects) + "\n\n")
if not ok:
sys.stderr.write((lang["ROLLBACK"] % (sym.name, val)) + "\n")
sys.stderr.write("\n".join(violation)+"\n")
if __name__ == "__main__":
# Process command-line options
try:
(options, arguments) = getopt.getopt(sys.argv[1:], "d:D:hr:st:v",
("hardcompile",
"rules=",
"standalone",
"target=",
"verbose"))
except getopt.GetoptError:
sys.stderr.write(lang["OPTUNKNOWN"])
raise SystemExit, 2
autoprobe_debug = hardcompile = standalone = 0
objtree = os.environ.get("KBUILD_OBJTREE")
rulesfile = "autoconfigure.rules"
freeze_em = []
set_em = []
for (opt, val) in options:
if opt == '-D':
freeze_em.append(val)
elif opt == '-d':
set_em.append(val)
elif opt in ("-v", "--verbose"):
autoprobe_debug += 1
elif opt in ("--hardcompile", "-h"):
hardcompile = 1
elif opt in ("--rules", "-r"):
rulesfile = val
elif opt in ("--standalone", "-s"):
standalone = 1
elif opt in ("--target", "-t"):
objtree = os.path.expanduser(val)
if objtree == None:
objtree = "."
#
# Now use the rulebase information
#
rulebase = os.path.join(objtree, "rules.out")
if not os.path.exists(rulebase):
sys.stderr.write("autoconfigure: rulebase %s does not exist!\n" % rulebase)
raise SystemExit, 1
configuration = cmlsystem.CMLSystem(rulebase)
if not configuration:
sys.stderr.write("autoconfigure: rulebase %s could not be read!\n" % rulebase)
raise SystemExit, 1
# Autoconfigure into the configuration object.
for sym in freeze_em:
process_define(configuration, sym, 1)
for sym in set_em:
process_define(configuration, sym, 0)
autoconfigure(configuration, hardcompile, autoprobe_debug)
# Write out this configuration, we're done.
if standalone:
configuration.save(sys.stdout, None, "normal")
else:
configuration.save(sys.stdout, None, "probe")
# End
| gpl-3.0 |
demiangomez/Parallel.GAMIT | classes/pyBrdc.py | 1 | 1517 | """
Project: Parallel.Archive
Date: 02/16/2017
Author: Demian D. Gomez
This class fetches broadcast orbits from the brdc folder (specified in the gnss_data.cfg file and passed in as the brdc_archive argument)
"""
import os
import pyProducts
class pyBrdcException(pyProducts.pyProductsException):
pass
class GetBrdcOrbits(pyProducts.OrbitalProduct):
def __init__(self, brdc_archive, date, copyto, no_cleanup=False):
self.brdc_archive = brdc_archive
self.brdc_path = None
self.no_cleanup = no_cleanup
# try both zipped and unzipped n files
self.brdc_filename = 'brdc' + str(date.doy).zfill(3) + '0.' + str(date.year)[2:4] + 'n'
try:
pyProducts.OrbitalProduct.__init__(self, self.brdc_archive, date, self.brdc_filename, copyto)
self.brdc_path = self.file_path
except pyProducts.pyProductsExceptionUnreasonableDate:
raise
except pyProducts.pyProductsException:
raise pyBrdcException(
'Could not find the broadcast ephemeris file for ' + str(date.year) + ' ' + str(date.doy))
return
def cleanup(self):
if self.brdc_path and not self.no_cleanup:
# delete files
if os.path.isfile(self.brdc_path):
os.remove(self.brdc_path)
return
def __del__(self):
self.cleanup()
return
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
def __enter__(self):
return self
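# Illustrative usage sketch (not part of the original module). It assumes a
# pyDate-like object exposing .doy and .year, as used by the class above, and
# hypothetical archive/destination paths:
#
#   brdc = GetBrdcOrbits('/archive/brdc', date, '/tmp/orbits')
#   # for doy 45 of 2017 the filename built above is 'brdc0450.17n'
#   print(brdc.brdc_path)
#   brdc.cleanup()
#
# The context-manager form relies on the __enter__/__exit__ methods above:
#
#   with GetBrdcOrbits('/archive/brdc', date, '/tmp/orbits') as brdc:
#       pass  # use brdc.brdc_path here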
| gpl-3.0 |
agnivade/three.js | utils/exporters/blender/addons/io_three/exporter/api/__init__.py | 174 | 1502 | import os
import bpy
from . import object as object_, mesh, material, camera, light
from .. import logger
def active_object():
"""
:return: The actively selected object
"""
return bpy.context.scene.objects.active
def batch_mode():
"""
:return: Whether or not the session is interactive
:rtype: bool
"""
return bpy.context.area is None
def data(node):
"""
:param node: name of an object node
:returns: the data block of the node
"""
try:
return bpy.data.objects[node].data
except KeyError:
pass
def init():
"""Initializing the api module. Required first step before
initializing the actual export process.
"""
logger.debug("Initializing API")
object_.clear_mesh_map()
def selected_objects(valid_types=None):
"""Selected objects.
:param valid_types: Filter for valid types (Default value = None)
"""
logger.debug("api.selected_objects(%s)", valid_types)
for node in bpy.context.selected_objects:
if valid_types is None:
yield node.name
elif valid_types is not None and node.type in valid_types:
yield node.name
def set_active_object(obj):
"""Set the object as active in the scene
:param obj:
"""
logger.debug("api.set_active_object(%s)", obj)
bpy.context.scene.objects.active = obj
def scene_name():
"""
:return: name of the current scene
"""
return os.path.basename(bpy.data.filepath)
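# Illustrative sketch (not part of the exporter): iterating the selected mesh
# nodes and making each one active in turn. 'MESH' is a standard Blender
# object type string; the names yielded come from selected_objects() above.
#
#   init()
#   for name in selected_objects(valid_types=('MESH',)):
#       set_active_object(bpy.data.objects[name])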
| mit |
SUSE/azure-sdk-for-python | azure-mgmt-commerce/azure/mgmt/commerce/usage_management_client.py | 3 | 3820 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.usage_aggregates_operations import UsageAggregatesOperations
from .operations.rate_card_operations import RateCardOperations
from . import models
class UsageManagementClientConfiguration(AzureConfiguration):
"""Configuration for UsageManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: It uniquely identifies Microsoft Azure
subscription. The subscription ID forms part of the URI for every service
call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(UsageManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('usagemanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class UsageManagementClient(object):
"""UsageManagementClient
:ivar config: Configuration for client.
:vartype config: UsageManagementClientConfiguration
:ivar usage_aggregates: UsageAggregates operations
:vartype usage_aggregates: azure.mgmt.commerce.operations.UsageAggregatesOperations
:ivar rate_card: RateCard operations
:vartype rate_card: azure.mgmt.commerce.operations.RateCardOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: It uniquely identifies Microsoft Azure
subscription. The subscription ID forms part of the URI for every service
call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = UsageManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2015-06-01-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.usage_aggregates = UsageAggregatesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.rate_card = RateCardOperations(
self._client, self.config, self._serialize, self._deserialize)
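# Illustrative construction sketch (not part of the generated code). The
# credentials object would come from the usual msrestazure authentication
# helpers, and the subscription id below is a placeholder:
#
#   client = UsageManagementClient(credentials, '00000000-0000-0000-0000-000000000000')
#   # the operation groups wired up above are then available as attributes:
#   #   client.usage_aggregates, client.rate_card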
| mit |
varunnaganathan/django | django/contrib/gis/db/models/lookups.py | 46 | 11511 | from __future__ import unicode_literals
import re
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup
from django.utils import six
gis_lookups = {}
class GISLookup(Lookup):
sql_template = None
transform_func = None
distance = False
def __init__(self, *args, **kwargs):
super(GISLookup, self).__init__(*args, **kwargs)
self.template_params = {}
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
'point', 'the_geom', or a related lookup on a geographic field like
'address__point'.
If a GeometryField exists according to the given lookup on the model
options, it will be returned. Otherwise returns None.
"""
from django.contrib.gis.db.models.fields import GeometryField
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.remote_field.model._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
def get_db_prep_lookup(self, value, connection):
# get_db_prep_lookup is called by process_rhs from super class
if isinstance(value, (tuple, list)):
# First param is assumed to be the geometric object
params = [connection.ops.Adapter(value[0])] + list(value)[1:]
else:
params = [connection.ops.Adapter(value)]
return ('%s', params)
def process_rhs(self, compiler, connection):
rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
if hasattr(self.rhs, '_as_sql'):
# If rhs is some QuerySet, don't touch it
return rhs, rhs_params
geom = self.rhs
if isinstance(self.rhs, Col):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = self.rhs.output_field
if not hasattr(geo_fld, 'srid'):
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
elif isinstance(self.rhs, Expression):
raise ValueError('Complex expressions not supported for GeometryField')
elif isinstance(self.rhs, (list, tuple)):
geom = self.rhs[0]
rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
return rhs, rhs_params
def get_rhs_op(self, connection, rhs):
# Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return
# an object (SpatialOperator) with an as_sql() method to allow for more
# complex computations (where the lhs part can be mixed in).
return connection.ops.gis_operators[self.lookup_name]
def as_sql(self, compiler, connection):
lhs_sql, sql_params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
sql_params.extend(rhs_params)
template_params = {'lhs': lhs_sql, 'rhs': rhs_sql, 'value': '%s'}
template_params.update(self.template_params)
rhs_op = self.get_rhs_op(connection, rhs_sql)
return rhs_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
class OverlapsLeftLookup(GISLookup):
"""
The overlaps_left operator returns true if A's bounding box overlaps or is to the
left of B's bounding box.
"""
lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup
class OverlapsRightLookup(GISLookup):
"""
The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
right of B's bounding box.
"""
lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup
class OverlapsBelowLookup(GISLookup):
"""
The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
B's bounding box.
"""
lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup
class OverlapsAboveLookup(GISLookup):
"""
The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
B's bounding box.
"""
lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup
class LeftLookup(GISLookup):
"""
The 'left' operator returns true if A's bounding box is strictly to the left
of B's bounding box.
"""
lookup_name = 'left'
gis_lookups['left'] = LeftLookup
class RightLookup(GISLookup):
"""
The 'right' operator returns true if A's bounding box is strictly to the right
of B's bounding box.
"""
lookup_name = 'right'
gis_lookups['right'] = RightLookup
class StrictlyBelowLookup(GISLookup):
"""
The 'strictly_below' operator returns true if A's bounding box is strictly below B's
bounding box.
"""
lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup
class StrictlyAboveLookup(GISLookup):
"""
The 'strictly_above' operator returns true if A's bounding box is strictly above B's
bounding box.
"""
lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
"""
The "~=" operator is the "same as" operator. It tests actual geometric
equality of two features. So if A and B are the same feature,
vertex-by-vertex, the operator returns true.
"""
lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup
class ExactLookup(SameAsLookup):
# Alias of same_as
lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup
class BBContainsLookup(GISLookup):
"""
The 'bbcontains' operator returns true if A's bounding box completely contains
B's bounding box.
"""
lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup
class BBOverlapsLookup(GISLookup):
"""
The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
"""
lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup
class ContainedLookup(GISLookup):
"""
The 'contained' operator returns true if A's bounding box is completely contained
by B's bounding box.
"""
lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
class ContainsLookup(GISLookup):
lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup
class ContainsProperlyLookup(GISLookup):
lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup
class CoveredByLookup(GISLookup):
lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup
class CoversLookup(GISLookup):
lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup
class CrossesLookup(GISLookup):
lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup
class DisjointLookup(GISLookup):
lookup_name = 'disjoint'
gis_lookups['disjoint'] = DisjointLookup
class EqualsLookup(GISLookup):
lookup_name = 'equals'
gis_lookups['equals'] = EqualsLookup
class IntersectsLookup(GISLookup):
lookup_name = 'intersects'
gis_lookups['intersects'] = IntersectsLookup
class OverlapsLookup(GISLookup):
lookup_name = 'overlaps'
gis_lookups['overlaps'] = OverlapsLookup
class RelateLookup(GISLookup):
lookup_name = 'relate'
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def get_db_prep_lookup(self, value, connection):
if len(value) != 2:
raise ValueError('relate must be passed a two-tuple')
# Check the pattern argument
backend_op = connection.ops.gis_operators[self.lookup_name]
if hasattr(backend_op, 'check_relate_argument'):
backend_op.check_relate_argument(value[1])
else:
pattern = value[1]
if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup
class TouchesLookup(GISLookup):
lookup_name = 'touches'
gis_lookups['touches'] = TouchesLookup
class WithinLookup(GISLookup):
lookup_name = 'within'
gis_lookups['within'] = WithinLookup
class DistanceLookupBase(GISLookup):
distance = True
sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'
def process_rhs(self, compiler, connection):
if not isinstance(self.rhs, (tuple, list)) or not 2 <= len(self.rhs) <= 3:
raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
params = [connection.ops.Adapter(self.rhs[0])]
# Getting the distance parameter in the units of the field.
dist_param = self.rhs[1]
if hasattr(dist_param, 'resolve_expression'):
dist_param = dist_param.resolve_expression(compiler.query)
sql, expr_params = compiler.compile(dist_param)
self.template_params['value'] = sql
params.extend(expr_params)
else:
params += connection.ops.get_distance(
self.lhs.output_field, (dist_param,) + self.rhs[2:],
self.lookup_name, handle_spheroid=False
)
rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, params[0], compiler)
return (rhs, params)
class DWithinLookup(DistanceLookupBase):
lookup_name = 'dwithin'
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups['dwithin'] = DWithinLookup
class DistanceGTLookup(DistanceLookupBase):
lookup_name = 'distance_gt'
gis_lookups['distance_gt'] = DistanceGTLookup
class DistanceGTELookup(DistanceLookupBase):
lookup_name = 'distance_gte'
gis_lookups['distance_gte'] = DistanceGTELookup
class DistanceLTLookup(DistanceLookupBase):
lookup_name = 'distance_lt'
gis_lookups['distance_lt'] = DistanceLTLookup
class DistanceLTELookup(DistanceLookupBase):
lookup_name = 'distance_lte'
gis_lookups['distance_lte'] = DistanceLTELookup
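# Illustrative ORM usage sketch (not part of this module); it assumes a model
# `City` with a PointField named `point`, a GEOS geometry `poly`, and
# django.contrib.gis.measure.D for distances:
#
#   City.objects.filter(point__within=poly)
#   City.objects.filter(point__distance_lte=(poly, D(km=5)))
#
# The name after '__' is resolved through the gis_lookups registry populated
# above (WithinLookup and DistanceLTELookup respectively).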
| bsd-3-clause |
jeroenj/CouchPotatoServer | libs/tornado/platform/caresresolver.py | 90 | 3016 | from __future__ import absolute_import, division, print_function, with_statement
import pycares
import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip
class CaresResolver(Resolver):
"""Name resolver based on the c-ares library.
This is a non-blocking and non-threaded resolver. It may not produce
the same results as the system resolver, but can be used for non-blocking
resolution when threads cannot be used.
c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {}
def _sock_state_cb(self, fd, readable, writable):
state = ((IOLoop.READ if readable else 0) |
(IOLoop.WRITE if writable else 0))
if not state:
self.io_loop.remove_handler(fd)
del self.fds[fd]
elif fd in self.fds:
self.io_loop.update_handler(fd, state)
self.fds[fd] = state
else:
self.io_loop.add_handler(fd, self._handle_events, state)
self.fds[fd] = state
def _handle_events(self, fd, events):
read_fd = pycares.ARES_SOCKET_BAD
write_fd = pycares.ARES_SOCKET_BAD
if events & IOLoop.READ:
read_fd = fd
if events & IOLoop.WRITE:
write_fd = fd
self.channel.process_fd(read_fd, write_fd)
@gen.coroutine
def resolve(self, host, port, family=0):
if is_valid_ip(host):
addresses = [host]
else:
# gethostbyname doesn't take callback as a kwarg
self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
callback_args = yield gen.Wait(1)
assert isinstance(callback_args, gen.Arguments)
assert not callback_args.kwargs
result, error = callback_args.args
if error:
raise Exception('C-Ares returned error %s: %s while resolving %s' %
(error, pycares.errno.strerror(error), host))
addresses = result.addresses
addrinfo = []
for address in addresses:
if '.' in address:
address_family = socket.AF_INET
elif ':' in address:
address_family = socket.AF_INET6
else:
address_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != address_family:
raise Exception('Requested socket family %d but got %d' %
(family, address_family))
addrinfo.append((address_family, (address, port)))
raise gen.Return(addrinfo)
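# Illustrative configuration sketch (not part of this module): Resolver is a
# configurable interface, so the c-ares implementation can be selected
# globally and then used from a coroutine:
#
#   from tornado.netutil import Resolver
#   Resolver.configure('tornado.platform.caresresolver.CaresResolver')
#
#   @gen.coroutine
#   def lookup():
#       addrinfo = yield Resolver().resolve('example.com', 80, socket.AF_INET)
#       raise gen.Return(addrinfo)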
| gpl-3.0 |
lehins/python-wepay | wepay/tests/test_batch.py | 1 | 1684 | from wepay.tests import CallBaseTestCase
class BatchTestCase(CallBaseTestCase):
def test_batch_find(self):
args = [
('client_id', 67890),
('client_secret', 'secret_6789'),
('calls', [
{
'call': '/preapproval',
'authorization': 'access_token_call_0',
'reference_id': 'reference_id_call_0',
'parameters': {
'preapproval_id': 12345
}
},
{
'call': '/preapproval/find',
'authorization': 'access_token_call_1',
'reference_id': 'reference_id_call_1',
'parameters': {
'account_id': 54321,
'state': 'expired',
'reference_id': 'ref_preapproval_123',
'start': 10,
'limit': 17,
'last_checkout_id': 221651,
'sort_order': 'ASC',
'shipping_fee': 34.05
}
}
])
]
call_0 = args[2][1][0]
self.api.preapproval(
call_0['parameters']['preapproval_id'], batch_mode=True,
batch_reference_id=call_0['reference_id'], access_token=call_0['authorization'])
call_1 = args[2][1][1]
self.api.preapproval.find(
batch_mode=True, batch_reference_id=call_1['reference_id'],
access_token=call_1['authorization'], **call_1['parameters'])
self._test_call('/batch/create', args, {})
| mit |
wdzhou/mantid | Testing/SystemTests/tests/analysis/SANS2DMultiPeriodAddFiles.py | 3 | 3134 | #pylint: disable=no-init
from __future__ import (absolute_import, division, print_function)
import stresstesting
from mantid.simpleapi import *
from mantid import config
from ISISCommandInterface import *
class SANS2DMultiPeriodAddFiles(stresstesting.MantidStressTest):
def requiredMemoryMB(self):
"""Requires 2.5Gb"""
return 2500
def runTest(self):
pass
SANS2D()
Set1D()
Detector("rear-detector")
MaskFile('MASKSANS2Doptions.091A')
Gravity(True)
add_runs( ('5512', '5512') ,'SANS2D', 'nxs', lowMem=True)
#one period of a multi-period Nexus file
AssignSample('5512-add.nxs', period=7)
WavRangeReduction(2, 4, DefaultTrans)
paths = [os.path.join(config['defaultsave.directory'],'SANS2D00005512-add.nxs'),
os.path.join(config['defaultsave.directory'],'SANS2D00005512.log')]
for path in paths:
if os.path.exists(path):
os.remove(path)
def validate(self):
# Need to disable checking of the Spectra-Detector map because it isn't
# fully saved out to the nexus file (it's limited to the spectra that
# are actually present in the saved workspace).
self.disableChecking.append('SpectraMap')
self.disableChecking.append('Instrument')
self.disableChecking.append('Axes')
return '5512p7rear_1D_2.0_4.0Phi-45.0_45.0','SANS2DMultiPeriodAddFiles.nxs'
class LARMORMultiPeriodAddEventFiles(stresstesting.MantidStressTest):
def requiredMemoryMB(self):
"""Requires 2.5Gb"""
return 2500
def runTest(self):
LARMOR()
Set1D()
Detector("DetectorBench")
MaskFile('USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt')
Gravity(True)
add_runs( ('13065', '13065') ,'LARMOR', 'nxs', lowMem=True)
AssignSample('13065-add.nxs')
WavRangeReduction(2, 4, DefaultTrans)
# Clean up
to_clean = ["13065_sans_nxs",
"13065p1rear_1D_2.0_4.0_incident_monitor",
"13065p2rear_1D_2.0_4.0_incident_monitor",
"13065p3rear_1D_2.0_4.0_incident_monitor",
"13065p4rear_1D_2.0_4.0_incident_monitor",
"80tubeCalibration_1-05-2015_r3157-3160"]
for workspace in to_clean:
DeleteWorkspace(workspace)
paths = [os.path.join(config['defaultsave.directory'],'LARMOR00013065-add.nxs'),
os.path.join(config['defaultsave.directory'],'SANS2D00013065.log')] # noqa
for path in paths:
if os.path.exists(path):
os.remove(path)
def validate(self):
# Need to disable checking of the Spectra-Detector map because it isn't
# fully saved out to the nexus file (it's limited to the spectra that
# are actually present in the saved workspace).
self.disableChecking.append('SpectraMap')
self.disableChecking.append('Instrument')
self.disableChecking.append('Axes')
return "13065p1rear_1D_2.0_4.0" , "LARMORMultiPeriodAddEventFiles.nxs"
| gpl-3.0 |
ejpbruel/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py | 652 | 6143 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions and exceptions used by WebSocket opening handshake
processors.
"""
from mod_pywebsocket import common
from mod_pywebsocket import http_header_util
class AbortedByUserException(Exception):
"""Exception for aborting a connection intentionally.
If this exception is raised in do_extra_handshake handler, the connection
will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
If this exception is raised in transfer_data_handler, the connection will
be closed without closing handshake. No other WebSocket or HTTP(S) handler
will be invoked.
"""
pass
class HandshakeException(Exception):
"""This exception will be raised when an error occurred while processing
WebSocket initial handshake.
"""
def __init__(self, name, status=None):
super(HandshakeException, self).__init__(name)
self.status = status
class VersionException(Exception):
"""This exception will be raised when a version of client request does not
match with version the server supports.
"""
def __init__(self, name, supported_versions=''):
"""Construct an instance.
Args:
supported_version: a str object to show supported hybi versions.
(e.g. '8, 13')
"""
super(VersionException, self).__init__(name)
self.supported_versions = supported_versions
def get_default_port(is_secure):
if is_secure:
return common.DEFAULT_WEB_SOCKET_SECURE_PORT
else:
return common.DEFAULT_WEB_SOCKET_PORT
def validate_subprotocol(subprotocol):
"""Validate a value in the Sec-WebSocket-Protocol field.
See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
# Parameter should be encoded HTTP token.
state = http_header_util.ParsingState(subprotocol)
token = http_header_util.consume_token(state)
rest = http_header_util.peek(state)
# If |rest| is not None, |subprotocol| is not one token or invalid. If
# |rest| is None, |token| must not be None because |subprotocol| is
# concatenation of |token| and |rest| and is not None.
if rest is not None:
raise HandshakeException('Invalid non-token string in subprotocol '
'name: %r' % rest)
def parse_host_header(request):
fields = request.headers_in[common.HOST_HEADER].split(':', 1)
if len(fields) == 1:
return fields[0], get_default_port(request.is_https())
try:
return fields[0], int(fields[1])
except ValueError, e:
raise HandshakeException('Invalid port number format: %r' % e)
def format_header(name, value):
return '%s: %s\r\n' % (name, value)
def get_mandatory_header(request, key):
value = request.headers_in.get(key)
if value is None:
raise HandshakeException('Header %s is not defined' % key)
return value
def validate_mandatory_header(request, key, expected_value, fail_status=None):
value = get_mandatory_header(request, key)
if value.lower() != expected_value.lower():
raise HandshakeException(
'Expected %r for header %s but found %r (case-insensitive)' %
(expected_value, key, value), status=fail_status)
def check_request_line(request):
# 5.1 1. The three character UTF-8 string "GET".
# 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
if request.method != 'GET':
raise HandshakeException('Method is not GET: %r' % request.method)
if request.protocol != 'HTTP/1.1':
raise HandshakeException('Version is not HTTP/1.1: %r' %
request.protocol)
def parse_token_list(data):
"""Parses a header value which follows 1#token and returns parsed elements
as a list of strings.
Leading LWSes must be trimmed.
"""
state = http_header_util.ParsingState(data)
token_list = []
while True:
token = http_header_util.consume_token(state)
if token is not None:
token_list.append(token)
http_header_util.consume_lwses(state)
if http_header_util.peek(state) is None:
break
if not http_header_util.consume_string(state, ','):
raise HandshakeException(
'Expected a comma but found %r' % http_header_util.peek(state))
http_header_util.consume_lwses(state)
if len(token_list) == 0:
raise HandshakeException('No valid token found')
return token_list
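# Illustrative examples for parse_token_list (not part of the original module):
#
#   parse_token_list('chat, superchat')  # -> ['chat', 'superchat']
#   parse_token_list('')                 # raises HandshakeException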
# vi:sts=4 sw=4 et
| mpl-2.0 |
jpzk/evopy | evopy/external/playdoh/connection.py | 2 | 2971 | from debugtools import *
from userpref import *
from multiprocessing.connection import Listener, Client, AuthenticationError
import cPickle
import time
import socket
BUFSIZE = 1024 * 32
try:
LOCAL_IP = socket.gethostbyname(socket.gethostname())
except:
LOCAL_IP = '127.0.0.1'
__all__ = ['accept', 'connect', 'LOCAL_IP']
class Connection(object):
"""
Handles chunking and compression of data.
To minimise data transfers between machines, we can use data compression,
which this Connection handles automatically.
"""
def __init__(self, conn, chunked=False, compressed=False):
self.conn = conn
self.chunked = chunked
self.compressed = compressed
self.BUFSIZE = BUFSIZE
def send(self, obj):
s = cPickle.dumps(obj, -1)
self.conn.send(s)
def recv(self):
trials = 5
for i in xrange(trials):
try:
s = self.conn.recv()
break
except Exception as e:
log_warn("Connection error (%d/%d): %s" %
(i + 1, trials, str(e)))
time.sleep(.1 * 2 ** i)
if i == trials - 1:
return None
return cPickle.loads(s)
def close(self):
if self.conn is not None:
self.conn.close()
self.conn = None
def accept(address):
"""
Accept a connection and return a Connection object.
"""
while True:
try:
listener = Listener(address, authkey=USERPREF['authkey'])
conn = listener.accept()
break
except Exception:
log_warn("The authentication key is not correct")
listener.close()
del listener
time.sleep(.1)
client = listener.last_accepted
return Connection(conn), client[0]
def connect(address, trials=None):
"""
Connect to a server and return a Connection object.
"""
if trials is None:
trials = USERPREF['connectiontrials']
conn = None
t0 = time.time()
timeout = USERPREF['connectiontimeout']
for i in xrange(trials):
try:
conn = Client(address, authkey=USERPREF['authkey'])
break
except AuthenticationError as e:
log_warn("Authentication error: %s" % str(e))
break
except Exception as e:
if time.time() > t0 + timeout:
log_warn("Connection timed out, unable to connect to %s"\
% str(address))
break
log_debug("Connection error: %s, trying again... (%d/%d)" %
(str(e), i + 1, trials))
if i == trials - 1:
log_warn("Connection error: %s" % e)
time.sleep(.1 * 2 ** i)
if conn is None:
return None
return Connection(conn)
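# Illustrative round-trip sketch (not part of the module); the two halves run
# in different processes, and port 2718 is an arbitrary example:
#
#   # server side
#   conn, client_ip = accept((LOCAL_IP, 2718))
#   obj = conn.recv()
#   conn.close()
#
#   # client side
#   conn = connect((LOCAL_IP, 2718))
#   conn.send({'msg': 'hello'})
#   conn.close()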
| gpl-3.0 |
justinwp/pyparsecom | docs/conf.py | 1 | 8494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pyparsecom documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pyparsecom
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Parse.com Wrapper'
copyright = u'2015, Justin Poehnelt'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pyparsecom.__version__
# The full version, including alpha/beta/rc tags.
release = pyparsecom.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyparsecomdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pyparsecom.tex',
u'Python Parse.com Wrapper Documentation',
u'Justin Poehnelt', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyparsecom',
u'Python Parse.com Wrapper Documentation',
[u'Justin Poehnelt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyparsecom',
u'Python Parse.com Wrapper Documentation',
u'Justin Poehnelt',
'pyparsecom',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
sarthfrey/Texty | lib/tests/test_messages.py | 14 | 2006 | from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_after(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_before(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_create(self):
with patch.object(self.resource, 'create_instance') as mock:
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
mock.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
def test_delete(self):
with patch.object(self.resource, 'delete_instance') as mock:
self.resource.delete('MM123')
mock.assert_called_with('MM123')
def test_redact(self):
with patch.object(self.resource, 'update_instance') as mock:
self.resource.redact('MM123')
mock.assert_called_with('MM123', {'Body': ''})
| apache-2.0 |
romain-dartigues/ansible | test/units/plugins/inventory/test_host.py | 56 | 3412 | # Copyright 2015 Marius Gedminas <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# for __setstate__/__getstate__ tests
import pickle
from units.compat import unittest
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils.six import string_types
class TestHost(unittest.TestCase):
ansible_port = 22
def setUp(self):
self.hostA = Host('a')
self.hostB = Host('b')
def test_equality(self):
self.assertEqual(self.hostA, self.hostA)
self.assertNotEqual(self.hostA, self.hostB)
self.assertNotEqual(self.hostA, Host('a'))
def test_hashability(self):
# equality implies the hash values are the same
self.assertEqual(hash(self.hostA), hash(Host('a')))
def test_get_vars(self):
host_vars = self.hostA.get_vars()
self.assertIsInstance(host_vars, dict)
def test_repr(self):
host_repr = repr(self.hostA)
self.assertIsInstance(host_repr, string_types)
def test_add_group(self):
group = Group('some_group')
group_len = len(self.hostA.groups)
self.hostA.add_group(group)
self.assertEqual(len(self.hostA.groups), group_len + 1)
def test_get_groups(self):
group = Group('some_group')
self.hostA.add_group(group)
groups = self.hostA.get_groups()
self.assertEqual(len(groups), 1)
for _group in groups:
self.assertIsInstance(_group, Group)
def test_equals_none(self):
other = None
self.hostA == other
other == self.hostA
self.hostA != other
other != self.hostA
self.assertNotEqual(self.hostA, other)
def test_serialize(self):
group = Group('some_group')
self.hostA.add_group(group)
data = self.hostA.serialize()
self.assertIsInstance(data, dict)
def test_serialize_then_deserialize(self):
group = Group('some_group')
self.hostA.add_group(group)
hostA_data = self.hostA.serialize()
hostA_clone = Host()
hostA_clone.deserialize(hostA_data)
self.assertEquals(self.hostA, hostA_clone)
def test_set_state(self):
group = Group('some_group')
self.hostA.add_group(group)
pickled_hostA = pickle.dumps(self.hostA)
hostA_clone = pickle.loads(pickled_hostA)
self.assertEquals(self.hostA, hostA_clone)
class TestHostWithPort(TestHost):
ansible_port = 8822
def setUp(self):
self.hostA = Host(name='a', port=self.ansible_port)
self.hostB = Host(name='b', port=self.ansible_port)
def test_get_vars_ansible_port(self):
host_vars = self.hostA.get_vars()
self.assertEquals(host_vars['ansible_port'], self.ansible_port)
| gpl-3.0 |
amstrudy/nao-ncsu | create_test_maps.py | 1 | 3010 | # This Python script creates a bunch of test maps of varying number, size, and density.
# The output is to a .csv file.
import cv2
import numpy
import random
import time
import csv
import oop_a_star as aStar
MAP_SIZE_MIN = 3 # 3 x 3 is smallest map
MAP_SIZE_MAX = 100 # 10 x 10 is biggest map
NUM_TESTS = 5 # number of tests per map size
DENSITY = 0.1 # fraction of the map populated with obstacles
class Map:
def __init__(self, x, y, trial_num):
self.x = x
self.y = y
self.trial_num = trial_num
self.state = None
self.time = 0
self.array = [[None for a in range(y)] for b in range(x)]
self.path = None
def generate_map (i, trial_num): # return Map object
m = Map(i, i, trial_num) # create empty array i x i
for j in list(range(m.x)):
for k in list(range(m.y)):
m.array[j][k] = numpy.random.choice(numpy.arange(0, 2), p=[1-DENSITY, DENSITY]) # pick either 0 or 1 with certain distribution
return m
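# Illustrative check (not part of the original script): with DENSITY = 0.1 a
# 10 x 10 map is expected to contain roughly 10 obstacle cells.
#
#   m = generate_map(10, 0)
#   obstacle_count = sum(sum(row) for row in m.array)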
##### GENERATE MAPS #####
maps = [[None for y in range(NUM_TESTS)] for x in range(MAP_SIZE_MAX - MAP_SIZE_MIN + 1)] # make [MAP_SIZE_MAX - MAP_SIZE_MIN + 1][NUM_TESTS]
for i in range(len(maps)):
for m in range(len(maps[i])):
maps[i][m] = generate_map(MAP_SIZE_MIN + i, m)
##### RUN A STAR #####
count1 = 0
for i in range(len(maps)):
for j in range(len(maps[i])):
count1 += 1
found_spot = False
x = 0
y = 0
while found_spot == False:
x = random.randint(0, maps[i][j].x - 1)
y = random.randint(0, maps[i][j].y - 1)
if x != 1 and y != 1:
found_spot = True
start = time.time()
ret = aStar.a_star((0, 0), (x - 1, y - 1), maps[i][j].array)
maps[i][j].path = ret
end = time.time()
if maps[i][j].path == 0:
maps[i][j].state = "Fail"
else:
maps[i][j].state = "Success"
maps[i][j].time = end - start
print(maps[i][j].state)
print "Completed Test %d of %d." % (count1, NUM_TESTS * (MAP_SIZE_MAX - MAP_SIZE_MIN + 1))
##### FORMAT DATA #####
formatted_all = [["Trial Number", "X", "Y", "Time (s)", "State"]]
formatted_successes = [["Trial Number", "X", "Y", "Time (s)", "Time (ms)", "Time (s) Per Node"]]
for i in range(len(maps)):
for j in range(len(maps[0])):
formatted_all.append([maps[i][j].trial_num, maps[i][j].x, maps[i][j].y, maps[i][j].time, maps[i][j].state])
if maps[i][j].state == "Success" and maps[i][j].path is not None:
formatted_successes.append([maps[i][j].trial_num, maps[i][j].x, maps[i][j].y, maps[i][j].time, maps[i][j].time * 1000, (maps[i][j].time * 1000) / len(maps[i][j].path)])
##### PUT INTO CSV #####
myFile = open("All_Data.csv", "w")
with myFile:
writer = csv.writer(myFile)
writer.writerows(formatted_all)
myFile = open("Successes.csv", "w")
with myFile:
writer = csv.writer(myFile)
writer.writerows(formatted_successes)
print("Writing complete")
| mit |
sloanyang/aquantic | Tools/Scripts/webkitpy/tool/steps/preparechangelog.py | 124 | 5952 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import sys
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class PrepareChangeLog(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.quiet,
Options.email,
Options.git_commit,
Options.update_changelogs,
]
def _ensure_bug_url(self, state):
if not state.get("bug_id"):
return
bug_id = state.get("bug_id")
changelogs = self.cached_lookup(state, "changelogs")
for changelog_path in changelogs:
changelog = ChangeLog(changelog_path, self._tool.filesystem)
if not changelog.latest_entry().bug_id():
changelog.set_short_description_and_bug_url(
self.cached_lookup(state, "bug_title"),
self._tool.bugs.bug_url_for_bug_id(bug_id))
def _resolve_existing_entry(self, changelog_path):
# When this is called, the top entry in the ChangeLog was just created
        # by prepare-ChangeLog, as a clean, updated version of the one below it.
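        # Illustrative sketch of the layout this method expects (names, dates and
        # bug titles below are placeholders, not real entries):
        #
        #   2014-01-02  Jane Doe  <jane@example.org>      <-- new entry, just generated
        #           Example bug title
        #           https://bugs.webkit.org/show_bug.cgi?id=XXXXX
        #   2014-01-01  Jane Doe  <jane@example.org>      <-- previous entry to merge into it
        #           Example bug title
        #           https://bugs.webkit.org/show_bug.cgi?id=XXXXX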
with self._tool.filesystem.open_text_file_for_reading(changelog_path) as changelog_file:
entries_gen = ChangeLog.parse_entries_from_file(changelog_file)
entries = zip(entries_gen, range(2))
if not len(entries):
raise Exception("Expected to find at least two ChangeLog entries in %s but found none." % changelog_path)
if len(entries) == 1:
# If we get here, it probably means we've just rolled over to a
# new CL file, so we don't have anything to resolve.
return
(new_entry, _), (old_entry, _) = entries
final_entry = self._merge_entries(old_entry, new_entry)
changelog = ChangeLog(changelog_path, self._tool.filesystem)
changelog.delete_entries(2)
changelog.prepend_text(final_entry)
def _merge_entries(self, old_entry, new_entry):
final_entry = old_entry.contents()
final_entry = final_entry.replace(old_entry.date(), new_entry.date(), 1)
new_bug_desc = new_entry.bug_description()
old_bug_desc = old_entry.bug_description()
if new_bug_desc and old_bug_desc and new_bug_desc != old_bug_desc:
final_entry = final_entry.replace(old_bug_desc, new_bug_desc)
new_touched = new_entry.touched_functions()
old_touched = old_entry.touched_functions()
if new_touched != old_touched:
if old_entry.is_touched_files_text_clean():
final_entry = final_entry.replace(old_entry.touched_files_text(), new_entry.touched_files_text())
else:
final_entry += "\n" + new_entry.touched_files_text()
return final_entry + "\n"
def run(self, state):
if self.cached_lookup(state, "changelogs"):
self._ensure_bug_url(state)
if not self._options.update_changelogs:
return
args = self._tool.deprecated_port().prepare_changelog_command()
if state.get("bug_id"):
args.append("--bug=%s" % state["bug_id"])
args.append("--description=%s" % self.cached_lookup(state, 'bug_title'))
if self._options.email:
args.append("--email=%s" % self._options.email)
if self._tool.scm().supports_local_commits():
args.append("--merge-base=%s" % self._tool.scm().merge_base(self._options.git_commit))
args.extend(self._changed_files(state))
try:
output = self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.error("Unable to prepare ChangeLogs.")
sys.exit(1)
# These are the ChangeLog entries added by prepare-Changelog
changelogs = re.findall(r'Editing the (\S*/ChangeLog) file.', output)
changelogs = set(self._tool.filesystem.join(self._tool.scm().checkout_root, f) for f in changelogs)
for changelog in changelogs & set(self.cached_lookup(state, "changelogs")):
self._resolve_existing_entry(changelog)
self.did_modify_checkout(state)
| gpl-2.0 |
sebres/fail2ban | fail2ban/server/jails.py | 3 | 2837 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2013- Yaroslav Halchenko"
__license__ = "GPL"
from threading import Lock
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from ..exceptions import DuplicateJailException, UnknownJailException
from .jail import Jail
class Jails(Mapping):
"""Handles the jails.
This class handles the jails. Creation, deletion or access to a jail
must be done through this class. This class is thread-safe which is
not the case of the jail itself, including filter and actions. This
class is based on Mapping type, and the `add` method must be used to
add additional jails.
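	A minimal usage sketch (illustrative only; the jail name and backend
	value are placeholders rather than values required by Fail2Ban)::

		jails = Jails()
		jails.add("sshd", "auto")      # create and register a new jail
		if jails.exists("sshd"):
			jail = jails["sshd"]       # thread-safe Mapping-style access
		del jails["sshd"]              # raises UnknownJailException if absent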
"""
def __init__(self):
self.__lock = Lock()
self._jails = dict()
def add(self, name, backend, db=None):
"""Adds a jail.
Adds a new jail if not already present which should use the
given backend.
Parameters
----------
name : str
The name of the jail.
backend : str
The backend to use.
db : Fail2BanDb
Fail2Ban's persistent database instance.
Raises
------
DuplicateJailException
If jail name is already present.
"""
with self.__lock:
if name in self._jails:
				raise DuplicateJailException(name)
else:
self._jails[name] = Jail(name, backend, db)
def exists(self, name):
return name in self._jails
def __getitem__(self, name):
try:
self.__lock.acquire()
return self._jails[name]
except KeyError:
raise UnknownJailException(name)
finally:
self.__lock.release()
def __delitem__(self, name):
try:
self.__lock.acquire()
del self._jails[name]
except KeyError:
raise UnknownJailException(name)
finally:
self.__lock.release()
def __len__(self):
try:
self.__lock.acquire()
return len(self._jails)
finally:
self.__lock.release()
def __iter__(self):
try:
self.__lock.acquire()
return iter(self._jails)
finally:
self.__lock.release()
| gpl-2.0 |
mancoast/CPythonPyc_test | crash/161_test_userlist.py | 1 | 2789 | # Check every path through every method of UserList
from UserList import UserList
l0 = []
l1 = [0]
l2 = [0, 1]
# Test constructors
u = UserList()
u0 = UserList(l0)
u1 = UserList(l1)
u2 = UserList(l2)
uu = UserList(u)
uu0 = UserList(u0)
uu1 = UserList(u1)
uu2 = UserList(u2)
v = UserList(tuple(u))
class OtherList:
def __init__(self, initlist):
self.__data = initlist
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
v0 = UserList(OtherList(u0))
vv = UserList("this is also a sequence")
# Test __repr__
assert str(u0) == str(l0)
assert repr(u1) == repr(l1)
assert `u2` == `l2`
# Test __cmp__ and __len__
def mycmp(a, b):
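    # Collapse cmp()'s "negative / zero / positive" contract to exactly -1/0/1 so the
    # comparison of two lists can be checked against the comparison of their lengths below.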
r = cmp(a, b)
if r < 0: return -1
if r > 0: return 1
return r
all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
assert mycmp(a, b) == mycmp(len(a), len(b))
# Test __getitem__
for i in range(len(u2)):
assert u2[i] == i
# Test __setitem__
uu2[0] = 0
uu2[1] = 100
try:
uu2[2] = 200
except IndexError:
pass
else:
assert 0, "uu2[2] shouldn't be assignable"
# Test __delitem__
del uu2[1]
del uu2[0]
try:
del uu2[0]
except IndexError:
pass
else:
assert 0, "uu2[0] shouldn't be deletable"
# Test __getslice__
for i in range(-3, 4):
assert u2[:i] == l2[:i]
assert u2[i:] == l2[i:]
for j in range(-3, 4):
assert u2[i:j] == l2[i:j]
# Test __setslice__
for i in range(-3, 4):
u2[:i] = l2[:i]
assert u2 == l2
u2[i:] = l2[i:]
assert u2 == l2
for j in range(-3, 4):
u2[i:j] = l2[i:j]
assert u2 == l2
uu2 = u2[:]
uu2[:0] = [-2, -1]
assert uu2 == [-2, -1, 0, 1]
uu2[0:] = []
assert uu2 == []
# Test __delslice__
uu2 = u2[:]
del uu2[1:2]
del uu2[0:1]
assert uu2 == []
uu2 = u2[:]
del uu2[1:]
del uu2[:1]
assert uu2 == []
# Test __add__, __radd__, __mul__ and __rmul__
assert u1 + [] == [] + u1 == u1
assert u1 + [1] == u2
assert [-1] + u1 == [-1, 0]
assert u2 == u2*1 == 1*u2
assert u2+u2 == u2*2 == 2*u2
assert u2+u2+u2 == u2*3 == 3*u2
# Test append
u = u1[:]
u.append(1)
assert u == u2
# Test insert
u = u2[:]
u.insert(0, -1)
assert u == [-1, 0, 1]
# Test pop
u = [-1] + u2
u.pop()
assert u == [-1, 0]
u.pop(0)
assert u == [0]
# Test remove
u = u2[:]
u.remove(1)
assert u == u1
# Test count
u = u2*3
assert u.count(0) == 3
assert u.count(1) == 3
assert u.count(2) == 0
# Test index
assert u2.index(0) == 0
assert u2.index(1) == 1
try:
u2.index(2)
except ValueError:
pass
else:
assert 0, "expected ValueError"
# Test reverse
u = u2[:]
u.reverse()
assert u == [1, 0]
u.reverse()
assert u == u2
# Test sort
u = UserList([1, 0])
u.sort()
assert u == u2
# Test extend
u = u1[:]
u.extend(u2)
assert u == u1 + u2
| gpl-3.0 |
miptliot/edx-platform | openedx/core/djangoapps/credit/views.py | 8 | 7758 | """
Views for the credit Django app.
"""
from __future__ import unicode_literals
import datetime
import logging
import pytz
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_rest_framework_extensions.authentication import JwtAuthentication
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import generics, mixins, permissions, views, viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework_oauth.authentication import OAuth2Authentication
from openedx.core.djangoapps.credit.api import create_credit_request
from openedx.core.djangoapps.credit.exceptions import (
CreditApiBadRequest,
InvalidCourseKey,
InvalidCreditRequest,
UserNotEligibleException
)
from openedx.core.djangoapps.credit.models import (
CREDIT_PROVIDER_ID_REGEX,
CreditCourse,
CreditEligibility,
CreditProvider,
CreditRequest
)
from openedx.core.djangoapps.credit.serializers import (
CreditCourseSerializer,
CreditEligibilitySerializer,
CreditProviderCallbackSerializer,
CreditProviderSerializer
)
from openedx.core.lib.api.mixins import PutAsCreateMixin
from openedx.core.lib.api.permissions import IsStaffOrOwner
log = logging.getLogger(__name__)
AUTHENTICATION_CLASSES = (JwtAuthentication, OAuth2Authentication, SessionAuthentication,)
class CreditProviderViewSet(viewsets.ReadOnlyModelViewSet):
""" Credit provider endpoints. """
lookup_field = 'provider_id'
lookup_value_regex = CREDIT_PROVIDER_ID_REGEX
authentication_classes = AUTHENTICATION_CLASSES
pagination_class = None
permission_classes = (permissions.IsAuthenticated,)
queryset = CreditProvider.objects.all()
serializer_class = CreditProviderSerializer
def filter_queryset(self, queryset):
queryset = super(CreditProviderViewSet, self).filter_queryset(queryset)
# Filter by provider ID
provider_ids = self.request.GET.get('provider_ids', None)
if provider_ids:
provider_ids = provider_ids.split(',')
queryset = queryset.filter(provider_id__in=provider_ids)
return queryset
class CreditProviderRequestCreateView(views.APIView):
""" Creates a credit request for the given user and course, if the user is eligible for credit."""
authentication_classes = AUTHENTICATION_CLASSES
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
def post(self, request, provider_id):
""" POST handler. """
# Get the provider, or return HTTP 404 if it doesn't exist
provider = generics.get_object_or_404(CreditProvider, provider_id=provider_id)
# Validate the course key
course_key = request.data.get('course_key')
try:
course_key = CourseKey.from_string(course_key)
except InvalidKeyError:
raise InvalidCourseKey(course_key)
# Validate the username
username = request.data.get('username')
if not username:
raise ValidationError({'detail': 'A username must be specified.'})
# Ensure the user is actually eligible to receive credit
if not CreditEligibility.is_user_eligible_for_credit(course_key, username):
raise UserNotEligibleException(course_key, username)
try:
credit_request = create_credit_request(course_key, provider.provider_id, username)
return Response(credit_request)
except CreditApiBadRequest as ex:
raise InvalidCreditRequest(ex.message)
class CreditProviderCallbackView(views.APIView):
""" Callback used by credit providers to update credit request status. """
# This endpoint should be open to all external credit providers.
authentication_classes = ()
permission_classes = ()
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CreditProviderCallbackView, self).dispatch(request, *args, **kwargs)
def post(self, request, provider_id):
""" POST handler. """
provider = generics.get_object_or_404(CreditProvider, provider_id=provider_id)
data = request.data
# Ensure the input data is valid
serializer = CreditProviderCallbackSerializer(data=data, provider=provider)
serializer.is_valid(raise_exception=True)
# Update the credit request status
request_uuid = data['request_uuid']
new_status = data['status']
credit_request = generics.get_object_or_404(CreditRequest, uuid=request_uuid, provider=provider)
old_status = credit_request.status
credit_request.status = new_status
credit_request.save()
log.info(
'Updated [%s] CreditRequest [%s] from status [%s] to [%s].',
provider_id, request_uuid, old_status, new_status
)
return Response()
class CreditEligibilityView(generics.ListAPIView):
""" Returns eligibility for a user-course combination. """
authentication_classes = AUTHENTICATION_CLASSES
pagination_class = None
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner)
serializer_class = CreditEligibilitySerializer
queryset = CreditEligibility.objects.all()
def filter_queryset(self, queryset):
username = self.request.GET.get('username')
course_key = self.request.GET.get('course_key')
if not (username and course_key):
raise ValidationError(
{'detail': 'Both the course_key and username querystring parameters must be supplied.'})
course_key = unicode(course_key)
try:
course_key = CourseKey.from_string(course_key)
except InvalidKeyError:
raise ValidationError({'detail': '[{}] is not a valid course key.'.format(course_key)})
return queryset.filter(
username=username,
course__course_key=course_key,
deadline__gt=datetime.datetime.now(pytz.UTC)
)
class CreditCourseViewSet(PutAsCreateMixin, mixins.UpdateModelMixin, viewsets.ReadOnlyModelViewSet):
""" CreditCourse endpoints. """
lookup_field = 'course_key'
lookup_value_regex = settings.COURSE_KEY_REGEX
queryset = CreditCourse.objects.all()
serializer_class = CreditCourseSerializer
authentication_classes = AUTHENTICATION_CLASSES
permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser)
# In Django Rest Framework v3, there is a default pagination
# class that transmutes the response data into a dictionary
# with pagination information. The original response data (a list)
# is stored in a "results" value of the dictionary.
# For backwards compatibility with the existing API, we disable
# the default behavior by setting the pagination_class to None.
pagination_class = None
# This CSRF exemption only applies when authenticating without SessionAuthentication.
# SessionAuthentication will enforce CSRF protection.
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CreditCourseViewSet, self).dispatch(request, *args, **kwargs)
def get_object(self):
# Convert the serialized course key into a CourseKey instance
# so we can look up the object.
course_key = self.kwargs.get(self.lookup_field)
if course_key is not None:
self.kwargs[self.lookup_field] = CourseKey.from_string(course_key)
return super(CreditCourseViewSet, self).get_object()
| agpl-3.0 |
brandond/ansible | lib/ansible/modules/cloud/vmware/vmware_vmkernel.py | 5 | 47337 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2017-18, Ansible Project
# Copyright: (c) 2017-18, Abhijeet Kasurde <[email protected]>
# Copyright: (c) 2018, Christian Kotte <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_vmkernel
short_description: Manages a VMware VMkernel Adapter of an ESXi host.
description:
- This module can be used to manage the VMKernel adapters / VMKernel network interfaces of an ESXi host.
- The module assumes that the host is already configured with the Port Group in case of a vSphere Standard Switch (vSS).
- The module assumes that the host is already configured with the Distributed Port Group in case of a vSphere Distributed Switch (vDS).
- The module automatically migrates the VMKernel adapter from vSS to vDS or vice versa if present.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Russell Teague (@mtnbikenc)
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- The option C(device) needs to be used with DHCP because otherwise it's not possible to check if a VMkernel device is already present.
- You can only change from DHCP to static, and vSS to vDS, or vice versa, in one step, without creating a new device, with C(device) specified.
- You can only create the VMKernel adapter on a vDS if authenticated to vCenter and not if authenticated to ESXi.
- Tested on vSphere 5.5 and 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
vswitch_name:
description:
- The name of the vSwitch where to add the VMKernel interface.
- Required parameter only if C(state) is set to C(present).
- Optional parameter from version 2.5 and onwards.
type: str
aliases: ['vswitch']
dvswitch_name:
description:
- The name of the vSphere Distributed Switch (vDS) where to add the VMKernel interface.
- Required parameter only if C(state) is set to C(present).
- Optional parameter from version 2.8 and onwards.
type: str
aliases: ['dvswitch']
version_added: 2.8
portgroup_name:
description:
- The name of the port group for the VMKernel interface.
required: True
aliases: ['portgroup']
network:
description:
- A dictionary of network details.
- 'The following parameter is required:'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)).'
- 'The following parameters are required in case of C(type) is set to C(static):'
- ' - C(ip_address) (string): Static IP address (implies C(type: static)).'
- ' - C(subnet_mask) (string): Static netmask required for C(ip_address).'
- 'The following parameter is optional in case of C(type) is set to C(static):'
- ' - C(default_gateway) (string): Default gateway (Override default gateway for this adapter).'
- 'The following parameter is optional:'
- ' - C(tcpip_stack) (string): The TCP/IP stack for the VMKernel interface. Can be default, provisioning, vmotion, or vxlan. (default: default)'
type: dict
default: {
type: 'static',
tcpip_stack: 'default',
}
version_added: 2.5
ip_address:
description:
- The IP Address for the VMKernel interface.
- Use C(network) parameter with C(ip_address) instead.
- Deprecated option, will be removed in version 2.9.
subnet_mask:
description:
- The Subnet Mask for the VMKernel interface.
- Use C(network) parameter with C(subnet_mask) instead.
- Deprecated option, will be removed in version 2.9.
mtu:
description:
- The MTU for the VMKernel interface.
- The default value of 1500 is valid from version 2.5 and onwards.
default: 1500
device:
description:
- Search VMkernel adapter by device name.
- The parameter is required only in case of C(type) is set to C(dhcp).
version_added: 2.8
enable_vsan:
description:
- Enable VSAN traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
enable_vmotion:
description:
- Enable vMotion traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
- You cannot enable vMotion on an additional adapter if you already have an adapter with the vMotion TCP/IP stack configured.
type: bool
enable_mgmt:
description:
- Enable Management traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
enable_ft:
description:
- Enable Fault Tolerance traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
enable_provisioning:
description:
- Enable Provisioning traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
version_added: 2.8
enable_replication:
description:
- Enable vSphere Replication traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
version_added: 2.8
enable_replication_nfc:
description:
- Enable vSphere Replication NFC traffic on the VMKernel adapter.
- This option is only allowed if the default TCP/IP stack is used.
type: bool
version_added: 2.8
state:
description:
- If set to C(present), the VMKernel adapter will be created with the given specifications.
- If set to C(absent), the VMKernel adapter will be removed.
- If set to C(present) and VMKernel adapter exists, the configurations will be updated.
choices: [ present, absent ]
default: present
version_added: 2.5
esxi_hostname:
description:
- Name of ESXi host to which VMKernel is to be managed.
- "From version 2.5 onwards, this parameter is required."
required: True
version_added: 2.5
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add Management vmkernel port using static network type
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
vswitch_name: vSwitch0
portgroup_name: PG_0001
network:
type: 'static'
ip_address: 192.168.127.10
subnet_mask: 255.255.255.0
state: present
enable_mgmt: True
delegate_to: localhost
- name: Add Management vmkernel port using DHCP network type
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
vswitch_name: vSwitch0
portgroup_name: PG_0002
state: present
network:
type: 'dhcp'
enable_mgmt: True
delegate_to: localhost
- name: Change IP allocation from static to dhcp
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
vswitch_name: vSwitch0
portgroup_name: PG_0002
state: present
device: vmk1
network:
type: 'dhcp'
enable_mgmt: True
delegate_to: localhost
- name: Delete VMkernel port
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
vswitch_name: vSwitch0
portgroup_name: PG_0002
state: absent
delegate_to: localhost
- name: Add Management vmkernel port to Distributed Switch
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
dvswitch_name: dvSwitch1
portgroup_name: dvPG_0001
network:
type: 'static'
ip_address: 192.168.127.10
subnet_mask: 255.255.255.0
state: present
enable_mgmt: True
delegate_to: localhost
- name: Add vMotion vmkernel port with vMotion TCP/IP stack
vmware_vmkernel:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
esxi_hostname: '{{ esxi_hostname }}'
dvswitch_name: dvSwitch1
portgroup_name: dvPG_0001
network:
type: 'static'
ip_address: 192.168.127.10
subnet_mask: 255.255.255.0
tcpip_stack: vmotion
state: present
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about VMKernel name
returned: always
type: dict
sample: {
"changed": false,
"msg": "VMkernel Adapter already configured properly",
"device": "vmk1",
"ipv4": "static",
"ipv4_gw": "No override",
"ipv4_ip": "192.168.1.15",
"ipv4_sm": "255.255.255.0",
"mtu": 9000,
"services": "vMotion",
"switch": "vDS"
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (
PyVmomi, TaskError, vmware_argument_spec, wait_for_task,
find_dvspg_by_name, find_dvs_by_name, get_all_objs
)
from ansible.module_utils._text import to_native
class PyVmomiHelper(PyVmomi):
"""Class to manage VMkernel configuration of an ESXi host system"""
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
if self.params['network']:
self.network_type = self.params['network'].get('type')
self.ip_address = self.params['network'].get('ip_address', None)
self.subnet_mask = self.params['network'].get('subnet_mask', None)
self.default_gateway = self.params['network'].get('default_gateway', None)
if self.network_type == 'static':
if not self.ip_address:
module.fail_json(msg="ip_address is a required parameter when network type is set to 'static'")
if not self.subnet_mask:
module.fail_json(msg="subnet_mask is a required parameter when network type is set to 'static'")
self.tcpip_stack = self.params['network'].get('tcpip_stack')
else:
self.network_type = 'dhcp'
self.ip_address = None
self.subnet_mask = None
self.default_gateway = None
self.tcpip_stack = 'default'
self.device = self.params['device']
if self.network_type == 'dhcp' and not self.device:
module.fail_json(msg="device is a required parameter when network type is set to 'dhcp'")
self.mtu = self.params['mtu']
self.enable_vsan = self.params['enable_vsan']
self.enable_vmotion = self.params['enable_vmotion']
self.enable_mgmt = self.params['enable_mgmt']
self.enable_ft = self.params['enable_ft']
self.enable_provisioning = self.params['enable_provisioning']
self.enable_replication = self.params['enable_replication']
self.enable_replication_nfc = self.params['enable_replication_nfc']
self.vswitch_name = self.params['vswitch_name']
self.vds_name = self.params['dvswitch_name']
self.port_group_name = self.params['portgroup_name']
self.esxi_host_name = self.params['esxi_hostname']
hosts = self.get_all_host_objs(esxi_host_name=self.esxi_host_name)
if hosts:
self.esxi_host_obj = hosts[0]
else:
self.module.fail_json(
msg="Failed to get details of ESXi server. Please specify esxi_hostname."
)
# find Port Group
if self.vswitch_name:
self.port_group_obj = self.get_port_group_by_name(
host_system=self.esxi_host_obj,
portgroup_name=self.port_group_name,
vswitch_name=self.vswitch_name
)
if not self.port_group_obj:
module.fail_json(msg="Portgroup '%s' not found on vSS '%s'" % (self.port_group_name, self.vswitch_name))
elif self.vds_name:
self.dv_switch_obj = find_dvs_by_name(self.content, self.vds_name)
if not self.dv_switch_obj:
module.fail_json(msg="vDS '%s' not found" % self.vds_name)
self.port_group_obj = find_dvspg_by_name(self.dv_switch_obj, self.port_group_name)
if not self.port_group_obj:
module.fail_json(msg="Portgroup '%s' not found on vDS '%s'" % (self.port_group_name, self.vds_name))
# find VMkernel Adapter
if self.device:
self.vnic = self.get_vmkernel_by_device(device_name=self.device)
else:
# config change (e.g. DHCP to static, or vice versa); doesn't work with virtual port change
self.vnic = self.get_vmkernel_by_portgroup_new(port_group_name=self.port_group_name)
if not self.vnic:
if self.network_type == 'static':
# vDS to vSS or vSS to vSS (static IP)
self.vnic = self.get_vmkernel_by_ip(ip_address=self.ip_address)
elif self.network_type == 'dhcp':
# vDS to vSS or vSS to vSS (DHCP)
self.vnic = self.get_vmkernel_by_device(device_name=self.device)
def get_port_group_by_name(self, host_system, portgroup_name, vswitch_name):
"""
Get specific port group by given name
Args:
host_system: Name of Host System
portgroup_name: Name of Port Group
vswitch_name: Name of the vSwitch
Returns: List of port groups by given specifications
"""
portgroups = self.get_all_port_groups_by_host(host_system=host_system)
for portgroup in portgroups:
if portgroup.spec.vswitchName == vswitch_name and portgroup.spec.name == portgroup_name:
return portgroup
return None
def ensure(self):
"""
Manage internal VMKernel management
Returns: NA
"""
host_vmk_states = {
'absent': {
'present': self.host_vmk_delete,
'absent': self.host_vmk_unchange,
},
'present': {
'present': self.host_vmk_update,
'absent': self.host_vmk_create,
}
}
try:
host_vmk_states[self.module.params['state']][self.check_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=to_native(runtime_fault.msg))
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=to_native(method_fault.msg))
def get_vmkernel_by_portgroup_new(self, port_group_name=None):
"""
Check if vmkernel available or not
Args:
port_group_name: name of port group
Returns: vmkernel managed object if vmkernel found, false if not
"""
for vnic in self.esxi_host_obj.config.network.vnic:
# check if it's a vSS Port Group
if vnic.spec.portgroup == port_group_name:
return vnic
# check if it's a vDS Port Group
try:
if vnic.spec.distributedVirtualPort.portgroupKey == self.port_group_obj.key:
return vnic
except AttributeError:
pass
return False
def get_vmkernel_by_ip(self, ip_address):
"""
Check if vmkernel available or not
Args:
ip_address: IP address of vmkernel device
Returns: vmkernel managed object if vmkernel found, false if not
"""
vnics = [vnic for vnic in self.esxi_host_obj.config.network.vnic if vnic.spec.ip.ipAddress == ip_address]
if vnics:
return vnics[0]
return None
def get_vmkernel_by_device(self, device_name):
"""
Check if vmkernel available or not
Args:
device_name: name of vmkernel device
Returns: vmkernel managed object if vmkernel found, false if not
"""
vnics = [vnic for vnic in self.esxi_host_obj.config.network.vnic if vnic.device == device_name]
if vnics:
return vnics[0]
return None
def check_state(self):
"""
Check internal state management
Returns: Present if found and absent if not found
"""
state = 'absent'
if self.vnic:
state = 'present'
return state
def host_vmk_delete(self):
"""
Delete VMKernel
Returns: NA
"""
results = dict(changed=False, msg='')
vmk_device = self.vnic.device
try:
if self.module.check_mode:
results['msg'] = "VMkernel Adapter would be deleted"
else:
self.esxi_host_obj.configManager.networkSystem.RemoveVirtualNic(vmk_device)
results['msg'] = "VMkernel Adapter deleted"
results['changed'] = True
results['device'] = vmk_device
except vim.fault.NotFound as not_found:
self.module.fail_json(
msg="Failed to find vmk to delete due to %s" %
to_native(not_found.msg)
)
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(
msg="Failed to delete vmk due host config issues : %s" %
to_native(host_config_fault.msg)
)
self.module.exit_json(**results)
def host_vmk_unchange(self):
"""
Denote no change in VMKernel
Returns: NA
"""
self.module.exit_json(changed=False)
def host_vmk_update(self):
"""
Function to update VMKernel with given parameters
Returns: NA
"""
changed = changed_settings = changed_vds = changed_services = \
changed_service_vmotion = changed_service_mgmt = changed_service_ft = \
changed_service_vsan = changed_service_prov = changed_service_rep = changed_service_rep_nfc = False
changed_list = []
results = dict(changed=False, msg='')
results['tcpip_stack'] = self.tcpip_stack
net_stack_instance_key = self.get_api_net_stack_instance(self.tcpip_stack)
if self.vnic.spec.netStackInstanceKey != net_stack_instance_key:
self.module.fail_json(msg="The TCP/IP stack cannot be changed on an existing VMkernel adapter!")
# Check MTU
results['mtu'] = self.mtu
if self.vnic.spec.mtu != self.mtu:
changed_settings = True
changed_list.append("MTU")
results['mtu_previous'] = self.vnic.spec.mtu
# Check IPv4 settings
results['ipv4'] = self.network_type
results['ipv4_ip'] = self.ip_address
results['ipv4_sm'] = self.subnet_mask
if self.default_gateway:
results['ipv4_gw'] = self.default_gateway
else:
results['ipv4_gw'] = "No override"
if self.vnic.spec.ip.dhcp:
if self.network_type == 'static':
changed_settings = True
changed_list.append("IPv4 settings")
results['ipv4_previous'] = "DHCP"
if not self.vnic.spec.ip.dhcp:
if self.network_type == 'dhcp':
changed_settings = True
changed_list.append("IPv4 settings")
results['ipv4_previous'] = "static"
elif self.network_type == 'static':
if self.ip_address != self.vnic.spec.ip.ipAddress:
changed_settings = True
changed_list.append("IP")
results['ipv4_ip_previous'] = self.vnic.spec.ip.ipAddress
if self.subnet_mask != self.vnic.spec.ip.subnetMask:
changed_settings = True
changed_list.append("SM")
results['ipv4_sm_previous'] = self.vnic.spec.ip.subnetMask
if self.default_gateway:
try:
if self.default_gateway != self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway:
changed_settings = True
changed_list.append("GW override")
results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway
except AttributeError:
changed_settings = True
changed_list.append("GW override")
results['ipv4_gw_previous'] = "No override"
else:
try:
if self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway:
changed_settings = True
changed_list.append("GW override")
results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway
except AttributeError:
pass
# Check virtual port (vSS or vDS)
results['portgroup'] = self.port_group_name
dvs_uuid = None
if self.vswitch_name:
results['switch'] = self.vswitch_name
try:
if self.vnic.spec.distributedVirtualPort.switchUuid:
changed_vds = True
changed_list.append("Virtual Port")
dvs_uuid = self.vnic.spec.distributedVirtualPort.switchUuid
except AttributeError:
pass
if changed_vds:
results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid)
self.dv_switch_obj = find_dvs_by_name(self.content, results['switch_previous'])
results['portgroup_previous'] = self.find_dvspg_by_key(
self.dv_switch_obj, self.vnic.spec.distributedVirtualPort.portgroupKey
)
elif self.vds_name:
results['switch'] = self.vds_name
try:
if self.vnic.spec.distributedVirtualPort.switchUuid != self.dv_switch_obj.uuid:
changed_vds = True
changed_list.append("Virtual Port")
dvs_uuid = self.vnic.spec.distributedVirtualPort.switchUuid
except AttributeError:
changed_vds = True
changed_list.append("Virtual Port")
if changed_vds:
results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid)
results['portgroup_previous'] = self.vnic.spec.portgroup
portgroups = self.get_all_port_groups_by_host(host_system=self.esxi_host_obj)
for portgroup in portgroups:
if portgroup.spec.name == self.vnic.spec.portgroup:
results['switch_previous'] = portgroup.spec.vswitchName
results['services'] = self.create_enabled_services_string()
# Check configuration of service types (only if default TCP/IP stack is used)
if self.vnic.spec.netStackInstanceKey == 'defaultTcpipStack':
service_type_vmks = self.get_all_vmks_by_service_type()
if (self.enable_vmotion and self.vnic.device not in service_type_vmks['vmotion']) or \
(not self.enable_vmotion and self.vnic.device in service_type_vmks['vmotion']):
changed_services = changed_service_vmotion = True
if (self.enable_mgmt and self.vnic.device not in service_type_vmks['management']) or \
(not self.enable_mgmt and self.vnic.device in service_type_vmks['management']):
changed_services = changed_service_mgmt = True
if (self.enable_ft and self.vnic.device not in service_type_vmks['faultToleranceLogging']) or \
(not self.enable_ft and self.vnic.device in service_type_vmks['faultToleranceLogging']):
changed_services = changed_service_ft = True
if (self.enable_vsan and self.vnic.device not in service_type_vmks['vsan']) or \
(not self.enable_vsan and self.vnic.device in service_type_vmks['vsan']):
changed_services = changed_service_vsan = True
if (self.enable_provisioning and self.vnic.device not in service_type_vmks['vSphereProvisioning']) or \
(not self.enable_provisioning and self.vnic.device in service_type_vmks['vSphereProvisioning']):
changed_services = changed_service_prov = True
if (self.enable_replication and self.vnic.device not in service_type_vmks['vSphereReplication']) or \
                    (not self.enable_replication and self.vnic.device in service_type_vmks['vSphereReplication']):
changed_services = changed_service_rep = True
if (self.enable_replication_nfc and self.vnic.device not in service_type_vmks['vSphereReplicationNFC']) or \
                    (not self.enable_replication_nfc and self.vnic.device in service_type_vmks['vSphereReplicationNFC']):
changed_services = changed_service_rep_nfc = True
if changed_services:
changed_list.append("services")
if changed_settings or changed_vds or changed_services:
changed = True
if self.module.check_mode:
changed_suffix = ' would be updated'
else:
changed_suffix = ' updated'
if len(changed_list) > 2:
message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
elif len(changed_list) == 2:
message = ' and '.join(changed_list)
elif len(changed_list) == 1:
message = changed_list[0]
message = "VMkernel Adapter " + message + changed_suffix
if changed_settings or changed_vds:
vnic_config = vim.host.VirtualNic.Specification()
ip_spec = vim.host.IpConfig()
if self.network_type == 'dhcp':
ip_spec.dhcp = True
else:
ip_spec.dhcp = False
ip_spec.ipAddress = self.ip_address
ip_spec.subnetMask = self.subnet_mask
if self.default_gateway:
vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()
vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway
else:
vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()
vnic_config.ip = ip_spec
vnic_config.mtu = self.mtu
if changed_vds:
if self.vswitch_name:
vnic_config.portgroup = self.port_group_name
elif self.vds_name:
vnic_config.distributedVirtualPort = vim.dvs.PortConnection()
vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid
vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key
try:
if not self.module.check_mode:
self.esxi_host_obj.configManager.networkSystem.UpdateVirtualNic(self.vnic.device, vnic_config)
except vim.fault.NotFound as not_found:
self.module.fail_json(
msg="Failed to update vmk as virtual network adapter cannot be found %s" %
to_native(not_found.msg)
)
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(
msg="Failed to update vmk due to host config issues : %s" %
to_native(host_config_fault.msg)
)
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(
msg="Failed to update vmk as ipv6 address is specified in an ipv4 only system : %s" %
to_native(invalid_state.msg)
)
except vmodl.fault.InvalidArgument as invalid_arg:
self.module.fail_json(
msg="Failed to update vmk as IP address or Subnet Mask in the IP configuration"
"are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg)
)
if changed_services:
changed_list.append("Services")
services_previous = []
vnic_manager = self.esxi_host_obj.configManager.virtualNicManager
if changed_service_mgmt:
if self.vnic.device in service_type_vmks['management']:
services_previous.append('Mgmt')
operation = 'select' if self.enable_mgmt else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='management', operation=operation
)
if changed_service_vmotion:
if self.vnic.device in service_type_vmks['vmotion']:
services_previous.append('vMotion')
operation = 'select' if self.enable_vmotion else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='vmotion', operation=operation
)
if changed_service_ft:
if self.vnic.device in service_type_vmks['faultToleranceLogging']:
services_previous.append('FT')
operation = 'select' if self.enable_ft else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='faultToleranceLogging', operation=operation
)
if changed_service_prov:
if self.vnic.device in service_type_vmks['vSphereProvisioning']:
services_previous.append('Prov')
operation = 'select' if self.enable_provisioning else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereProvisioning', operation=operation
)
if changed_service_rep:
if self.vnic.device in service_type_vmks['vSphereReplication']:
services_previous.append('Repl')
operation = 'select' if self.enable_replication else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplication', operation=operation
)
if changed_service_rep_nfc:
if self.vnic.device in service_type_vmks['vSphereReplicationNFC']:
services_previous.append('Repl_NFC')
operation = 'select' if self.enable_replication_nfc else 'deselect'
self.set_service_type(
vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplicationNFC', operation=operation
)
if changed_service_vsan:
if self.vnic.device in service_type_vmks['vsan']:
services_previous.append('VSAN')
if self.enable_vsan:
results['vsan'] = self.set_vsan_service_type()
else:
self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='vsan', operation='deselect'
)
results['services_previous'] = ', '.join(services_previous)
else:
message = "VMkernel Adapter already configured properly"
results['changed'] = changed
results['msg'] = message
results['device'] = self.vnic.device
self.module.exit_json(**results)
def find_dvs_by_uuid(self, uuid):
"""
Find DVS by UUID
Returns: DVS name
"""
dvs_list = get_all_objs(self.content, [vim.DistributedVirtualSwitch])
for dvs in dvs_list:
if dvs.uuid == uuid:
return dvs.summary.name
return None
def find_dvspg_by_key(self, dv_switch, portgroup_key):
"""
Find dvPortgroup by key
Returns: dvPortgroup name
"""
portgroups = dv_switch.portgroup
for portgroup in portgroups:
if portgroup.key == portgroup_key:
return portgroup.name
return None
def set_vsan_service_type(self):
"""
Set VSAN service type
Returns: result of UpdateVsan_Task
"""
result = None
vsan_system = self.esxi_host_obj.configManager.vsanSystem
vsan_port_config = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
vsan_port_config.device = self.vnic.device
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
vsan_config.networkInfo.port = [vsan_port_config]
if not self.module.check_mode:
try:
vsan_task = vsan_system.UpdateVsan_Task(vsan_config)
wait_for_task(vsan_task)
except TaskError as task_err:
self.module.fail_json(
msg="Failed to set service type to vsan for %s : %s" % (self.vnic.device, to_native(task_err))
)
return result
def host_vmk_create(self):
"""
Create VMKernel
Returns: NA
"""
results = dict(changed=False, message='')
if self.vswitch_name:
results['switch'] = self.vswitch_name
elif self.vds_name:
results['switch'] = self.vds_name
results['portgroup'] = self.port_group_name
vnic_config = vim.host.VirtualNic.Specification()
ip_spec = vim.host.IpConfig()
results['ipv4'] = self.network_type
if self.network_type == 'dhcp':
ip_spec.dhcp = True
else:
ip_spec.dhcp = False
results['ipv4_ip'] = self.ip_address
results['ipv4_sm'] = self.subnet_mask
ip_spec.ipAddress = self.ip_address
ip_spec.subnetMask = self.subnet_mask
if self.default_gateway:
vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()
vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway
vnic_config.ip = ip_spec
results['mtu'] = self.mtu
vnic_config.mtu = self.mtu
results['tcpip_stack'] = self.tcpip_stack
vnic_config.netStackInstanceKey = self.get_api_net_stack_instance(self.tcpip_stack)
vmk_device = None
try:
if self.module.check_mode:
results['msg'] = "VMkernel Adapter would be created"
else:
if self.vswitch_name:
vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(
self.port_group_name,
vnic_config
)
elif self.vds_name:
vnic_config.distributedVirtualPort = vim.dvs.PortConnection()
vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid
vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key
vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(portgroup="", nic=vnic_config)
results['msg'] = "VMkernel Adapter created"
results['changed'] = True
results['device'] = vmk_device
if self.network_type != 'dhcp':
if self.default_gateway:
results['ipv4_gw'] = self.default_gateway
else:
results['ipv4_gw'] = "No override"
results['services'] = self.create_enabled_services_string()
except vim.fault.AlreadyExists as already_exists:
self.module.fail_json(
msg="Failed to add vmk as portgroup already has a virtual network adapter %s" %
to_native(already_exists.msg)
)
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(
msg="Failed to add vmk due to host config issues : %s" %
to_native(host_config_fault.msg)
)
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(
msg="Failed to add vmk as ipv6 address is specified in an ipv4 only system : %s" %
to_native(invalid_state.msg)
)
except vmodl.fault.InvalidArgument as invalid_arg:
self.module.fail_json(
msg="Failed to add vmk as IP address or Subnet Mask in the IP configuration "
"are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg)
)
# do service type configuration
if self.tcpip_stack == 'default' and not all(
option is False for option in [self.enable_vsan, self.enable_vmotion,
self.enable_mgmt, self.enable_ft,
self.enable_provisioning, self.enable_replication,
self.enable_replication_nfc]):
self.vnic = self.get_vmkernel_by_device(device_name=vmk_device)
# VSAN
if self.enable_vsan:
results['vsan'] = self.set_vsan_service_type()
# Other service type
host_vnic_manager = self.esxi_host_obj.configManager.virtualNicManager
if self.enable_vmotion:
self.set_service_type(host_vnic_manager, self.vnic, 'vmotion')
if self.enable_mgmt:
self.set_service_type(host_vnic_manager, self.vnic, 'management')
if self.enable_ft:
self.set_service_type(host_vnic_manager, self.vnic, 'faultToleranceLogging')
if self.enable_provisioning:
self.set_service_type(host_vnic_manager, self.vnic, 'vSphereProvisioning')
if self.enable_replication:
self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplication')
if self.enable_replication_nfc:
self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplicationNFC')
self.module.exit_json(**results)
def set_service_type(self, vnic_manager, vmk, service_type, operation='select'):
"""
Set service type to given VMKernel
Args:
vnic_manager: Virtual NIC manager object
vmk: VMkernel managed object
service_type: Name of service type
operation: Select to select service type, deselect to deselect service type
"""
try:
if operation == 'select':
if not self.module.check_mode:
vnic_manager.SelectVnicForNicType(service_type, vmk.device)
elif operation == 'deselect':
if not self.module.check_mode:
vnic_manager.DeselectVnicForNicType(service_type, vmk.device)
except vmodl.fault.InvalidArgument as invalid_arg:
self.module.fail_json(
msg="Failed to %s VMK service type '%s' on '%s' due to : %s" %
(operation, service_type, vmk.device, to_native(invalid_arg.msg))
)
def get_all_vmks_by_service_type(self):
"""
Return information about service types and VMKernel
Returns: Dictionary of service type as key and VMKernel list as value
"""
service_type_vmk = dict(
vmotion=[],
vsan=[],
management=[],
faultToleranceLogging=[],
vSphereProvisioning=[],
vSphereReplication=[],
vSphereReplicationNFC=[],
)
for service_type in list(service_type_vmk):
vmks_list = self.query_service_type_for_vmks(service_type)
service_type_vmk[service_type] = vmks_list
return service_type_vmk
def query_service_type_for_vmks(self, service_type):
"""
Return list of VMKernels
Args:
service_type: Name of service type
Returns: List of VMKernel which belongs to that service type
"""
vmks_list = []
query = None
try:
query = self.esxi_host_obj.configManager.virtualNicManager.QueryNetConfig(service_type)
except vim.fault.HostConfigFault as config_fault:
self.module.fail_json(
msg="Failed to get all VMKs for service type %s due to host config fault : %s" %
(service_type, to_native(config_fault.msg))
)
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(
msg="Failed to get all VMKs for service type %s due to invalid arguments : %s" %
(service_type, to_native(invalid_argument.msg))
)
if not query.selectedVnic:
return vmks_list
selected_vnics = [vnic for vnic in query.selectedVnic]
vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics]
return vnics_with_service_type
def create_enabled_services_string(self):
"""Create services list"""
services = []
if self.enable_mgmt:
services.append('Mgmt')
if self.enable_vmotion:
services.append('vMotion')
if self.enable_ft:
services.append('FT')
if self.enable_vsan:
services.append('VSAN')
if self.enable_provisioning:
services.append('Prov')
if self.enable_replication:
services.append('Repl')
if self.enable_replication_nfc:
services.append('Repl_NFC')
return ', '.join(services)
@staticmethod
def get_api_net_stack_instance(tcpip_stack):
"""Get TCP/IP stack instance name or key"""
net_stack_instance = None
if tcpip_stack == 'default':
net_stack_instance = 'defaultTcpipStack'
elif tcpip_stack == 'provisioning':
net_stack_instance = 'vSphereProvisioning'
# vmotion and vxlan stay the same
elif tcpip_stack == 'vmotion':
net_stack_instance = 'vmotion'
elif tcpip_stack == 'vxlan':
net_stack_instance = 'vxlan'
elif tcpip_stack == 'defaultTcpipStack':
net_stack_instance = 'default'
elif tcpip_stack == 'vSphereProvisioning':
net_stack_instance = 'provisioning'
return net_stack_instance
def main():
"""Main"""
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
esxi_hostname=dict(required=True, type='str'),
portgroup_name=dict(required=True, type='str', aliases=['portgroup']),
ip_address=dict(removed_in_version=2.9, type='str'),
subnet_mask=dict(removed_in_version=2.9, type='str'),
mtu=dict(required=False, type='int', default=1500),
device=dict(type='str'),
enable_vsan=dict(required=False, type='bool', default=False),
enable_vmotion=dict(required=False, type='bool', default=False),
enable_mgmt=dict(required=False, type='bool', default=False),
enable_ft=dict(required=False, type='bool', default=False),
enable_provisioning=dict(type='bool', default=False),
enable_replication=dict(type='bool', default=False),
enable_replication_nfc=dict(type='bool', default=False),
vswitch_name=dict(required=False, type='str', aliases=['vswitch']),
dvswitch_name=dict(required=False, type='str', aliases=['dvswitch']),
network=dict(
type='dict',
options=dict(
type=dict(type='str', default='static', choices=['static', 'dhcp']),
ip_address=dict(type='str'),
subnet_mask=dict(type='str'),
default_gateway=dict(type='str'),
tcpip_stack=dict(type='str', default='default', choices=['default', 'provisioning', 'vmotion', 'vxlan']),
),
default=dict(
type='static',
tcpip_stack='default',
),
),
state=dict(
type='str',
default='present',
choices=['absent', 'present']
),
))
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['vswitch_name', 'dvswitch_name'],
['tcpip_stack', 'enable_vsan'],
['tcpip_stack', 'enable_vmotion'],
['tcpip_stack', 'enable_mgmt'],
['tcpip_stack', 'enable_ft'],
['tcpip_stack', 'enable_provisioning'],
['tcpip_stack', 'enable_replication'],
['tcpip_stack', 'enable_replication_nfc'],
],
required_one_of=[
['vswitch_name', 'dvswitch_name'],
['portgroup_name', 'device'],
],
required_if=[
['state', 'present', ['portgroup_name']],
['state', 'absent', ['device']]
],
supports_check_mode=True)
pyv = PyVmomiHelper(module)
pyv.ensure()
if __name__ == '__main__':
main()
| gpl-3.0 |
geekboxzone/lollipop_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/android_browser_finder_unittest.py | 25 | 3287 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import benchmark
from telemetry.core import browser_options
from telemetry.core.platform import android_device
from telemetry.core.platform import android_platform_backend
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.unittest import system_stub
class AndroidBrowserFinderTest(unittest.TestCase):
def setUp(self):
self._stubs = system_stub.Override(android_browser_finder,
['adb_commands', 'os', 'subprocess',
'logging'])
self._android_device_stub = system_stub.Override(
android_device, ['adb_commands'])
self._apb_stub = system_stub.Override(
android_platform_backend, ['adb_commands'])
def tearDown(self):
self._stubs.Restore()
self._android_device_stub.Restore()
self._apb_stub.Restore()
def test_no_adb(self):
finder_options = browser_options.BrowserFinderOptions()
def NoAdb(*args, **kargs): # pylint: disable=W0613
raise OSError('not found')
self._stubs.subprocess.Popen = NoAdb
browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertEquals(0, len(browsers))
def test_adb_no_devices(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertEquals(0, len(browsers))
def test_adb_permissions_error(self):
finder_options = browser_options.BrowserFinderOptions()
self._stubs.subprocess.Popen.communicate_result = (
"""List of devices attached
????????????\tno permissions""",
"""* daemon not running. starting it now on port 5037 *
* daemon started successfully *
""")
browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertEquals(3, len(self._stubs.logging.warnings))
self.assertEquals(0, len(browsers))
def test_adb_two_devices(self):
finder_options = browser_options.BrowserFinderOptions()
self._android_device_stub.adb_commands.attached_devices = [
'015d14fec128220c', '015d14fec128220d']
browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertEquals(1, len(self._stubs.logging.warnings))
self.assertEquals(0, len(browsers))
@benchmark.Disabled('chromeos')
def test_adb_one_device(self):
finder_options = browser_options.BrowserFinderOptions()
self._android_device_stub.adb_commands.attached_devices = (
['015d14fec128220c'])
def OnPM(args):
assert args[0] == 'pm'
assert args[1] == 'list'
assert args[2] == 'packages'
return ['package:org.chromium.content_shell_apk',
'package.com.google.android.setupwizard']
def OnLs(_):
return ['/sys/devices/system/cpu/cpu0']
self._apb_stub.adb_commands.adb_device.shell_command_handlers['pm'] = OnPM
self._apb_stub.adb_commands.adb_device.shell_command_handlers['ls'] = OnLs
browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertEquals(1, len(browsers))
| bsd-3-clause |
jkmaxwell/dash-jetnoise | requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
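# Minimal usage sketch. A prober is normally driven indirectly through chardet's
# UniversalDetector, but it can also be fed raw bytes directly through the
# interface inherited from MultiByteCharSetProber/CharSetProber (feed() and
# get_confidence() are assumed to come from those base classes):
#
#   prober = Big5Prober()
#   prober.feed(raw_bytes)          # bytes suspected to be Big5-encoded
#   prober.get_charset_name()       # -> "Big5"
#   prober.get_confidence()         # -> float between 0.0 and 1.0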
| gpl-3.0 |
axbaretto/beam | learning/katas/python/Core Transforms/Partition/Partition/tests.py | 7 | 1455 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_helper import failed, passed, get_file_output, test_is_not_empty
def test_output():
output = get_file_output()
numbers_greater_than_100 = ['110', '150', '250']
remaining_numbers = ['1', '2', '3', '4', '5', '100']
answers = []
for num in numbers_greater_than_100:
answers.append('Number > 100: ' + num)
for num in remaining_numbers:
answers.append('Number <= 100: ' + num)
if all(num in output for num in answers):
passed()
else:
failed("Incorrect output. Partition the numbers accordingly.")
if __name__ == '__main__':
test_is_not_empty()
test_output()
| apache-2.0 |
gangadharkadam/verveerp | erpnext/accounts/doctype/pos_setting/pos_setting.py | 6 | 2364 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cint
from frappe.model.document import Document
class POSSetting(Document):
def validate(self):
self.check_for_duplicate()
self.validate_expense_account()
self.validate_all_link_fields()
def check_for_duplicate(self):
res = frappe.db.sql("""select name, user from `tabPOS Setting`
where ifnull(user, '') = %s and name != %s and company = %s""",
(self.user, self.name, self.company))
if res:
if res[0][1]:
msgprint(_("POS Setting {0} already created for user: {1} and company {2}").format(res[0][0],
res[0][1], self.company), raise_exception=1)
else:
msgprint(_("Global POS Setting {0} already created for company {1}").format(res[0][0],
self.company), raise_exception=1)
def validate_expense_account(self):
if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
and not self.expense_account:
msgprint(_("Expense Account is mandatory"), raise_exception=1)
def validate_all_link_fields(self):
accounts = {"Account": [self.cash_bank_account, self.income_account,
self.expense_account], "Cost Center": [self.cost_center],
"Warehouse": [self.warehouse]}
for link_dt, dn_list in accounts.items():
for link_dn in dn_list:
if link_dn and not frappe.db.exists({"doctype": link_dt,
"company": self.company, "name": link_dn}):
frappe.throw(_("{0} does not belong to Company {1}").format(link_dn, self.company))
def on_update(self):
self.set_defaults()
def on_trash(self):
self.set_defaults(include_current_pos=False)
def set_defaults(self, include_current_pos=True):
frappe.defaults.clear_default("is_pos")
if not include_current_pos:
condition = " where name != '%s'" % self.name.replace("'", "\'")
else:
condition = ""
pos_view_users = frappe.db.sql_list("""select user
from `tabPOS Setting` {0}""".format(condition))
for user in pos_view_users:
if user:
frappe.defaults.set_user_default("is_pos", 1, user)
else:
frappe.defaults.set_global_default("is_pos", 1)
@frappe.whitelist()
def get_series():
return frappe.get_meta("Sales Invoice").get_field("naming_series").options or ""
| agpl-3.0 |
TrimBiggs/calico | calico/test/test_calcollections.py | 1 | 5048 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.test.test_calcollections
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test for collections library.
"""
import logging
from mock import Mock, call, patch
from calico.calcollections import SetDelta, MultiDict
from unittest2 import TestCase
_log = logging.getLogger(__name__)
class TestSetDelta(TestCase):
def setUp(self):
self.set = set("abc")
self.delta = SetDelta(self.set)
def test_add(self):
self.delta.add("c")
self.delta.add("d")
# Only "d" added, "c" was already present.
self.assertEqual(self.delta.added_entries, set(["d"]))
# Now apply, should mutate the set.
self.assertEqual(self.set, set("abc"))
self.delta.apply_and_reset()
self.assertEqual(self.set, set("abcd"))
self.assertEqual(self.delta.added_entries, set())
def test_remove(self):
self.delta.remove("c")
self.delta.remove("d")
# Only "c" added, "d" was already missing.
self.assertEqual(self.delta.removed_entries, set(["c"]))
# Now apply, should mutate the set.
self.assertEqual(self.set, set("abc"))
self.delta.apply_and_reset()
self.assertEqual(self.set, set("ab"))
self.assertEqual(self.delta.removed_entries, set())
def test_add_and_remove(self):
self.delta.add("c") # No-op, already present.
self.delta.add("d") # Put in added set.
self.delta.add("e") # Will remain in added set.
self.delta.remove("c") # Recorded in remove set.
self.delta.remove("d") # Cancels the pending add only.
self.delta.remove("f") # No-op.
self.assertEqual(self.delta.added_entries, set("e"))
self.assertEqual(self.delta.removed_entries, set("c"))
self.delta.apply_and_reset()
self.assertEqual(self.set, set("abe"))
def test_size(self):
self.assertTrue(self.delta.empty)
self.assertEqual(self.delta.resulting_size, 3)
self.delta.add("c") # No-op, already present.
self.assertEqual(self.delta.resulting_size, 3)
self.delta.add("d") # Put in added set.
self.assertEqual(self.delta.resulting_size, 4)
self.delta.add("e") # Will remain in added set.
self.assertEqual(self.delta.resulting_size, 5)
self.delta.remove("c") # Recorded in remove set.
self.assertEqual(self.delta.resulting_size, 4)
self.delta.remove("d") # Cancels the pending add only.
self.assertEqual(self.delta.resulting_size, 3)
self.delta.remove("f") # No-op.
self.assertEqual(self.delta.resulting_size, 3)
class TestMultiDict(TestCase):
def setUp(self):
super(TestMultiDict, self).setUp()
self.index = MultiDict()
def test_add_single(self):
self.index.add("k", "v")
self.assertTrue(self.index.contains("k", "v"))
self.assertEqual(set(self.index.iter_values("k")),
set(["v"]))
def test_add_remove_single(self):
self.index.add("k", "v")
self.index.discard("k", "v")
self.assertFalse(self.index.contains("k", "v"))
self.assertEqual(self.index._index, {})
def test_empty(self):
self.assertFalse(bool(self.index))
self.assertEqual(self.index.num_items("k"), 0)
self.assertEqual(list(self.index.iter_values("k")), [])
def test_add_multiple(self):
self.index.add("k", "v")
self.assertTrue(bool(self.index))
self.assertEqual(self.index.num_items("k"), 1)
self.index.add("k", "v")
self.assertEqual(self.index.num_items("k"), 1)
self.index.add("k", "v2")
self.assertEqual(self.index.num_items("k"), 2)
self.index.add("k", "v3")
self.assertEqual(self.index.num_items("k"), 3)
self.assertIn("k", self.index)
self.assertNotIn("k2", self.index)
self.assertTrue(self.index.contains("k", "v"))
self.assertTrue(self.index.contains("k", "v2"))
self.assertTrue(self.index.contains("k", "v3"))
self.assertEqual(self.index._index, {"k": set(["v", "v2", "v3"])})
self.assertEqual(set(self.index.iter_values("k")),
set(["v", "v2", "v3"]))
self.index.discard("k", "v")
self.index.discard("k", "v2")
self.assertTrue(self.index.contains("k", "v3"))
self.index.discard("k", "v3")
self.assertEqual(self.index._index, {})
| apache-2.0 |
eino-makitalo/odoo | addons/l10n_do/__openerp__.py | 309 | 2992 | # -*- coding: utf-8 -*-
# #############################################################################
#
# First author: Jose Ernesto Mendez <[email protected]> (Open Business Solutions SRL.)
# Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved.
#
# This is a fork to upgrade to odoo 8.0
# by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Marcos Organizador de Negocios.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
'name': 'Dominican Republic - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Dominican Republic.
==============================================================================
* Chart of Accounts.
    * The Tax Code Chart for Dominican Republic
    * The main taxes used in Dominican Republic
* Fiscal position for local """,
'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.',
'website': 'http://marcos.do',
'depends': ['account', 'base_iban'],
'data': [
# basic accounting data
'data/ir_sequence_type.xml',
'data/ir_sequence.xml',
'data/account_journal.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'data/l10n_do_base_data.xml',
# Adds fiscal position
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
# configuration wizard, views, reports...
'data/l10n_do_wizard.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ponty/mangui | docs/conf.py | 1 | 1581 | from path import path
from setuptools import find_packages
import os
import sphinx
import sys
def read_project_version(py=None, where='.', exclude=['bootstrap', 'pavement', 'doc', 'docs', 'test', 'tests', ]):
if not py:
py = path(where) / find_packages(where=where, exclude=exclude)[0]
py = path(py)
if py.isdir():
py = py / '__init__.py'
__version__ = None
for line in py.lines():
if '__version__' in line:
exec line
break
return __version__
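# Example of what the helper above extracts (the package name and version string
# are hypothetical): if mangui/__init__.py contains the line
#   __version__ = '1.0.2'
# then read_project_version(where='..') exec's that line and returns '1.0.2'.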
release = read_project_version(where='..')
project = 'mangui'
author = 'ponty'
copyright = '2011, ponty'
# Extension
extensions = [
# -*-Extensions: -*-
'sphinx.ext.autodoc',
# 'sphinxcontrib.programoutput',
'sphinxcontrib.programscreenshot',
#'sphinx.ext.graphviz',
#'sphinxcontrib.autorun',
#'sphinx.ext.autosummary',
#'sphinx.ext.intersphinx',
]
intersphinx_mapping = {'http://docs.python.org/': None}
# Source
master_doc = 'index'
templates_path = ['_templates']
source_suffix = '.rst'
exclude_trees = []
pygments_style = 'sphinx'
# html build settings
html_theme = 'default'
html_static_path = ['_static']
# htmlhelp settings
htmlhelp_basename = '%sdoc' % project
# latex build settings
latex_documents = [
('index', '%s.tex' % project, u'%s Documentation' % project,
author, 'manual'),
]
# remove blank pages from pdf
# http://groups.google.com/group/sphinx-
# dev/browse_thread/thread/92e19267d095412d/d60dcba483c6b13d
latex_font_size = '10pt,oneside'
latex_elements = dict(
papersize='a4paper',
)
| bsd-2-clause |
Weicong-Lin/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/symtable.py | 114 | 7518 | """Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC,
SCOPE_OFF, SCOPE_MASK, FREE, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL, LOCAL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
raw = _symtable.symtable(code, filename, compile_type)
for top in raw.itervalues():
if top.name == 'top':
break
return _newSymbolTable(top, filename)
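# Minimal usage sketch (the source string below is an arbitrary illustration):
#
#   import symtable
#   top = symtable.symtable("def spam(x):\n    return x + 1\n", "<string>", "exec")
#   top.get_type()                      # -> "module"
#   spam = top.get_children()[0]
#   spam.get_type()                     # -> "function"
#   spam.lookup("x").is_parameter()     # -> True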
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION
and not self._table.optimized)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec"""
return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
def has_import_star(self):
"""Return true if the scope uses import *"""
return bool(self._table.optimized & OPT_IMPORT_STAR)
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError, "name is bound to multiple namespaces"
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
src = open(sys.argv[0]).read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print info, info.is_local(), info.is_namespace()
| mit |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/data/region_RE.py | 11 | 1850 | """Auto-generated file, do not edit by hand. RE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_RE = PhoneMetadata(id='RE', country_code=262, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[268]\\d{8}', possible_number_pattern='\\d{9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='262\\d{6}', possible_number_pattern='\\d{9}', example_number='262161234'),
mobile=PhoneNumberDesc(national_number_pattern='6(?:9[23]|47)\\d{6}', possible_number_pattern='\\d{9}', example_number='692123456'),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{7}', possible_number_pattern='\\d{9}', example_number='801234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='89[1-37-9]\\d{6}', possible_number_pattern='\\d{9}', example_number='891123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='8(?:1[019]|2[0156]|84|90)\\d{6}', possible_number_pattern='\\d{9}', example_number='810123456'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([268]\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', national_prefix_formatting_rule='0\\1')],
main_country_for_code=True,
leading_digits='262|6[49]|8')
| gpl-2.0 |
pronexo-odoo/odoo-argentina | l10n_ar_pyafipws/pyafipws/setup_wslpg.py | 1 | 2419 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# To build the executable:
# python setup.py py2exe
#
"Installer builder for the Electronic Primary Grain Settlement (Liquidación Electrónica Primaria de Granos) service"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2013 Mariano Reingart"
from distutils.core import setup
import py2exe
import glob, sys
# includes for py2exe
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
# required modules for shelve support (not detected by py2exe by default):
for lib in 'dbhash', 'gdbm', 'dbm', 'dumbdbm', 'anydbm':
try:
__import__(lib)
includes.append(lib)
except ImportError:
pass
# don't pull in all this MFC stuff used by the makepy UI.
excludes=["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
opts = {
'py2exe': {
'includes':includes,
'optimize':2,
'excludes': excludes,
'dll_excludes': ["mswsock.dll", "powrprof.dll", "KERNELBASE.dll",
"API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
"API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
"API-MS-Win-Security-Base-L1-1-0.dll"
],
'skip_archive': True,
}}
data_files = [
(".", ["licencia.txt", "wslpg.ini",
"wslpg_aut_test.xml", "afip_ca_info.crt",
"liquidacion_form_c1116b_wslpg.csv",
"liquidacion_form_c1116b_wslpg.png", ]),
("cache", glob.glob("cache/*")),
]
import wslpg, wsaa
from nsis import build_installer, Target
setup(
name="WSLPG",
version=wslpg.__version__ + (wslpg.HOMO and '-homo' or '-full'),
description="Interfaz PyAfipWs WSLPG",
long_description=wslpg.__doc__,
author="Mariano Reingart",
author_email="[email protected]",
url="http://www.sistemasagiles.com.ar",
license="GNU GPL v3",
com_server = [Target(module=wsaa, modules='wsaa', create_exe=False, create_dll=True),
Target(module=wslpg, modules="wslpg")],
console=[Target(module=wslpg, script='wslpg.py', dest_base="wslpg_cli"),
Target(module=wsaa, script="wsaa.py", dest_base="wsaa-cli")
],
windows=[Target(module=wsaa, script="wsaa.py", dest_base="wsaa")],
options=opts,
data_files = data_files,
cmdclass = {"py2exe": build_installer}
)
| agpl-3.0 |
amahabal/PySeqsee | farg/apps/seqsee/subspaces/choose_item_to_focus_on.py | 1 | 1660 | # Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
from farg.core.subspace import QuickReconnResults, Subspace
from farg.core.util import WeightedChoice
def ThingsToChooseFrom(ws):
"""Yields two-tuples of things to choose from, the second being weight."""
# QUALITY TODO(Feb 14, 2012): This should be a subspace. What do we choose from, what
# to pay attention to?
# QUALITY TODO(Feb 14, 2012): Explore role of relations.
for element in ws.elements:
yield (element, 20)
for gp in ws.groups:
yield (gp, gp.strength)
class SubspaceSelectObjectToFocusOn(Subspace):
"""Select object in workspace to focus on."""
# This is a place-holder for a real space. For now, it has simply been upgraded from
# a codelet and all work is done by QuickReconn.
def QuickReconn(self):
parent_ws = self.parent_controller.workspace
choice = WeightedChoice(ThingsToChooseFrom(parent_ws))
if choice:
return QuickReconnResults.AnswerFound(choice)
else:
return QuickReconnResults.NoAnswerCanBeFound()
| gpl-3.0 |
akozumpl/dnf | tests/test_config.py | 4 | 3651 | # Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
from dnf.conf import CliCache
from dnf.yum.config import Option, BaseConfig, YumConf
from tests.support import TestCase
from tests.support import mock
import unittest
class OptionTest(unittest.TestCase):
class Cfg(BaseConfig):
a_setting = Option("roundabout")
def test_delete(self):
cfg = self.Cfg()
self.assertEqual(cfg.a_setting, "roundabout")
del cfg.a_setting
try:
cfg.a_setting
except RuntimeError as e:
pass
else:
self.fail("option should be deleted now.")
class CacheTest(TestCase):
@mock.patch('dnf.util.am_i_root', return_value=True)
def test_root(self, unused_am_i_root):
cache = CliCache('/var/lib/spinning', 'i286/20')
self.assertEqual(cache.system_cachedir, '/var/lib/spinning/i286/20')
self.assertEqual(cache.cachedir, '/var/lib/spinning/i286/20')
@mock.patch('dnf.yum.misc.getCacheDir', return_value="/notmp/dnf-walr-yeAH")
@mock.patch('dnf.util.am_i_root', return_value=False)
def test_noroot(self, fn_root, fn_getcachedir):
cache = CliCache('/var/lib/spinning', 'i286/20')
self.assertEqual(fn_getcachedir.call_count, 0)
self.assertEqual(cache.cachedir, '/notmp/dnf-walr-yeAH/i286/20')
self.assertEqual(fn_getcachedir.call_count, 1)
# the cachedirs are cached now, getCacheDir is not called again:
self.assertEqual(cache.cachedir, '/notmp/dnf-walr-yeAH/i286/20')
self.assertEqual(fn_getcachedir.call_count, 1)
class YumConfTest(TestCase):
def test_bugtracker(self):
conf = YumConf()
self.assertEqual(conf.bugtracker_url,
"https://bugzilla.redhat.com/enter_bug.cgi" +
"?product=Fedora&component=dnf")
def test_overrides(self):
conf = YumConf()
self.assertFalse(conf.assumeyes)
self.assertFalse(conf.assumeno)
self.assertEqual(conf.color_list_installed_older, 'bold')
override = {'assumeyes': True,
'color_list_installed_older': 'timid'}
conf.override(override)
self.assertTrue(conf.assumeyes)
self.assertFalse(conf.assumeno) # no change
self.assertEqual(conf.color_list_installed_older, 'timid')
def test_prepend_installroot(self):
conf = YumConf()
conf.installroot = '/mnt/root'
conf.prepend_installroot('persistdir')
self.assertEqual(conf.persistdir, '/mnt/root/var/lib/dnf')
def test_ranges(self):
conf = YumConf()
with self.assertRaises(ValueError):
conf.debuglevel = 11
| gpl-2.0 |
bgalehouse/grr | lib/data_stores/mysql_advanced_data_store_test.py | 2 | 2003 | #!/usr/bin/env python
"""Tests the mysql data store."""
import unittest
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import mysql_advanced_data_store
class MysqlAdvancedTestMixin(object):
def InitDatastore(self):
self.token = access_control.ACLToken(username="test",
reason="Running tests")
# Use separate tables for benchmarks / tests so they can be run in parallel.
config_lib.CONFIG.Set("Mysql.database_name", "grr_test_%s" %
self.__class__.__name__)
try:
data_store.DB = mysql_advanced_data_store.MySQLAdvancedDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
data_store.DB.RecreateTables()
except Exception as e:
logging.debug("Error while connecting to MySQL db: %s.", e)
raise unittest.SkipTest("Skipping since Mysql db is not reachable.")
def DestroyDatastore(self):
data_store.DB.DropTables()
def testCorrectDataStore(self):
self.assertTrue(
isinstance(data_store.DB,
mysql_advanced_data_store.MySQLAdvancedDataStore))
class MysqlAdvancedDataStoreTest(
MysqlAdvancedTestMixin, data_store_test._DataStoreTest):
"""Test the mysql data store abstraction."""
class MysqlAdvancedDataStoreBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlAdvancedDataStoreCSVBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
havard024/prego | crm/lib/python2.7/site-packages/django/core/serializers/base.py | 33 | 5714 | """
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils.encoding import smart_text
from django.utils import six
class SerializerDoesNotExist(KeyError):
"""The requested serializer was not found."""
pass
class SerializationError(Exception):
"""Something bad happened during serialization."""
pass
class DeserializationError(Exception):
"""Something bad happened during deserialization."""
pass
class Serializer(object):
"""
Abstract serializer base class.
"""
# Indicates if the implemented serializer is only available for
# internal Django use.
internal_use_only = False
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", six.StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_keys = options.pop("use_natural_keys", False)
self.start_serialization()
self.first = True
for obj in queryset:
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's _meta
# This is to avoid local_fields problems for proxy models. Refs #17717.
concrete_model = obj._meta.concrete_model
for field in concrete_model._meta.local_fields:
if field.serialize:
if field.rel is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue()
def start_serialization(self):
"""
Called when serializing of the queryset starts.
"""
raise NotImplementedError
def end_serialization(self):
"""
Called when serializing of the queryset ends.
"""
pass
def start_object(self, obj):
"""
Called when serializing of an object starts.
"""
raise NotImplementedError
def end_object(self, obj):
"""
Called when serializing of an object ends.
"""
pass
def handle_field(self, obj, field):
"""
Called to handle each individual (non-relational) field on an object.
"""
raise NotImplementedError
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey field.
"""
raise NotImplementedError
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField.
"""
raise NotImplementedError
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream is
not seekable).
"""
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
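# Minimal sketch of a concrete subclass wiring the hooks above together (the
# class name and output format are illustrative only; Django's real JSON/XML
# serializers are considerably more involved):
#
#   class PlainTextSerializer(Serializer):
#       def start_serialization(self):
#           pass
#       def start_object(self, obj):
#           self.stream.write("%s pk=%s\n" % (obj._meta.object_name, obj.pk))
#       def handle_field(self, obj, field):
#           self.stream.write("  %s=%s\n" % (field.name, field.value_to_string(obj)))
#       def handle_fk_field(self, obj, field):
#           self.stream.write("  %s_id=%s\n" % (field.name, getattr(obj, field.attname)))
#       def handle_m2m_field(self, obj, field):
#           pass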
class Deserializer(six.Iterator):
"""
Abstract base deserializer class.
"""
def __init__(self, stream_or_string, **options):
"""
Init this serializer given a stream or a string
"""
self.options = options
if isinstance(stream_or_string, six.string_types):
self.stream = six.StringIO(stream_or_string)
else:
self.stream = stream_or_string
# hack to make sure that the models have all been loaded before
# deserialization starts (otherwise subclass calls to get_model()
# and friends might fail...)
models.get_apps()
def __iter__(self):
return self
def __next__(self):
"""Iteration iterface -- return the next item in the stream"""
raise NotImplementedError
class DeserializedObject(object):
"""
A deserialized model.
Basically a container for holding the pre-saved deserialized data along
with the many-to-many data saved with the object.
Call ``save()`` to save the object (with the many-to-many data) to the
database; call ``save(save_m2m=False)`` to save just the object fields
(and not touch the many-to-many stuff.)
"""
def __init__(self, obj, m2m_data=None):
self.object = obj
self.m2m_data = m2m_data
def __repr__(self):
return "<DeserializedObject: %s.%s(pk=%s)>" % (
self.object._meta.app_label, self.object._meta.object_name, self.object.pk)
def save(self, save_m2m=True, using=None):
# Call save on the Model baseclass directly. This bypasses any
# model-defined save. The save is also forced to be raw.
# This ensures that the data that is deserialized is literally
# what came from the file, not post-processed by pre_save/save
# methods.
models.Model.save_base(self.object, using=using, raw=True)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
setattr(self.object, accessor_name, object_list)
# prevent a second (possibly accidental) call to save() from saving
# the m2m data twice.
self.m2m_data = None
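# Typical round trip using the public entry points that dispatch to these base
# classes (SomeModel is a hypothetical model; "json" is one of the registered
# serialization formats):
#
#   from django.core import serializers
#   data = serializers.serialize("json", SomeModel.objects.all())
#   for deserialized in serializers.deserialize("json", data):
#       deserialized.save()      # writes the object fields, then the m2m data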
| mit |
Lokke/eden | controllers/hrm.py | 7 | 25077 | # -*- coding: utf-8 -*-
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
""" Module Home Page """
mode = session.s3.hrm.mode
if mode is not None:
# Go to Personal Profile
s3_redirect_default(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
s3_redirect_default(URL(f="staff", args="summary"))
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
- combined Staff/Volunteers
Used for Summary view, Imports and S3AddPersonWidget2
"""
return s3db.hrm_human_resource_controller()
# -----------------------------------------------------------------------------
def staff():
""" Staff Controller """
# Staff only
s3.filter = FS("type") == 1
def prep(r):
table = r.table
tablename = r.tablename
get_vars = r.get_vars
# Use CRUD strings for staff
crud_strings = s3.crud_strings
crud_strings[tablename] = crud_strings["hrm_staff"]
resource = r.resource
if "expiring" in get_vars:
# Filter for staff with contracts expiring in the next 4 weeks
query = FS("end_date") < \
(request.utcnow + datetime.timedelta(weeks=4))
resource.add_filter(query)
# Adapt CRUD strings
crud_strings[tablename].title_list = \
T("Staff with Contracts Expiring in the next Month")
# Reconfigure
resource.configure(# Sort by Expiry
sortby = table.end_date,
# Remove the Add button
insertable=False
)
# Adapt list_fields
list_fields = [(T("Contract End Date"), "end_date"),
"person_id",
"job_title_id",
"organisation_id",
"department_id",
"site_id",
#"site_contact",
]
else:
# Adapt list_fields
list_fields = ["person_id",
"job_title_id",
"organisation_id",
"department_id",
"site_id",
#"site_contact",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
if settings.get_hrm_use_trainings():
list_fields.append("person_id$training.course_id")
if settings.get_hrm_use_certificates():
list_fields.append("person_id$certification.certificate_id")
list_fields.append((T("Contract End Date"), "end_date"))
list_fields.append("status")
resource.configure(list_fields = list_fields)
if r.interactive:
if r.id:
if r.method not in ("profile", "delete"):
# Redirect to person controller
vars = {
"human_resource.id": r.id,
"group": "staff"
}
args = []
if r.representation == "iframe":
vars["format"] = "iframe"
args = [r.method]
redirect(URL(f="person", vars=vars, args=args))
else:
if r.method == "import":
# Redirect to person controller
redirect(URL(f="person",
args="import",
vars={"group": "staff"}))
elif not r.component and r.method != "delete":
# Configure site_id
field = table.site_id
site_id = get_vars.get("site_id", None)
if site_id:
field.default = site_id
field.writable = False
field.comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % (
settings.get_org_site_label(),
T("The facility where this position is based."),
#messages.AUTOCOMPLETE_HELP,
)))
#field.comment = S3AddResourceLink(c="org", f="facility",
# vars = dict(child="site_id",
# parent="req"),
# title=T("Add New Site"),
# )
# Hide status field
table.status.writable = table.status.readable = False
# Assume staff only between 16-81
dob = s3db.pr_person.date_of_birth
dob.widget = S3CalendarWidget(past_months = 972,
future_months = -192,
)
elif r.representation == "xls":
# Make it match Import sheets
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
i = 0
for f in list_fields:
i += 1
if f == "site_id":
break
list_fields.insert(i,
(T("Facility Type"),
"person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules and \
settings.get_hrm_compose_button() and \
auth.permission.has_permission("update", c="hrm", f="compose"):
# @ToDo: Remove this now that we have it in Events?
s3.actions.append(
{"url": URL(f="compose",
vars = {"human_resource.id": "[id]"}),
"_class": "action-btn send",
"label": str(T("Send Message"))
})
#s3.scripts.append("/%s/static/scripts/jquery.doubleScroll.js" % appname)
#s3.jquery_ready.append('''$('.dataTable_table').doubleScroll()''')
#s3.jquery_ready.append('''$('.dataTables_wrapper').doubleScroll()''')
elif r.representation == "plain":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used for access to component Tabs, Personal Profile & Imports
- includes components relevant to HRM
"""
return s3db.hrm_person_controller()
# -----------------------------------------------------------------------------
def profile():
"""
Profile Controller
- includes components relevant to HRM
"""
request.args = [str(s3_logged_in_person())]
# Custom Method for Contacts
s3db.set_method("pr", resourcename,
method = "contacts",
action = s3db.pr_Contacts)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person",
asset_asset = "assigned_to_id",
)
group = get_vars.get("group", "staff")
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
table.type.default = 1
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
s3db.configure(tablename,
deletable = False,
)
# Configure for personal mode
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# CRUD pre-process
def prep(r):
if r.interactive and r.method != "import":
if r.component:
if r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
else:
table = r.table
table.pe_label.readable = table.pe_label.writable = False
table.missing.readable = table.missing.writable = False
table.age_group.readable = table.age_group.writable = False
# Assume volunteers only between 12-81
dob = table.date_of_birth
dob.widget = S3CalendarWidget(past_months = 972,
future_months = -144,
)
return True
else:
# Disable non-interactive & import
return False
s3.prep = prep
output = s3_rest_controller("pr", "person",
rheader = s3db.hrm_rheader,
)
return output
# -----------------------------------------------------------------------------
def hr_search():
"""
Human Resource REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter
group = get_vars.get("group", None)
if group == "staff":
s3.filter = FS("human_resource.type") == 1
elif group == "volunteer":
s3.filter = FS("human_resource.type") == 2
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter
group = get_vars.get("group", None)
if group == "staff":
s3.filter = FS("human_resource.type") == 1
elif group == "volunteer":
s3.filter = FS("human_resource.type") == 2
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR
"""
return s3db.hrm_group_controller()
# -----------------------------------------------------------------------------
def group_membership():
"""
Membership controller
- uses the group_membership table from PR
"""
# Change Labels & list_fields
s3db.hrm_configure_pr_group_membership()
# Only show Relief Teams
# Do not show system groups
# Only show Staff
table = db.pr_group_membership
gtable = db.pr_group
htable = s3db.hrm_human_resource
s3.filter = (gtable.system == False) & \
(gtable.group_type == 3) & \
(htable.type == 1) & \
(htable.person_id == table.person_id)
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
return True
s3.prep = prep
output = s3_rest_controller("pr", "group_membership",
csv_template="group_membership",
csv_stylesheet=("hrm", "group_membership.xsl"),
)
return output
# =============================================================================
# Jobs
# =============================================================================
def department():
""" Departments Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_department)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def job_title():
""" Job Titles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
s3.filter = FS("type").belongs((1, 3))
if not auth.s3_has_role(ADMIN):
s3.filter &= auth.filter_by_root_org(s3db.hrm_job_title)
output = s3_rest_controller()
return output
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_course)
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
if settings.get_hrm_filter_certificates() and \
not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def training():
""" Training Controller - used for Searching for Participants """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
return s3db.hrm_training_event_controller()
# -----------------------------------------------------------------------------
def credential():
""" Credentials Controller """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
""" Experience Controller """
s3.filter = FS("person_id$human_resource.type") == 1
return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def competency():
"""
RESTful CRUD controller used to allow searching for people by Skill
"""
s3.filter = FS("person_id$human_resource.type") == 1
field = s3db.hrm_competency.person_id
field.widget = S3PersonAutocompleteWidget(ajax_filter = "~.human_resource.type=1")
return s3db.hrm_competency_controller()
# =============================================================================
def skill_competencies():
"""
Called by S3OptionsFilter to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
ptable = db.pr_person
query = (table.site_id == site_id) & \
(table.deleted == False) & \
(table.status == 1) & \
((table.end_date == None) | \
(table.end_date > request.utcnow)) & \
(ptable.id == table.person_id)
rows = db(query).select(ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
orderby=ptable.first_name)
result = []
append = result.append
for row in rows:
append({"id" : row.id,
"name" : s3_fullname(row)
})
result = json.dumps(result)
response.headers["Content-Type"] = "application/json"
return result
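# Example of the JSON string built above (person ids and names are hypothetical):
#   [{"id": 5, "name": "Jane Doe"}, {"id": 8, "name": "John Smith"}]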
# =============================================================================
# Salaries
# =============================================================================
def staff_level():
""" Staff Levels Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def salary_grade():
""" Salary Grade Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Insurance Information
# =============================================================================
def insurance():
""" Insurance Information Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Awards
# =============================================================================
def award_type():
""" Award Type Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def award():
""" Awards Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Disciplinary Record
# =============================================================================
def disciplinary_type():
""" Disciplinary Type Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
def disciplinary_action():
""" Disciplinary Action Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
| mit |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/Queue.py | 82 | 8577 | """A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
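# Illustrative usage sketch (stdlib threading only): one daemon worker drains
# the queue while the main thread blocks in join() until task_done() has been
# called once for every put().
if __name__ == '__main__':
    import threading
    def _demo_worker(q):
        while True:
            item = q.get()
            # ... process `item` here ...
            q.task_done()
    _q = Queue(maxsize=4)
    _t = threading.Thread(target=_demo_worker, args=(_q,))
    _t.setDaemon(True)
    _t.start()
    for _i in range(10):
        _q.put(_i)   # blocks whenever more than maxsize items are pending
    _q.join()        # returns once all ten items have been marked done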
| mit |
pombredanne/pythran | pythran/tests/cases/quicksort.py | 4 | 2085 | #pythran export quicksort(int list, int, int)
#runas quicksort(range(10),0,9)
def partition(list, start, end):
pivot = list[end] # Partition around the last value
bottom = start-1 # Start outside the area to be partitioned
top = end # Ditto
done = 0
while not done: # Until all elements are partitioned...
while not done: # Until we find an out of place element...
bottom = bottom+1 # ... move the bottom up.
if bottom == top: # If we hit the top...
done = 1 # ... we are done.
break
if list[bottom] > pivot: # Is the bottom out of place?
list[top] = list[bottom] # Then put it at the top...
break # ... and start searching from the top.
while not done: # Until we find an out of place element...
top = top-1 # ... move the top down.
if top == bottom: # If we hit the bottom...
done = 1 # ... we are done.
break
if list[top] < pivot: # Is the top out of place?
list[bottom] = list[top] # Then put it at the bottom...
break # ...and start searching from the bottom.
list[top] = pivot # Put the pivot in its place.
return top # Return the split point
def do_quicksort(list, start, end):
if start < end: # If there are two or more elements...
split = partition(list, start, end) # ... partition the sublist...
do_quicksort(list, start, split-1) # ... and sort both halves.
do_quicksort(list, split+1, end)
def quicksort(l,s,e):
do_quicksort(l,s,e)
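# Illustrative check mirroring the #runas line above: sort a reversed list in
# place through the exported entry point.
if __name__ == '__main__':
    data = list(range(10))[::-1]
    quicksort(data, 0, len(data) - 1)
    assert data == list(range(10))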
| bsd-3-clause |
ChaosJohn/autokey | src/lib/qtui/settingswidget.py | 50 | 4191 | #!/usr/bin/env python
# coding=UTF-8
#
# Generated by pykdeuic4 from settingswidget.ui on Sun Mar 4 11:39:40 2012
#
# WARNING! All changes to this file will be lost.
from PyKDE4 import kdecore
from PyKDE4 import kdeui
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_SettingsWidget(object):
def setupUi(self, SettingsWidget):
SettingsWidget.setObjectName(_fromUtf8("SettingsWidget"))
SettingsWidget.resize(316, 91)
self.gridLayout = QtGui.QGridLayout(SettingsWidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(SettingsWidget)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.abbrLabel = QtGui.QLabel(SettingsWidget)
self.abbrLabel.setObjectName(_fromUtf8("abbrLabel"))
self.gridLayout.addWidget(self.abbrLabel, 0, 1, 1, 1)
self.setAbbrButton = KPushButton(SettingsWidget)
self.setAbbrButton.setObjectName(_fromUtf8("setAbbrButton"))
self.gridLayout.addWidget(self.setAbbrButton, 0, 3, 1, 1)
self.clearAbbrButton = KPushButton(SettingsWidget)
self.clearAbbrButton.setObjectName(_fromUtf8("clearAbbrButton"))
self.gridLayout.addWidget(self.clearAbbrButton, 0, 4, 1, 1)
self.label_2 = QtGui.QLabel(SettingsWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.hotkeyLabel = QtGui.QLabel(SettingsWidget)
self.hotkeyLabel.setObjectName(_fromUtf8("hotkeyLabel"))
self.gridLayout.addWidget(self.hotkeyLabel, 1, 1, 1, 1)
self.setHotkeyButton = KPushButton(SettingsWidget)
self.setHotkeyButton.setObjectName(_fromUtf8("setHotkeyButton"))
self.gridLayout.addWidget(self.setHotkeyButton, 1, 3, 1, 1)
self.clearHotkeyButton = KPushButton(SettingsWidget)
self.clearHotkeyButton.setObjectName(_fromUtf8("clearHotkeyButton"))
self.gridLayout.addWidget(self.clearHotkeyButton, 1, 4, 1, 1)
self.label_3 = QtGui.QLabel(SettingsWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.windowFilterLabel = QtGui.QLabel(SettingsWidget)
self.windowFilterLabel.setObjectName(_fromUtf8("windowFilterLabel"))
self.gridLayout.addWidget(self.windowFilterLabel, 2, 1, 1, 1)
self.setFilterButton = KPushButton(SettingsWidget)
self.setFilterButton.setObjectName(_fromUtf8("setFilterButton"))
self.gridLayout.addWidget(self.setFilterButton, 2, 3, 1, 1)
self.clearFilterButton = KPushButton(SettingsWidget)
self.clearFilterButton.setObjectName(_fromUtf8("clearFilterButton"))
self.gridLayout.addWidget(self.clearFilterButton, 2, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)
self.retranslateUi(SettingsWidget)
QtCore.QMetaObject.connectSlotsByName(SettingsWidget)
def retranslateUi(self, SettingsWidget):
SettingsWidget.setWindowTitle(kdecore.i18n(_fromUtf8("Form")))
self.label.setText(kdecore.i18n(_fromUtf8("Abbreviations:")))
self.abbrLabel.setText(kdecore.i18n(_fromUtf8("$abbr")))
self.setAbbrButton.setText(kdecore.i18n(_fromUtf8("Set&")))
self.clearAbbrButton.setText(kdecore.i18n(_fromUtf8("Clear&")))
self.label_2.setText(kdecore.i18n(_fromUtf8("Hotkey:")))
self.hotkeyLabel.setText(kdecore.i18n(_fromUtf8("$hotkey")))
self.setHotkeyButton.setText(kdecore.i18n(_fromUtf8("Set&")))
self.clearHotkeyButton.setText(kdecore.i18n(_fromUtf8("Clear&")))
self.label_3.setText(kdecore.i18n(_fromUtf8("Window Filter:")))
self.windowFilterLabel.setText(kdecore.i18n(_fromUtf8("$filter")))
self.setFilterButton.setText(kdecore.i18n(_fromUtf8("Set&")))
self.clearFilterButton.setText(kdecore.i18n(_fromUtf8("Clear&")))
from PyKDE4.kdeui import KPushButton
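# Typical wiring for a pykdeuic4-generated class (sketch only; assumes a
# running QApplication from the PyQt4/PyKDE4 stack imported above):
#
#     app = QtGui.QApplication([])
#     widget = QtGui.QWidget()
#     ui = Ui_SettingsWidget()
#     ui.setupUi(widget)   # builds the labels/buttons onto `widget`
#     widget.show()
#     app.exec_()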
| gpl-3.0 |
scottmcmaster/catapult | tracing/tracing_build/check_common.py | 5 | 2612 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import tracing_project
FILE_GROUPS = ["tracing_css_files",
"tracing_js_html_files",
"tracing_img_files"]
def GetFileGroupFromFileName(filename):
extension = os.path.splitext(filename)[1]
return {
'.css': 'tracing_css_files',
'.html': 'tracing_js_html_files',
'.js': 'tracing_js_html_files',
'.png': 'tracing_img_files'
}[extension]
def CheckListedFilesSorted(src_file, group_name, listed_files):
sorted_files = sorted(listed_files)
if sorted_files != listed_files:
mismatch = ''
for i in range(len(listed_files)):
if listed_files[i] != sorted_files[i]:
mismatch = listed_files[i]
break
what_is = ' ' + '\n '.join(listed_files)
what_should_be = ' ' + '\n '.join(sorted_files)
return '''In group {0} from file {1}, filenames aren't sorted.
First mismatch:
{2}
Current listing:
{3}
Correct listing:
{4}\n\n'''.format(group_name, src_file, mismatch, what_is, what_should_be)
else:
return ''
def GetKnownFiles():
project = tracing_project.TracingProject()
vulcanizer = project.CreateVulcanizer()
m = vulcanizer.loader.LoadModule(
module_name='tracing.ui.extras.about_tracing.about_tracing')
absolute_filenames = m.GetAllDependentFilenamesRecursive(
include_raw_scripts=False)
return list(set([os.path.relpath(f, project.tracing_root_path)
for f in absolute_filenames]))
def CheckCommon(file_name, listed_files):
known_files = GetKnownFiles()
u = set(listed_files).union(set(known_files))
i = set(listed_files).intersection(set(known_files))
diff = list(u - i)
if len(diff) == 0:
return ''
error = 'Entries in ' + file_name + ' do not match files on disk:\n'
in_file_only = list(set(listed_files) - set(known_files))
in_known_only = list(set(known_files) - set(listed_files))
if len(in_file_only) > 0:
error += ' In file only:\n ' + '\n '.join(sorted(in_file_only))
if len(in_known_only) > 0:
if len(in_file_only) > 0:
error += '\n\n'
error += ' On disk only:\n ' + '\n '.join(sorted(in_known_only))
if in_file_only:
error += (
'\n\n'
' Note: only files actually used in about:tracing should\n'
' be listed in the build files. Try running \n'
' tracing/bin/update_gyp_and_gn\n'
' to update the files automatically.')
return error
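# Sketch of how these helpers combine (assumes a catapult checkout where
# tracing_project is importable; 'BUILD.gn' is only a label used in the
# error messages, not a required file name):
#
#     listed = ['a.css', 'b.html']   # e.g. parsed from the build file
#     error = CheckListedFilesSorted('BUILD.gn', 'tracing_css_files', listed)
#     error += CheckCommon('BUILD.gn', listed)
#     if error:
#         print error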
| bsd-3-clause |
alhashash/odoomrp-wip | product_suppliers_from_category/__openerp__.py | 28 | 1457 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Product Suppliers from Product Category",
"version": "1.0",
"depends": ["base", "product"],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"contributors": [
"Pedro Manuel Baeza Romero <[email protected]>",
"Ana Juaristi Olalde <[email protected]>",
"Mikel Arregi <[email protected]>"],
"category": "Product",
'data': ["wizard/add_suppliers_view.xml",
"views/product_view.xml"],
"installable": True,
"auto_install": False,
}
| agpl-3.0 |
trabacus-softapps/docker-edumedia | additional_addons/Edumedia_India/amount_to_text_softapps.py | 1 | 4624 | # --------------------------------------------------------------------------------------------------------------------------
#
# SOFTAPPS IT SOLUTIONS PVT LTD
# Malleshwaram, Bangalore
#
# - this file was added to convert the currency amount
# to its equivalent in words in accordance with Indian Rupees system
#
# - by deepa
#--------------------------------------------------------------------------------------------------------------------------
ones = {
0: '', 1:'One', 2:'Two', 3:'Three', 4:'Four', 5:'Five', 6:'Six', 7:'Seven', 8:'Eight', 9:'Nine',
 10:'Ten', 11:'Eleven', 12:'Twelve', 13:'Thirteen', 14:'Fourteen', 15:'Fifteen', 16:'Sixteen', 17:"Seventeen",18:"Eighteen",19:"Nineteen",
}
tens = {
1: 'Ten', 2: 'Twenty ', 3:'Thirty', 4:'Forty', 5:'Fifty', 6: 'Sixty', 7 : 'Seventy', 8:'Eighty' ,9: 'Ninety'}
hundred = {
0:'',1: 'One Hundred', 2: 'Two Hundred', 3: 'Three Hundred', 4 :'Four Hundred', 5: 'Five Hundred', 6: 'Six Hundred', 7 :'Seven Hundred', 8:' Eight Hundred ', 9:'Nine Hundred '
}
thousands ={
0:'',1: 'One Thousand'
}
lacs = {
0:'',1: 'One Lac'
}
crore = {
0:'',1: 'One Crore'
}
def _100_to_text(number):
if number in ones:
return ones[number]
else:
if number%10>0:
return tens[number / 10]+' '+ones[number % 10]
else:
return tens[number / 10]
def _1000_to_text(number):
d = _100_to_text(number % 100)
d2 = number/100
if d2>0 and d:
return hundred[d2]+' and '+d
elif d2>1 and not(d):
return hundred[d2]+'s'
else:
return hundred[d2] or d
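# Worked examples for the helpers above:
#   _100_to_text(54)   -> 'Fifty Four'
#   _1000_to_text(254) -> 'Two Hundred and Fifty Four'
#   _1000_to_text(40)  -> 'Forty'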
def _10000_to_text(number):
if number==0:
return 'zero'
part1 = _1000_to_text(number % 1000)
part2 = thousands.get(number / 1000, _1000_to_text(number / 1000)+' Thousand')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def _1000000_to_text(number):
if number==0:
return 'zero'
part1 = _10000_to_text(number % 100000)
part2 = lacs.get(number / 100000, _10000_to_text(number / 100000)+' Lacs')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def _100000000_to_text(number):
if number==0:
return 'zero'
part1 = _1000000_to_text(number % 10000000)
part2 = crore.get(number / 10000000, _1000000_to_text(number / 10000000)+' Crores')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def amount_to_text(number, currency):
lacs_number = int(number)
units_name = currency
if lacs_number > 1:
units_name += 's'
lacs = _1000000_to_text(lacs_number)
lacs = lacs_number and '%s %s' % (lacs, units_name) or ''
units_number = int(number * 10000) % 10000
units = _10000_to_text(units_number)
units = units_number and '%s %s' % (units, units_name) or ''
cents_number = int(number * 100) % 100
cents_name = (cents_number > 1) and 'cents' or 'cent'
cents = _100_to_text(cents_number)
cents = cents_number and '%s %s' % (cents.lower(), cents_name) or ''
if cents:
lacs += ' and %s' % (cents, )
return lacs
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'en' : amount_to_text}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
"""
Converts an integer to its textual representation, using the language set in the context if any.
Example:
1654: thousands six cent cinquante-quatre.
"""
from openerp import netsvc
if nbr > 10000000:
netsvc.Logger().notifyChannel('translate', netsvc.LOG_WARNING, _("Number too large '%d', can not translate it"))
return str(nbr)
if not _translate_funcs.has_key(lang):
netsvc.Logger().notifyChannel('translate', netsvc.LOG_WARNING, _("no translation function found for lang: '%s'" % (lang,)))
#TODO: (default should be en) same as above
lang = 'en'
return _translate_funcs[lang](nbr, currency)
if __name__=='__main__':
from sys import argv
 lang = 'en'
 if len(argv) < 2:
  for i in range(1,200):
   print i, ">>", amount_to_text(i, lang)
  for i in range(200,999999,139):
   print i, ">>", amount_to_text(i, lang)
 else:
  print amount_to_text(int(argv[1]), lang)

| agpl-3.0 |
eleftherioszisis/NeuroM | neurom/stats.py | 2 | 7253 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Statistical analysis helper functions
Nothing fancy. Just commonly used functions using scipy functionality.'''
from collections import namedtuple
from enum import Enum, unique
import numpy as np
from future.moves.collections import OrderedDict
from scipy import stats as _st
FitResults = namedtuple('FitResults', ['params', 'errs', 'type'])
@unique
class StatTests(Enum):
'''Enum representing valid statistical tests of scipy'''
ks = 1
wilcoxon = 2
ttest = 3
def get_test(stest):
'''Returns the correct stat test'''
sts = {StatTests.ks: 'ks_2samp', StatTests.wilcoxon: 'wilcoxon', StatTests.ttest: 'ttest_ind'}
if stest in StatTests:
return sts[stest]
raise TypeError('Statistical test not recognized. Choose from ks, wilcoxon, ttest.')
def fit_results_to_dict(fit_results, min_bound=None, max_bound=None):
'''Create a JSON-comparable dict from a FitResults object
Parameters:
fit_results (FitResults): object containing fit parameters,\
errors and type
min_bound: optional min value to add to dictionary if min isn't\
a fit parameter.
max_bound: optional max value to add to dictionary if max isn't\
a fit parameter.
Returns:
JSON-compatible dictionary with fit results
Note:
Supported fit types: 'norm', 'expon', 'uniform'
'''
type_map = {'norm': 'normal', 'expon': 'exponential', 'uniform': 'uniform'}
param_map = {'uniform': lambda p: [('min', p[0]), ('max', p[0] + p[1])],
'norm': lambda p: [('mu', p[0]), ('sigma', p[1])],
'expon': lambda p: [('lambda', 1.0 / p[1])]}
d = OrderedDict({'type': type_map[fit_results.type]})
d.update(param_map[fit_results.type](fit_results.params))
if min_bound is not None and 'min' not in d:
d['min'] = min_bound
if max_bound is not None and 'max' not in d:
d['max'] = max_bound
return d
def fit(data, distribution='norm'):
'''Calculate the parameters of a fit of a distribution to a data set
Parameters:
data: array of data points to be fitted
Options:
distribution (str): type of distribution to fit. Default 'norm'.
Returns:
FitResults object with fitted parameters, errors and distribution type
Note:
Uses Kolmogorov-Smirnov test to estimate distance and p-value.
'''
params = getattr(_st, distribution).fit(data)
return FitResults(params, _st.kstest(data, distribution, params), distribution)
def optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform')):
'''Calculate the parameters of a fit of different distributions to a data set
and returns the distribution of the minimal ks-distance.
Parameters:
data: array of data points to be fitted
Options:
distr_to_check: tuple of distributions to be checked
Returns:
FitResults object with fitted parameters, errors and distribution type\
of the fit with the smallest fit distance
Note:
Uses Kolmogorov-Smirnov test to estimate distance and p-value.
'''
fit_results = [fit(data, d) for d in distr_to_check]
return min(fit_results, key=lambda fit: fit.errs[0])
def scalar_stats(data, functions=('min', 'max', 'mean', 'std')):
'''Calculate the stats from the given numpy functions
Parameters:
data: array of data points to be used for the stats
Options:
functions: tuple of numpy stat functions to apply on data
Returns:
Dictionary with the name of the function as key and the result
as the respective value
'''
stats = {}
for func in functions:
stats[func] = getattr(np, func)(data)
return stats
def compare_two(data1, data2, test=StatTests.ks):
'''Compares two distributions of data
and assess two scores: a distance between them
and a probability they are drawn from the same
distribution.
Parameters:
data1: numpy array of dataset 1
data2: numpy array of dataset 2
test: Stat_tests\
Defines the statistical test to be used, based\
on the scipy available modules.\
Accepted tests: ks_2samp, wilcoxon, ttest
Returns:
dist: float\
High numbers define high dissimilarity between the two datasets
p-value: float\
Small numbers define high probability the data come from\
same dataset.
'''
results = getattr(_st, get_test(test))(data1, data2)
Stats = namedtuple('Stats', ['dist', 'pvalue'])
return Stats(*results)
def total_score(paired_dats, p=2, test=StatTests.ks):
'''Calculates the p-norm of the distances that have been calculated from the statistical
test that has been applied on all the paired datasets.
Parameters:
paired_dats: a list of tuples or where each tuple
contains the paired data lists from two datasets
Options:
p : integer that defines the order of p-norm
test: Stat_tests\
Defines the statistical test to be used, based\
on the scipy available modules.\
Accepted tests: ks_2samp, wilcoxon, ttest
Returns:
A float corresponding to the p-norm of the distances that have
been calculated. 0 corresponds to high similarity while 1 to low.
'''
scores = np.array([compare_two(fL1, fL2, test=test).dist for fL1, fL2 in paired_dats])
return np.linalg.norm(scores, p)
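# Minimal demonstration of the helpers above on synthetic data (needs only
# the numpy/scipy imports already present in this module):
if __name__ == '__main__':
    sample = np.random.normal(loc=2.0, scale=0.5, size=1000)
    print(fit(sample))                        # FitResults for a normal fit
    print(optimal_distribution(sample).type)  # typically 'norm' for this data
    print(scalar_stats(sample))               # min/max/mean/std dictionary
    print(compare_two(sample, sample + 0.1, test=StatTests.ks))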
| bsd-3-clause |
sunlightlabs/waterfall | waterfall/cascading_update.py | 2 | 1975 | from django.db import connection, models, transaction
from django.apps.registry import apps
from django.db.utils import IntegrityError
class CascadingUpdate():
def get_fk_list(self, obj):
for m in apps.get_models(include_auto_created=True):
for f in m._meta.get_all_field_names():
field = m._meta.get_field(f)
if (field.many_to_many or field.one_to_many):
continue
try:
relation = field.rel
except models.fields.FieldDoesNotExist:
continue
if relation:
if relation.to == obj.__class__:
yield(m, f)
def get_related_objects(self, obj, related_models):
#returns an iterator of tuples (object, keyname)
app_name = obj._meta.app_label
for model_type, keyname in related_models:
related_objects = model_type.objects.filter(**{keyname:obj})
for r in related_objects:
yield (r, keyname)
def replace_related_keys(self, related_objects, obj, new_obj):
if obj.__class__ != new_obj.__class__:
raise AssertionError("Cannot merge objects of different classes")
for related_obj, keyname in related_objects:
#get new_obj's related objects of this type
duplicate = False
with transaction.atomic():
setattr(related_obj, keyname, new_obj)
try:
related_obj.save()
except IntegrityError:
duplicate = True
if duplicate:
related_obj.delete()
def merge_foreign_keys(self, obj_to_remove, persistent_obj):
fk_relations = self.get_fk_list(obj_to_remove)
related_objects = self.get_related_objects(obj_to_remove, fk_relations)
self.replace_related_keys(related_objects, obj_to_remove, persistent_obj)
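# Intended usage sketch (assumes two saved instances of the same Django
# model, e.g. a duplicate row and the row to keep): every foreign key that
# pointed at `duplicate` is repointed at `canonical`, and rows that would
# violate a unique constraint are deleted instead.
#
#     merger = CascadingUpdate()
#     merger.merge_foreign_keys(duplicate, canonical)
#     duplicate.delete()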
| bsd-3-clause |
looeee/mivart | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
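# Short usage sketch (standard library only): emit a rule and a build edge to
# an in-memory buffer; with a real file object this writes build.ninja.
if __name__ == '__main__':
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO        # Python 3
    buf = StringIO()
    n = Writer(buf)
    n.comment('example build file')
    n.rule('cc', command='gcc -c $in -o $out', description='CC $out')
    n.build('foo.o', 'cc', 'foo.c')
    n.default('foo.o')
    print(buf.getvalue())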
| mpl-2.0 |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/simplify/tests/test_cse.py | 3 | 8131 | import itertools
from sympy import (Add, Pow, Symbol, exp, sqrt, symbols, sympify, cse,
Matrix, S, cos, sin, Eq, Function, Tuple, RootOf)
from sympy.simplify.cse_opts import sub_pre, sub_post
from sympy.functions.special.hyper import meijerg
from sympy.simplify import cse_main, cse_opts
from sympy.utilities.pytest import XFAIL
w, x, y, z = symbols('w,x,y,z')
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11 = symbols('x:12')
def test_numbered_symbols():
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('y%s' % i) for i in range(0, 10)]
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 10, 20)) == [Symbol('y%s' % i) for i in range(10, 20)]
ns = cse_main.numbered_symbols()
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('x%s' % i) for i in range(0, 10)]
# Dummy "optimization" functions for testing.
def opt1(expr):
return expr + y
def opt2(expr):
return expr*z
def test_preprocess_for_cse():
assert cse_main.preprocess_for_cse(x, [(opt1, None)]) == x + y
assert cse_main.preprocess_for_cse(x, [(None, opt1)]) == x
assert cse_main.preprocess_for_cse(x, [(None, None)]) == x
assert cse_main.preprocess_for_cse(x, [(opt1, opt2)]) == x + y
assert cse_main.preprocess_for_cse(
x, [(opt1, None), (opt2, None)]) == (x + y)*z
def test_postprocess_for_cse():
assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x
assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y
assert cse_main.postprocess_for_cse(x, [(None, None)]) == x
assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z
# Note the reverse order of application.
assert cse_main.postprocess_for_cse(
x, [(None, opt1), (None, opt2)]) == x*z + y
def test_cse_single():
# Simple substitution.
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse([e], optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_cse_single2():
# Simple substitution, test for being able to pass the expression directly
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse(e, optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
assert isinstance(cse(Matrix([[1]]))[1][0], Matrix)
def test_cse_not_possible():
# No substitution possible.
e = Add(x, y)
substs, reduced = cse([e], optimizations=[])
assert substs == []
assert reduced == [x + y]
# issue 3230
eq = (meijerg((1, 2), (y, 4), (5,), [], x) +
meijerg((1, 3), (y, 4), (5,), [], x))
assert cse(eq) == ([], [eq])
def test_nested_substitution():
# Substitution within a substitution.
e = Add(Pow(w*x + y, 2), sqrt(w*x + y))
substs, reduced = cse([e], optimizations=[])
assert substs == [(x0, w*x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_subtraction_opt():
# Make sure subtraction is optimized.
e = (x - y)*(z - y) + exp((x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [-x0 + exp(-x0)]
assert cse(-(x - y)*(z - y) + exp(-(x - y)*(z - y))) == \
([(x0, (x - y)*(y - z))], [x0 + exp(x0)])
# issue 978
n = -1 + 1/x
e = n/x/(-n)**2 - 1/n/x
assert cse(e) == ([], [0])
def test_multiple_expressions():
e1 = (x + y)*z
e2 = (x + y)*w
substs, reduced = cse([e1, e2], optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [x0*z, x0*w]
l = [w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [z + x*x0, x0]
l = [w*x*y, w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [x1, x1 + z, x0]
l = [(x - z)*(y - z), x - z, y - z]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
substitutions = [(x0, x - z), (x1, y - z)]
assert substs == substitutions
assert rsubsts == substitutions
assert reduced == [x0*x1, x0, x1]
l = [w*y + w + x + y + z, w*x*y]
assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0])
assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0])
assert cse([x + y, x + z]) == ([], [x + y, x + z])
assert cse([x*y, z + x*y, x*y*z + 3]) == \
([(x0, x*y)], [x0, z + x0, 3 + x0*z])
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
l = [A*B*C, A*B]
assert cse(l) == ([(x0, A*B)], [x0*C, x0])
@XFAIL
def test_powers():
assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0])
def test_issues_1399():
assert cse(w/(x - y) + z/(y - x)) == ([], [(w - z)/(x - y)])
def test_issue_921():
assert cse(
x**5 + x**4 + x**3 + x**2) == ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)])
def test_issue_1104():
assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0])
def test_issue_3164():
e = Eq(x*(-x + 1) + x*(x - 1), 0)
assert cse(e) == ([], [True])
def test_dont_cse_tuples():
from sympy import Subs
f = Function("f")
g = Function("g")
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
assert name_val == []
assert expr == (Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, x + y))
+ Subs(g(x, y), (x, y), (0, x + y)))
assert name_val == [(x0, x + y)]
assert expr == Subs(f(x, y), (x, y), (0, x0)) + \
Subs(g(x, y), (x, y), (0, x0))
def test_pow_invpow():
assert cse(1/x**2 + x**2) == \
([(x0, x**2)], [x0 + 1/x0])
assert cse(x**2 + (1 + 1/x**2)/x**2) == \
([(x0, x**2)], [x0 + (1 + 1/x0)/x0])
assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \
([(x0, x**2)], [x0*(1 + 1/x0) + 1/x0])
assert cse(cos(1/x**2) + sin(1/x**2)) == \
([(x0, x**2)], [sin(1/x0) + cos(1/x0)])
assert cse(cos(x**2) + sin(x**2)) == \
([(x0, x**2)], [sin(x0) + cos(x0)])
assert cse(y/(2 + x**2) + z/x**2/y) == \
([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)])
assert cse(exp(x**2) + x**2*cos(1/x**2)) == \
([(x0, x**2)], [x0*cos(1/x0) + exp(x0)])
assert cse((1 + 1/x**2)/x**2) == \
([(x0, x**2)], [(1 + 1/x0)/x0])
assert cse(x**(2*y) + x**(-2*y)) == \
([(x0, x**(2*y))], [x0 + 1/x0])
def test_postprocess():
eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)],
postprocess=cse_main.cse_separate) == \
[[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)],
[x0 + exp(x0/x1) + cos(x1), x2 - 3, x0*x2]]
def test_issue1400():
# previously, this gave 16 constants
from sympy.abc import a, b
B = Function('B')
G = Function('G')
t = Tuple(*
(a, a + S(1)/2, 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a -
b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1),
sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b,
sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1,
sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1),
(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1,
sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, S(1)/2, z/2, -b + 1, -2*a + b,
-2*a))
c = cse(t)
ans = (
[(x0, sqrt(z)), (x1, -b + 1), (x2, B(b, x0)), (x3, B(-x1, x0)), (x4,
2*a + x1), (x5, B(x4 - 1, x0)), (x6, B(x4, x0)), (x7, (x0/2)**(-2*a +
1)*G(b)*G(x4))], [(a, a + S(1)/2, 2*a, b, x4, x3*x5*x7, x0*x2*x5*x7,
x0*x3*x6*x7, x2*x6*x7, 1, 0, S(1)/2, z/2, x1, -x4 + 1, -2*a)])
assert ans == c
def test_issue_3070():
r = RootOf(x**6 - 4*x**5 - 2, 1)
assert cse(r) == ([], [r])
# and a check that the right thing is done with the new
# mechanism
assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y
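def test_simple_shared_subexpression():
    # Small standalone illustration (not an upstream regression test): a
    # subexpression shared by two inputs is pulled out exactly once.
    substs, reduced = cse([x**2 + y, x**2 + z], optimizations=[])
    assert substs == [(x0, x**2)]
    assert reduced == [x0 + y, x0 + z]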
| gpl-3.0 |
repotvsupertuga/repo | script.module.urlresolver/lib/urlresolver/plugins/lib/jjdecode.py | 35 | 13185 | #!/usr/bin/env python
#
# Python version of the jjdecode function written by Syed Zainudeen
# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html
#
# +NCR/CRC! [ReVeRsEr] - [email protected]
# Thanks to Jose Miguel Esparza (@EternalTodo) for the final push to make it work!
#
import re
class JJDecoder(object):
def __init__(self, jj_encoded_data):
self.encoded_str = jj_encoded_data
def clean(self):
return re.sub('^\s+|\s+$', '', self.encoded_str)
def checkPalindrome(self, Str):
startpos = -1
endpos = -1
gv, gvl = -1, -1
index = Str.find('"\'\\"+\'+",')
if index == 0:
startpos = Str.find('$$+"\\""+') + 8
endpos = Str.find('"\\"")())()')
gv = Str[Str.find('"\'\\"+\'+",') + 9:Str.find('=~[]')]
gvl = len(gv)
else:
gv = Str[0:Str.find('=')]
gvl = len(gv)
startpos = Str.find('"\\""+') + 5
endpos = Str.find('"\\"")())()')
return (startpos, endpos, gv, gvl)
def decode(self):
self.encoded_str = self.clean()
startpos, endpos, gv, gvl = self.checkPalindrome(self.encoded_str)
if startpos == endpos:
raise Exception('No data!')
data = self.encoded_str[startpos:endpos]
b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+']
str_l = '(![]+"")[' + gv + '._$_]+'
str_o = gv + '._$+'
str_t = gv + '.__+'
str_u = gv + '._+'
str_hex = gv + '.'
str_s = '"'
gvsig = gv + '.'
str_quote = '\\\\\\"'
str_slash = '\\\\\\\\'
str_lower = '\\\\"+'
str_upper = '\\\\"+' + gv + '._+'
str_end = '"+'
out = ''
while data != '':
# l o t u
if data.find(str_l) == 0:
data = data[len(str_l):]
out += 'l'
continue
elif data.find(str_o) == 0:
data = data[len(str_o):]
out += 'o'
continue
elif data.find(str_t) == 0:
data = data[len(str_t):]
out += 't'
continue
elif data.find(str_u) == 0:
data = data[len(str_u):]
out += 'u'
continue
# 0123456789abcdef
if data.find(str_hex) == 0:
data = data[len(str_hex):]
for i in range(len(b)):
if data.find(b[i]) == 0:
data = data[len(b[i]):]
out += '%x' % i
break
continue
# start of s block
if data.find(str_s) == 0:
data = data[len(str_s):]
# check if "R
if data.find(str_upper) == 0: # r4 n >= 128
data = data[len(str_upper):] # skip sig
ch_str = ''
for i in range(2): # shouldn't be more than 2 hex chars
# gv + "."+b[ c ]
if data.find(gvsig) == 0:
data = data[len(gvsig):]
for k in range(len(b)): # for every entry in b
if data.find(b[k]) == 0:
data = data[len(b[k]):]
ch_str = '%x' % k
break
else:
break
out += chr(int(ch_str, 16))
continue
elif data.find(str_lower) == 0: # r3 check if "R // n < 128
data = data[len(str_lower):] # skip sig
ch_str = ''
ch_lotux = ''
temp = ''
b_checkR1 = 0
for j in range(3): # shouldn't be more than 3 octal chars
if j > 1: # lotu check
if data.find(str_l) == 0:
data = data[len(str_l):]
ch_lotux = 'l'
break
elif data.find(str_o) == 0:
data = data[len(str_o):]
ch_lotux = 'o'
break
elif data.find(str_t) == 0:
data = data[len(str_t):]
ch_lotux = 't'
break
elif data.find(str_u) == 0:
data = data[len(str_u):]
ch_lotux = 'u'
break
# gv + "."+b[ c ]
if data.find(gvsig) == 0:
temp = data[len(gvsig):]
for k in range(8): # for every entry in b octal
if temp.find(b[k]) == 0:
if int(ch_str + str(k), 8) > 128:
b_checkR1 = 1
break
ch_str += str(k)
data = data[len(gvsig):] # skip gvsig
data = data[len(b[k]):]
break
if b_checkR1 == 1:
if data.find(str_hex) == 0: # 0123456789abcdef
data = data[len(str_hex):]
# check every element of hex decode string for a match
for i in range(len(b)):
if data.find(b[i]) == 0:
data = data[len(b[i]):]
ch_lotux = '%x' % i
break
break
else:
break
out += chr(int(ch_str, 8)) + ch_lotux
continue
else: # "S ----> "SR or "S+
# if there is, loop s until R 0r +
# if there is no matching s block, throw error
match = 0
n = None
# searching for matching pure s block
while True:
n = ord(data[0])
if data.find(str_quote) == 0:
data = data[len(str_quote):]
out += '"'
match += 1
continue
elif data.find(str_slash) == 0:
data = data[len(str_slash):]
out += '\\'
match += 1
continue
elif data.find(str_end) == 0: # reached end off S block ? +
if match == 0:
                                raise Exception('+ no match S block: ' + data)
data = data[len(str_end):]
break # step out of the while loop
elif data.find(str_upper) == 0: # r4 reached end off S block ? - check if "R n >= 128
if match == 0:
                                raise Exception('no match S block n>128: ' + data)
data = data[len(str_upper):] # skip sig
ch_str = ''
ch_lotux = ''
for j in range(10): # shouldn't be more than 10 hex chars
if j > 1: # lotu check
if data.find(str_l) == 0:
data = data[len(str_l):]
ch_lotux = 'l'
break
elif data.find(str_o) == 0:
data = data[len(str_o):]
ch_lotux = 'o'
break
elif data.find(str_t) == 0:
data = data[len(str_t):]
ch_lotux = 't'
break
elif data.find(str_u) == 0:
data = data[len(str_u):]
ch_lotux = 'u'
break
# gv + "."+b[ c ]
if data.find(gvsig) == 0:
data = data[len(gvsig):] # skip gvsig
for k in range(len(b)): # for every entry in b
if data.find(b[k]) == 0:
data = data[len(b[k]):]
ch_str += '%x' % k
break
else:
break # done
out += chr(int(ch_str, 16))
break # step out of the while loop
elif data.find(str_lower) == 0: # r3 check if "R // n < 128
if match == 0:
                                raise Exception('no match S block n<128: ' + data)
data = data[len(str_lower):] # skip sig
ch_str = ''
ch_lotux = ''
temp = ''
b_checkR1 = 0
for j in range(3): # shouldn't be more than 3 octal chars
if j > 1: # lotu check
if data.find(str_l) == 0:
data = data[len(str_l):]
ch_lotux = 'l'
break
elif data.find(str_o) == 0:
data = data[len(str_o):]
ch_lotux = 'o'
break
elif data.find(str_t) == 0:
data = data[len(str_t):]
ch_lotux = 't'
break
elif data.find(str_u) == 0:
data = data[len(str_u):]
ch_lotux = 'u'
break
# gv + "."+b[ c ]
if data.find(gvsig) == 0:
temp = data[len(gvsig):]
for k in range(8): # for every entry in b octal
if temp.find(b[k]) == 0:
if int(ch_str + str(k), 8) > 128:
b_checkR1 = 1
break
ch_str += str(k)
data = data[len(gvsig):] # skip gvsig
data = data[len(b[k]):]
break
if b_checkR1 == 1:
if data.find(str_hex) == 0: # 0123456789abcdef
data = data[len(str_hex):]
# check every element of hex decode string for a match
for i in range(len(b)):
if data.find(b[i]) == 0:
data = data[len(b[i]):]
ch_lotux = '%x' % i
break
else:
break
out += chr(int(ch_str, 8)) + ch_lotux
break # step out of the while loop
elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or (0x5b <= n and n <= 0x60) or (0x7b <= n and n <= 0x7f):
out += data[0]
data = data[1:]
match += 1
continue
print 'No match : ' + data
break
return out
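# Usage sketch: pass the complete jjencoded payload (the "$=~[];$={...}..."
# blob) in as a string; decode() returns the recovered JavaScript source.
# The file name below is a placeholder, not part of this module.
#
#     encoded = open('payload.js').read()
#     print JJDecoder(encoded).decode()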
| gpl-2.0 |
multikatt/CouchPotatoServer | libs/guessit/transfo/guess_bonus_features.py | 150 | 2155 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import found_property
import logging
log = logging.getLogger(__name__)
def process(mtree):
def previous_group(g):
for leaf in mtree.unidentified_leaves()[::-1]:
if leaf.node_idx < g.node_idx:
return leaf
def next_group(g):
for leaf in mtree.unidentified_leaves():
if leaf.node_idx > g.node_idx:
return leaf
def same_group(g1, g2):
return g1.node_idx[:2] == g2.node_idx[:2]
bonus = [ node for node in mtree.leaves() if 'bonusNumber' in node.guess ]
if bonus:
bonusTitle = next_group(bonus[0])
if same_group(bonusTitle, bonus[0]):
found_property(bonusTitle, 'bonusTitle', 0.8)
filmNumber = [ node for node in mtree.leaves()
if 'filmNumber' in node.guess ]
if filmNumber:
filmSeries = previous_group(filmNumber[0])
found_property(filmSeries, 'filmSeries', 0.9)
title = next_group(filmNumber[0])
found_property(title, 'title', 0.9)
season = [ node for node in mtree.leaves() if 'season' in node.guess ]
if season and 'bonusNumber' in mtree.info:
series = previous_group(season[0])
if same_group(series, season[0]):
found_property(series, 'series', 0.9)
| gpl-3.0 |
mattnenterprise/servo | tests/wpt/web-platform-tests/tools/third_party/pluggy/testing/test_tracer.py | 43 | 2110 |
from pluggy import _TagTracer
def test_simple():
rootlogger = _TagTracer()
log = rootlogger.get("pytest")
log("hello")
out = []
rootlogger.setwriter(out.append)
log("world")
assert len(out) == 1
assert out[0] == "world [pytest]\n"
sublog = log.get("collection")
sublog("hello")
assert out[1] == "hello [pytest:collection]\n"
def test_indent():
rootlogger = _TagTracer()
log = rootlogger.get("1")
out = []
log.root.setwriter(lambda arg: out.append(arg))
log("hello")
log.root.indent += 1
log("line1")
log("line2")
log.root.indent += 1
log("line3")
log("line4")
log.root.indent -= 1
log("line5")
log.root.indent -= 1
log("last")
assert len(out) == 7
names = [x[:x.rfind(' [')] for x in out]
assert names == [
'hello', ' line1', ' line2',
' line3', ' line4', ' line5', 'last']
def test_readable_output_dictargs():
rootlogger = _TagTracer()
out = rootlogger.format_message(['test'], [1])
assert out == ['1 [test]\n']
out2 = rootlogger.format_message(['test'], ['test', {'a': 1}])
assert out2 == [
'test [test]\n',
' a: 1\n'
]
def test_setprocessor():
rootlogger = _TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
assert log2.tags == tuple("12")
out = []
rootlogger.setprocessor(tuple("12"), lambda *args: out.append(args))
log("not seen")
log2("seen")
assert len(out) == 1
tags, args = out[0]
assert "1" in tags
assert "2" in tags
assert args == ("seen",)
l2 = []
rootlogger.setprocessor("1:2", lambda *args: l2.append(args))
log2("seen")
tags, args = l2[0]
assert args == ("seen",)
def test_setmyprocessor():
rootlogger = _TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
out = []
log2.setmyprocessor(lambda *args: out.append(args))
log("not seen")
assert not out
log2(42)
assert len(out) == 1
tags, args = out[0]
assert "1" in tags
assert "2" in tags
assert args == (42,)
| mpl-2.0 |
caidongyun/Dato-Core | src/unity/python_deps/psutil/test/_bsd.py | 21 | 7771 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: add test for comparing connections with 'sockstat' cmd
"""BSD specific tests. These are implicitly run by test_psutil.py."""
import subprocess
import time
import sys
import os
import psutil
from psutil._compat import PY3
from test_psutil import (TOLERANCE, sh, get_test_subprocess, which,
retry_before_failing, reap_children, unittest)
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
if os.getuid() == 0: # muse requires root privileges
MUSE_AVAILABLE = which('muse')
else:
MUSE_AVAILABLE = False
def sysctl(cmdline):
"""Expects a sysctl command with an argument and parse the result
returning only the value of interest.
"""
result = sh("sysctl " + cmdline)
result = result[result.find(": ") + 2:]
try:
return int(result)
except ValueError:
return result
def muse(field):
"""Thin wrapper around 'muse' cmdline utility."""
out = sh('muse')
for line in out.split('\n'):
if line.startswith(field):
break
else:
raise ValueError("line not found")
return int(line.split()[1])
class BSDSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_boot_time(self):
s = sysctl('sysctl kern.boottime')
s = s[s.find(" sec = ") + 7:]
s = s[:s.find(',')]
btime = int(s)
self.assertEqual(btime, psutil.boot_time())
def test_process_create_time(self):
cmdline = "ps -o lstart -p %s" % self.pid
p = subprocess.Popen(cmdline, shell=1, stdout=subprocess.PIPE)
output = p.communicate()[0]
if PY3:
output = str(output, sys.stdout.encoding)
start_ps = output.replace('STARTED', '').strip()
start_psutil = psutil.Process(self.pid).create_time()
start_psutil = time.strftime("%a %b %e %H:%M:%S %Y",
time.localtime(start_psutil))
self.assertEqual(start_ps, start_psutil)
def test_disks(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -k "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total = int(total) * 1024
used = int(used) * 1024
free = int(free) * 1024
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(part.device, dev)
self.assertEqual(usage.total, total)
            # 10 MB tolerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_memory_maps(self):
out = sh('procstat -v %s' % self.pid)
maps = psutil.Process(self.pid).memory_maps(grouped=False)
lines = out.split('\n')[1:]
while lines:
line = lines.pop()
fields = line.split()
_, start, stop, perms, res = fields[:5]
map = maps.pop()
self.assertEqual("%s-%s" % (start, stop), map.addr)
self.assertEqual(int(res), map.rss)
if not map.path.startswith('['):
self.assertEqual(fields[10], map.path)
# --- virtual_memory(); tests against sysctl
def test_vmem_total(self):
syst = sysctl("sysctl vm.stats.vm.v_page_count") * PAGESIZE
self.assertEqual(psutil.virtual_memory().total, syst)
@retry_before_failing()
def test_vmem_active(self):
syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().active, syst,
delta=TOLERANCE)
@retry_before_failing()
def test_vmem_inactive(self):
syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().inactive, syst,
delta=TOLERANCE)
@retry_before_failing()
def test_vmem_wired(self):
syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().wired, syst,
delta=TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().cached, syst,
delta=TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().free, syst,
delta=TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
syst = sysctl("vfs.bufspace")
self.assertAlmostEqual(psutil.virtual_memory().buffers, syst,
delta=TOLERANCE)
# --- virtual_memory(); tests against muse
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
def test_total(self):
num = muse('Total')
self.assertEqual(psutil.virtual_memory().total, num)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_active(self):
num = muse('Active')
self.assertAlmostEqual(psutil.virtual_memory().active, num,
delta=TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_inactive(self):
num = muse('Inactive')
self.assertAlmostEqual(psutil.virtual_memory().inactive, num,
delta=TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_wired(self):
num = muse('Wired')
self.assertAlmostEqual(psutil.virtual_memory().wired, num,
delta=TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_cached(self):
num = muse('Cache')
self.assertAlmostEqual(psutil.virtual_memory().cached, num,
delta=TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_free(self):
num = muse('Free')
self.assertAlmostEqual(psutil.virtual_memory().free, num,
delta=TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_buffers(self):
num = muse('Buffer')
self.assertAlmostEqual(psutil.virtual_memory().buffers, num,
delta=TOLERANCE)
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(BSDSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
| agpl-3.0 |
certik/hermes1d-llnl | examples/system_neutronics_eigenvalue/plot.py | 3 | 1652 | import matplotlib.pyplot as plt
import numpy as np
import sys
fig = plt.figure()
# one axes for each group
ax1 = fig.add_subplot(211)
ax1.grid(True)
ax1.axhline(0, color='black', lw=2)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.grid(True)
ax2.axhline(0, color='black', lw=2)
# computed solution
# plot solutions corresponding to power method iterations specified as arguments
# (enter 'final' as an argument to plot the converged solution)
for arg in sys.argv[1:]:
sarg = str(arg)
if sarg == 'final':
fname = "solution"
else:
fname = "solution_"+sarg
# group 1
data = np.loadtxt(fname+".gp_0")
x = data[:, 0]
y = data[:, 1]
y0 = y[0] #for normalization of the analytic solution
ax1.plot(x,y,label='iteration '+sarg)
# group 2
data = np.loadtxt(fname+".gp_1")
x = data[:, 0]
y = data[:, 1]
ax2.plot(x,y,label='iteration '+sarg)
# analytic solution
# group 1
# region 1
x1 = np.arange(0, 40, 0.05)
y1 = 0.65259*np.cos(0.023596*x1)-0.0012912*np.cosh(0.11331*x1)
# region 2
x2 = np.arange(40, 70, 0.05)
y2 = 0.12628*np.sinh(0.055596*(70-x2))
# normalization with the same condition as the computed solution
A = y0/y1[0]
# plot normalized solution in both regions
ax1.plot(np.concatenate((x1,x2)), A*np.concatenate((y1,y2)), 'k--', label='reference')
# group 2
# region 1
x1 = np.arange(0, 40, 0.05)
y1 = 0.25577*np.cos(0.023596*x1)+0.0013523*np.cosh(0.11331*x1)
# region 2
x2 = np.arange(40, 70, 0.05)
y2 = np.sinh(0.0212*(70-x2))-0.18263*np.sinh(0.055596*(70-x2))
ax2.plot(np.concatenate((x1,x2)), A*np.concatenate((y1,y2)), 'k--', label='reference')
plt.axes(ax1)
plt.legend()
plt.axes(ax2)
plt.legend()
plt.show()
| bsd-3-clause |
rednaxelafx/apache-spark | examples/src/main/python/ml/feature_hasher_example.py | 27 | 1521 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import FeatureHasher
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("FeatureHasherExample")\
.getOrCreate()
# $example on$
dataset = spark.createDataFrame([
(2.2, True, "1", "foo"),
(3.3, False, "2", "bar"),
(4.4, False, "3", "baz"),
(5.5, False, "4", "foo")
], ["real", "bool", "stringNum", "string"])
hasher = FeatureHasher(inputCols=["real", "bool", "stringNum", "string"],
outputCol="features")
featurized = hasher.transform(dataset)
featurized.show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 |
cle1109/scot | examples/misc/pca.py | 4 | 1411 | # Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2016 SCoT Development Team
"""This example demonstrates that it is possible to reconstruct sources even if
we include PCA in the process.
"""
from __future__ import print_function
import numpy as np
from scot.pca import pca
from scot.var import VAR
# Set random seed for repeatable results
np.random.seed(42)
# Generate data from a VAR(1) process
model0 = VAR(1)
model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
x = model0.simulate(10000).squeeze()
# Transform data with PCA
w, v = pca(x)
y = np.dot(w.T, x)
# Verify that transformed data y is decorrelated
print('Covariance of x:\n', np.cov(x.squeeze()))
print('\nCovariance of y:\n', np.cov(y.squeeze()))
model1, model2 = VAR(1), VAR(1)
# Fit model1 to the original data
model1.fit(x)
# Fit model2 to the PCA transformed data
model2.fit(y)
# The coefficients estimated on x (2) are exactly equal to the back-transformed
# coefficients estimated on y (4)
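# Sketch of why this works, assuming the PCA weight matrix w is orthogonal (w.T is its
# inverse): with y[t] = w.T @ x[t] and x[t] = A @ x[t-1] + e[t], it follows that
# y[t] = (w.T @ A @ w) @ y[t-1] + w.T @ e[t], so the VAR fitted on y estimates
# w.T @ A @ w, and w @ coef @ w.T maps those coefficients back onto A.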
print('\n(1) True VAR coefficients:\n', model0.coef)
print('\n(2) VAR coefficients estimated on x:\n', model1.coef)
print('\n(3) VAR coefficients estimated on y:\n', model2.coef)
print('\n(4) VAR coefficients estimated on y and transformed back:\n',
w.dot(model2.coef).dot(w.T))
print('\n(5) Check if (2) and (4) are equal:\n',
np.isclose(model1.coef, w.dot(model2.coef).dot(w.T)))
| mit |
sdague/home-assistant | homeassistant/components/nanoleaf/light.py | 16 | 7708 | """Support for Nanoleaf Lights."""
import logging
from pynanoleaf import Nanoleaf, Unavailable
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color as color_util
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Nanoleaf"
DATA_NANOLEAF = "nanoleaf"
CONFIG_FILE = ".nanoleaf.conf"
ICON = "mdi:triangle-outline"
SUPPORT_NANOLEAF = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_COLOR
| SUPPORT_TRANSITION
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nanoleaf light."""
if DATA_NANOLEAF not in hass.data:
hass.data[DATA_NANOLEAF] = {}
token = ""
if discovery_info is not None:
host = discovery_info["host"]
name = discovery_info["hostname"]
# if device already exists via config, skip discovery setup
if host in hass.data[DATA_NANOLEAF]:
return
_LOGGER.info("Discovered a new Nanoleaf: %s", discovery_info)
conf = load_json(hass.config.path(CONFIG_FILE))
if conf.get(host, {}).get("token"):
token = conf[host]["token"]
else:
host = config[CONF_HOST]
name = config[CONF_NAME]
token = config[CONF_TOKEN]
nanoleaf_light = Nanoleaf(host)
if not token:
token = nanoleaf_light.request_token()
if not token:
_LOGGER.error(
"Could not generate the auth token, did you press "
"and hold the power button on %s"
"for 5-7 seconds?",
name,
)
return
conf = load_json(hass.config.path(CONFIG_FILE))
conf[host] = {"token": token}
save_json(hass.config.path(CONFIG_FILE), conf)
nanoleaf_light.token = token
try:
nanoleaf_light.available
except Unavailable:
_LOGGER.error("Could not connect to Nanoleaf Light: %s on %s", name, host)
return
hass.data[DATA_NANOLEAF][host] = nanoleaf_light
add_entities([NanoleafLight(nanoleaf_light, name)], True)
class NanoleafLight(LightEntity):
"""Representation of a Nanoleaf Light."""
def __init__(self, light, name):
"""Initialize an Nanoleaf light."""
self._available = True
self._brightness = None
self._color_temp = None
self._effect = None
self._effects_list = None
self._light = light
self._name = name
self._hs_color = None
self._state = None
@property
def available(self):
"""Return availability."""
return self._available
@property
def brightness(self):
"""Return the brightness of the light."""
if self._brightness is not None:
return int(self._brightness * 2.55)
return None
@property
def color_temp(self):
"""Return the current color temperature."""
if self._color_temp is not None:
return color_util.color_temperature_kelvin_to_mired(self._color_temp)
return None
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._effects_list
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 154
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 833
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def hs_color(self):
"""Return the color in HS."""
return self._hs_color
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_NANOLEAF
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
hs_color = kwargs.get(ATTR_HS_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
effect = kwargs.get(ATTR_EFFECT)
transition = kwargs.get(ATTR_TRANSITION)
if hs_color:
hue, saturation = hs_color
self._light.hue = int(hue)
self._light.saturation = int(saturation)
if color_temp_mired:
self._light.color_temperature = mired_to_kelvin(color_temp_mired)
if transition:
if brightness: # tune to the required brightness in n seconds
self._light.brightness_transition(
int(brightness / 2.55), int(transition)
)
else: # If brightness is not specified, assume full brightness
self._light.brightness_transition(100, int(transition))
else: # If no transition is occurring, turn on the light
self._light.on = True
if brightness:
self._light.brightness = int(brightness / 2.55)
if effect:
if effect not in self._effects_list:
raise ValueError(
f"Attempting to apply effect not in the effect list: '{effect}'"
)
self._light.effect = effect
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
transition = kwargs.get(ATTR_TRANSITION)
if transition:
self._light.brightness_transition(0, int(transition))
else:
self._light.on = False
def update(self):
"""Fetch new state data for this light."""
try:
self._available = self._light.available
self._brightness = self._light.brightness
self._effects_list = self._light.effects
# Nanoleaf api returns non-existent effect named "*Solid*" when light set to solid color.
# This causes various issues with scening (see https://github.com/home-assistant/core/issues/36359).
# Until fixed at the library level, we should ensure the effect exists before saving to light properties
self._effect = (
self._light.effect if self._light.effect in self._effects_list else None
)
if self._effect is None:
self._color_temp = self._light.color_temperature
self._hs_color = self._light.hue, self._light.saturation
else:
self._color_temp = None
self._hs_color = None
self._state = self._light.on
except Unavailable as err:
_LOGGER.error("Could not update status for %s (%s)", self.name, err)
self._available = False
| apache-2.0 |
Thortoise/Super-Snake | Blender/animation_nodes-master/nodes/bvh_tree/is_inside_volume.py | 1 | 1470 | import bpy
from random import random
from mathutils import Vector
from ... base_types.node import AnimationNode
# in some cases multiple tests have to be done
# to reduce the probability of errors
direction1 = Vector((random(), random(), random())).normalized()
direction2 = Vector((random(), random(), random())).normalized()
direction3 = Vector((random(), random(), random())).normalized()
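# Parity rule used below: a point lies inside a closed mesh iff a ray cast from it
# crosses the surface an odd number of times. A single ray can graze an edge or vertex
# and miscount, so two rays in independent random directions are compared first and a
# third ray breaks the tie when their parities disagree.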
class IsInsideVolumeBVHTreeNode(bpy.types.Node, AnimationNode):
bl_idname = "an_IsInsideVolumeBVHTreeNode"
bl_label = "Is Inside Volume"
def create(self):
self.newInput("BVHTree", "BVHTree", "bvhTree")
self.newInput("Vector", "Vector", "vector", defaultDrawType = "PROPERTY_ONLY")
self.newOutput("Boolean", "Is Inside", "isInside")
def execute(self, bvhTree, vector):
hits1 = self.countHits(bvhTree, vector, direction1)
if hits1 == 0: return False
if hits1 == 1: return True
hits2 = self.countHits(bvhTree, vector, direction2)
if hits1 % 2 == hits2 % 2:
return hits1 % 2 == 1
hits3 = self.countHits(bvhTree, vector, direction3)
return hits3 % 2 == 1
def countHits(self, bvhTree, start, direction):
hits = 0
offset = direction * 0.0001
location = bvhTree.ray_cast(start, direction)[0]
while location is not None:
hits += 1
location = bvhTree.ray_cast(location + offset, direction)[0]
return hits
| gpl-3.0 |
algolia/algoliasearch-client-python | tests/unit/http/test_transporter.py | 1 | 7769 | import time
import unittest
import os
import mock as mock
from algoliasearch.configs import SearchConfig
from algoliasearch.exceptions import AlgoliaUnreachableHostException, AlgoliaException
from algoliasearch.http.hosts import Host, HostsCollection
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.requester import Requester
from algoliasearch.http.transporter import (
Transporter,
Response,
RetryStrategy,
RetryOutcome,
Request,
)
class TestTransporter(unittest.TestCase):
def setUp(self):
self.requester = Requester()
self.requester.send = mock.Mock(name="send")
self.requester.send.return_value = Response(200, {"foo": "bar"})
self.data = {"data": "foo"}
self.config = SearchConfig("foo", "bar")
self.request_options = RequestOptions.create(
self.config,
{
"User-Agent": "foo",
"createIfNotExists": True,
"readTimeout": 109,
"bodyParam": "bar",
},
)
self.transporter = Transporter(self.requester, self.config)
def test_success_write(self):
response = self.transporter.write(
"post", "endpoint/foo", self.data, self.request_options
)
host = self.config.hosts.write()[0] # type: Host
request = Request(
"POST",
self.request_options.headers,
self.data,
2, # Default connect timeout
30, # Default timeout
)
request.url = "https://{}/endpoint/foo?createIfNotExists=true".format(host.url)
self.requester.send.assert_called_once_with(request)
self.assertEqual({"foo": "bar"}, response)
self.assertEqual(self.requester.send.call_count, 1)
def test_success_read(self):
response = self.transporter.read(
"get", "endpoint/bar", {}, self.request_options
)
host = self.config.hosts.read()[0] # type: Host
request = Request(
"GET", # Upper case letters
self.request_options.headers,
{"bodyParam": "bar"},
2, # Default connect timeout
109, # Customized timeout
)
request.url = "https://{}/endpoint/bar?createIfNotExists=true".format(host.url)
self.requester.send.assert_called_once_with(request)
self.assertEqual({"foo": "bar"}, response)
self.assertEqual(self.requester.send.call_count, 1)
def test_unreachable_hosts_exception(self):
self.requester.send.return_value = Response(300, {"foo": "bar"})
with self.assertRaises(AlgoliaUnreachableHostException) as _:
self.transporter.read("get", "endpoint/bar", {}, self.request_options)
self.assertEqual(self.requester.send.call_count, 4)
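        # 4 attempts = the primary read host plus three fallback hosts (an assumption
        # about SearchConfig's default host list); every host is tried once before
        # AlgoliaUnreachableHostException is raised.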
self.requester.send.return_value = Response(100, {"foo": "bar"})
with self.assertRaises(AlgoliaUnreachableHostException) as _:
self.transporter.read("get", "endpoint/bar", {}, self.request_options)
        # Remains 4, all hosts here are down.
self.assertEqual(self.requester.send.call_count, 4)
with self.assertRaises(AlgoliaUnreachableHostException) as _:
self.transporter.write("get", "endpoint/bar", {}, self.request_options)
        # It's now 5, the write host list has one different host.
self.assertEqual(self.requester.send.call_count, 5)
with self.assertRaises(AlgoliaUnreachableHostException) as _:
self.transporter.write("get", "endpoint/bar", {}, self.request_options)
        # Remains 5, all hosts here are down.
self.assertEqual(self.requester.send.call_count, 5)
def test_algolia_exception(self):
self.requester.send.return_value = Response(401, {"foo": "bar"})
with self.assertRaises(AlgoliaException) as _:
self.transporter.read("get", "endpoint/bar", {}, self.request_options)
self.assertEqual(self.requester.send.call_count, 1)
class TestProxiedTransporter(unittest.TestCase):
def setUp(self):
os.environ["HTTPS_PROXY"] = "https://127.0.0.1:8080"
def tearDown(self):
del os.environ["HTTPS_PROXY"]
def test_with_proxy(self):
config = SearchConfig("foo", "bar")
requester = Requester()
requester.send = mock.Mock(name="send")
requester.send.return_value = Response(200, {"foo": "bar"})
transporter = Transporter(requester, config)
headers = RequestOptions.create(config).headers
data = {}
transporter.write("post", "endpoint/foo", data, None)
request = Request(
"POST",
headers,
{},
2, # Default connect timeout
30, # Default timeout
proxies={"https": "https://127.0.0.1:8080"},
)
request.url = "https://foo.algolia.net/endpoint/foo?"
requester.send.assert_called_once_with(request)
class TestRetryStrategy(unittest.TestCase):
def setUp(self):
self.time = time.time()
self.retry_strategy = RetryStrategy()
self.host = Host("foo.com")
self.response = Response()
def test_success_decision(self):
self.response.status_code = 200
decision = self.retry_strategy.decide(self.host, self.response)
self.assertEqual(decision, RetryOutcome.SUCCESS)
self.assertEqual(self.host.up, True)
self.assertGreaterEqual(self.host.last_use, self.time)
self.assertEqual(self.host.retry_count, 0)
def test_retryable_decision_because_status_code(self):
self.response.status_code = 300
decision = self.retry_strategy.decide(self.host, self.response)
self.assertEqual(decision, RetryOutcome.RETRY)
self.assertEqual(self.host.up, False)
self.assertGreaterEqual(self.host.last_use, self.time)
self.assertEqual(self.host.retry_count, 0)
def test_retryable_decision_because_timed_out(self):
self.response.is_timed_out_error = True
decision = self.retry_strategy.decide(self.host, self.response)
self.assertEqual(decision, RetryOutcome.RETRY)
self.assertEqual(self.host.up, True)
self.assertGreaterEqual(self.host.last_use, self.time)
self.assertEqual(self.host.retry_count, 1)
def test_fail_decision(self):
self.response.status_code = 401
decision = self.retry_strategy.decide(self.host, self.response)
self.assertEqual(decision, RetryOutcome.FAIL)
self.assertEqual(self.host.up, True)
self.assertGreaterEqual(self.host.last_use, self.time)
self.assertEqual(self.host.retry_count, 0)
def test_down_hosts(self):
a = Host("a", 10)
b = Host("b", 20)
c = Host("c")
self.retry_strategy._now = mock.Mock(name="_now")
self.retry_strategy._now.return_value = 1000
hosts = list(self.retry_strategy.valid_hosts([a, b, c]))
self.assertEqual(len(hosts), 3)
        a.last_use = 800.0  # 1000 - 800 = 200, less than the 300 s TTL, so still down
a.up = False
hosts = list(self.retry_strategy.valid_hosts([a, b, c]))
self.assertEqual(len(hosts), 2) # still down
        a.last_use = 400.0  # 1000 - 400 = 600, greater than the 300 s TTL, so the host can come back up
hosts = list(self.retry_strategy.valid_hosts([a, b, c]))
self.assertEqual(len(hosts), 3) # got up
class TestHostCollection(unittest.TestCase):
def test_hosts_got_sorted(self):
collection = HostsCollection([Host("a", 10), Host("b", 20), Host("c")])
hosts = collection.read()
self.assertEqual(hosts[0].url, "b")
self.assertEqual(hosts[1].url, "a")
self.assertEqual(hosts[2].url, "c")
| mit |
liu602348184/django | tests/proxy_models/tests.py | 260 | 16941 | from __future__ import unicode_literals
import datetime
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions, management
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement,
ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson,
TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
    def test_inheritance_new_table(self):
"""
        The StatusPerson model should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(
Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id + 1
)
self.assertRaises(
Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
StatusPerson.objects.create(name='Bazza Jr.')
StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id + 1
)
def test_abc(self):
"""
All base classes must be non-abstract
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
The proxy must actually have one concrete base class
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
# don't register this model in the app_cache for the current app,
# otherwise the check fails when other tests are being run.
app_label = 'no_such_app'
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
hint=None,
obj=None,
id='models.E017',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
def test_swappable(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['proxy_models'].models.copy()
try:
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
finally:
apps.app_configs['proxy_models'].models = _old_models
apps.all_models['proxy_models'] = _old_models
apps.clear_cache()
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
try:
Permission.objects.get(name="May display users information")
except Permission.DoesNotExist:
self.fail("The permission 'May display users information' has not been created")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
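        # By default get_for_model resolves proxies to their concrete model, so both
        # Person and its OtherPerson proxy share one and the same ContentType row.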
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(
name='Contributor', status='contrib')
with self.assertRaises(exceptions.FieldError):
            TrackerUser.objects.filter(issue=None)
self.assertQuerysetEqual(
ProxyTrackerUser.objects.filter(issue=None),
[tu], lambda x: x
)
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(name='Contributor',
status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta',
assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
status='proxy')
Improvement.objects.create(summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0])
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='proxy_models.urls',)
class ProxyModelAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = AuthUser.objects.create(
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='[email protected]', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
def test_cascade_delete_proxy_model_admin_warning(self):
"""
Test if admin gives warning about cascade deleting models referenced
to concrete model by deleting proxy object.
"""
tracker_user = TrackerUser.objects.all()[0]
base_user = BaseUser.objects.all()[0]
issue = Issue.objects.all()[0]
with self.assertNumQueries(7):
collector = admin.utils.NestedObjects('default')
collector.collect(ProxyTrackerUser.objects.all())
self.assertIn(tracker_user, collector.edges.get(None, ()))
self.assertIn(base_user, collector.edges.get(None, ()))
self.assertIn(issue, collector.edges.get(tracker_user, ()))
def test_delete_str_in_model_admin(self):
"""
Test if the admin delete page shows the correct string representation
for a proxy model.
"""
user = TrackerUser.objects.get(name='Django Pony')
proxy = ProxyTrackerUser.objects.get(name='Django Pony')
user_str = 'Tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
)
proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
)
self.client.login(username='super', password='secret')
response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, user_str)
response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, proxy_str)
self.client.logout()
| bsd-3-clause |
freedesktop-unofficial-mirror/gstreamer__cerbero | cerbero/commands/rdeps.py | 27 | 1717 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
class RDeps(Command):
doc = N_('List the reverse dependencies of a recipe')
name = 'rdeps'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('recipe', nargs=1,
help=_('name of the recipe')),
])
def run(self, config, args):
cookbook = CookBook(config)
recipe_name = args.recipe[0]
recipes = cookbook.list_recipe_reverse_deps(recipe_name)
if len(recipes) == 0:
m.error(_('%s has 0 reverse dependencies') % recipe_name)
return
for recipe in recipes:
m.message(recipe.name)
register_command(RDeps)
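# Example (assumed CLI entry point): `./cerbero-uninstalled rdeps glib` prints the name
# of every recipe that reverse-depends on glib, one per line, or an error if there are none.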
| lgpl-2.1 |