Dataset schema (each record below lists its fields in this order):

    repo_name : string, length 5 to 100
    path      : string, length 4 to 375
    copies    : categorical, 991 distinct values
    size      : string, length 4 to 7
    content   : string, length 666 to 1M
    license   : categorical, 15 distinct values

repo_name: ocefpaf/paegan-transport
path: tests/test_capability.py
copies: 2
size: 1860
content:

import unittest
from paegan.transport.models.behaviors.capability import Capability
import os
import json


class CapabilityTest(unittest.TestCase):

    def test_from_json(self):
        data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/capability_behavior.json"))).read()
        d = Capability(json=data)
        assert d.vss == 5.0
        assert d.variance == 0.0
        assert d.non_swim_turning == "random"
        assert d.swim_turning == "random"
        assert d.calculated_vss == 5.0
        assert d.calculate_vss() == 5.0

    def test_from_dict(self):
        data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/capability_behavior.json"))).read()
        d = Capability(data=json.loads(data))
        assert d.vss == 5.0
        assert d.variance == 0.0
        assert d.non_swim_turning == "random"
        assert d.swim_turning == "random"
        assert d.calculated_vss == 5.0
        assert d.calculate_vss() == 5.0

    def test_gaussian_variance(self):
        d = Capability()
        d.vss = 4.0
        d.variance = 0.5
        max_deviation = d.variance * 6
        real_vss = d.calculate_vss(method='gaussian')
        assert real_vss >= d.vss - max_deviation
        assert real_vss <= d.vss + max_deviation

    def test_random_variance(self):
        d = Capability()
        d.vss = 4.0
        d.variance = 0.5
        real_vss = d.calculate_vss(method='random')
        assert real_vss >= d.vss - d.variance
        assert real_vss <= d.vss + d.variance

    def test_error_variance(self):
        d = Capability()
        d.vss = 4.0
        d.variance = 0.5
        # Should result in a ValueError
        try:
            d.calculate_vss(method='nada')
        except ValueError:
            assert True
        else:
            assert False
license: gpl-3.0

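The tests above read a fixture at tests/resources/files/capability_behavior.json that is not included in this dump. Judging purely from the assertions, a minimal fixture would plausibly look like the following sketch; the exact key names and any nesting are assumptions inferred from the attribute names, not taken from the repository:

    # Hypothetical fixture content inferred from the assertions above;
    # the real keys in capability_behavior.json may be named or nested differently.
    fixture = {
        "vss": 5.0,
        "variance": 0.0,
        "non_swim_turning": "random",
        "swim_turning": "random",
    }
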
repo_name: BlaisProteomics/mzStudio
path: mzStudio/Filter_management.py
copies: 1
size: 11580
content:

__author__ = 'Scott Ficarro, William Max Alexander'
__version__ = '1.0'

# Filter management

import re


def Onms1(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def OnQuantivaQMS(filter_dict, id):
    #('.*?[+] ([pc]) [NE]SI sid=(\d+?.\d+?) Q([13])MS \[(\d+?.\d+?)-(\d+?.\d+?)\]')
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = 'Q' + id.groups()[2] + 'MS'
    filter_dict["data"] = "+cent" if id.groups()[0] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[3] + '-' + id.groups()[4] + ']'
    filter_dict['sid'] = id.groups()[1]
    return filter_dict


def OnQuantivaMS2(filter_dict, id):
    #('.*?[+] ([pc]) [NE]SI sid=(\d+?.\d+?) (Full ms2)|(pr) (\d+?.\d+?) \[(\d+?.\d+?)-(\d+?.\d+?)\]')
    #            0              1           2           3   4              5           6
    # group indices used below: 0 1 2 3 4 5
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[2]
    filter_dict["data"] = "+cent" if id.groups()[0] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[4] + '-' + id.groups()[5] + ']'
    filter_dict['sid'] = id.groups()[1]
    filter_dict["precursor"] = id.groups()[3]
    filter_dict["reaction"] = 'CAD'
    filter_dict["energy"] = ''
    return filter_dict


def OnQuantivaSRM(filter_dict, id):
    #
    # group indices used below: 0 1 2 3 4 5
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = 'SRM'
    filter_dict["data"] = "+cent" if id.groups()[0] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[3] + '-' + id.groups()[6] + ']'
    filter_dict['sid'] = id.groups()[1]
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = 'CAD'
    filter_dict["energy"] = ''
    return filter_dict


def Onlockms2(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[5] + '-' + id.groups()[6] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = id.groups()[3]
    filter_dict["energy"] = id.groups()[4] + '% NCE'
    return filter_dict


def Onlockms1(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def Onsim_ms1(filter_dict, id):
    filter_dict["mode"] = "sim (ms1)"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def Onpa(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[5] + '-' + id.groups()[6] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = id.groups()[3]
    filter_dict["energy"] = id.groups()[4] + '% NCE'
    return filter_dict


def OnSRM(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[5] + '-' + id.groups()[8] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = id.groups()[3]
    filter_dict["energy"] = id.groups()[4] + '% NCE'
    return filter_dict


def Ontarg(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[6] + '-' + id.groups()[7] + ']'
    filter_dict["precursor"] = id.groups()[3]
    filter_dict["reaction"] = id.groups()[4]
    filter_dict["energy"] = id.groups()[5] + '% NCE'
    return filter_dict


def Onetd(filter_dict, id):
    #('.*?([FI]TMS) [+] ([cp]) NSI (t E )*d sa Full ms2 (\d+?.\d+?)@(hcd|cid|etd)(\d+?.\d+?) \[(\d+?.\d+?)-(\d+?.\d+?)\]')
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[6] + '-' + id.groups()[7] + ']'
    filter_dict["precursor"] = id.groups()[3]
    filter_dict["reaction"] = id.groups()[4]
    filter_dict["energy"] = id.groups()[5] + '% NCE'
    return filter_dict


def Ontarg_ms3(filter_dict, id):
    #([FI]TMS) [+] ([cp]) [NE]SI Full ms3 (\d+?.\d+?)@(hcd|cid)(\d+?.\d+?) (\d+?.\d+?)@(hcd|cid)(\d+?.\d+?) \[(\d+?.\d+?)-(\d+?.\d+?)\]')
    #0             1                      2            3        4          5            6        7             8            9
    filter_dict["mode"] = "ms3"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[8] + '-' + id.groups()[9] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = id.groups()[3]
    filter_dict["energy"] = id.groups()[4] + '% NCE'
    filter_dict["precursor ms3"] = id.groups()[5]
    filter_dict["reaction ms3"] = id.groups()[6]
    filter_dict["energy ms3"] = id.groups()[7] + '% NCE'
    return filter_dict


def Ondd_ms3(filter_dict, id):
    #([FI]TMS) [+] ([cp]) sps d [NE]SI Full ms3 (\d+?.\d+?)@(hcd|cid)(\d+?.\d+?) (\d+?.\d+?)@(hcd|cid)(\d+?.\d+?) \[(\d+?.\d+?)-(\d+?.\d+?)\]')
    #0             1                            2            3        4          5            6        7             8            9
    filter_dict["mode"] = "ms3"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[8] + '-' + id.groups()[9] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = id.groups()[3]
    filter_dict["energy"] = id.groups()[4] + '% NCE'
    filter_dict["precursor ms3"] = id.groups()[5]
    filter_dict["reaction ms3"] = id.groups()[6]
    filter_dict["energy ms3"] = id.groups()[7] + '% NCE'
    return filter_dict


def OnDms(filter_dict, id):
    ##(GC|TOF) MS \+ NSI Full ms(2?) \[(\d+)-(\d+)\]
    #'TOF MS + NSI Full ms [email protected][58-3000]'
    #r'(GC|TOF) MS \+ NSI Full (ms[2]?) ((\d+.\d+)@\d+.\d+)?\[(\d+)-(\d+)\]'
    filter_dict['mode'] = id.groups()[1].upper()
    filter_dict['analyzer'] = id.groups()[0]  # That's what that is, right?
    filter_dict['data'] = 'cent'
    filter_dict['mr'] = '[%s-%s]' % id.groups()[-2:]
    filter_dict["precursor"] = id.groups()[3]
    filter_dict["reaction"] = 'CAD'
    filter_dict["energy"] = ''
    return filter_dict


# NOTE: this second definition of OnSRM shadows the earlier OnSRM above;
# only this word-splitting version is actually reachable at runtime.
def OnSRM(filter_dict, id):
    filt = id.groups()[0]
    words = filt.split()
    filter_dict['mode'] = words[0]
    filter_dict['analyzer'] = words[2]
    filter_dict['data'] = words[1]
    filter_dict['precursor'] = words[6]
    filter_dict['reaction'] = 'SRM'
    filter_dict['energy'] = words[3].replace('sid=', '')
    ranges = [x.strip('[], ').split('-') for x in words[7:]]
    filter_dict['mr'] = '[%s-%s]' % (ranges[0][0], ranges[-1][1])
    return filter_dict


#def OnTOFms2(filter_dict, id):
    #filter_dict['mode'] = 'ms2'
    #filter_dict['analyzer'] = 'TOF'
    #filter_dict['data'] = 'cent'
    #filter_dict['mr'] = '[%s-%s]' % id.groups()
    #return filter_dict


def Onprecursor(filter_dict, id):
    #self.precursor = re.compile('.*?(Precursor) [+] ([cp]) [NE]SI Full ms2 (\d+?.\d+?)@(\d+?.\d+?) \[(\d+?.*\d*?)-(\d+?.*\d*?)\]')
    #                                 0               1                     2            3             4             5
    filter_dict["mode"] = "Precursor"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[4] + '-' + id.groups()[5] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = 'CAD'
    filter_dict["energy"] = ''
    return filter_dict


def Onqms1(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def Onqms2(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[-2] + '-' + id.groups()[-1] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = 'NA'
    filter_dict["energy"] = 'NA'
    return filter_dict


def Onpi(filter_dict, id):
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    filter_dict["precursor"] = 'NA'
    filter_dict["reaction"] = 'CID'
    filter_dict["energy"] = 'NA'
    return filter_dict


def Onerms(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def Onq3ms(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def Onems(filter_dict, id):
    filter_dict["mode"] = "ms1"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[2] + '-' + id.groups()[3] + ']'
    return filter_dict


def onmrmms(filter_dict, id):
    filter_dict["mode"] = "mrm"
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    #filter_dict["mr"]='[' + id.groups()[2]+'-'+id.groups()[3]+']'
    #filter_dict["precursor"]='MRM'
    filter_dict["reaction"] = 'CAD'
    return filter_dict
    #raise NotImplementedError


def Onmgf(filter_dict, id):
    #MGF ms2 542.4232 [100:2000]
    filter_dict['mode'] = 'ms2'
    filter_dict['analyzer'] = ''
    filter_dict['data'] = 'mgf'
    filter_dict["mr"] = '[' + id.groups()[1] + '-' + id.groups()[2] + ']'
    filter_dict["precursor"] = id.groups()[0]
    filter_dict["reaction"] = "MS2"
    if id.groups()[4]:
        filter_dict["reaction"] = id.groups()[4]
    filter_dict["energy"] = ''
    filter_dict['file scan'] = id.groups()[3]
    return filter_dict


def Ontofms2(filter_dict, id):
    #TOF MS p NSI Full ms2 540.032306122@0[100-1400][1375:4]
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[4] + '-' + id.groups()[5] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = "TOF MS2"
    filter_dict["energy"] = 'Rolling CE'
    return filter_dict


def Onepi(filter_dict, id):
    #EPI p NSI Full ms2 540.032306122@0[100-1400][1375:4]
    filter_dict["mode"] = "ms2"
    filter_dict["analyzer"] = id.groups()[0]
    filter_dict["data"] = "+cent" if id.groups()[1] == "c" else "+prof"
    filter_dict["mr"] = '[' + id.groups()[4] + '-' + id.groups()[5] + ']'
    filter_dict["precursor"] = id.groups()[2]
    filter_dict["reaction"] = "CAD"
    filter_dict["energy"] = 'Rolling CE'
    return filter_dict
license: gpl-3.0

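Each handler above receives a dict to populate and a regex match object ("id") produced by matching a Thermo-style scan filter string. A minimal sketch of how such a handler might be driven is below; the filter string and the regex are illustrative assumptions, not patterns taken from this module:

    import re

    # Hypothetical driver: the pattern below is an illustration only.
    ms1_pattern = re.compile(r'.*?(FTMS) \+ ([cp]) ESI Full ms \[(\d+\.\d+)-(\d+\.\d+)\]')
    match = ms1_pattern.match('FTMS + p ESI Full ms [300.00-2000.00]')
    if match:
        # Populates mode, analyzer, data and mass-range keys from the match groups:
        # {'mode': 'ms1', 'analyzer': 'FTMS', 'data': '+prof', 'mr': '[300.00-2000.00]'}
        info = Onms1({}, match)
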
repo_name: jalr/privacyidea
path: privacyidea/webui/login.py
copies: 1
size: 3977
content:

# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2016-01-07 Cornelius Kölbel <[email protected]>
#            Add password reset
# 2015-11-04 Cornelius Kölbel <[email protected]>
#            Add REMOTE_USER check
# 2014-12-22 Cornelius Kölbel, <[email protected]>
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This is the starting point for the single web application.
Other html code is dynamically loaded via angularJS and located in
/static/views/...
"""
__author__ = "Cornelius Kölbel, <[email protected]>"

from flask import (Blueprint, render_template, request, current_app)
from privacyidea.api.lib.prepolicy import is_remote_user_allowed
from privacyidea.lib.passwordreset import is_password_reset
from privacyidea.lib.error import HSMException
from privacyidea.lib.realm import get_realms
from privacyidea.lib.policy import PolicyClass, ACTION, SCOPE

DEFAULT_THEME = "/static/contrib/css/bootstrap-theme.css"

login_blueprint = Blueprint('login_blueprint', __name__)


@login_blueprint.route('/', methods=['GET'])
def single_page_application():
    instance = request.script_root
    if instance == "/":
        instance = ""
    # The backend URL should come from the configuration of the system.
    backend_url = ""

    # The default theme. We can change this later
    theme = current_app.config.get("PI_CSS", DEFAULT_THEME)
    # Get further customizations
    customization = current_app.config.get("PI_CUSTOMIZATION",
                                           "/static/customize/")
    customization = customization.strip('/')
    # TODO: we should add the CSS into PI_CUSTOMIZATION/css
    # Enrollment-Wizard:
    #    PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html
    #    PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html
    #    PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html
    #    PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html
    browser_lang = request.accept_languages.best_match(["en", "de"])
    # check if login with REMOTE_USER is allowed.
    remote_user = ""
    password_reset = False
    # Depending on displaying the realm dropdown, we fill realms or not.
    policy_object = PolicyClass()
    realms = ""
    client_ip = request.access_route[0] if request.access_route else \
        request.remote_addr
    realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,
                                                scope=SCOPE.WEBUI,
                                                client=client_ip)
    if realm_dropdown:
        realms = ",".join(get_realms().keys())

    try:
        if is_remote_user_allowed(request):
            remote_user = request.remote_user
        password_reset = is_password_reset()
        hsm_ready = True
    except HSMException:
        hsm_ready = False

    return render_template("index.html", instance=instance,
                           backendUrl=backend_url,
                           browser_lang=browser_lang,
                           remote_user=remote_user,
                           theme=theme,
                           password_reset=password_reset,
                           hsm_ready=hsm_ready,
                           customization=customization,
                           realms=realms)
license: agpl-3.0

repo_name: yunity/yunity-core
path: karrot/activities/stats.py
copies: 1
size: 2443
content:

from influxdb_metrics.loader import write_points

from karrot.groups.stats import group_tags


def activity_tags(activity):
    tags = group_tags(activity.place.group)
    tags.update({
        'place': str(activity.place.id),
        'type': str(activity.activity_type.id),
        'type_name': activity.activity_type.name,
    })
    return tags


def activity_joined(activity):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': {
            'activity_joined': 1
        },
    }])


def activity_left(activity):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': {
            'activity_left': 1
        },
    }])


def activity_done(activity):
    participants_count = activity.participants.count()
    fields = {
        'activity_done': 1,
        'activity_done_slots_joined': participants_count,
    }
    if activity.max_participants is not None and activity.max_participants > 0:
        fields.update({
            'activity_done_slots_total': activity.max_participants,
            'activity_done_slots_percentage': participants_count / activity.max_participants,
        })
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': fields,
    }])


def activity_missed(activity):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': {
            'activity_missed': 1
        },
    }])


def activity_disabled(activity):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': {
            'activity_disabled': 1
        },
    }])


def activity_enabled(activity):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(activity),
        'fields': {
            'activity_enabled': 1
        },
    }])


def feedback_given(feedback):
    write_points([{
        'measurement': 'karrot.events',
        'tags': activity_tags(feedback.about),
        'fields': {
            'feedback': 1
        },
    }])


def activity_notification_email(group, **kwargs):
    write_points([{
        'measurement': 'karrot.email.activity_notification',
        'tags': group_tags(group),
        'fields': {
            'value': 1,
            **kwargs
        },
    }])
license: agpl-3.0

repo_name: JoshRosen/hyde
path: tests/test_htaccess.py
copies: 58
size: 1455
""" Tests of .htaccess file generation. For now, this just checks whether the demo site's generated .htaccess file matches a known good file. """ import os import sys from django.conf import settings TEST_ROOT = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(TEST_ROOT + "/..") sys.path = [ROOT] + sys.path from hydeengine.file_system import File, Folder from hydeengine import url, Initializer, setup_env from hydeengine.templatetags.hydetags \ import RenderHydeListingPageRewriteRulesNode as htaccessgen TEST_SITE = Folder(TEST_ROOT).child_folder("test_site") def setup_module(module): Initializer(TEST_SITE.path).initialize(ROOT, template="test", force=True) setup_env(TEST_SITE.path) def teardown_module(module): TEST_SITE.delete() class TestHtaccess: def test_listing_page_rewite_rule_generator(self): # test with two names settings.LISTING_PAGE_NAMES = ['listing', 'index'] expected = r""" ### BEGIN GENERATED REWRITE RULES #### RewriteCond %{REQUEST_FILENAME}/listing.html -f RewriteRule ^(.*) $1/listing.html RewriteCond %{REQUEST_FILENAME}/index.html -f RewriteRule ^(.*) $1/index.html #### END GENERATED REWRITE RULES #### """ actual = htaccessgen().render('') assert actual.strip() == expected.strip() # test with no names settings.LISTING_PAGE_NAMES = [] actual = htaccessgen().render('') assert actual == ""
license: mit

repo_name: BosNaufal/statistika
path: src/vendor/ionicons/builder/scripts/generate_font.py
copies: 348
size: 5381
content:

# Font generation script from FontCustom
# https://github.com/FontCustom/fontcustom/
# http://fontcustom.com/

import fontforge
import os
import md5
import subprocess
import tempfile
import json
import copy

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
INPUT_SVG_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'src')
OUTPUT_FONT_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'fonts')
MANIFEST_PATH = os.path.join(SCRIPT_PATH, '..', 'manifest.json')
BUILD_DATA_PATH = os.path.join(SCRIPT_PATH, '..', 'build_data.json')
AUTO_WIDTH = True
KERNING = 15

cp = 0xf100
m = md5.new()

f = fontforge.font()
f.encoding = 'UnicodeFull'
f.design_size = 16
f.em = 512
f.ascent = 448
f.descent = 64

manifest_file = open(MANIFEST_PATH, 'r')
manifest_data = json.loads(manifest_file.read())
manifest_file.close()

print "Load Manifest, Icons: %s" % ( len(manifest_data['icons']) )

build_data = copy.deepcopy(manifest_data)
build_data['icons'] = []

font_name = manifest_data['name']
m.update(font_name + ';')
m.update(manifest_data['prefix'] + ';')

for dirname, dirnames, filenames in os.walk(INPUT_SVG_DIR):
  for filename in filenames:
    name, ext = os.path.splitext(filename)
    filePath = os.path.join(dirname, filename)
    size = os.path.getsize(filePath)

    if ext in ['.svg', '.eps']:

      # see if this file is already in the manifest
      chr_code = None
      for ionicon in manifest_data['icons']:
        if ionicon['name'] == name:
          chr_code = ionicon['code']
          break

      if chr_code is None:
        # this is a new src icon
        print 'New Icon: \n - %s' % (name)

        while True:
          chr_code = '0x%x' % (cp)
          already_exists = False
          for ionicon in manifest_data['icons']:
            if ionicon.get('code') == chr_code:
              already_exists = True
              cp += 1
              chr_code = '0x%x' % (cp)
              continue
          if not already_exists:
            break

        print ' - %s' % chr_code
        manifest_data['icons'].append({
          'name': name,
          'code': chr_code
        })

      build_data['icons'].append({
        'name': name,
        'code': chr_code
      })

      if ext in ['.svg']:
        # hack removal of <switch> </switch> tags
        svgfile = open(filePath, 'r+')
        tmpsvgfile = tempfile.NamedTemporaryFile(suffix=ext, delete=False)
        svgtext = svgfile.read()
        svgfile.seek(0)

        # replace the <switch> </switch> tags with 'nothing'
        svgtext = svgtext.replace('<switch>', '')
        svgtext = svgtext.replace('</switch>', '')

        tmpsvgfile.file.write(svgtext)

        svgfile.close()
        tmpsvgfile.file.close()

        filePath = tmpsvgfile.name
        # end hack

      m.update(name + str(size) + ';')
      glyph = f.createChar( int(chr_code, 16) )
      glyph.importOutlines(filePath)

      # if we created a temporary file, let's clean it up
      if tmpsvgfile:
        os.unlink(tmpsvgfile.name)

      # set glyph size explicitly or automatically depending on autowidth
      if AUTO_WIDTH:
        glyph.left_side_bearing = glyph.right_side_bearing = 0
        glyph.round()

# resize glyphs if autowidth is enabled
if AUTO_WIDTH:
  f.autoWidth(0, 0, 512)

fontfile = '%s/ionicons' % (OUTPUT_FONT_DIR)

build_hash = m.hexdigest()
if build_hash == manifest_data.get('build_hash'):
  print "Source files unchanged, did not rebuild fonts"

else:
  manifest_data['build_hash'] = build_hash

  f.fontname = font_name
  f.familyname = font_name
  f.fullname = font_name
  f.generate(fontfile + '.ttf')
  f.generate(fontfile + '.svg')

  # Fix SVG header for webkit
  # from: https://github.com/fontello/font-builder/blob/master/bin/fontconvert.py
  svgfile = open(fontfile + '.svg', 'r+')
  svgtext = svgfile.read()
  svgfile.seek(0)
  svgfile.write(svgtext.replace('''<svg>''', '''<svg xmlns="http://www.w3.org/2000/svg">'''))
  svgfile.close()

  scriptPath = os.path.dirname(os.path.realpath(__file__))
  try:
    subprocess.Popen([scriptPath + '/sfnt2woff', fontfile + '.ttf'], stdout=subprocess.PIPE)
  except OSError:
    # If the local version of sfnt2woff fails (i.e., on Linux), try to use the
    # global version. This allows us to avoid forcing OS X users to compile
    # sfnt2woff from source, simplifying install.
    subprocess.call(['sfnt2woff', fontfile + '.ttf'])

  # eotlitetool.py script to generate IE7-compatible .eot fonts
  subprocess.call('python ' + scriptPath + '/eotlitetool.py ' + fontfile + '.ttf -o ' + fontfile + '.eot', shell=True)
  subprocess.call('mv ' + fontfile + '.eotlite ' + fontfile + '.eot', shell=True)

  # Hint the TTF file
  subprocess.call('ttfautohint -s -f -n ' + fontfile + '.ttf ' + fontfile + '-hinted.ttf > /dev/null 2>&1 && mv ' + fontfile + '-hinted.ttf ' + fontfile + '.ttf', shell=True)

manifest_data['icons'] = sorted(manifest_data['icons'], key=lambda k: k['name'])
build_data['icons'] = sorted(build_data['icons'], key=lambda k: k['name'])

print "Save Manifest, Icons: %s" % ( len(manifest_data['icons']) )
f = open(MANIFEST_PATH, 'w')
f.write( json.dumps(manifest_data, indent=2, separators=(',', ': ')) )
f.close()

print "Save Build, Icons: %s" % ( len(build_data['icons']) )
f = open(BUILD_DATA_PATH, 'w')
f.write( json.dumps(build_data, indent=2, separators=(',', ': ')) )
f.close()
license: mit

repo_name: blackzw/openwrt_sdk_dev1
path: staging_dir/host/lib/python2.7/distutils/sysconfig.py
copies: 24
size: 22287
"""Provide access to Python's configuration information. The specific configuration variables available depend heavily on the platform and configuration. The values may be retrieved using get_config_var(name), and the list of variables is available via get_config_vars().keys(). Additional convenience functions are also available. Written by: Fred L. Drake, Jr. Email: <[email protected]> """ __revision__ = "$Id$" import os import re import string import sys from distutils.errors import DistutilsPlatformError # These are needed in a couple of spots, so just compute them once. PREFIX = os.path.normpath(sys.prefix) EXEC_PREFIX = os.path.normpath(sys.exec_prefix) # Path to the base directory of the project. On Windows the binary may # live in project/PCBuild9. If we're dealing with an x64 Windows build, # it'll live in project/PCbuild/amd64. project_base = os.path.dirname(os.path.abspath(sys.executable)) if os.name == "nt" and "pcbuild" in project_base[-8:].lower(): project_base = os.path.abspath(os.path.join(project_base, os.path.pardir)) # PC/VS7.1 if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower(): project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, os.path.pardir)) # PC/AMD64 if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower(): project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, os.path.pardir)) # python_build: (Boolean) if true, we're either building Python or # building an extension with an un-installed Python, so we use # different (hard-wired) directories. # Setup.local is available for Makefile builds including VPATH builds, # Setup.dist is available on Windows def _python_build(): for fn in ("Setup.dist", "Setup.local"): if os.path.isfile(os.path.join(project_base, "Modules", fn)): return True return False python_build = _python_build() def get_python_version(): """Return a string containing the major and minor Python version, leaving off the patchlevel. Sample return values could be '1.5' or '2.2'. """ return sys.version[:3] def get_python_inc(plat_specific=0, prefix=None): """Return the directory containing installed Python header files. If 'plat_specific' is false (the default), this is the path to the non-platform-specific header files, i.e. Python.h and so on; otherwise, this is the path to platform-specific header files (namely pyconfig.h). If 'prefix' is supplied, use it instead of sys.prefix or sys.exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: prefix = plat_specific and EXEC_PREFIX or PREFIX if os.name == "posix": if python_build: buildir = os.path.dirname(sys.executable) if plat_specific: # python.h is located in the buildir inc_dir = buildir else: # the source dir is relative to the buildir srcdir = os.path.abspath(os.path.join(buildir, get_config_var('srcdir'))) # Include is located in the srcdir inc_dir = os.path.join(srcdir, "Include") return inc_dir return os.path.join(prefix, "include", "python" + get_python_version()) elif os.name == "nt": return os.path.join(prefix, "include") elif os.name == "os2": return os.path.join(prefix, "Include") else: raise DistutilsPlatformError( "I don't know where Python installs its C header files " "on platform '%s'" % os.name) def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): """Return the directory containing the Python library (standard or site additions). If 'plat_specific' is true, return the directory containing platform-specific modules, i.e. 
any module from a non-pure-Python module distribution; otherwise, return the platform-shared library directory. If 'standard_lib' is true, return the directory containing standard Python library modules; otherwise, return the directory for site-specific modules. If 'prefix' is supplied, use it instead of sys.prefix or sys.exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: prefix = plat_specific and EXEC_PREFIX or PREFIX if os.name == "posix": libpython = os.path.join(prefix, "lib", "python" + get_python_version()) if standard_lib: return libpython else: return os.path.join(libpython, "site-packages") elif os.name == "nt": if standard_lib: return os.path.join(prefix, "Lib") else: if get_python_version() < "2.2": return prefix else: return os.path.join(prefix, "Lib", "site-packages") elif os.name == "os2": if standard_lib: return os.path.join(prefix, "Lib") else: return os.path.join(prefix, "Lib", "site-packages") else: raise DistutilsPlatformError( "I don't know where Python installs its library " "on platform '%s'" % os.name) _USE_CLANG = None def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. Mainly needed on Unix, so we can plug in the information that varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: newcc = os.environ['CC'] elif sys.platform == 'darwin' and cc == 'gcc-4.2': # Issue #13590: # Since Apple removed gcc-4.2 in Xcode 4.2, we can no # longer assume it is available for extension module builds. # If Python was built with gcc-4.2, check first to see if # it is available on this system; if not, try to use clang # instead unless the caller explicitly set CC. global _USE_CLANG if _USE_CLANG is None: from distutils import log from subprocess import Popen, PIPE p = Popen("! 
type gcc-4.2 && type clang && exit 2", shell=True, stdout=PIPE, stderr=PIPE) p.wait() if p.returncode == 2: _USE_CLANG = True log.warn("gcc-4.2 not found, using clang instead") else: _USE_CLANG = False if _USE_CLANG: newcc = 'clang' if newcc: # On OS X, if CC is overridden, use that as the default # command for LDSHARED as well if (sys.platform == 'darwin' and 'LDSHARED' not in os.environ and ldshared.startswith(cc)): ldshared = newcc + ldshared[len(cc):] cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: ldshared = os.environ['LDSHARED'] if 'CPP' in os.environ: cpp = os.environ['CPP'] else: cpp = cc + " -E" # not always if 'LDFLAGS' in os.environ: ldshared = ldshared + ' ' + os.environ['LDFLAGS'] if 'CFLAGS' in os.environ: cflags = opt + ' ' + os.environ['CFLAGS'] ldshared = ldshared + ' ' + os.environ['CFLAGS'] if 'CPPFLAGS' in os.environ: cpp = cpp + ' ' + os.environ['CPPFLAGS'] cflags = cflags + ' ' + os.environ['CPPFLAGS'] ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] if 'AR' in os.environ: ar = os.environ['AR'] if 'ARFLAGS' in os.environ: archiver = ar + ' ' + os.environ['ARFLAGS'] else: archiver = ar + ' ' + ar_flags cc_cmd = cc + ' ' + cflags compiler.set_executables( preprocessor=cpp, compiler=cc_cmd, compiler_so=cc_cmd + ' ' + ccshared, compiler_cxx=cxx, linker_so=ldshared, linker_exe=cc, archiver=archiver) compiler.shared_lib_extension = so_ext def get_config_h_filename(): """Return full pathname of installed pyconfig.h file.""" if python_build: if os.name == "nt": inc_dir = os.path.join(project_base, "PC") else: inc_dir = project_base else: inc_dir = get_python_inc(plat_specific=1) if get_python_version() < '2.2': config_h = 'config.h' else: # The name of the config.h file changed in 2.2 config_h = 'pyconfig.h' return os.path.join(inc_dir, config_h) def get_makefile_filename(): """Return full pathname of installed Makefile from the Python build.""" if python_build: return os.path.join(os.path.dirname(sys.executable), "Makefile") lib_dir = get_python_lib(plat_specific=1, standard_lib=1) return os.path.join(lib_dir, "config", "Makefile") def parse_config_h(fp, g=None): """Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if g is None: g = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") # while 1: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass g[n] = v else: m = undef_rx.match(line) if m: g[m.group(1)] = 0 return g # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") def parse_makefile(fn, g=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. 
""" from distutils.text_file import TextFile fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1) if g is None: g = {} done = {} notdone = {} while 1: line = fp.readline() if line is None: # eof break m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # do variable interpolation here while notdone: for name in notdone.keys(): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value del notdone[name] else: # bogus variable reference; just drop it since we can't deal del notdone[name] fp.close() # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary g.update(done) return g def expand_makefile_vars(s, vars): """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in 'string' according to 'vars' (a dictionary mapping variable names to values). Variables not present in 'vars' are silently expanded to the empty string. The variable values in 'vars' should not contain further variable expansions; if 'vars' is the output of 'parse_makefile()', you're fine. Returns a variable-expanded version of 's'. """ # This algorithm does multiple expansion, so if vars['foo'] contains # "${bar}", it will expand ${foo} to ${bar}, and then expand # ${bar}... and so forth. This is fine as long as 'vars' comes from # 'parse_makefile()', which takes care of such expansions eagerly, # according to make's variable expansion semantics. while 1: m = _findvar1_rx.search(s) or _findvar2_rx.search(s) if m: (beg, end) = m.span() s = s[0:beg] + vars.get(m.group(1)) + s[end:] else: break return s _config_vars = None def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} # load the installed Makefile: try: filename = get_makefile_filename() parse_makefile(filename, g) except IOError, msg: my_msg = "invalid Python installation: unable to open %s" % filename if hasattr(msg, "strerror"): my_msg = my_msg + " (%s)" % msg.strerror raise DistutilsPlatformError(my_msg) # load the installed pyconfig.h: try: filename = get_config_h_filename() parse_config_h(file(filename), g) except IOError, msg: my_msg = "invalid Python installation: unable to open %s" % filename if hasattr(msg, "strerror"): my_msg = my_msg + " (%s)" % msg.strerror raise DistutilsPlatformError(my_msg) # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. if python_build: g['LDSHARED'] = g['BLDSHARED'] elif get_python_version() < '2.1': # The following two branches are for 1.5.2 compatibility. if sys.platform == 'aix4': # what about AIX 3.x ? # Linker script is in the config directory, not in Modules as the # Makefile says. 
python_lib = get_python_lib(standard_lib=1) ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') python_exp = os.path.join(python_lib, 'config', 'python.exp') g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) elif sys.platform == 'beos': # Linker script is in the config directory. In the Makefile it is # relative to the srcdir, which after installation no longer makes # sense. python_lib = get_python_lib(standard_lib=1) linkerscript_path = string.split(g['LDSHARED'])[0] linkerscript_name = os.path.basename(linkerscript_path) linkerscript = os.path.join(python_lib, 'config', linkerscript_name) # XXX this isn't the right place to do this: adding the Python # library to the link, if needed, should be in the "build_ext" # command. (It's also needed for non-MS compilers on Windows, and # it's taken care of for them by the 'build_ext.get_libraries()' # method.) g['LDSHARED'] = ("%s -L%s/lib -lpython%s" % (linkerscript, PREFIX, get_python_version())) global _config_vars _config_vars = g def _init_nt(): """Initialize the module as appropriate for NT""" g = {} # set basic install directories g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) # XXX hmmm.. a normal install puts include files here g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) global _config_vars _config_vars = g def _init_os2(): """Initialize the module as appropriate for OS/2""" g = {} # set basic install directories g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) # XXX hmmm.. a normal install puts include files here g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' g['EXE'] = ".exe" global _config_vars _config_vars = g def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. Generally this includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's installed Makefile; on Windows and Mac OS it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _config_vars if _config_vars is None: func = globals().get("_init_" + os.name) if func: func() else: _config_vars = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # Distutils. _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX if sys.platform == 'darwin': kernel_version = os.uname()[2] # Kernel version (8.4.3) major_version = int(kernel_version.split('.')[0]) if major_version < 8: # On Mac OS X before 10.4, check if -arch and -isysroot # are in CFLAGS or LDFLAGS and remove them if they are. # This is needed when building extensions on a 10.3 system # using a universal build of python. for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _config_vars[key] flags = re.sub('-arch\s+\w+\s', ' ', flags) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _config_vars[key] = flags else: # Allow the user to override the architecture flags using # an environment variable. 
# NOTE: This name was introduced by Apple in OSX 10.5 and # is used by several scripting languages distributed with # that OS release. if 'ARCHFLAGS' in os.environ: arch = os.environ['ARCHFLAGS'] for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _config_vars[key] flags = re.sub('-arch\s+\w+\s', ' ', flags) flags = flags + ' ' + arch _config_vars[key] = flags # If we're on OSX 10.5 or later and the user tries to # compiles an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. # # The major usecase for this is users using a Python.org # binary installer on OSX 10.6: that installer uses # the 10.4u SDK, but that SDK is not installed by default # when you install Xcode. # m = re.search('-isysroot\s+(\S+)', _config_vars['CFLAGS']) if m is not None: sdk = m.group(1) if not os.path.exists(sdk): for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _config_vars[key] flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags) _config_vars[key] = flags if args: vals = [] for name in args: vals.append(_config_vars.get(name)) return vals else: return _config_vars def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ return get_config_vars().get(name)
license: gpl-2.0

repo_name: richardcs/ansible
path: lib/ansible/modules/windows/win_find.py
copies: 52
size: 9498
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: win_find
version_added: "2.3"
short_description: Return a list of files based on specific criteria
description:
    - Return a list of files based on specified criteria.
    - Multiple criteria are AND'd together.
    - For non-Windows targets, use the M(find) module instead.
options:
    age:
        description:
            - Select files or folders whose age is equal to or greater than
              the specified time. Use a negative age to find files equal to or
              less than the specified time. You can choose seconds, minutes,
              hours, days or weeks by specifying the first letter of any of
              those words (e.g., "2s", "10d", "1w").
    age_stamp:
        description:
            - Choose the file property against which we compare C(age). The
              default attribute we compare with is the last modification time.
        choices: [ atime, ctime, mtime ]
        default: mtime
    checksum_algorithm:
        description:
            - Algorithm to determine the checksum of a file. Will throw an
              error if the host is unable to use specified algorithm.
        choices: [ md5, sha1, sha256, sha384, sha512 ]
        default: sha1
    file_type:
        description: Type of file to search for.
        choices: [ directory, file ]
        default: file
    follow:
        description:
            - Set this to C(yes) to follow symlinks in the path.
            - This needs to be used in conjunction with C(recurse).
        type: bool
        default: 'no'
    get_checksum:
        description:
            - Whether to return a checksum of the file in the return info
              (default sha1), use C(checksum_algorithm) to change from the
              default.
        type: bool
        default: 'yes'
    hidden:
        description: Set this to include hidden files or folders.
        type: bool
        default: 'no'
    paths:
        description:
            - List of paths of directories to search for files or folders in.
              This can be supplied as a single path or a list of paths.
        required: yes
    patterns:
        description:
            - One or more (powershell or regex) patterns to compare filenames
              with. The type of pattern matching is controlled by C(use_regex)
              option. The patterns restrict the list of files or folders to be
              returned based on the filenames. For a file to be matched it
              only has to match with one pattern in a list provided.
    recurse:
        description:
            - Will recursively descend into the directory looking for files
              or folders.
        type: bool
        default: 'no'
    size:
        description:
            - Select files or folders whose size is equal to or greater than
              the specified size. Use a negative value to find files equal to
              or less than the specified size. You can specify the size with a
              suffix of the byte type i.e. kilo = k, mega = m... Size is not
              evaluated for symbolic links.
    use_regex:
        description:
            - Will set patterns to run as a regex check if set to C(yes).
        type: bool
        default: 'no'
author:
- Jordan Borean (@jborean93)
'''

EXAMPLES = r'''
- name: Find files in path
  win_find:
    paths: D:\Temp

- name: Find hidden files in path
  win_find:
    paths: D:\Temp
    hidden: yes

- name: Find files in multiple paths
  win_find:
    paths:
    - C:\Temp
    - D:\Temp

- name: Find files in directory while searching recursively
  win_find:
    paths: D:\Temp
    recurse: yes

- name: Find files in directory while following symlinks
  win_find:
    paths: D:\Temp
    recurse: yes
    follow: yes

- name: Find files with .log and .out extension using powershell wildcards
  win_find:
    paths: D:\Temp
    patterns: [ '*.log', '*.out' ]

- name: Find files in path based on regex pattern
  win_find:
    paths: D:\Temp
    patterns: out_\d{8}-\d{6}.log

- name: Find files older than 1 day
  win_find:
    paths: D:\Temp
    age: 86400

- name: Find files older than 1 day based on create time
  win_find:
    paths: D:\Temp
    age: 86400
    age_stamp: ctime

- name: Find files older than 1 day with unit syntax
  win_find:
    paths: D:\Temp
    age: 1d

- name: Find files newer than 1 hour
  win_find:
    paths: D:\Temp
    age: -3600

- name: Find files newer than 1 hour with unit syntax
  win_find:
    paths: D:\Temp
    age: -1h

- name: Find files larger than 1MB
  win_find:
    paths: D:\Temp
    size: 1048576

- name: Find files larger than 1GB with unit syntax
  win_find:
    paths: D:\Temp
    size: 1g

- name: Find files smaller than 1MB
  win_find:
    paths: D:\Temp
    size: -1048576

- name: Find files smaller than 1GB with unit syntax
  win_find:
    paths: D:\Temp
    size: -1g

- name: Find folders/symlinks in multiple paths
  win_find:
    paths:
    - C:\Temp
    - D:\Temp
    file_type: directory

- name: Find files and return SHA256 checksum of files found
  win_find:
    paths: C:\Temp
    get_checksum: yes
    checksum_algorithm: sha256

- name: Find files and do not return the checksum
  win_find:
    paths: C:\Temp
    get_checksum: no
'''

RETURN = r'''
examined:
    description: The number of files/folders that was checked
    returned: always
    type: int
    sample: 10
matched:
    description: The number of files/folders that match the criteria
    returned: always
    type: int
    sample: 2
files:
    description: Information on the files/folders that match the criteria returned as a list of dictionary elements
      for each file matched
    returned: success
    type: complex
    contains:
        attributes:
            description: attributes of the file at path in raw form
            returned: success, path exists
            type: string
            sample: "Archive, Hidden"
        checksum:
            description: The checksum of a file based on checksum_algorithm specified
            returned: success, path exists, path is a file, get_checksum == True
            type: string
            sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
        creationtime:
            description: the create time of the file represented in seconds since epoch
            returned: success, path exists
            type: float
            sample: 1477984205.15
        extension:
            description: the extension of the file at path
            returned: success, path exists, path is a file
            type: string
            sample: ".ps1"
        isarchive:
            description: if the path is ready for archiving or not
            returned: success, path exists
            type: boolean
            sample: True
        isdir:
            description: if the path is a directory or not
            returned: success, path exists
            type: boolean
            sample: True
        ishidden:
            description: if the path is hidden or not
            returned: success, path exists
            type: boolean
            sample: True
        islnk:
            description: if the path is a symbolic link or junction or not
            returned: success, path exists
            type: boolean
            sample: True
        isreadonly:
            description: if the path is read only or not
            returned: success, path exists
            type: boolean
            sample: True
        isshared:
            description: if the path is shared or not
            returned: success, path exists
            type: boolean
            sample: True
        lastaccesstime:
            description: the last access time of the file represented in seconds since epoch
            returned: success, path exists
            type: float
            sample: 1477984205.15
        lastwritetime:
            description: the last modification time of the file represented in seconds since epoch
            returned: success, path exists
            type: float
            sample: 1477984205.15
        lnk_source:
            description: the target of the symbolic link, will return null if not a link or the link is broken
            returned: success, path exists, path is a symbolic link
            type: string
            sample: C:\temp
        owner:
            description: the owner of the file
            returned: success, path exists
            type: string
            sample: BUILTIN\Administrators
        path:
            description: the full absolute path to the file
            returned: success, path exists
            type: string
            sample: BUILTIN\Administrators
        sharename:
            description: the name of share if folder is shared
            returned: success, path exists, path is a directory and isshared == True
            type: string
            sample: file-share
        size:
            description: the size in bytes of a file or folder
            returned: success, path exists, path is not a link
            type: int
            sample: 1024
'''
license: gpl-3.0

repo_name: Vimos/scikit-learn
path: examples/svm/plot_separating_hyperplane_unbalanced.py
copies: 25
size: 1866
""" ================================================= SVM: Separating hyperplane for unbalanced classes ================================================= Find the optimal separating hyperplane using an SVC for classes that are unbalanced. We first find the separating plane with a plain SVC and then plot (dashed) the separating hyperplane with automatically correction for unbalanced classes. .. currentmodule:: sklearn.linear_model .. note:: This example will also work by replacing ``SVC(kernel="linear")`` with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour such as that of a SVC with a linear kernel. For example try instead of the ``SVC``:: clf = SGDClassifier(n_iter=100, alpha=0.01) """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm #from sklearn.linear_model import SGDClassifier # we create 40 separable points rng = np.random.RandomState(0) n_samples_1 = 1000 n_samples_2 = 100 X = np.r_[1.5 * rng.randn(n_samples_1, 2), 0.5 * rng.randn(n_samples_2, 2) + [2, 2]] y = [0] * (n_samples_1) + [1] * (n_samples_2) # fit the model and get the separating hyperplane clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, y) w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - clf.intercept_[0] / w[1] # get the separating hyperplane using weighted classes wclf = svm.SVC(kernel='linear', class_weight={1: 10}) wclf.fit(X, y) ww = wclf.coef_[0] wa = -ww[0] / ww[1] wyy = wa * xx - wclf.intercept_[0] / ww[1] # plot separating hyperplanes and samples h0 = plt.plot(xx, yy, 'k-', label='no weights') h1 = plt.plot(xx, wyy, 'k--', label='with weights') plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k') plt.legend() plt.axis('tight') plt.show()
license: bsd-3-clause

repo_name: ssomenzi/silence
path: node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py
copies: 2354
size: 10366
# Unmodified from http://code.activestate.com/recipes/576693/ # other than to add MIT license header (as specified on page, but not in code). # Linked from Python documentation here: # http://docs.python.org/2/library/collections.html#collections.OrderedDict # # This should be deleted once Py2.7 is available on all bots, see # http://crbug.com/241769. # # Copyright (c) 2009 Raymond Hettinger. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. 
dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) # Suppress 'OrderedDict.update: Method has no argument': # pylint: disable=E0211 def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
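# ---------------------------------------------------------------------------
# A minimal usage sketch (added here for illustration; it is not part of the
# "unmodified" recipe above). It demonstrates the behaviour the linked-list
# implementation guarantees: insertion order is preserved, popitem() is LIFO
# by default, and comparison against another OrderedDict is order-sensitive.
if __name__ == '__main__':
    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    assert od.keys() == ['b', 'a', 'c']          # insertion order, not sorted
    od['b'] = 99                                 # updating a key keeps its slot
    assert od.keys() == ['b', 'a', 'c']
    assert od.popitem() == ('c', 3)              # LIFO by default
    assert od.popitem(last=False) == ('b', 99)   # FIFO when last is False
    # Order-sensitive against another OrderedDict...
    assert OrderedDict([('x', 1), ('y', 2)]) != OrderedDict([('y', 2), ('x', 1)])
    # ...but order-insensitive against a plain dict.
    assert OrderedDict([('x', 1), ('y', 2)]) == {'x': 1, 'y': 2}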
agpl-3.0
snakeleon/YouCompleteMe-x86
python/ycm/vimsupport.py
1
38641
# Copyright (C) 2011-2012 Google Inc. # 2016 YouCompleteMe contributors # # This file is part of YouCompleteMe. # # YouCompleteMe is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # YouCompleteMe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import * # noqa from future.utils import iterkeys import vim import os import json import re from collections import defaultdict from ycmd.utils import ( GetCurrentDirectory, JoinLinesAsUnicode, ToBytes, ToUnicode ) from ycmd import user_options_store BUFFER_COMMAND_MAP = { 'same-buffer' : 'edit', 'horizontal-split' : 'split', 'vertical-split' : 'vsplit', 'new-tab' : 'tabedit' } FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT = ( 'The requested operation will apply changes to {0} files which are not ' 'currently open. This will therefore open {0} new files in the hidden ' 'buffers. The quickfix list can then be used to review the changes. No ' 'files will be written to disk. Do you wish to continue?' ) NO_SELECTION_MADE_MSG = "No valid selection was made; aborting." def CurrentLineAndColumn(): """Returns the 0-based current line and 0-based current column.""" # See the comment in CurrentColumn about the calculation for the line and # column number line, column = vim.current.window.cursor line -= 1 return line, column def CurrentColumn(): """Returns the 0-based current column. Do NOT access the CurrentColumn in vim.current.line. It doesn't exist yet when the cursor is at the end of the line. Only the chars before the current column exist in vim.current.line.""" # vim's columns are 1-based while vim.current.line columns are 0-based # ... but vim.current.window.cursor (which returns a (line, column) tuple) # columns are 0-based, while the line from that same tuple is 1-based. # vim.buffers buffer objects OTOH have 0-based lines and columns. # Pigs have wings and I'm a loopy purple duck. Everything makes sense now. return vim.current.window.cursor[ 1 ] def CurrentLineContents(): return ToUnicode( vim.current.line ) def TextAfterCursor(): """Returns the text after CurrentColumn.""" return ToUnicode( vim.current.line[ CurrentColumn(): ] ) def TextBeforeCursor(): """Returns the text before CurrentColumn.""" return ToUnicode( vim.current.line[ :CurrentColumn() ] ) # Expects version_string in 'MAJOR.MINOR.PATCH' format, e.g. '7.4.301' def VimVersionAtLeast( version_string ): major, minor, patch = [ int( x ) for x in version_string.split( '.' ) ] # For Vim 7.4.301, v:version is '704' actual_major_and_minor = GetIntValue( 'v:version' ) matching_major_and_minor = major * 100 + minor if actual_major_and_minor != matching_major_and_minor: return actual_major_and_minor > matching_major_and_minor return GetBoolValue( 'has("patch{0}")'.format( patch ) ) # Note the difference between buffer OPTIONS and VARIABLES; the two are not # the same. 
def GetBufferOption( buffer_object, option ): # NOTE: We used to check for the 'options' property on the buffer_object which # is available in recent versions of Vim and would then use: # # buffer_object.options[ option ] # # to read the value, BUT this caused annoying flickering when the # buffer_object was a hidden buffer (with option = 'ft'). This was all due to # a Vim bug. Until this is fixed, we won't use it. to_eval = 'getbufvar({0}, "&{1}")'.format( buffer_object.number, option ) return GetVariableValue( to_eval ) def BufferModified( buffer_object ): return bool( int( GetBufferOption( buffer_object, 'mod' ) ) ) def GetUnsavedAndSpecifiedBufferData( including_filepath ): """Build part of the request containing the contents and filetypes of all dirty buffers as well as the buffer with filepath |including_filepath|.""" buffers_data = {} for buffer_object in vim.buffers: buffer_filepath = GetBufferFilepath( buffer_object ) if not ( BufferModified( buffer_object ) or buffer_filepath == including_filepath ): continue buffers_data[ buffer_filepath ] = { # Add a newline to match what gets saved to disk. See #1455 for details. 'contents': JoinLinesAsUnicode( buffer_object ) + '\n', 'filetypes': FiletypesForBuffer( buffer_object ) } return buffers_data def GetBufferNumberForFilename( filename, open_file_if_needed = True ): return GetIntValue( u"bufnr('{0}', {1})".format( EscapeForVim( os.path.realpath( filename ) ), int( open_file_if_needed ) ) ) def GetCurrentBufferFilepath(): return GetBufferFilepath( vim.current.buffer ) def BufferIsVisible( buffer_number ): if buffer_number < 0: return False window_number = GetIntValue( "bufwinnr({0})".format( buffer_number ) ) return window_number != -1 def GetBufferFilepath( buffer_object ): if buffer_object.name: return buffer_object.name # Buffers that have just been created by a command like :enew don't have any # buffer name so we use the buffer number for that. return os.path.join( GetCurrentDirectory(), str( buffer_object.number ) ) def UnplaceSignInBuffer( buffer_number, sign_id ): if buffer_number < 0: return vim.command( 'try | exec "sign unplace {0} buffer={1}" | catch /E158/ | endtry'.format( sign_id, buffer_number ) ) def PlaceSign( sign_id, line_num, buffer_num, is_error = True ): # libclang can give us diagnostics that point "outside" the file; Vim borks # on these. if line_num < 1: line_num = 1 sign_name = 'YcmError' if is_error else 'YcmWarning' vim.command( 'sign place {0} line={1} name={2} buffer={3}'.format( sign_id, line_num, sign_name, buffer_num ) ) def PlaceDummySign( sign_id, buffer_num, line_num ): if buffer_num < 0 or line_num < 0: return vim.command( 'sign define ycm_dummy_sign' ) vim.command( 'sign place {0} name=ycm_dummy_sign line={1} buffer={2}'.format( sign_id, line_num, buffer_num, ) ) def UnPlaceDummySign( sign_id, buffer_num ): if buffer_num < 0: return vim.command( 'sign undefine ycm_dummy_sign' ) vim.command( 'sign unplace {0} buffer={1}'.format( sign_id, buffer_num ) ) def ClearYcmSyntaxMatches(): matches = VimExpressionToPythonType( 'getmatches()' ) for match in matches: if match[ 'group' ].startswith( 'Ycm' ): vim.eval( 'matchdelete({0})'.format( match[ 'id' ] ) ) def AddDiagnosticSyntaxMatch( line_num, column_num, line_end_num = None, column_end_num = None, is_error = True ): """Highlight a range in the current window starting from (|line_num|, |column_num|) included to (|line_end_num|, |column_end_num|) excluded. 
If |line_end_num| or |column_end_num| are not given, highlight the character at (|line_num|, |column_num|). Both line and column numbers are 1-based. Return the ID of the newly added match.""" group = 'YcmErrorSection' if is_error else 'YcmWarningSection' line_num, column_num = LineAndColumnNumbersClamped( line_num, column_num ) if not line_end_num or not column_end_num: return GetIntValue( "matchadd('{0}', '\%{1}l\%{2}c')".format( group, line_num, column_num ) ) # -1 and then +1 to account for column end not included in the range. line_end_num, column_end_num = LineAndColumnNumbersClamped( line_end_num, column_end_num - 1 ) column_end_num += 1 return GetIntValue( "matchadd('{0}', '\%{1}l\%{2}c\_.\\{{-}}\%{3}l\%{4}c')".format( group, line_num, column_num, line_end_num, column_end_num ) ) # Clamps the line and column numbers so that they are not past the contents of # the buffer. Numbers are 1-based byte offsets. def LineAndColumnNumbersClamped( line_num, column_num ): new_line_num = line_num new_column_num = column_num max_line = len( vim.current.buffer ) if line_num and line_num > max_line: new_line_num = max_line max_column = len( vim.current.buffer[ new_line_num - 1 ] ) if column_num and column_num > max_column: new_column_num = max_column return new_line_num, new_column_num def SetLocationList( diagnostics ): """Diagnostics should be in qflist format; see ":h setqflist" for details.""" vim.eval( 'setloclist( 0, {0} )'.format( json.dumps( diagnostics ) ) ) def SetQuickFixList( quickfix_list, focus = False, autoclose = False ): """Populate the quickfix list and open it. List should be in qflist format: see ":h setqflist" for details. When focus is set to True, the quickfix window becomes the active window. When autoclose is set to True, the quickfix window is automatically closed after an entry is selected.""" vim.eval( 'setqflist( {0} )'.format( json.dumps( quickfix_list ) ) ) OpenQuickFixList( focus, autoclose ) def OpenQuickFixList( focus = False, autoclose = False ): """Open the quickfix list to full width at the bottom of the screen with its height automatically set to fit all entries. This behavior can be overridden by using the YcmQuickFixOpened autocommand. See the SetQuickFixList function for the focus and autoclose options.""" vim.command( 'botright copen' ) SetFittingHeightForCurrentWindow() if autoclose: # This autocommand is automatically removed when the quickfix window is # closed. vim.command( 'au WinLeave <buffer> q' ) if VariableExists( '#User#YcmQuickFixOpened' ): vim.command( 'doautocmd User YcmQuickFixOpened' ) if not focus: JumpToPreviousWindow() def SetFittingHeightForCurrentWindow(): window_width = GetIntValue( 'winwidth( 0 )' ) fitting_height = 0 for line in vim.current.buffer: fitting_height += len( line ) // window_width + 1 vim.command( '{0}wincmd _'.format( fitting_height ) ) def ConvertDiagnosticsToQfList( diagnostics ): def ConvertDiagnosticToQfFormat( diagnostic ): # See :h getqflist for a description of the dictionary fields. # Note that, as usual, Vim is completely inconsistent about whether # line/column numbers are 1 or 0 based in its various APIs. Here, it wants # them to be 1-based. The documentation states quite clearly that it # expects a byte offset, by which it means "1-based column number" as # described in :h getqflist ("the first column is 1"). location = diagnostic[ 'location' ] line_num = location[ 'line_num' ] # libclang can give us diagnostics that point "outside" the file; Vim borks # on these. 
if line_num < 1: line_num = 1 text = diagnostic[ 'text' ] if diagnostic.get( 'fixit_available', False ): text += ' (FixIt available)' return { 'bufnr' : GetBufferNumberForFilename( location[ 'filepath' ] ), 'lnum' : line_num, 'col' : location[ 'column_num' ], 'text' : text, 'type' : diagnostic[ 'kind' ][ 0 ], 'valid' : 1 } return [ ConvertDiagnosticToQfFormat( x ) for x in diagnostics ] def GetVimGlobalsKeys(): return vim.eval( 'keys( g: )' ) def VimExpressionToPythonType( vim_expression ): """Returns a Python type from the return value of the supplied Vim expression. If the expression returns a list, dict or other non-string type, then it is returned unmodified. If the string return can be converted to an integer, returns an integer, otherwise returns the result converted to a Unicode string.""" result = vim.eval( vim_expression ) if not ( isinstance( result, str ) or isinstance( result, bytes ) ): return result try: return int( result ) except ValueError: return ToUnicode( result ) def HiddenEnabled( buffer_object ): return bool( int( GetBufferOption( buffer_object, 'hid' ) ) ) def BufferIsUsable( buffer_object ): return not BufferModified( buffer_object ) or HiddenEnabled( buffer_object ) def EscapedFilepath( filepath ): return filepath.replace( ' ' , r'\ ' ) # Both |line| and |column| need to be 1-based def TryJumpLocationInOpenedTab( filename, line, column ): filepath = os.path.realpath( filename ) for tab in vim.tabpages: for win in tab.windows: if win.buffer.name == filepath: vim.current.tabpage = tab vim.current.window = win vim.current.window.cursor = ( line, column - 1 ) # Center the screen on the jumped-to location vim.command( 'normal! zz' ) return True # 'filename' is not opened in any tab pages return False # Maps User command to vim command def GetVimCommand( user_command, default = 'edit' ): vim_command = BUFFER_COMMAND_MAP.get( user_command, default ) if vim_command == 'edit' and not BufferIsUsable( vim.current.buffer ): vim_command = 'split' return vim_command # Both |line| and |column| need to be 1-based def JumpToLocation( filename, line, column ): # Add an entry to the jumplist vim.command( "normal! m'" ) if filename != GetCurrentBufferFilepath(): # We prefix the command with 'keepjumps' so that opening the file is not # recorded in the jumplist. So when we open the file and move the cursor to # a location in it, the user can use CTRL-O to jump back to the original # location, not to the start of the newly opened file. # Sadly this fails on random occasions and the undesired jump remains in the # jumplist. user_command = user_options_store.Value( 'goto_buffer_command' ) if user_command == 'new-or-existing-tab': if TryJumpLocationInOpenedTab( filename, line, column ): return user_command = 'new-tab' vim_command = GetVimCommand( user_command ) try: vim.command( 'keepjumps {0} {1}'.format( vim_command, EscapedFilepath( filename ) ) ) # When the file we are trying to jump to has a swap file # Vim opens swap-exists-choices dialog and throws vim.error with E325 error, # or KeyboardInterrupt after user selects one of the options. except vim.error as e: if 'E325' not in str( e ): raise # Do nothing if the target file is still not opened (user chose (Q)uit) if filename != GetCurrentBufferFilepath(): return # Thrown when user chooses (A)bort in .swp message box except KeyboardInterrupt: return vim.current.window.cursor = ( line, column - 1 ) # Center the screen on the jumped-to location vim.command( 'normal! 
zz' ) def NumLinesInBuffer( buffer_object ): # This is actually less than obvious, that's why it's wrapped in a function return len( buffer_object ) # Calling this function from the non-GUI thread will sometimes crash Vim. At # the time of writing, YCM only uses the GUI thread inside Vim (this used to # not be the case). def PostVimMessage( message, warning = True, truncate = False ): """Display a message on the Vim status line. By default, the message is highlighted and logged to Vim command-line history (see :h history). Unset the |warning| parameter to disable this behavior. Set the |truncate| parameter to avoid hit-enter prompts (see :h hit-enter) when the message is longer than the window width.""" echo_command = 'echom' if warning else 'echo' # Displaying a new message while previous ones are still on the status line # might lead to a hit-enter prompt or the message appearing without a # newline so we do a redraw first. vim.command( 'redraw' ) if warning: vim.command( 'echohl WarningMsg' ) message = ToUnicode( message ) if truncate: vim_width = GetIntValue( '&columns' ) message = message.replace( '\n', ' ' ) if len( message ) > vim_width: message = message[ : vim_width - 4 ] + '...' old_ruler = GetIntValue( '&ruler' ) old_showcmd = GetIntValue( '&showcmd' ) vim.command( 'set noruler noshowcmd' ) vim.command( "{0} '{1}'".format( echo_command, EscapeForVim( message ) ) ) SetVariableValue( '&ruler', old_ruler ) SetVariableValue( '&showcmd', old_showcmd ) else: for line in message.split( '\n' ): vim.command( "{0} '{1}'".format( echo_command, EscapeForVim( line ) ) ) if warning: vim.command( 'echohl None' ) def PresentDialog( message, choices, default_choice_index = 0 ): """Presents the user with a dialog where a choice can be made. This will be a dialog for gvim users or a question in the message buffer for vim users or if `set guioptions+=c` was used. choices is a list of alternatives. default_choice_index is the 0-based index of the default element that will get chosen if the user hits <CR>. Use -1 for no default. PresentDialog will return a 0-based index into the list or -1 if the dialog was dismissed by using <Esc>, Ctrl-C, etc. If you are presenting a list of options for the user to choose from, such as a list of imports, or lines to insert (etc.), SelectFromList is a better option. See also: :help confirm() in vim (Note that vim uses 1-based indexes) Example call: PresentDialog("Is this a nice example?", ["Yes", "No", "May&be"]) Is this a nice example? [Y]es, (N)o, May(b)e:""" to_eval = "confirm('{0}', '{1}', {2})".format( EscapeForVim( ToUnicode( message ) ), EscapeForVim( ToUnicode( "\n" .join( choices ) ) ), default_choice_index + 1 ) try: return GetIntValue( to_eval ) - 1 except KeyboardInterrupt: return -1 def Confirm( message ): """Display |message| with Ok/Cancel operations. Returns True if the user selects Ok""" return bool( PresentDialog( message, [ "Ok", "Cancel" ] ) == 0 ) def SelectFromList( prompt, items ): """Ask the user to select an item from the list |items|. Presents the user with |prompt| followed by a numbered list of |items|, from which they select one. The user is asked to enter the number of an item or click it. |items| should not contain leading ordinals: they are added automatically. Returns the 0-based index in the list |items| that the user selected, or a negative number if no valid item was selected.
See also :help inputlist().""" vim_items = [ prompt ] vim_items.extend( [ "{0}: {1}".format( i + 1, item ) for i, item in enumerate( items ) ] ) # The vim documentation warns not to present lists larger than the number of # lines of display. This is sound advice, but there really isn't any sensible # thing we can do in that scenario. Testing shows that Vim just pages the # message; that behaviour is as good as any, so we don't manipulate the list, # or attempt to page it. # For an explanation of the purpose of inputsave() / inputrestore(), # see :help input(). Briefly, it makes inputlist() work as part of a mapping. vim.eval( 'inputsave()' ) try: # Vim returns the number the user entered, or the line number the user # clicked. This may be wildly out of range for our list. It might even be # negative. # # The first item is index 0, and this maps to our "prompt", so we subtract 1 # from the result and return that, assuming it is within the range of the # supplied list. If not, we return negative. # # See :help input() for an explanation of the use of inputsave() and # inputrestore(). It is done in try/finally in case vim.eval ever throws an # exception (such as KeyboardInterrupt) selected = GetIntValue( "inputlist( " + json.dumps( vim_items ) + " )" ) - 1 except KeyboardInterrupt: selected = -1 finally: vim.eval( 'inputrestore()' ) if selected < 0 or selected >= len( items ): # User selected something outside of the range raise RuntimeError( NO_SELECTION_MADE_MSG ) return selected def EscapeForVim( text ): return ToUnicode( text.replace( "'", "''" ) ) def CurrentFiletypes(): return VimExpressionToPythonType( "&filetype" ).split( '.' ) def FiletypesForBuffer( buffer_object ): # NOTE: Getting &ft for other buffers only works when the buffer has been # visited by the user at least once, which is true for modified buffers return GetBufferOption( buffer_object, 'ft' ).split( '.' ) def VariableExists( variable ): return GetBoolValue( "exists( '{0}' )".format( EscapeForVim( variable ) ) ) def SetVariableValue( variable, value ): vim.command( "let {0} = {1}".format( variable, json.dumps( value ) ) ) def GetVariableValue( variable ): return vim.eval( variable ) def GetBoolValue( variable ): return bool( int( vim.eval( variable ) ) ) def GetIntValue( variable ): return int( vim.eval( variable ) ) def _SortChunksByFile( chunks ): """Sort the members of the list |chunks| (which must be a list of dictionaries conforming to ycmd.responses.FixItChunk) by their filepath. Returns a new mapping from filepath to a list of chunks, in arbitrary order.""" chunks_by_file = defaultdict( list ) for chunk in chunks: filepath = chunk[ 'range' ][ 'start' ][ 'filepath' ] chunks_by_file[ filepath ].append( chunk ) return chunks_by_file def _GetNumNonVisibleFiles( file_list ): """Returns the number of files in the iterable list of files |file_list| which are not currently open in visible windows""" return len( [ f for f in file_list if not BufferIsVisible( GetBufferNumberForFilename( f, False ) ) ] ) def _OpenFileInSplitIfNeeded( filepath ): """Ensure that the supplied filepath is open in a visible window, opening a new split if required. Returns the buffer number of the file and an indication of whether or not a new split was opened. If the supplied filename is already open in a visible window, just return its buffer number. If the supplied file is not visible in a window in the current tab, opens it in a new vertical split.
Returns a tuple of ( buffer_num, split_was_opened ) indicating the buffer number and whether or not this method created a new split. If the user opts not to open a file, or if opening fails, this method raises RuntimeError; otherwise, it guarantees to return a visible buffer number in buffer_num.""" buffer_num = GetBufferNumberForFilename( filepath, False ) # We only apply changes in the current tab page (i.e. "visible" windows). # Applying changes in tabs does not lead to a better user experience, as the # quickfix list no longer works as you might expect (doesn't jump into other # tabs), and the complexity of choosing where to apply edits is significant. if BufferIsVisible( buffer_num ): # file is already open and visible, just return that buffer number (and an # indicator that we *didn't* open a split) return ( buffer_num, False ) # The file is not open in a visible window, so we open it in a split. # We open the file with a small, fixed height. This means that we don't # make the current buffer the smallest after a series of splits. OpenFilename( filepath, { 'focus': True, 'fix': True, 'size': GetIntValue( '&previewheight' ), } ) # OpenFilename returns us to the original cursor location. This is what we # want, because we don't want to disorientate the user, but we do need to # know the (now open) buffer number for the filename buffer_num = GetBufferNumberForFilename( filepath, False ) if not BufferIsVisible( buffer_num ): # This happens, for example, if there is a swap file and the user # selects the "Quit" or "Abort" options. We just raise an exception to # make it clear to the user that the abort has left potentially # partially-applied changes. raise RuntimeError( 'Unable to open file: {0}\nFixIt/Refactor operation ' 'aborted prior to completion. Your files have not been ' 'fully updated. Please use undo commands to revert the ' 'applied changes.'.format( filepath ) ) # We opened this file in a split return ( buffer_num, True ) def ReplaceChunks( chunks ): """Apply the source file deltas supplied in |chunks| to arbitrary files. |chunks| is a list of changes defined by ycmd.responses.FixItChunk, which may apply arbitrary modifications to arbitrary files. If a file specified in a particular chunk is not currently open in a visible buffer (i.e., one in a window visible in the current tab), we: - issue a warning to the user that we're going to open new files (and offer her the option to abort cleanly) - open the file in a new split, make the changes, then hide the buffer. If for some reason a file could not be opened or changed, raises RuntimeError. Otherwise, returns no meaningful value.""" # We apply the edits file-wise for efficiency, and because we must track the # file-wise offset deltas (caused by the modifications to the text). chunks_by_file = _SortChunksByFile( chunks ) # We sort the file list simply to enable repeatable testing sorted_file_list = sorted( iterkeys( chunks_by_file ) ) # Make sure the user is prepared to have her screen mutilated by the new # buffers num_files_to_open = _GetNumNonVisibleFiles( sorted_file_list ) if num_files_to_open > 0: if not Confirm( FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT.format( num_files_to_open ) ): return # Store the list of locations where we applied changes. We use this to display # the quickfix window showing the user where we applied changes.
locations = [] for filepath in sorted_file_list: ( buffer_num, close_window ) = _OpenFileInSplitIfNeeded( filepath ) ReplaceChunksInBuffer( chunks_by_file[ filepath ], vim.buffers[ buffer_num ], locations ) # When opening tons of files, we don't want to have a split for each new # file, as this simply does not scale, so we open the window, make the # edits, then hide the window. if close_window: # Some plugins (I'm looking at you, syntastic) might open a location list # for the window we just opened. We don't want that location list hanging # around, so we close it. lclose is a no-op if there is no location list. vim.command( 'lclose' ) # Note that this doesn't lose our changes. It simply "hides" the buffer, # which can later be re-accessed via the quickfix list or `:ls` vim.command( 'hide' ) # Open the quickfix list, populated with entries for each location we changed. if locations: SetQuickFixList( locations ) PostVimMessage( 'Applied {0} changes'.format( len( chunks ) ), warning = False ) def ReplaceChunksInBuffer( chunks, vim_buffer, locations ): """Apply changes in |chunks| to the buffer-like object |buffer|. Append each chunk's start to the list |locations|""" # We need to track the difference in length, but ensuring we apply fixes # in ascending order of insertion point. chunks.sort( key = lambda chunk: ( chunk[ 'range' ][ 'start' ][ 'line_num' ], chunk[ 'range' ][ 'start' ][ 'column_num' ] ) ) # Remember the line number we're processing. Negative line number means we # haven't processed any lines yet (by nature of being not equal to any # real line number). last_line = -1 line_delta = 0 for chunk in chunks: if chunk[ 'range' ][ 'start' ][ 'line_num' ] != last_line: # If this chunk is on a different line than the previous chunk, # then ignore previous deltas (as offsets won't have changed). last_line = chunk[ 'range' ][ 'end' ][ 'line_num' ] char_delta = 0 ( new_line_delta, new_char_delta ) = ReplaceChunk( chunk[ 'range' ][ 'start' ], chunk[ 'range' ][ 'end' ], chunk[ 'replacement_text' ], line_delta, char_delta, vim_buffer, locations ) line_delta += new_line_delta char_delta += new_char_delta # Replace the chunk of text specified by a contiguous range with the supplied # text. # * start and end are objects with line_num and column_num properties # * the range is inclusive # * indices are all 1-based # * the returned character delta is the delta for the last line # # returns the delta (in lines and characters) that any position after the end # needs to be adjusted by. # # NOTE: Works exclusively with bytes() instances and byte offsets as returned # by ycmd and used within the Vim buffers def ReplaceChunk( start, end, replacement_text, line_delta, char_delta, vim_buffer, locations = None ): # ycmd's results are all 1-based, but vim's/python's are all 0-based # (so we do -1 on all of the values) start_line = start[ 'line_num' ] - 1 + line_delta end_line = end[ 'line_num' ] - 1 + line_delta source_lines_count = end_line - start_line + 1 start_column = start[ 'column_num' ] - 1 + char_delta end_column = end[ 'column_num' ] - 1 if source_lines_count == 1: end_column += char_delta # NOTE: replacement_text is unicode, but all our offsets are byte offsets, # so we convert to bytes replacement_lines = ToBytes( replacement_text ).splitlines( False ) if not replacement_lines: replacement_lines = [ bytes( b'' ) ] replacement_lines_count = len( replacement_lines ) # NOTE: Vim buffers are a list of byte objects on Python 2 but unicode # objects on Python 3. 
end_existing_text = ToBytes( vim_buffer[ end_line ] )[ end_column : ] start_existing_text = ToBytes( vim_buffer[ start_line ] )[ : start_column ] new_char_delta = ( len( replacement_lines[ -1 ] ) - ( end_column - start_column ) ) if replacement_lines_count > 1: new_char_delta -= start_column replacement_lines[ 0 ] = start_existing_text + replacement_lines[ 0 ] replacement_lines[ -1 ] = replacement_lines[ -1 ] + end_existing_text vim_buffer[ start_line : end_line + 1 ] = replacement_lines[:] if locations is not None: locations.append( { 'bufnr': vim_buffer.number, 'filename': vim_buffer.name, # line and column numbers are 1-based in qflist 'lnum': start_line + 1, 'col': start_column + 1, 'text': replacement_text, 'type': 'F', } ) new_line_delta = replacement_lines_count - source_lines_count return ( new_line_delta, new_char_delta ) def InsertNamespace( namespace ): if VariableExists( 'g:ycm_csharp_insert_namespace_expr' ): expr = GetVariableValue( 'g:ycm_csharp_insert_namespace_expr' ) if expr: SetVariableValue( "g:ycm_namespace_to_insert", namespace ) vim.eval( expr ) return pattern = '^\s*using\(\s\+[a-zA-Z0-9]\+\s\+=\)\?\s\+[a-zA-Z0-9.]\+\s*;\s*' existing_indent = '' line = SearchInCurrentBuffer( pattern ) if line: existing_line = LineTextInCurrentBuffer( line ) existing_indent = re.sub( r"\S.*", "", existing_line ) new_line = "{0}using {1};\n\n".format( existing_indent, namespace ) replace_pos = { 'line_num': line + 1, 'column_num': 1 } ReplaceChunk( replace_pos, replace_pos, new_line, 0, 0, vim.current.buffer ) PostVimMessage( 'Add namespace: {0}'.format( namespace ), warning = False ) def SearchInCurrentBuffer( pattern ): """ Returns the 1-indexed line on which the pattern matches (going UP from the current position) or 0 if not found """ return GetIntValue( "search('{0}', 'Wcnb')".format( EscapeForVim( pattern ))) def LineTextInCurrentBuffer( line_number ): """ Returns the text on the 1-indexed line (NOT 0-indexed) """ return vim.current.buffer[ line_number - 1 ] def ClosePreviewWindow(): """ Close the preview window if it is present, otherwise do nothing """ vim.command( 'silent! pclose!' ) def JumpToPreviewWindow(): """ Jump the vim cursor to the preview window, which must be active. Returns boolean indicating if the cursor ended up in the preview window """ vim.command( 'silent! wincmd P' ) return vim.current.window.options[ 'previewwindow' ] def JumpToPreviousWindow(): """ Jump the vim cursor to its previous window position """ vim.command( 'silent! wincmd p' ) def JumpToTab( tab_number ): """Jump to Vim tab with corresponding number """ vim.command( 'silent! tabn {0}'.format( tab_number ) ) def OpenFileInPreviewWindow( filename ): """ Open the supplied filename in the preview window """ vim.command( 'silent! pedit! ' + filename ) def WriteToPreviewWindow( message ): """ Display the supplied message in the preview window """ # This isn't something that comes naturally to Vim. Vim only wants to show # tags and/or actual files in the preview window, so we have to hack it a # little bit. We generate a temporary file name and "open" that, then write # the data to it. We make sure the buffer can't be edited or saved. Other # approaches include simply opening a split, but we want to take advantage of # the existing Vim options for preview window height, etc. ClosePreviewWindow() OpenFileInPreviewWindow( vim.eval( 'tempname()' ) ) if JumpToPreviewWindow(): # We actually got to the preview window. 
By default the preview window can't # be changed, so we make it writable, write to it, then make it read only # again. vim.current.buffer.options[ 'modifiable' ] = True vim.current.buffer.options[ 'readonly' ] = False vim.current.buffer[:] = message.splitlines() vim.current.buffer.options[ 'buftype' ] = 'nofile' vim.current.buffer.options[ 'bufhidden' ] = 'wipe' vim.current.buffer.options[ 'buflisted' ] = False vim.current.buffer.options[ 'swapfile' ] = False vim.current.buffer.options[ 'modifiable' ] = False vim.current.buffer.options[ 'readonly' ] = True # We need to prevent closing the window from causing a warning about an # unsaved file, so we pretend to Vim that the buffer has not been changed. vim.current.buffer.options[ 'modified' ] = False JumpToPreviousWindow() else: # We couldn't get to the preview window, but we still want to give the user # the information we have. The only remaining option is to echo to the # status area. PostVimMessage( message, warning = False ) def BufferIsVisibleForFilename( filename ): """Check if a buffer exists for a specific file.""" buffer_number = GetBufferNumberForFilename( filename, False ) return BufferIsVisible( buffer_number ) def CloseBuffersForFilename( filename ): """Close all buffers for a specific file.""" buffer_number = GetBufferNumberForFilename( filename, False ) while buffer_number != -1: vim.command( 'silent! bwipeout! {0}'.format( buffer_number ) ) new_buffer_number = GetBufferNumberForFilename( filename, False ) if buffer_number == new_buffer_number: raise RuntimeError( "Buffer {0} for filename '{1}' should already be " "wiped out.".format( buffer_number, filename ) ) buffer_number = new_buffer_number def OpenFilename( filename, options = {} ): """Open a file in Vim. The following options are available: - command: specify which Vim command is used to open the file. Choices are same-buffer, horizontal-split, vertical-split, and new-tab (default: horizontal-split); - size: set the height of the window for a horizontal split or the width for a vertical one (default: ''); - fix: set the winfixheight option for a horizontal split or winfixwidth for a vertical one (default: False). See :h winfix for details; - focus: focus the opened file (default: False); - watch: automatically watch for changes (default: False). This is useful for logs; - position: set the position where the file is opened (default: start). Choices are start and end.""" # Set the options. command = GetVimCommand( options.get( 'command', 'horizontal-split' ), 'horizontal-split' ) size = ( options.get( 'size', '' ) if command in [ 'split', 'vsplit' ] else '' ) focus = options.get( 'focus', False ) # There is no command in Vim to return to the previous tab so we need to # remember the current tab if needed. if not focus and command == 'tabedit': previous_tab = GetIntValue( 'tabpagenr()' ) else: previous_tab = None # Open the file. try: vim.command( '{0}{1} {2}'.format( size, command, filename ) ) # When the file we are trying to jump to has a swap file, # Vim opens swap-exists-choices dialog and throws vim.error with E325 error, # or KeyboardInterrupt after user selects one of the options which actually # opens the file (Open read-only/Edit anyway). except vim.error as e: if 'E325' not in str( e ): raise # Otherwise, the user might have chosen Quit.
This is detectable by the # current file not being the target file if filename != GetCurrentBufferFilepath(): return except KeyboardInterrupt: # Raised when the user selects "Abort" after swap-exists-choices return _SetUpLoadedBuffer( command, filename, options.get( 'fix', False ), options.get( 'position', 'start' ), options.get( 'watch', False ) ) # Vim automatically sets the focus to the opened file so we need to get the # focus back (if the focus option is disabled) when opening a new tab or # window. if not focus: if command == 'tabedit': JumpToTab( previous_tab ) if command in [ 'split', 'vsplit' ]: JumpToPreviousWindow() def _SetUpLoadedBuffer( command, filename, fix, position, watch ): """After opening a buffer, configure it according to the supplied options, which are as defined by the OpenFilename method.""" if command == 'split': vim.current.window.options[ 'winfixheight' ] = fix if command == 'vsplit': vim.current.window.options[ 'winfixwidth' ] = fix if watch: vim.current.buffer.options[ 'autoread' ] = True vim.command( "exec 'au BufEnter <buffer> :silent! checktime {0}'" .format( filename ) ) if position == 'end': vim.command( 'silent! normal! Gzz' )
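# ---------------------------------------------------------------------------
# Standalone sketch (added for illustration; _ApplyChunkToLines is a
# hypothetical helper, not part of vimsupport). ReplaceChunk above must track
# how earlier edits shift the coordinates of later ones via line_delta and
# char_delta. This mock applies the same arithmetic to a plain list of byte
# strings, so the bookkeeping can be checked without a running Vim instance.
def _ApplyChunkToLines( lines, start, end, replacement_text,
                        line_delta, char_delta ):
  # ycmd coordinates are 1-based byte offsets; Python lists are 0-based.
  start_line = start[ 'line_num' ] - 1 + line_delta
  end_line = end[ 'line_num' ] - 1 + line_delta
  source_lines_count = end_line - start_line + 1
  start_column = start[ 'column_num' ] - 1 + char_delta
  end_column = end[ 'column_num' ] - 1
  if source_lines_count == 1:
    end_column += char_delta

  replacement_lines = replacement_text.encode( 'utf8' ).splitlines( False )
  if not replacement_lines:
    replacement_lines = [ b'' ]

  # Splice the retained prefix/suffix around the replacement, mirroring what
  # ReplaceChunk does against real Vim buffer objects.
  head = lines[ start_line ][ : start_column ]
  tail = lines[ end_line ][ end_column : ]
  new_char_delta = len( replacement_lines[ -1 ] ) - ( end_column - start_column )
  if len( replacement_lines ) > 1:
    new_char_delta -= start_column
  replacement_lines[ 0 ] = head + replacement_lines[ 0 ]
  replacement_lines[ -1 ] = replacement_lines[ -1 ] + tail
  lines[ start_line : end_line + 1 ] = replacement_lines
  return len( replacement_lines ) - source_lines_count, new_char_delta


if __name__ == '__main__':
  buf = [ b'int foo;', b'int bar;' ]
  # Replace "foo" on line 1 (columns 5-8, end column excluded) with "renamed".
  deltas = _ApplyChunkToLines( buf,
                               { 'line_num': 1, 'column_num': 5 },
                               { 'line_num': 1, 'column_num': 8 },
                               'renamed', 0, 0 )
  assert buf == [ b'int renamed;', b'int bar;' ]
  assert deltas == ( 0, 4 )  # same line count; last line grew by 4 bytes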
gpl-3.0
bdupharm/sqlalchemy
test/ext/test_mutable.py
4
28657
from sqlalchemy import Integer, ForeignKey, String from sqlalchemy.types import PickleType, TypeDecorator, VARCHAR from sqlalchemy.orm import mapper, Session, composite from sqlalchemy.orm.mapper import Mapper from sqlalchemy.orm.instrumentation import ClassManager from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import eq_, assert_raises_message, assert_raises from sqlalchemy.testing.util import picklers from sqlalchemy.testing import fixtures from sqlalchemy.ext.mutable import MutableComposite from sqlalchemy.ext.mutable import MutableDict, MutableList, MutableSet class Foo(fixtures.BasicEntity): pass class SubFoo(Foo): pass class FooWithEq(object): def __init__(self, **kw): for k in kw: setattr(self, k, kw[k]) def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id class Point(MutableComposite): def __init__(self, x, y): self.x = x self.y = y def __setattr__(self, key, value): object.__setattr__(self, key, value) self.changed() def __composite_values__(self): return self.x, self.y def __getstate__(self): return self.x, self.y def __setstate__(self, state): self.x, self.y = state def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y class MyPoint(Point): @classmethod def coerce(cls, key, value): if isinstance(value, tuple): value = Point(*value) return value class _MutableDictTestFixture(object): @classmethod def _type_fixture(cls): return MutableDict def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_MutableDictTestFixture, self).teardown() class _MutableDictTestBase(_MutableDictTestFixture): run_define_tables = 'each' def setup_mappers(cls): foo = cls.tables.foo mapper(Foo, foo) def test_coerce_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, None) def test_coerce_raise(self): assert_raises_message( ValueError, "Attribute 'data' does not accept objects of type", Foo, data=set([1, 2, 3]) ) def test_in_place_mutation(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data['a'] = 'c' sess.commit() eq_(f1.data, {'a': 'c'}) def test_clear(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data.clear() sess.commit() eq_(f1.data, {}) def test_update(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data.update({'a': 'z'}) sess.commit() eq_(f1.data, {'a': 'z'}) def test_pop(self): sess = Session() f1 = Foo(data={'a': 'b', 'c': 'd'}) sess.add(f1) sess.commit() eq_(f1.data.pop('a'), 'b') sess.commit() assert_raises(KeyError, f1.data.pop, 'g') eq_(f1.data, {'c': 'd'}) def test_pop_default(self): sess = Session() f1 = Foo(data={'a': 'b', 'c': 'd'}) sess.add(f1) sess.commit() eq_(f1.data.pop('a', 'q'), 'b') eq_(f1.data.pop('a', 'q'), 'q') sess.commit() eq_(f1.data, {'c': 'd'}) def test_popitem(self): sess = Session() orig = {'a': 'b', 'c': 'd'} # the orig dict remains unchanged when we assign, # but just making this future-proof data = dict(orig) f1 = Foo(data=data) sess.add(f1) sess.commit() k, v = f1.data.popitem() assert k in ('a', 'c') orig.pop(k) sess.commit() eq_(f1.data, orig) def test_setdefault(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() eq_(f1.data.setdefault('c', 'd'), 'd') sess.commit() eq_(f1.data, {'a': 'b', 'c': 'd'}) eq_(f1.data.setdefault('c', 'q'), 'd') sess.commit() eq_(f1.data, {'a': 'b', 'c': 'd'}) def test_replace(self): sess = Session() f1 = 
Foo(data={'a': 'b'}) sess.add(f1) sess.flush() f1.data = {'b': 'c'} sess.commit() eq_(f1.data, {'b': 'c'}) def test_replace_itself_still_ok(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.flush() f1.data = f1.data f1.data['b'] = 'c' sess.commit() eq_(f1.data, {'a': 'b', 'b': 'c'}) def test_pickle_parent(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data['a'] = 'c' assert f2 in sess.dirty def test_unrelated_flush(self): sess = Session() f1 = Foo(data={"a": "b"}, unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data["a"] = "c" sess.commit() eq_(f1.data["a"], "c") def _test_non_mutable(self): sess = Session() f1 = Foo(non_mutable_data={'a': 'b'}) sess.add(f1) sess.commit() f1.non_mutable_data['a'] = 'c' sess.commit() eq_(f1.non_mutable_data, {'a': 'b'}) class _MutableListTestFixture(object): @classmethod def _type_fixture(cls): return MutableList def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_MutableListTestFixture, self).teardown() class _MutableListTestBase(_MutableListTestFixture): run_define_tables = 'each' def setup_mappers(cls): foo = cls.tables.foo mapper(Foo, foo) def test_coerce_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, None) def test_coerce_raise(self): assert_raises_message( ValueError, "Attribute 'data' does not accept objects of type", Foo, data=set([1, 2, 3]) ) def test_in_place_mutation(self): sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data[0] = 3 sess.commit() eq_(f1.data, [3, 2]) def test_in_place_slice_mutation(self): sess = Session() f1 = Foo(data=[1, 2, 3, 4]) sess.add(f1) sess.commit() f1.data[1:3] = 5, 6 sess.commit() eq_(f1.data, [1, 5, 6, 4]) def test_del_slice(self): sess = Session() f1 = Foo(data=[1, 2, 3, 4]) sess.add(f1) sess.commit() del f1.data[1:3] sess.commit() eq_(f1.data, [1, 4]) def test_clear(self): if not hasattr(list, 'clear'): # py2 list doesn't have 'clear' return sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data.clear() sess.commit() eq_(f1.data, []) def test_pop(self): sess = Session() f1 = Foo(data=[1, 2, 3]) sess.add(f1) sess.commit() eq_(f1.data.pop(), 3) eq_(f1.data.pop(0), 1) sess.commit() assert_raises(IndexError, f1.data.pop, 5) eq_(f1.data, [2]) def test_append(self): sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data.append(5) sess.commit() eq_(f1.data, [1, 2, 5]) def test_extend(self): sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data.extend([5]) sess.commit() eq_(f1.data, [1, 2, 5]) def test_insert(self): sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data.insert(1, 5) sess.commit() eq_(f1.data, [1, 5, 2]) def test_remove(self): sess = Session() f1 = Foo(data=[1, 2, 3]) sess.add(f1) sess.commit() f1.data.remove(2) sess.commit() eq_(f1.data, [1, 3]) def test_sort(self): sess = Session() f1 = Foo(data=[1, 3, 2]) sess.add(f1) sess.commit() f1.data.sort() sess.commit() eq_(f1.data, [1, 2, 3]) def test_reverse(self): sess = Session() f1 = Foo(data=[1, 3, 2]) sess.add(f1) sess.commit() f1.data.reverse() sess.commit() eq_(f1.data, [2, 3, 1]) def test_pickle_parent(self): sess = Session() f1 = Foo(data=[1, 2]) sess.add(f1) sess.commit() f1.data sess.close() for loads, dumps in picklers(): sess = Session() f2 = 
loads(dumps(f1)) sess.add(f2) f2.data[0] = 3 assert f2 in sess.dirty def test_unrelated_flush(self): sess = Session() f1 = Foo(data=[1, 2], unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data[0] = 3 sess.commit() eq_(f1.data[0], 3) class _MutableSetTestFixture(object): @classmethod def _type_fixture(cls): return MutableSet def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_MutableSetTestFixture, self).teardown() class _MutableSetTestBase(_MutableSetTestFixture): run_define_tables = 'each' def setup_mappers(cls): foo = cls.tables.foo mapper(Foo, foo) def test_coerce_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, None) def test_coerce_raise(self): assert_raises_message( ValueError, "Attribute 'data' does not accept objects of type", Foo, data=[1, 2, 3] ) def test_clear(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.clear() sess.commit() eq_(f1.data, set()) def test_pop(self): sess = Session() f1 = Foo(data=set([1])) sess.add(f1) sess.commit() eq_(f1.data.pop(), 1) sess.commit() assert_raises(KeyError, f1.data.pop) eq_(f1.data, set()) def test_add(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.add(5) sess.commit() eq_(f1.data, set([1, 2, 5])) def test_update(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.update(set([2, 5])) sess.commit() eq_(f1.data, set([1, 2, 5])) def test_intersection_update(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.intersection_update(set([2, 5])) sess.commit() eq_(f1.data, set([2])) def test_difference_update(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.difference_update(set([2, 5])) sess.commit() eq_(f1.data, set([1])) def test_symmetric_difference_update(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data.symmetric_difference_update(set([2, 5])) sess.commit() eq_(f1.data, set([1, 5])) def test_remove(self): sess = Session() f1 = Foo(data=set([1, 2, 3])) sess.add(f1) sess.commit() f1.data.remove(2) sess.commit() eq_(f1.data, set([1, 3])) def test_discard(self): sess = Session() f1 = Foo(data=set([1, 2, 3])) sess.add(f1) sess.commit() f1.data.discard(2) sess.commit() eq_(f1.data, set([1, 3])) f1.data.discard(2) sess.commit() eq_(f1.data, set([1, 3])) def test_pickle_parent(self): sess = Session() f1 = Foo(data=set([1, 2])) sess.add(f1) sess.commit() f1.data sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data.add(3) assert f2 in sess.dirty def test_unrelated_flush(self): sess = Session() f1 = Foo(data=set([1, 2]), unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data.add(3) sess.commit() eq_(f1.data, set([1, 2, 3])) class MutableColumnDefaultTest(_MutableDictTestFixture, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableDict = cls._type_fixture() mutable_pickle = MutableDict.as_mutable(PickleType) Table( 'foo', metadata, Column( 'id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', mutable_pickle, default={}), ) def setup_mappers(cls): foo = cls.tables.foo mapper(Foo, foo) def test_evt_on_flush_refresh(self): # test for #3427 sess = Session() f1 = Foo() sess.add(f1) sess.flush() assert isinstance(f1.data, self._type_fixture()) 
assert f1 not in sess.dirty f1.data['foo'] = 'bar' assert f1 in sess.dirty class MutableWithScalarPickleTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableDict = cls._type_fixture() mutable_pickle = MutableDict.as_mutable(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', mutable_pickle), Column('data', mutable_pickle), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) def test_non_mutable(self): self._test_non_mutable() class MutableWithScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value MutableDict = cls._type_fixture() Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', MutableDict.as_mutable(JSONEncodedDict)), Column('non_mutable_data', JSONEncodedDict), Column('unrelated_data', String(50)) ) def test_non_mutable(self): self._test_non_mutable() class MutableListWithScalarPickleTest(_MutableListTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableList = cls._type_fixture() mutable_pickle = MutableList.as_mutable(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', mutable_pickle), Column('data', mutable_pickle), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) class MutableSetWithScalarPickleTest(_MutableSetTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableSet = cls._type_fixture() mutable_pickle = MutableSet.as_mutable(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', mutable_pickle), Column('data', mutable_pickle), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) class MutableAssocWithAttrInheritTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', PickleType), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) Table('subfoo', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), ) def setup_mappers(cls): foo = cls.tables.foo subfoo = cls.tables.subfoo mapper(Foo, foo) mapper(SubFoo, subfoo, inherits=Foo) MutableDict.associate_with_attribute(Foo.data) def test_in_place_mutation(self): sess = Session() f1 = SubFoo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data['a'] = 'c' sess.commit() eq_(f1.data, {'a': 'c'}) def test_replace(self): sess = Session() f1 = SubFoo(data={'a': 'b'}) sess.add(f1) sess.flush() f1.data = {'b': 'c'} sess.commit() eq_(f1.data, {'b': 'c'}) class MutableAssociationScalarPickleTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableDict = cls._type_fixture() MutableDict.associate_with(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', PickleType), Column('data', PickleType), Column('unrelated_data', String(50)) ) class 
MutableAssociationScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value MutableDict = cls._type_fixture() MutableDict.associate_with(JSONEncodedDict) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', JSONEncodedDict), Column('unrelated_data', String(50)) ) class CustomMutableAssociationScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest): CustomMutableDict = None @classmethod def _type_fixture(cls): if not(getattr(cls, 'CustomMutableDict')): MutableDict = super( CustomMutableAssociationScalarJSONTest, cls)._type_fixture() class CustomMutableDict(MutableDict): pass cls.CustomMutableDict = CustomMutableDict return cls.CustomMutableDict @classmethod def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value CustomMutableDict = cls._type_fixture() CustomMutableDict.associate_with(JSONEncodedDict) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', JSONEncodedDict), Column('unrelated_data', String(50)) ) def test_pickle_parent(self): # Picklers don't know how to pickle CustomMutableDict, # but we aren't testing that here pass def test_coerce(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.flush() eq_(type(f1.data), self._type_fixture()) class _CompositeTestBase(object): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer), Column('y', Integer), Column('unrelated_data', String(50)) ) def setup(self): from sqlalchemy.ext import mutable mutable._setup_composite_listener() super(_CompositeTestBase, self).setup() def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_CompositeTestBase, self).teardown() @classmethod def _type_fixture(cls): return Point class MutableCompositeColumnDefaultTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( 'foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer, default=5), Column('y', Integer, default=9), Column('unrelated_data', String(50)) ) @classmethod def setup_mappers(cls): foo = cls.tables.foo cls.Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(cls.Point, foo.c.x, foo.c.y) }) def test_evt_on_flush_refresh(self): # this still worked prior to #3427 being fixed in any case sess = Session() f1 = Foo(data=self.Point(None, None)) sess.add(f1) sess.flush() eq_(f1.data, self.Point(5, 9)) assert f1 not in sess.dirty f1.data.x = 10 assert f1 in sess.dirty class MutableCompositesUnpickleTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def setup_mappers(cls): foo = cls.tables.foo cls.Point = cls._type_fixture() mapper(FooWithEq, foo, properties={ 'data': composite(cls.Point, foo.c.x, foo.c.y) }) def test_unpickle_modified_eq(self): u1 = 
FooWithEq(data=self.Point(3, 5)) for loads, dumps in picklers(): loads(dumps(u1)) class MutableCompositesTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def setup_mappers(cls): foo = cls.tables.foo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) def test_in_place_mutation(self): sess = Session() d = Point(3, 4) f1 = Foo(data=d) sess.add(f1) sess.commit() f1.data.y = 5 sess.commit() eq_(f1.data, Point(3, 5)) def test_pickle_of_parent(self): sess = Session() d = Point(3, 4) f1 = Foo(data=d) sess.add(f1) sess.commit() f1.data assert 'data' in f1.__dict__ sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data.y = 12 assert f2 in sess.dirty def test_set_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, Point(None, None)) f1.data.y = 5 sess.commit() eq_(f1.data, Point(None, 5)) def test_set_illegal(self): f1 = Foo() assert_raises_message( ValueError, "Attribute 'data' does not accept objects", setattr, f1, 'data', 'foo' ) def test_unrelated_flush(self): sess = Session() f1 = Foo(data=Point(3, 4), unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data.x = 5 sess.commit() eq_(f1.data.x, 5) class MutableCompositeCallableTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def setup_mappers(cls): foo = cls.tables.foo Point = cls._type_fixture() # in this case, this is not actually a MutableComposite. # so we don't expect it to track changes mapper(Foo, foo, properties={ 'data': composite(lambda x, y: Point(x, y), foo.c.x, foo.c.y) }) def test_basic(self): sess = Session() f1 = Foo(data=Point(3, 4)) sess.add(f1) sess.flush() f1.data.x = 5 sess.commit() # we didn't get the change. eq_(f1.data.x, 3) class MutableCompositeCustomCoerceTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def _type_fixture(cls): return MyPoint @classmethod def setup_mappers(cls): foo = cls.tables.foo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) def test_custom_coerce(self): f = Foo() f.data = (3, 4) eq_(f.data, Point(3, 4)) def test_round_trip_ok(self): sess = Session() f = Foo() f.data = (3, 4) sess.add(f) sess.commit() eq_(f.data, Point(3, 4)) class MutableInheritedCompositesTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer), Column('y', Integer) ) Table('subfoo', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), ) @classmethod def setup_mappers(cls): foo = cls.tables.foo subfoo = cls.tables.subfoo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) mapper(SubFoo, subfoo, inherits=Foo) def test_in_place_mutation_subclass(self): sess = Session() d = Point(3, 4) f1 = SubFoo(data=d) sess.add(f1) sess.commit() f1.data.y = 5 sess.commit() eq_(f1.data, Point(3, 5)) def test_pickle_of_parent_subclass(self): sess = Session() d = Point(3, 4) f1 = SubFoo(data=d) sess.add(f1) sess.commit() f1.data assert 'data' in f1.__dict__ sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data.y = 12 assert f2 in sess.dirty
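# ---------------------------------------------------------------------------
# Illustrative sketch (added for this document; not part of the test suite).
# It shows the end-to-end pattern these tests exercise: wrapping a column
# type in MutableDict.as_mutable() makes plain in-place mutation of the dict
# mark the parent row dirty, so the change is persisted on commit. The
# Record class and the in-memory SQLite engine are assumptions made purely
# for demonstration.
if __name__ == '__main__':
    from sqlalchemy import create_engine, Column as CoreColumn
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Record(Base):
        __tablename__ = 'record'
        id = CoreColumn(Integer, primary_key=True)
        data = CoreColumn(MutableDict.as_mutable(PickleType))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    sess = Session(engine)
    r = Record(data={'a': 'b'})
    sess.add(r)
    sess.commit()

    r.data['a'] = 'c'        # plain in-place mutation...
    assert r in sess.dirty   # ...is detected via the mutable extension
    sess.commit()
    assert r.data == {'a': 'c'}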
mit
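A minimal standalone sketch of the association pattern the tests above exercise, assuming SQLAlchemy is installed; the JSONEncodedDict/Foo names mirror the fixtures, but the model itself is illustrative:

import json

from sqlalchemy import Column, Integer, VARCHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.types import TypeDecorator


class JSONEncodedDict(TypeDecorator):
    """Stores a dict as a JSON-encoded VARCHAR."""
    impl = VARCHAR(50)

    def process_bind_param(self, value, dialect):
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        return json.loads(value) if value is not None else None


# Every mapped column of type JSONEncodedDict now gets in-place change
# tracking: `obj.data['k'] = 'v'` marks the parent object dirty.
MutableDict.associate_with(JSONEncodedDict)

Base = declarative_base()


class Foo(Base):
    __tablename__ = 'foo'
    id = Column(Integer, primary_key=True)
    data = Column(JSONEncodedDict)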
accomac/namebench
nb_third_party/dns/inet.py
248
3236
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """Generic Internet address helper functions.""" import socket import dns.ipv4 import dns.ipv6 # We assume that AF_INET is always defined. AF_INET = socket.AF_INET # AF_INET6 might not be defined in the socket module, but we need it. # We'll try to use the socket module's value, and if it doesn't work, # we'll use our own value. try: AF_INET6 = socket.AF_INET6 except AttributeError: AF_INET6 = 9999 def inet_pton(family, text): """Convert the textual form of a network address into its binary form. @param family: the address family @type family: int @param text: the textual address @type text: string @raises NotImplementedError: the address family specified is not implemented. @rtype: string """ if family == AF_INET: return dns.ipv4.inet_aton(text) elif family == AF_INET6: return dns.ipv6.inet_aton(text) else: raise NotImplementedError def inet_ntop(family, address): """Convert the binary form of a network address into its textual form. @param family: the address family @type family: int @param address: the binary address @type address: string @raises NotImplementedError: the address family specified is not implemented. @rtype: string """ if family == AF_INET: return dns.ipv4.inet_ntoa(address) elif family == AF_INET6: return dns.ipv6.inet_ntoa(address) else: raise NotImplementedError def af_for_address(text): """Determine the address family of a textual-form network address. @param text: the textual address @type text: string @raises ValueError: the address family cannot be determined from the input. @rtype: int """ try: junk = dns.ipv4.inet_aton(text) return AF_INET except: try: junk = dns.ipv6.inet_aton(text) return AF_INET6 except: raise ValueError def is_multicast(text): """Is the textual-form network address a multicast address? @param text: the textual address @raises ValueError: the address family cannot be determined from the input. @rtype: bool """ try: first = ord(dns.ipv4.inet_aton(text)[0]) return (first >= 224 and first <= 239) except: try: first = ord(dns.ipv6.inet_aton(text)[0]) return (first == 255) except: raise ValueError
apache-2.0
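For reference, a short usage sketch of the helpers above, assuming the dnspython package (which provides dns.ipv4 and dns.ipv6) is importable; the addresses are documentation examples:

import dns.inet

# Round-trip a textual IPv4 address through its packed binary form.
packed = dns.inet.inet_pton(dns.inet.AF_INET, '192.0.2.1')
assert dns.inet.inet_ntop(dns.inet.AF_INET, packed) == '192.0.2.1'

# The address family can be sniffed from the text alone.
assert dns.inet.af_for_address('2001:db8::1') == dns.inet.AF_INET6

# 224.0.0.0-239.255.255.255 is the IPv4 multicast range.
assert dns.inet.is_multicast('239.255.255.250')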
szopu/django
django/contrib/gis/gdal/srs.py
15
12001
""" The Spatial Reference class, represents OGR Spatial Reference objects. Example: >>> from django.contrib.gis.gdal import SpatialReference >>> srs = SpatialReference('WGS84') >>> print(srs) GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], TOWGS84[0,0,0,0,0,0,0], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.01745329251994328, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]] >>> print(srs.proj) +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs >>> print(srs.ellipsoid) (6378137.0, 6356752.3142451793, 298.25722356300003) >>> print(srs.projected, srs.geographic) False True >>> srs.import_epsg(32140) >>> print(srs.name) NAD83 / Texas South Central """ from ctypes import byref, c_char_p, c_int # Getting the error checking routine and exceptions from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import SRSException from django.contrib.gis.gdal.prototypes import srs as capi from django.utils import six from django.utils.encoding import force_bytes #### Spatial Reference class. #### class SpatialReference(GDALBase): """ A wrapper for the OGRSpatialReference object. According to the GDAL Web site, the SpatialReference object "provide[s] services to represent coordinate systems (projections and datums) and to transform between them." """ #### Python 'magic' routines #### def __init__(self, srs_input=''): """ Creates a GDAL OSR Spatial Reference object from the given input. The input may be string of OGC Well Known Text (WKT), an integer EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83'). """ srs_type = 'user' if isinstance(srs_input, six.string_types): # Encoding to ASCII if unicode passed in. if isinstance(srs_input, six.text_type): srs_input = srs_input.encode('ascii') try: # If SRID is a string, e.g., '4326', then make acceptable # as user input. srid = int(srs_input) srs_input = 'EPSG:%d' % srid except ValueError: pass elif isinstance(srs_input, six.integer_types): # EPSG integer code was input. srs_type = 'epsg' elif isinstance(srs_input, self.ptr_type): srs = srs_input srs_type = 'ogr' else: raise TypeError('Invalid SRS type "%s"' % srs_type) if srs_type == 'ogr': # Input is already an SRS pointer. srs = srs_input else: # Creating a new SRS pointer, using the string buffer. buf = c_char_p(b'') srs = capi.new_srs(buf) # If the pointer is NULL, throw an exception. if not srs: raise SRSException('Could not create spatial reference from: %s' % srs_input) else: self.ptr = srs # Importing from either the user input string or an integer SRID. if srs_type == 'user': self.import_user_input(srs_input) elif srs_type == 'epsg': self.import_epsg(srs_input) def __del__(self): "Destroys this spatial reference." if self._ptr and capi: capi.release_srs(self._ptr) def __getitem__(self, target): """ Returns the value of the given string attribute node, None if the node doesn't exist. Can also take a tuple as a parameter, (target, child), where child is the index of the attribute in the WKT. For example: >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... 
AUTHORITY["EPSG","4326"]]' >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326 >>> print(srs['GEOGCS']) WGS 84 >>> print(srs['DATUM']) WGS_1984 >>> print(srs['AUTHORITY']) EPSG >>> print(srs['AUTHORITY', 1]) # The authority value 4326 >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt 0 >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbole. EPSG >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units 9122 """ if isinstance(target, tuple): return self.attr_value(*target) else: return self.attr_value(target) def __str__(self): "The string representation uses 'pretty' WKT." return self.pretty_wkt #### SpatialReference Methods #### def attr_value(self, target, index=0): """ The attribute value for the given target node (e.g. 'PROJCS'). The index keyword specifies an index of the child node to return. """ if not isinstance(target, six.string_types) or not isinstance(index, int): raise TypeError return capi.get_attr_value(self.ptr, force_bytes(target), index) def auth_name(self, target): "Returns the authority name for the given string target node." return capi.get_auth_name(self.ptr, force_bytes(target)) def auth_code(self, target): "Returns the authority code for the given string target node." return capi.get_auth_code(self.ptr, force_bytes(target)) def clone(self): "Returns a clone of this SpatialReference object." return SpatialReference(capi.clone_srs(self.ptr)) def from_esri(self): "Morphs this SpatialReference from ESRI's format to EPSG." capi.morph_from_esri(self.ptr) def identify_epsg(self): """ This method inspects the WKT of this SpatialReference, and will add EPSG authority nodes where an EPSG identifier is applicable. """ capi.identify_epsg(self.ptr) def to_esri(self): "Morphs this SpatialReference to ESRI's format." capi.morph_to_esri(self.ptr) def validate(self): "Checks to see if the given spatial reference is valid." capi.srs_validate(self.ptr) #### Name & SRID properties #### @property def name(self): "Returns the name of this Spatial Reference." if self.projected: return self.attr_value('PROJCS') elif self.geographic: return self.attr_value('GEOGCS') elif self.local: return self.attr_value('LOCAL_CS') else: return None @property def srid(self): "Returns the SRID of top-level authority, or None if undefined." try: return int(self.attr_value('AUTHORITY', 1)) except (TypeError, ValueError): return None #### Unit Properties #### @property def linear_name(self): "Returns the name of the linear units." units, name = capi.linear_units(self.ptr, byref(c_char_p())) return name @property def linear_units(self): "Returns the value of the linear units." units, name = capi.linear_units(self.ptr, byref(c_char_p())) return units @property def angular_name(self): "Returns the name of the angular units." units, name = capi.angular_units(self.ptr, byref(c_char_p())) return name @property def angular_units(self): "Returns the value of the angular units." units, name = capi.angular_units(self.ptr, byref(c_char_p())) return units @property def units(self): """ Returns a 2-tuple of the units value and the units name, and will automatically determines whether to return the linear or angular units. 
""" units, name = None, None if self.projected or self.local: units, name = capi.linear_units(self.ptr, byref(c_char_p())) elif self.geographic: units, name = capi.angular_units(self.ptr, byref(c_char_p())) if name is not None: name.decode() return (units, name) #### Spheroid/Ellipsoid Properties #### @property def ellipsoid(self): """ Returns a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening) """ return (self.semi_major, self.semi_minor, self.inverse_flattening) @property def semi_major(self): "Returns the Semi Major Axis for this Spatial Reference." return capi.semi_major(self.ptr, byref(c_int())) @property def semi_minor(self): "Returns the Semi Minor Axis for this Spatial Reference." return capi.semi_minor(self.ptr, byref(c_int())) @property def inverse_flattening(self): "Returns the Inverse Flattening for this Spatial Reference." return capi.invflattening(self.ptr, byref(c_int())) #### Boolean Properties #### @property def geographic(self): """ Returns True if this SpatialReference is geographic (root node is GEOGCS). """ return bool(capi.isgeographic(self.ptr)) @property def local(self): "Returns True if this SpatialReference is local (root node is LOCAL_CS)." return bool(capi.islocal(self.ptr)) @property def projected(self): """ Returns True if this SpatialReference is a projected coordinate system (root node is PROJCS). """ return bool(capi.isprojected(self.ptr)) #### Import Routines ##### def import_epsg(self, epsg): "Imports the Spatial Reference from the EPSG code (an integer)." capi.from_epsg(self.ptr, epsg) def import_proj(self, proj): "Imports the Spatial Reference from a PROJ.4 string." capi.from_proj(self.ptr, proj) def import_user_input(self, user_input): "Imports the Spatial Reference from the given user input string." capi.from_user_input(self.ptr, force_bytes(user_input)) def import_wkt(self, wkt): "Imports the Spatial Reference from OGC WKT (string)" capi.from_wkt(self.ptr, byref(c_char_p(wkt))) def import_xml(self, xml): "Imports the Spatial Reference from an XML string." capi.from_xml(self.ptr, xml) #### Export Properties #### @property def wkt(self): "Returns the WKT representation of this Spatial Reference." return capi.to_wkt(self.ptr, byref(c_char_p())) @property def pretty_wkt(self, simplify=0): "Returns the 'pretty' representation of the WKT." return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify) @property def proj(self): "Returns the PROJ.4 representation for this Spatial Reference." return capi.to_proj(self.ptr, byref(c_char_p())) @property def proj4(self): "Alias for proj()." return self.proj @property def xml(self, dialect=''): "Returns the XML representation of this Spatial Reference." return capi.to_xml(self.ptr, byref(c_char_p()), dialect) class CoordTransform(GDALBase): "The coordinate system transformation object." def __init__(self, source, target): "Initializes on a source and target SpatialReference objects." if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference): raise TypeError('source and target must be of type SpatialReference') self.ptr = capi.new_ct(source._ptr, target._ptr) self._srs1_name = source.name self._srs2_name = target.name def __del__(self): "Deletes this Coordinate Transformation object." if self._ptr and capi: capi.destroy_ct(self._ptr) def __str__(self): return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
bsd-3-clause
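A brief sketch of the SpatialReference/CoordTransform API shown above, assuming a Django install with the GDAL and GEOS libraries available; the coordinates are illustrative:

from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.geos import Point

wgs84 = SpatialReference('WGS84')   # "well known" shorthand string
texas = SpatialReference(32140)     # EPSG integer code

ct = CoordTransform(wgs84, texas)
pt = Point(-95.36, 29.76, srid=4326)  # lon/lat near Houston
pt.transform(ct)                      # transformed in place
print(pt.x, pt.y)                     # NAD83 / Texas South Central coordinates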
Workday/OpenFrame
tools/telemetry/third_party/gsutilz/third_party/boto/boto/sqs/__init__.py
129
1705
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.sqs.regioninfo import SQSRegionInfo from boto.regioninfo import get_regions def regions(): """ Get all available regions for the SQS service. :rtype: list :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` """ from boto.sqs.connection import SQSConnection return get_regions( 'sqs', region_cls=SQSRegionInfo, connection_cls=SQSConnection ) def connect_to_region(region_name, **kw_params): for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None
bsd-3-clause
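Typical use of these helpers, assuming boto is installed and AWS credentials are configured in the environment; the queue name is a placeholder:

import boto.sqs

for region in boto.sqs.regions():
    print(region.name)

# connect_to_region returns None for an unknown region name.
conn = boto.sqs.connect_to_region('us-east-1')
if conn is not None:
    queue = conn.create_queue('demo-queue')
    print(queue.count())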
darktears/chromium-crosswalk
tools/perf/page_sets/login_helpers/login_utils.py
25
2047
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import os DEFAULT_CREDENTIAL_PATH = os.path.join( os.path.dirname(__file__), os.path.pardir, 'data', 'credentials.json') def GetAccountNameAndPassword(credential, credentials_path=DEFAULT_CREDENTIAL_PATH): """Returns username and password for |credential| in credentials_path file. Args: credential: The credential to retrieve from the file (type string). credentials_path: The string that specifies the path to credential file. Returns: A tuple (username, password) in which both are username and password strings. """ with open(credentials_path, 'r') as f: credentials = json.load(f) c = credentials.get(credential) return c['username'], c['password'] def InputForm(action_runner, input_text, input_id, form_id=None): """Sets the text value of an input field in a form on the page. Waits until the input element exists on the page. Then executes JS to populate the value property of the element with |input_text|. Args: action_runner: ActionRunner instance to execute JS to populate form fields. input_text: Text string to populate the input field with. input_id: Id of the input field to populate. (type string). form_id: Optional form id string to identify |input_id| in querySelector. Raises: exceptions.TimeoutException: If waiting to find the element times out. exceptions.Error: See ExecuteJavaScript() for a detailed list of possible exceptions. """ if form_id and input_id: element_selector = '#%s #%s' % (form_id, input_id) elif input_id: element_selector = '#%s' % (input_id) else: raise ValueError("Input ID can not be None or empty.") action_runner.WaitForElement(selector=element_selector) action_runner.ExecuteJavaScript( 'document.querySelector("%s").value = "%s";' % (element_selector, input_text))
bsd-3-clause
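A self-contained sketch of GetAccountNameAndPassword run against a throwaway credentials file; the import path assumes the module above is importable as login_utils, and the credential values are placeholders:

import json
import tempfile

from login_utils import GetAccountNameAndPassword

# Write a temporary file matching the expected credentials.json shape.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'google': {'username': 'alice', 'password': 'hunter2'}}, f)
    path = f.name

username, password = GetAccountNameAndPassword(
    'google', credentials_path=path)
assert (username, password) == ('alice', 'hunter2')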
tgarc/rideagg
requests/packages/urllib3/contrib/ntlmpool.py
714
4741
# urllib3/contrib/ntlmpool.py # Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ try: from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection from logging import getLogger from ntlm import ntlm from urllib3 import HTTPSConnectionPool log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = 'https' def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split('\\', 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' % (self.num_connections, self.host, self.authurl)) headers = {} headers['Connection'] = 'Keep-Alive' req_header = 'Authorization' resp_header = 'www-authenticate' conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = ( 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % reshdr) log.debug('Response data: %s [...]' % res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(', ') auth_header_value = None for s in auth_header_values: if s[:5] == 'NTLM ': auth_header_value = s[5:] if auth_header_value is None: raise Exception('Unexpected %s response header: %s' % (resp_header, reshdr[resp_header])) # Send authentication message ServerChallenge, NegotiateFlags = \ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags) headers[req_header] = 'NTLM %s' % auth_msg log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % dict(res.getheaders())) log.debug('Response data: %s [...]' % res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception('Server rejected request: wrong ' 'username or password') raise Exception('Wrong server response: %s %s' % (res.status, res.reason)) res.fp = None log.debug('Connection established') return conn def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True): if headers is None: headers = {} headers['Connection'] = 'Keep-Alive' return super(NTLMConnectionPool, 
self).urlopen(method, url, body, headers, retries, redirect, assert_same_host)
apache-2.0
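Constructing the pool above might look like this; it assumes the python-ntlm package is installed and uses this repo's vendored import path, and the domain, credentials, host, and URL paths are all placeholders:

from requests.packages.urllib3.contrib.ntlmpool import NTLMConnectionPool

pool = NTLMConnectionPool(
    user=r'EXAMPLEDOMAIN\alice',   # DOMAIN\username format
    pw='s3cret',
    authurl='/ntlm-protected',     # any NTLM-protected URL on the host
    host='intranet.example.com',
    port=443,
)
response = pool.urlopen('GET', '/reports/latest')
print(response.status)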
bkg/django-spillway
spillway/fields.py
1
2068
"""Serializer fields""" from __future__ import absolute_import import collections from django.contrib.gis import geos, forms from django.db.models.query import QuerySet from rest_framework import renderers from rest_framework.fields import Field, FileField from spillway.compat import json from spillway.forms import fields class GeometryField(Field): def bind(self, field_name, parent): try: renderer = parent.context['request'].accepted_renderer except (AttributeError, KeyError): pass else: obj = parent.root.instance try: has_format = renderer.format in obj.query.annotations except AttributeError: if not isinstance(obj, QuerySet): try: obj = obj[0] except (IndexError, TypeError): pass has_format = hasattr(obj, renderer.format) if has_format: self.source = renderer.format super(GeometryField, self).bind(field_name, parent) def get_attribute(self, instance): # SpatiaLite returns empty/invalid geometries in WKT or GeoJSON with # exceedingly high simplification tolerances. try: return super(GeometryField, self).get_attribute(instance) except geos.GEOSException: return None def to_internal_value(self, data): # forms.GeometryField cannot handle geojson dicts. if isinstance(data, collections.Mapping): data = json.dumps(data) field = fields.GeometryField(widget=forms.BaseGeometryWidget()) return field.to_python(data) def to_representation(self, value): # Create a dict from the GEOSGeometry when the value is not previously # serialized from the spatial db. try: return {'type': value.geom_type, 'coordinates': value.coords} # Value is already serialized as geojson, kml, etc. except AttributeError: return value
bsd-3-clause
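A sketch of the field's coercion behaviour, assuming a configured Django project with django-spillway and its GEOS dependency installed:

from spillway.fields import GeometryField

field = GeometryField()

# GeoJSON dicts are serialized before being handed to the form field,
# which returns a GEOSGeometry.
geom = field.to_internal_value(
    {'type': 'Point', 'coordinates': [1.0, 2.0]})

# GEOSGeometry values come back out as GeoJSON-style dicts.
print(field.to_representation(geom))
# {'type': 'Point', 'coordinates': (1.0, 2.0)}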
godfather1103/WeiboRobot
python27/1.0/lib/ctypes/test/test_python_api.py
35
2955
from ctypes import * import unittest, sys from ctypes.test import requires ################################################################ # This section should be moved into ctypes\__init__.py, when it's ready. from _ctypes import PyObj_FromPtr ################################################################ from sys import getrefcount as grc if sys.version_info > (2, 4): c_py_ssize_t = c_size_t else: c_py_ssize_t = c_int class PythonAPITestCase(unittest.TestCase): def test_PyString_FromStringAndSize(self): PyString_FromStringAndSize = pythonapi.PyString_FromStringAndSize PyString_FromStringAndSize.restype = py_object PyString_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t self.assertEqual(PyString_FromStringAndSize("abcdefghi", 3), "abc") def test_PyString_FromString(self): pythonapi.PyString_FromString.restype = py_object pythonapi.PyString_FromString.argtypes = (c_char_p,) s = "abc" refcnt = grc(s) pyob = pythonapi.PyString_FromString(s) self.assertEqual(grc(s), refcnt) self.assertEqual(s, pyob) del pyob self.assertEqual(grc(s), refcnt) # This test is unreliable, because it is possible that code in # unittest changes the refcount of the '42' integer. So, it # is disabled by default. def test_PyInt_Long(self): requires("refcount") ref42 = grc(42) pythonapi.PyInt_FromLong.restype = py_object self.assertEqual(pythonapi.PyInt_FromLong(42), 42) self.assertEqual(grc(42), ref42) pythonapi.PyInt_AsLong.argtypes = (py_object,) pythonapi.PyInt_AsLong.restype = c_long res = pythonapi.PyInt_AsLong(42) self.assertEqual(grc(res), ref42 + 1) del res self.assertEqual(grc(42), ref42) def test_PyObj_FromPtr(self): s = "abc def ghi jkl" ref = grc(s) # id(python-object) is the address pyobj = PyObj_FromPtr(id(s)) self.assertIs(s, pyobj) self.assertEqual(grc(s), ref + 1) del pyobj self.assertEqual(grc(s), ref) def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p buf = c_buffer(256) PyOS_snprintf(buf, sizeof(buf), "Hello from %s", "ctypes") self.assertEqual(buf.value, "Hello from ctypes") PyOS_snprintf(buf, sizeof(buf), "Hello from %s", "ctypes", 1, 2, 3) self.assertEqual(buf.value, "Hello from ctypes") # not enough arguments self.assertRaises(TypeError, PyOS_snprintf, buf) def test_pyobject_repr(self): self.assertEqual(repr(py_object()), "py_object(<NULL>)") self.assertEqual(repr(py_object(42)), "py_object(42)") self.assertEqual(repr(py_object(object)), "py_object(%r)" % object) if __name__ == "__main__": unittest.main()
gpl-3.0
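The pattern these tests exercise, in miniature (Python 2 only, matching the file): declare restype/argtypes on a C-API function exposed through ctypes.pythonapi, then call it like any Python callable:

from ctypes import c_char_p, py_object, pythonapi

pythonapi.PyString_FromString.restype = py_object
pythonapi.PyString_FromString.argtypes = (c_char_p,)

# The C-level call builds an ordinary Python string object.
assert pythonapi.PyString_FromString("abc") == "abc"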
tlakshman26/cinder-https-changes
cinder/volume/flows/api/manage_existing.py
17
6017
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils from cinder.i18n import _LE from cinder.volume.flows import common LOG = logging.getLogger(__name__) ACTION = 'volume:manage_existing' CONF = cfg.CONF class EntryCreateTask(flow_utils.CinderTask): """Creates an entry for the given volume creation in the database. Reversion strategy: remove the volume_id created from the database. """ default_provides = set(['volume_properties', 'volume']) def __init__(self, db): requires = ['availability_zone', 'description', 'metadata', 'name', 'host', 'bootable', 'volume_type', 'ref'] super(EntryCreateTask, self).__init__(addons=[ACTION], requires=requires) self.db = db def execute(self, context, **kwargs): """Creates a database entry for the given inputs and returns details. Accesses the database and creates a new entry for the to be created volume using the given volume properties which are extracted from the input kwargs. """ volume_type = kwargs.pop('volume_type') volume_type_id = volume_type['id'] if volume_type else None volume_properties = { 'size': 0, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', # Rename these to the internal name. 'display_description': kwargs.pop('description'), 'display_name': kwargs.pop('name'), 'host': kwargs.pop('host'), 'availability_zone': kwargs.pop('availability_zone'), 'volume_type_id': volume_type_id, 'metadata': kwargs.pop('metadata'), 'bootable': kwargs.pop('bootable'), } volume = self.db.volume_create(context, volume_properties) return { 'volume_properties': volume_properties, # NOTE(harlowja): it appears like further usage of this volume # result actually depend on it being a sqlalchemy object and not # just a plain dictionary so that's why we are storing this here. # # In the future where this task results can be serialized and # restored automatically for continued running we will need to # resolve the serialization & recreation of this object since raw # sqlalchemy objects can't be serialized. 'volume': volume, } def revert(self, context, result, optional_args, **kwargs): # We never produced a result and therefore can't destroy anything. if isinstance(result, ft.Failure): return vol_id = result['volume_id'] try: self.db.volume_destroy(context.elevated(), vol_id) except exception.CinderException: LOG.exception(_LE("Failed destroying volume entry: %s."), vol_id) class ManageCastTask(flow_utils.CinderTask): """Performs a volume manage cast to the scheduler and to the volume manager. This which will signal a transition of the api workflow to another child and/or related workflow. 
""" def __init__(self, scheduler_rpcapi, db): requires = ['volume', 'volume_properties', 'volume_type', 'ref'] super(ManageCastTask, self).__init__(addons=[ACTION], requires=requires) self.scheduler_rpcapi = scheduler_rpcapi self.db = db def execute(self, context, **kwargs): volume = kwargs.pop('volume') request_spec = kwargs.copy() # Call the scheduler to ensure that the host exists and that it can # accept the volume self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic, volume['id'], request_spec=request_spec) def revert(self, context, result, flow_failures, **kwargs): # Restore the source volume status and set the volume to error status. volume_id = kwargs['volume_id'] common.error_out_volume(context, self.db, volume_id) LOG.error(_LE("Volume %s: manage failed."), volume_id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info LOG.error(_LE('Unexpected build error:'), exc_info=exc_info) def get_flow(scheduler_rpcapi, db_api, create_what): """Constructs and returns the api entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extracts and validates the input keys & values. 3. Creates the database entry. 4. Casts to volume manager and scheduler for further processing. """ flow_name = ACTION.replace(":", "_") + "_api" api_flow = linear_flow.Flow(flow_name) # This will cast it out to either the scheduler or volume manager via # the rpc apis provided. api_flow.add(EntryCreateTask(db_api), ManageCastTask(scheduler_rpcapi, db_api)) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(api_flow, store=create_what)
apache-2.0
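A minimal taskflow sketch mirroring the construction in get_flow above: build a linear flow of tasks, load it with an initial store, and run it. It assumes the taskflow package is installed; the task and store values are illustrative:

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow


class HelloTask(task.Task):
    def execute(self, name):
        # 'name' is injected from the engine's store by requirement name.
        print('hello, %s' % name)


flow = linear_flow.Flow('demo_flow')
flow.add(HelloTask())

engine = taskflow.engines.load(flow, store={'name': 'world'})
engine.run()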
3dfxsoftware/cbss-addons
poi_pos_cashier_lock/__init__.py
3
1093
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import point_of_sale import res_users # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
gpl-2.0
joeythesaint/yocto-autobuilder
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_threads.py
5
13048
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Test methods in twisted.internet.threads and reactor thread APIs. """ import sys, os, time from twisted.trial import unittest from twisted.internet import reactor, defer, interfaces, threads, protocol, error from twisted.python import failure, threadable, log, threadpool class ReactorThreadsTestCase(unittest.TestCase): """ Tests for the reactor threading API. """ def test_suggestThreadPoolSize(self): """ Try to change maximum number of threads. """ reactor.suggestThreadPoolSize(34) self.assertEqual(reactor.threadpool.max, 34) reactor.suggestThreadPoolSize(4) self.assertEqual(reactor.threadpool.max, 4) def _waitForThread(self): """ The reactor's threadpool is only available when the reactor is running, so to have a sane behavior during the tests we make a dummy L{threads.deferToThread} call. """ return threads.deferToThread(time.sleep, 0) def test_callInThread(self): """ Test callInThread functionality: set a C{threading.Event}, and check that it's not in the main thread. """ def cb(ign): waiter = threading.Event() result = [] def threadedFunc(): result.append(threadable.isInIOThread()) waiter.set() reactor.callInThread(threadedFunc) waiter.wait(120) if not waiter.isSet(): self.fail("Timed out waiting for event.") else: self.assertEqual(result, [False]) return self._waitForThread().addCallback(cb) def test_callFromThread(self): """ Test callFromThread functionality: from the main thread, and from another thread. """ def cb(ign): firedByReactorThread = defer.Deferred() firedByOtherThread = defer.Deferred() def threadedFunc(): reactor.callFromThread(firedByOtherThread.callback, None) reactor.callInThread(threadedFunc) reactor.callFromThread(firedByReactorThread.callback, None) return defer.DeferredList( [firedByReactorThread, firedByOtherThread], fireOnOneErrback=True) return self._waitForThread().addCallback(cb) def test_wakerOverflow(self): """ Try to make an overflow on the reactor waker using callFromThread. """ def cb(ign): self.failure = None waiter = threading.Event() def threadedFunction(): # Hopefully a hundred thousand queued calls is enough to # trigger the error condition for i in xrange(100000): try: reactor.callFromThread(lambda: None) except: self.failure = failure.Failure() break waiter.set() reactor.callInThread(threadedFunction) waiter.wait(120) if not waiter.isSet(): self.fail("Timed out waiting for event") if self.failure is not None: return defer.fail(self.failure) return self._waitForThread().addCallback(cb) def _testBlockingCallFromThread(self, reactorFunc): """ Utility method to test L{threads.blockingCallFromThread}. """ waiter = threading.Event() results = [] errors = [] def cb1(ign): def threadedFunc(): try: r = threads.blockingCallFromThread(reactor, reactorFunc) except Exception, e: errors.append(e) else: results.append(r) waiter.set() reactor.callInThread(threadedFunc) return threads.deferToThread(waiter.wait, self.getTimeout()) def cb2(ign): if not waiter.isSet(): self.fail("Timed out waiting for event") return results, errors return self._waitForThread().addCallback(cb1).addBoth(cb2) def test_blockingCallFromThread(self): """ Test blockingCallFromThread facility: create a thread, call a function in the reactor using L{threads.blockingCallFromThread}, and verify the result returned. 
""" def reactorFunc(): return defer.succeed("foo") def cb(res): self.assertEqual(res[0][0], "foo") return self._testBlockingCallFromThread(reactorFunc).addCallback(cb) def test_asyncBlockingCallFromThread(self): """ Test blockingCallFromThread as above, but be sure the resulting Deferred is not already fired. """ def reactorFunc(): d = defer.Deferred() reactor.callLater(0.1, d.callback, "egg") return d def cb(res): self.assertEqual(res[0][0], "egg") return self._testBlockingCallFromThread(reactorFunc).addCallback(cb) def test_errorBlockingCallFromThread(self): """ Test error report for blockingCallFromThread. """ def reactorFunc(): return defer.fail(RuntimeError("bar")) def cb(res): self.assert_(isinstance(res[1][0], RuntimeError)) self.assertEqual(res[1][0].args[0], "bar") return self._testBlockingCallFromThread(reactorFunc).addCallback(cb) def test_asyncErrorBlockingCallFromThread(self): """ Test error report for blockingCallFromThread as above, but be sure the resulting Deferred is not already fired. """ def reactorFunc(): d = defer.Deferred() reactor.callLater(0.1, d.errback, RuntimeError("spam")) return d def cb(res): self.assert_(isinstance(res[1][0], RuntimeError)) self.assertEqual(res[1][0].args[0], "spam") return self._testBlockingCallFromThread(reactorFunc).addCallback(cb) class Counter: index = 0 problem = 0 def add(self): """A non thread-safe method.""" next = self.index + 1 # another thread could jump in here and increment self.index on us if next != self.index + 1: self.problem = 1 raise ValueError # or here, same issue but we wouldn't catch it. We'd overwrite # their results, and the index will have lost a count. If # several threads get in here, we will actually make the count # go backwards when we overwrite it. self.index = next class DeferredResultTestCase(unittest.TestCase): """ Test twisted.internet.threads. """ def setUp(self): reactor.suggestThreadPoolSize(8) def tearDown(self): reactor.suggestThreadPoolSize(0) def testCallMultiple(self): L = [] N = 10 d = defer.Deferred() def finished(): self.assertEqual(L, range(N)) d.callback(None) threads.callMultipleInThread([ (L.append, (i,), {}) for i in xrange(N) ] + [(reactor.callFromThread, (finished,), {})]) return d def test_deferredResult(self): """ L{threads.deferToThread} executes the function passed, and correctly handles the positional and keyword arguments given. """ d = threads.deferToThread(lambda x, y=5: x + y, 3, y=4) d.addCallback(self.assertEqual, 7) return d def test_deferredFailure(self): """ Check that L{threads.deferToThread} return a failure object with an appropriate exception instance when the called function raises an exception. """ class NewError(Exception): pass def raiseError(): raise NewError() d = threads.deferToThread(raiseError) return self.assertFailure(d, NewError) def test_deferredFailureAfterSuccess(self): """ Check that a successfull L{threads.deferToThread} followed by a one that raises an exception correctly result as a failure. """ # set up a condition that causes cReactor to hang. These conditions # can also be set by other tests when the full test suite is run in # alphabetical order (test_flow.FlowTest.testThreaded followed by # test_internet.ReactorCoreTestCase.testStop, to be precise). By # setting them up explicitly here, we can reproduce the hang in a # single precise test case instead of depending upon side effects of # other tests. 
# # alas, this test appears to flunk the default reactor too d = threads.deferToThread(lambda: None) d.addCallback(lambda ign: threads.deferToThread(lambda: 1//0)) return self.assertFailure(d, ZeroDivisionError) class DeferToThreadPoolTestCase(unittest.TestCase): """ Test L{twisted.internet.threads.deferToThreadPool}. """ def setUp(self): self.tp = threadpool.ThreadPool(0, 8) self.tp.start() def tearDown(self): self.tp.stop() def test_deferredResult(self): """ L{threads.deferToThreadPool} executes the function passed, and correctly handles the positional and keyword arguments given. """ d = threads.deferToThreadPool(reactor, self.tp, lambda x, y=5: x + y, 3, y=4) d.addCallback(self.assertEqual, 7) return d def test_deferredFailure(self): """ Check that L{threads.deferToThreadPool} return a failure object with an appropriate exception instance when the called function raises an exception. """ class NewError(Exception): pass def raiseError(): raise NewError() d = threads.deferToThreadPool(reactor, self.tp, raiseError) return self.assertFailure(d, NewError) _callBeforeStartupProgram = """ import time import %(reactor)s %(reactor)s.install() from twisted.internet import reactor def threadedCall(): print 'threaded call' reactor.callInThread(threadedCall) # Spin very briefly to try to give the thread a chance to run, if it # is going to. Is there a better way to achieve this behavior? for i in xrange(100): time.sleep(0.0) """ class ThreadStartupProcessProtocol(protocol.ProcessProtocol): def __init__(self, finished): self.finished = finished self.out = [] self.err = [] def outReceived(self, out): self.out.append(out) def errReceived(self, err): self.err.append(err) def processEnded(self, reason): self.finished.callback((self.out, self.err, reason)) class StartupBehaviorTestCase(unittest.TestCase): """ Test cases for the behavior of the reactor threadpool near startup boundary conditions. In particular, this asserts that no threaded calls are attempted until the reactor starts up, that calls attempted before it starts are in fact executed once it has started, and that in both cases, the reactor properly cleans itself up (which is tested for somewhat implicitly, by requiring a child process be able to exit, something it cannot do unless the threadpool has been properly torn down). """ def testCallBeforeStartupUnexecuted(self): progname = self.mktemp() progfile = file(progname, 'w') progfile.write(_callBeforeStartupProgram % {'reactor': reactor.__module__}) progfile.close() def programFinished((out, err, reason)): if reason.check(error.ProcessTerminated): self.fail("Process did not exit cleanly (out: %s err: %s)" % (out, err)) if err: log.msg("Unexpected output on standard error: %s" % (err,)) self.failIf(out, "Expected no output, instead received:\n%s" % (out,)) def programTimeout(err): err.trap(error.TimeoutError) proto.signalProcess('KILL') return err env = os.environ.copy() env['PYTHONPATH'] = os.pathsep.join(sys.path) d = defer.Deferred().addCallbacks(programFinished, programTimeout) proto = ThreadStartupProcessProtocol(d) reactor.spawnProcess(proto, sys.executable, ('python', progname), env) return d if interfaces.IReactorThreads(reactor, None) is None: for cls in (ReactorThreadsTestCase, DeferredResultTestCase, StartupBehaviorTestCase): cls.skip = "No thread support, nothing to test here." else: import threading if interfaces.IReactorProcess(reactor, None) is None: for cls in (StartupBehaviorTestCase,): cls.skip = "No process support, cannot run subprocess thread tests."
gpl-2.0
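The core pattern the DeferredResultTestCase methods exercise, as a runnable sketch: push a blocking callable into the reactor's thread pool and receive its result back through a Deferred:

from twisted.internet import reactor, threads

def blocking_add(x, y=5):
    return x + y

def report(result):
    print('result: %d' % result)   # prints 7

d = threads.deferToThread(blocking_add, 3, y=4)
d.addCallback(report)
d.addBoth(lambda _: reactor.stop())

reactor.run()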
harrisonfeng/pip
pip/_vendor/distlib/resources.py
224
10615
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import bisect import io import logging import os import pkgutil import shutil import sys import types import zipimport from . import DistlibException from .util import cached_property, get_cache_base, path_to_cache_dir, Cache logger = logging.getLogger(__name__) cache = None # created when needed class ResourceCache(Cache): def __init__(self, base=None): if base is None: # Use native string to avoid issues on 2.x: see Python #20140. base = os.path.join(get_cache_base(), str('resource-cache')) super(ResourceCache, self).__init__(base) def is_stale(self, resource, path): """ Is the cache stale for the given resource? :param resource: The :class:`Resource` being cached. :param path: The path of the resource in the cache. :return: True if the cache is stale. """ # Cache invalidation is a hard problem :-) return True def get(self, resource): """ Get a resource into the cache, :param resource: A :class:`Resource` instance. :return: The pathname of the resource in the cache. """ prefix, path = resource.finder.get_cache_info(resource) if prefix is None: result = path else: result = os.path.join(self.base, self.prefix_to_dir(prefix), path) dirname = os.path.dirname(result) if not os.path.isdir(dirname): os.makedirs(dirname) if not os.path.exists(result): stale = True else: stale = self.is_stale(resource, path) if stale: # write the bytes of the resource to the cache location with open(result, 'wb') as f: f.write(resource.bytes) return result class ResourceBase(object): def __init__(self, finder, name): self.finder = finder self.name = name class Resource(ResourceBase): """ A class representing an in-package resource, such as a data file. This is not normally instantiated by user code, but rather by a :class:`ResourceFinder` which manages the resource. """ is_container = False # Backwards compatibility def as_stream(self): """ Get the resource as a stream. This is not a property to make it obvious that it returns a new stream each time. """ return self.finder.get_stream(self) @cached_property def file_path(self): global cache if cache is None: cache = ResourceCache() return cache.get(self) @cached_property def bytes(self): return self.finder.get_bytes(self) @cached_property def size(self): return self.finder.get_size(self) class ResourceContainer(ResourceBase): is_container = True # Backwards compatibility @cached_property def resources(self): return self.finder.get_resources(self) class ResourceFinder(object): """ Resource finder for file system resources. 
""" if sys.platform.startswith('java'): skipped_extensions = ('.pyc', '.pyo', '.class') else: skipped_extensions = ('.pyc', '.pyo') def __init__(self, module): self.module = module self.loader = getattr(module, '__loader__', None) self.base = os.path.dirname(getattr(module, '__file__', '')) def _adjust_path(self, path): return os.path.realpath(path) def _make_path(self, resource_name): # Issue #50: need to preserve type of path on Python 2.x # like os.path._get_sep if isinstance(resource_name, bytes): # should only happen on 2.x sep = b'/' else: sep = '/' parts = resource_name.split(sep) parts.insert(0, self.base) result = os.path.join(*parts) return self._adjust_path(result) def _find(self, path): return os.path.exists(path) def get_cache_info(self, resource): return None, resource.path def find(self, resource_name): path = self._make_path(resource_name) if not self._find(path): result = None else: if self._is_directory(path): result = ResourceContainer(self, resource_name) else: result = Resource(self, resource_name) result.path = path return result def get_stream(self, resource): return open(resource.path, 'rb') def get_bytes(self, resource): with open(resource.path, 'rb') as f: return f.read() def get_size(self, resource): return os.path.getsize(resource.path) def get_resources(self, resource): def allowed(f): return (f != '__pycache__' and not f.endswith(self.skipped_extensions)) return set([f for f in os.listdir(resource.path) if allowed(f)]) def is_container(self, resource): return self._is_directory(resource.path) _is_directory = staticmethod(os.path.isdir) def iterator(self, resource_name): resource = self.find(resource_name) if resource is not None: todo = [resource] while todo: resource = todo.pop(0) yield resource if resource.is_container: rname = resource.name for name in resource.resources: if not rname: new_name = name else: new_name = '/'.join([rname, name]) child = self.find(new_name) if child.is_container: todo.append(child) else: yield child class ZipResourceFinder(ResourceFinder): """ Resource finder for resources in .zip files. 
""" def __init__(self, module): super(ZipResourceFinder, self).__init__(module) archive = self.loader.archive self.prefix_len = 1 + len(archive) # PyPy doesn't have a _files attr on zipimporter, and you can't set one if hasattr(self.loader, '_files'): self._files = self.loader._files else: self._files = zipimport._zip_directory_cache[archive] self.index = sorted(self._files) def _adjust_path(self, path): return path def _find(self, path): path = path[self.prefix_len:] if path in self._files: result = True else: if path and path[-1] != os.sep: path = path + os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False if not result: logger.debug('_find failed: %r %r', path, self.loader.prefix) else: logger.debug('_find worked: %r %r', path, self.loader.prefix) return result def get_cache_info(self, resource): prefix = self.loader.archive path = resource.path[1 + len(prefix):] return prefix, path def get_bytes(self, resource): return self.loader.get_data(resource.path) def get_stream(self, resource): return io.BytesIO(self.get_bytes(resource)) def get_size(self, resource): path = resource.path[self.prefix_len:] return self._files[path][3] def get_resources(self, resource): path = resource.path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep plen = len(path) result = set() i = bisect.bisect(self.index, path) while i < len(self.index): if not self.index[i].startswith(path): break s = self.index[i][plen:] result.add(s.split(os.sep, 1)[0]) # only immediate children i += 1 return result def _is_directory(self, path): path = path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False return result _finder_registry = { type(None): ResourceFinder, zipimport.zipimporter: ZipResourceFinder } try: import _frozen_importlib _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder except (ImportError, AttributeError): pass def register_finder(loader, finder_maker): _finder_registry[type(loader)] = finder_maker _finder_cache = {} def finder(package): """ Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package. """ if package in _finder_cache: result = _finder_cache[package] else: if package not in sys.modules: __import__(package) module = sys.modules[package] path = getattr(module, '__path__', None) if path is None: raise DistlibException('You cannot get a finder for a module, ' 'only for a package') loader = getattr(module, '__loader__', None) finder_maker = _finder_registry.get(type(loader)) if finder_maker is None: raise DistlibException('Unable to locate finder for %r' % package) result = finder_maker(module) _finder_cache[package] = result return result _dummy_module = types.ModuleType(str('__dummy__')) def finder_for_path(path): """ Return a resource finder for a path, which should represent a container. :param path: The path. :return: A :class:`ResourceFinder` instance for the path. """ result = None # calls any path hooks, gets importer into cache pkgutil.get_importer(path) loader = sys.path_importer_cache.get(path) finder = _finder_registry.get(type(loader)) if finder: module = _dummy_module module.__file__ = os.path.join(path, '') module.__loader__ = loader result = finder(module) return result
mit
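Typical use of the module-level finder() entry point above, via this repo's vendored import path; 'mypackage' and the resource path are placeholders for a real importable package:

from pip._vendor.distlib.resources import finder

f = finder('mypackage')            # must name a package, not a module
res = f.find('data/config.json')   # Resource, ResourceContainer, or None
if res is not None and not res.is_container:
    print(res.size)                # byte size via the loader
    print(res.bytes[:64])          # raw contents
    with res.as_stream() as stream:
        stream.read()              # a fresh stream on each call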
thenenadx/forseti-security
google/cloud/security/notifier/pipelines/email_violations_pipeline.py
2
5623
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Email pipeline to perform notifications""" from datetime import datetime # TODO: Investigate improving so we can avoid the pylint disable. # pylint: disable=line-too-long from google.cloud.security.common.util import log_util from google.cloud.security.common.util import parser from google.cloud.security.common.util.email_util import EmailUtil from google.cloud.security.notifier.pipelines import base_notification_pipeline as bnp # pylint: enable=line-too-long # TODO: The next editor must remove this disable and correct issues. # pylint: disable=missing-type-doc,missing-return-type-doc # pylint: disable=missing-param-doc,differing-param-doc LOGGER = log_util.get_logger(__name__) TEMP_DIR = '/tmp' VIOLATIONS_JSON_FMT = 'violations.{}.{}.{}.json' OUTPUT_TIMESTAMP_FMT = '%Y%m%dT%H%M%SZ' class EmailViolationsPipeline(bnp.BaseNotificationPipeline): """Email pipeline to perform notifications""" def __init__(self, resource, cycle_timestamp, violations, notifier_config, pipeline_config): super(EmailViolationsPipeline, self).__init__(resource, cycle_timestamp, violations, notifier_config, pipeline_config) self.mail_util = EmailUtil(self.pipeline_config['sendgrid_api_key']) def _get_output_filename(self): """Create the output filename. Returns: The output filename for the violations json. """ now_utc = datetime.utcnow() output_timestamp = now_utc.strftime(OUTPUT_TIMESTAMP_FMT) output_filename = VIOLATIONS_JSON_FMT.format(self.resource, self.cycle_timestamp, output_timestamp) return output_filename def _write_temp_attachment(self): """Write the attachment to a temp file. Returns: The output filename for the violations json just written. """ # Make attachment output_file_name = self._get_output_filename() output_file_path = '{}/{}'.format(TEMP_DIR, output_file_name) with open(output_file_path, 'w+') as f: f.write(parser.json_stringify(self.violations)) return output_file_name def _make_attachment(self): """Create the attachment object. Returns: The attachment object. """ output_file_name = self._write_temp_attachment() attachment = self.mail_util.create_attachment( file_location='{}/{}'.format(TEMP_DIR, output_file_name), content_type='text/json', filename=output_file_name, disposition='attachment', content_id='Violations' ) return attachment def _make_content(self): """Create the email content. 
Returns: A tuple containing the email subject and the content """ timestamp = datetime.strptime( self.cycle_timestamp, '%Y%m%dT%H%M%SZ') pretty_timestamp = timestamp.strftime("%d %B %Y - %H:%M:%S") email_content = self.mail_util.render_from_template( 'notification_summary.jinja', { 'scan_date': pretty_timestamp, 'resource': self.resource, 'violation_errors': self.violations, }) email_subject = 'Forseti Violations {} - {}'.format( pretty_timestamp, self.resource) return email_subject, email_content def _compose(self, **kwargs): """Compose the email pipeline map Returns: Returns a map with subject, content, attachment """ email_map = {} attachment = self._make_attachment() subject, content = self._make_content() email_map['subject'] = subject email_map['content'] = content email_map['attachment'] = attachment return email_map def _send(self, **kwargs): """Send a summary email of the scan. Args: subject: Email subject content: Email content attachment: Attachment object """ notification_map = kwargs.get('notification') subject = notification_map['subject'] content = notification_map['content'] attachment = notification_map['attachment'] self.mail_util.send(email_sender=self.pipeline_config['sender'], email_recipient=self.pipeline_config['recipient'], email_subject=subject, email_content=content, content_type='text/html', attachment=attachment) def run(self): """Run the email pipeline""" email_notification = self._compose() self._send(notification=email_notification)
apache-2.0
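An illustrative construction of the pipeline above; every concrete value here (timestamp, API key, addresses, violation dict) is a placeholder, and run() would attempt a real SendGrid send:

pipeline = EmailViolationsPipeline(
    resource='policy_violations',
    cycle_timestamp='20170601T120000Z',  # must parse as %Y%m%dT%H%M%SZ
    violations=[{'rule': 'example-rule', 'resource_id': '1234'}],
    notifier_config={},
    pipeline_config={
        'sendgrid_api_key': 'SG.fake-key',
        'sender': 'forseti@example.com',
        'recipient': 'security-team@example.com',
    },
)
pipeline.run()   # composes subject/content/attachment, then emails them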
mpurzynski/MozDef
tests/alerts/geomodel/test_factors.py
1
2514
from datetime import datetime import alerts.geomodel.alert as alert import alerts.geomodel.factors as factors class MockMMDB: '''Mocks a MaxMind database connection with a dictionary of records mapping IP adresses to dictionaries containing information about ASNs. ''' def __init__(self, records): self.records = records def get(self, ip): return self.records.get(ip) def close(self): return def null_origin(ip): return alert.Origin( ip=ip, city='Null', country='NA', latitude=0.0, longitude=0.0, observed=datetime.now(), geopoint='0.0,0.0') # A set of records for a mocked MaxMind database containing information about # ASNs used to test the `asn_movement` factor implementation with. asn_mvmt_records = { '1.2.3.4': { 'autonomous_system_number': 54321, 'autonomous_system_organization': 'CLOUDFLARENET' }, '4.3.2.1': { 'autonomous_system_number': 12345, 'autonomous_system_organization': 'MOZILLA_SFO1' }, '5.6.7.8': { 'autonomous_system_number': 67891, 'autonomous_system_organization': 'AMAZONAWSNET' } } def test_asn_movement(): factor = factors.asn_movement( MockMMDB(asn_mvmt_records), alert.Severity.WARNING) test_hops = [ alert.Hop( origin=null_origin('1.2.3.4'), destination=null_origin('4.3.2.1')), alert.Hop( origin=null_origin('4.3.2.1'), destination=null_origin('5.6.7.8')) ] test_alert = alert.Alert( username='tester', hops=test_hops, severity=alert.Severity.INFO, factors=[]) pipeline = [factor] modified_alert = factors.pipe(test_alert, pipeline) assert modified_alert.username == test_alert.username assert modified_alert.severity == alert.Severity.WARNING assert len(modified_alert.factors) == 1 assert 'asn_hops' in modified_alert.factors[0] assert len(modified_alert.factors[0]['asn_hops']) == 2 asn_key = 'autonomous_system_organization' asn1 = modified_alert.factors[0]['asn_hops'][0][0][asn_key] asn2 = modified_alert.factors[0]['asn_hops'][0][1][asn_key] asn3 = modified_alert.factors[0]['asn_hops'][1][0][asn_key] asn4 = modified_alert.factors[0]['asn_hops'][1][1][asn_key] assert asn1 == 'CLOUDFLARENET' assert asn2 == 'MOZILLA_SFO1' assert asn3 == 'MOZILLA_SFO1' assert asn4 == 'AMAZONAWSNET'
mpl-2.0
Captain-Coder/tribler
Tribler/Core/Modules/channel/channel.py
1
5233
import codecs
import collections
import logging
import os
from binascii import hexlify

from twisted.internet import reactor
from twisted.internet.defer import DeferredList

from Tribler.Core.Modules.channel.channel_rss import ChannelRssParser
import Tribler.Core.Utilities.json_util as json
from Tribler.Core.simpledefs import SIGNAL_CHANNEL, SIGNAL_ON_CREATED, SIGNAL_RSS_FEED, SIGNAL_ON_UPDATED
from Tribler.pyipv8.ipv8.taskmanager import TaskManager


class ChannelObject(TaskManager):

    def __init__(self, session, channel_community, is_created=False):
        super(ChannelObject, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._session = session
        self._channel_community = channel_community
        self._is_created = is_created
        self._rss_feed_dict = collections.OrderedDict()
        rss_name = u"channel_rss_%s.json" % hexlify(self._channel_community.cid)
        self._rss_file_path = os.path.join(self._session.config.get_state_dir(), rss_name)

    @property
    def channel_id(self):
        return self._channel_community.get_channel_id()

    @property
    def name(self):
        return self._channel_community.get_channel_name()

    @property
    def description(self):
        return self._channel_community.get_channel_description()

    @property
    def mode(self):
        return self._channel_community.get_channel_mode()

    def get_rss_feed_url_list(self):
        return [url for url in self._rss_feed_dict.iterkeys()]

    def refresh_all_feeds(self):
        deferreds = [feed.parse_feed() for feed in self._rss_feed_dict.itervalues()]
        return DeferredList(deferreds, consumeErrors=True)

    def initialize(self):
        # load existing rss_feeds
        if os.path.exists(self._rss_file_path):
            self._logger.debug(u"loading existing channel rss list from %s...", self._rss_file_path)
            with codecs.open(self._rss_file_path, 'rb', encoding='utf8') as f:
                rss_list = json.load(f)
                for rss_url in rss_list:
                    self._rss_feed_dict[rss_url] = None

        if self._is_created:
            # create rss-parsers
            for rss_feed_url in self._rss_feed_dict:
                rss_parser = ChannelRssParser(self._session, self._channel_community, rss_feed_url)
                rss_parser.initialize()
                self._rss_feed_dict[rss_feed_url] = rss_parser
        else:
            # subscribe to the channel creation event
            self._session.add_observer(self._on_channel_created, SIGNAL_CHANNEL, [SIGNAL_ON_CREATED])

    def shutdown(self):
        self.shutdown_task_manager()
        for key, rss_parser in self._rss_feed_dict.iteritems():
            if rss_parser is not None:
                rss_parser.shutdown()
        self._rss_feed_dict = None
        self._channel_community = None
        self._session = None

    def _on_channel_created(self, subject, change_type, object_id, channel_data):
        if channel_data[u'channel'].cid != self._channel_community.cid:
            return

        def _create_rss_feed(channel_data):
            self._is_created = True
            # create rss feed parsers
            self._logger.debug(u"channel %s %s created", self.name, hexlify(self._channel_community.cid))
            for rss_feed_url in self._rss_feed_dict:
                assert self._rss_feed_dict[rss_feed_url] is None
                rss_parser = ChannelRssParser(self._session, self._channel_community, rss_feed_url)
                rss_parser.initialize()
                self._rss_feed_dict[rss_feed_url] = rss_parser

        task_name = u'create_rss_%s' % hexlify(channel_data[u'channel'].cid)
        self.register_task(task_name, reactor.callLater(0, _create_rss_feed, channel_data))

    def create_rss_feed(self, rss_feed_url):
        if rss_feed_url in self._rss_feed_dict:
            self._logger.warn(u"skip existing rss feed: %s", repr(rss_feed_url))
            return

        if not self._is_created:
            # append the rss url if the channel has not been created yet
            self._rss_feed_dict[rss_feed_url] = None
        else:
            # create an rss feed parser for this
            rss_parser = ChannelRssParser(self._session, self._channel_community, rss_feed_url)
            rss_parser.initialize()
            self._rss_feed_dict[rss_feed_url] = rss_parser

        # flush the rss_feed_url to json file
        with codecs.open(self._rss_file_path, 'wb', encoding='utf8') as f:
            rss_list = [rss_url for rss_url in self._rss_feed_dict.iterkeys()]
            json.dump(rss_list, f)

    def remove_rss_feed(self, rss_feed_url):
        if rss_feed_url not in self._rss_feed_dict:
            self._logger.warn(u"skip non-existing rss feed: %s", repr(rss_feed_url))
            return

        rss_parser = self._rss_feed_dict[rss_feed_url]
        if rss_parser is not None:
            rss_parser.shutdown()
        del self._rss_feed_dict[rss_feed_url]

        rss_feed_data = {u'channel': self._channel_community,
                         u'rss_feed_url': rss_feed_url}
        self._session.notifier.notify(SIGNAL_RSS_FEED, SIGNAL_ON_UPDATED, None, rss_feed_data)
lgpl-3.0
rayrrr/luigi
test/contrib/hadoop_test.py
5
18230
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys
import json
import unittest

import luigi
import luigi.format
import luigi.contrib.hadoop
import luigi.contrib.hdfs
import luigi.contrib.mrrunner
import luigi.notifications
import minicluster
import mock
from luigi.mock import MockTarget
from luigi.six import StringIO
from nose.plugins.attrib import attr

luigi.notifications.DEBUG = True
luigi.contrib.hadoop.attach(minicluster)


class OutputMixin(luigi.Task):
    use_hdfs = luigi.BoolParameter(default=False)

    def get_output(self, fn):
        if self.use_hdfs:
            return luigi.contrib.hdfs.HdfsTarget(
                '/tmp/' + fn,
                format=luigi.format.get_default_format() >> luigi.contrib.hdfs.PlainDir)
        else:
            return MockTarget(fn)


class HadoopJobTask(luigi.contrib.hadoop.JobTask, OutputMixin):

    def job_runner(self):
        if self.use_hdfs:
            return minicluster.MiniClusterHadoopJobRunner()
        else:
            return luigi.contrib.hadoop.LocalJobRunner()


class Words(OutputMixin):

    def output(self):
        return self.get_output('words')

    def run(self):
        f = self.output().open('w')
        f.write('kj kj lkj lkj ljoi j iljlk jlk jlk jk jkl jlk jlkj j ioj ioj kuh kjh\n')
        f.write('kjsfsdfkj sdjkf kljslkj flskjdfj jkkd jjfk jk jk jk jk jk jklkjf kj lkj lkj\n')
        f.close()


class WordCountJob(HadoopJobTask):

    def mapper(self, line):
        for word in line.strip().split():
            self.incr_counter('word', word, 1)
            yield word, 1

    def reducer(self, word, occurrences):
        yield word, sum(occurrences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('wordcount')


class WordFreqJob(HadoopJobTask):

    def init_local(self):
        self.n = 0
        for line in self.input_local().open('r'):
            word, count = line.strip().split()
            self.n += int(count)

    def mapper(self, line):
        for word in line.strip().split():
            yield word, 1.0 / self.n

    def combiner(self, word, occurrences):
        yield word, sum(occurrences)

    def reducer(self, word, occurrences):
        yield word, sum(occurrences)

    def requires_local(self):
        return WordCountJob(self.use_hdfs)

    def requires_hadoop(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-2')

    def extra_files(self):
        fn = os.listdir('.')[0]  # Just return some file, doesn't matter which
        return [(fn, 'my_dir/my_file')]

    def init_remote(self):
        open('my_dir/my_file')  # make sure it exists


class MapOnlyJob(HadoopJobTask):

    def mapper(self, line):
        for word in line.strip().split():
            yield (word,)

    def requires_hadoop(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-3')


class UnicodeJob(HadoopJobTask):

    def mapper(self, line):
        yield u'test', 1
        yield b'test', 1

    def reducer(self, word, occurrences):
        yield word, sum(occurrences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-4')


class UseJsonAsDataInterchangeFormatJob(HadoopJobTask):
    data_interchange_format = "json"

    def mapper(self, line):
        yield "json", {"data type": "json"}

    def reducer(self, _, vals):
        yield "", json.dumps(list(vals)[0])

    def requires(self):
        """
        The two lines written by the Words task cause two `mapper` calls.
        """
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-5')


class FailingJobException(Exception):
    pass


class FailingJob(HadoopJobTask):

    def init_hadoop(self):
        raise FailingJobException('failure')

    def output(self):
        return self.get_output('failing')


class MyStreamingJob(luigi.contrib.hadoop.JobTask):
    param = luigi.Parameter()


def read_wordcount_output(p):
    count = {}
    for line in p.open('r'):
        k, v = line.strip().split()
        count[k] = v
    return count


class CommonTests(object):

    @staticmethod
    def test_run(test_case):
        job = WordCountJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        test_case.assertEqual(int(c['jk']), 6)

    @staticmethod
    def test_run_2(test_case):
        job = WordFreqJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        test_case.assertAlmostEquals(float(c['jk']), 6.0 / 33.0)

    @staticmethod
    def test_map_only(test_case):
        job = MapOnlyJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line.strip())
        test_case.assertEqual(c[0], 'kj')
        test_case.assertEqual(c[4], 'ljoi')

    @staticmethod
    def test_unicode_job(test_case):
        job = UnicodeJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line)
        # Make sure unicode('test') isn't grouped with str('test'),
        # since this is what happens when running on the cluster
        test_case.assertEqual(len(c), 2)
        test_case.assertEqual(c[0], "test\t2\n")

    @staticmethod
    def test_use_json_as_data_interchange_format_job(test_case):
        job = UseJsonAsDataInterchangeFormatJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line)
        test_case.assertEqual(c, ['{"data type": "json"}\n'])

    @staticmethod
    def test_failing_job(test_case):
        job = FailingJob(use_hdfs=test_case.use_hdfs)
        success = luigi.build([job], local_scheduler=True)
        test_case.assertFalse(success)


@attr('apache')
class MapreduceLocalTest(unittest.TestCase):
    use_hdfs = False

    def run_and_check(self, args):
        run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
        return run_exit_status

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)

    def test_unicode_job(self):
        CommonTests.test_unicode_job(self)

    def test_use_json_as_data_interchange_format_job(self):
        CommonTests.test_use_json_as_data_interchange_format_job(self)

    def test_failing_job(self):
        CommonTests.test_failing_job(self)

    def test_instantiate_job(self):
        # See https://github.com/spotify/luigi/issues/738
        MyStreamingJob('param_value')

    def test_cmd_line(self):
        class DummyHadoopTask(luigi.contrib.hadoop.JobTask):
            param = luigi.Parameter()

            def run(self):
                if 'mypool' not in ''.join(self.jobconfs()):
                    raise ValueError("noooooo")

        self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--pool', 'mypool']))
        self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--hadoop-pool', 'mypool']))

    def setUp(self):
        MockTarget.fs.clear()


@attr('minicluster')
class MapreduceIntegrationTest(minicluster.MiniClusterTestCase):
    """ Uses the Minicluster functionality to test this against Hadoop """
    use_hdfs = True

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)

    # TODO(erikbern): some really annoying issue with minicluster causes
    # test_unicode_job to hang

    def test_failing_job(self):
        CommonTests.test_failing_job(self)


@attr('apache')
class CreatePackagesArchive(unittest.TestCase):

    def setUp(self):
        sys.path.append(os.path.join('test', 'create_packages_archive_root'))

    def tearDown(self):
        sys.path.remove(os.path.join('test', 'create_packages_archive_root'))

    def _assert_module(self, add):
        add.assert_called_once_with('test/create_packages_archive_root/module.py',
                                    'module.py')

    def _assert_package(self, add):
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule.py', 'package/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_with_absolute_import.py',
                            'package/submodule_with_absolute_import.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_without_imports.py',
                            'package/submodule_without_imports.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py',
                            'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py',
                            'package/subpackage/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package.egg-info/top_level.txt',
                            'package.egg-info/top_level.txt')
        assert add.call_count == 7

    def _assert_package_subpackage(self, add):
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py',
                            'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py',
                            'package/subpackage/submodule.py')
        assert add.call_count == 3

    @mock.patch('tarfile.open')
    def test_create_packages_archive_module(self, tar):
        module = __import__("module", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([module], '/dev/null')
        self._assert_module(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package(self, tar):
        package = __import__("package", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule(self, tar):
        package_submodule = __import__("package.submodule", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_with_absolute_import(self, tar):
        package_submodule_with_absolute_import = __import__("package.submodule_with_absolute_import", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule_with_absolute_import], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_without_imports(self, tar):
        package_submodule_without_imports = __import__("package.submodule_without_imports", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule_without_imports], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage(self, tar):
        package_subpackage = __import__("package.subpackage", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_subpackage], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage_submodule(self, tar):
        package_subpackage_submodule = __import__("package.subpackage.submodule", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_subpackage_submodule], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)


class MockProcess(object):

    def __init__(self, err_lines, returncode):
        err = ''.join(err_lines)
        self.__err_len = len(err)
        self.stderr = StringIO(err)
        self.__rc = returncode
        self.returncode = None

    def poll(self):
        if self.stderr.tell() == self.__err_len:
            self.returncode = self.__rc
        return self.returncode


class KeyboardInterruptedMockProcess(MockProcess):

    def __init__(self, err_lines):
        super(KeyboardInterruptedMockProcess, self).__init__(err_lines, 0)

    def poll(self):
        if super(KeyboardInterruptedMockProcess, self).poll() is not None:
            raise KeyboardInterrupt


@attr('apache')
class JobRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tracking_urls = []

    def track(self, url):
        self.tracking_urls.append(url)

    def _run_and_track(self, err_lines, returncode):
        with mock.patch('luigi.contrib.hadoop.subprocess') as subprocess:
            subprocess.Popen.return_value = MockProcess(err_lines, returncode)
            _, err = luigi.contrib.hadoop.run_and_track_hadoop_job([], self.track)
            self.assertEqual(err, ''.join(err_lines))

    def test_tracking_url_yarn(self):
        url = 'http://example.jobtracker.com:8080/proxy/application_1234_5678/'
        yarn_lines = [
            "INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1234_5678\n",
            "INFO impl.YarnClientImpl: Submitted application application_1234_5678\n",
            "INFO mapreduce.Job: The url to track the job: %s\n" % url,
            "INFO mapreduce.Job: Running job: job_1234_5678\n",
            "INFO mapreduce.Job: Job job_1234_5678 running in uber mode : false\n",
            "INFO mapreduce.Job: Job job_1234_5678 completed successfully\n",
        ]
        self._run_and_track(yarn_lines, 0)
        self.assertEqual([url], self.tracking_urls)

    def test_tracking_url_old_version(self):
        url = 'http://tracker.com/1234_5678'
        err_lines = [
            'INFO tracking url: %s\n' % url,
        ]
        self._run_and_track(err_lines, 0)
        self.assertEqual([url], self.tracking_urls)

    def test_multiple_tracking_urls(self):
        urls = [
            'http://tracking/1',
            'http://tracking/2',
            'http://tracking/3',
        ]
        err_lines = [
            'running...\n',
            'The url to track the job: %s\n' % urls[0],
            'done\n',
            'running another stage...\n',
            'The url to track the job: %s\n' % urls[1],
            'done\n',
            'running another stage...\n',
            'The url to track the job: %s\n' % urls[2],
            'done\n',
        ]
        self._run_and_track(err_lines, 0)
        self.assertEqual(urls, self.tracking_urls)

    def test_tracking_url_captured_on_fail(self):
        url = 'http://tracking/'
        err_lines = [
            'The url to track the job: %s\n' % url,
        ]
        with self.assertRaises(luigi.contrib.hadoop.HadoopJobError):
            self._run_and_track(err_lines, 1)
        self.assertEqual([url], self.tracking_urls)

    def _run_and_track_with_interrupt(self, err_lines):
        proc = KeyboardInterruptedMockProcess(err_lines)
        with mock.patch('luigi.contrib.hadoop.subprocess') as subprocess:
            subprocess.Popen.return_value = proc
            with self.assertRaises(KeyboardInterrupt):
                luigi.contrib.hadoop.run_and_track_hadoop_job([], proc)
            return subprocess

    def test_kill_job_on_interrupt(self):
        job_id = 'job_1234_5678'
        err_lines = [
            'FlowStep: [SomeJob()] submitted hadoop job: %s\n' % job_id,
            'some other line\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['mapred', 'job', '-kill', job_id])

    def test_kill_last_mapreduce_on_interrupt(self):
        job_id = 'job_1234_5678'
        err_lines = [
            'FlowStep: [SomeJob()] submitted hadoop job: job_0000_0000\n',
            'FlowStep: [SomeJob()] submitted hadoop job: %s\n' % job_id,
            'some other line\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['mapred', 'job', '-kill', job_id])

    def test_kill_application_on_interrupt(self):
        application_id = 'application_1234_5678'
        err_lines = [
            'YarnClientImpl: Submitted application %s\n' % application_id,
            'FlowStep: [SomeJob()] submitted hadoop job: job_1234_5678\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['yarn', 'application', '-kill', application_id])

    def test_kill_last_application_on_interrupt(self):
        application_id = 'application_1234_5678'
        err_lines = [
            'YarnClientImpl: Submitted application application_0000_0000\n',
            'FlowStep: [SomeJob()] submitted hadoop job: job_0000_0000\n',
            'YarnClientImpl: Submitted application %s\n' % application_id,
            'FlowStep: [SomeJob()] submitted hadoop job: job_1234_5678\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['yarn', 'application', '-kill', application_id])
apache-2.0
vkscool/nupic
tests/unit/nupic/data/functionsource_test.py
9
3242
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""Unit tests for functionsource."""

import pickle
import unittest

from nupic.data import FunctionSource


def dataFunction(stat):
  ret = {"reset": 0, "sequence": 0, "data": 0}
  if stat is not None:
    val = stat.get("val", 0) + 1
    ret["val"] = stat["val"] = val
  return ret


class FunctionSourceTest(unittest.TestCase):

  def testDefaultArgs(self):
    fs = FunctionSource(dataFunction, state=None, resetFieldName=None,
                        sequenceIdFieldName=None)
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)

  def testResetField(self):
    fs = FunctionSource(dataFunction, state=None, resetFieldName="reset",
                        sequenceIdFieldName=None)
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)

  def testSequenceField(self):
    fs = FunctionSource(dataFunction, state=None, resetFieldName=None,
                        sequenceIdFieldName="sequence")
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)

  def testResetAndSequenceFields(self):
    fs = FunctionSource(dataFunction, state=None, resetFieldName="reset",
                        sequenceIdFieldName="sequence")
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)

  def testState(self):
    state = dict(val=100)
    fs = FunctionSource(dataFunction, state=state, resetFieldName="reset",
                        sequenceIdFieldName="sequence")
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)
    r = fs.getNextRecordDict()
    r = fs.getNextRecordDict()
    self.assertEqual(103, state["val"])

  def testPickle(self):
    state = dict(val=100)
    fs = FunctionSource(dataFunction, state=state, resetFieldName="reset",
                        sequenceIdFieldName="sequence")
    self.assertIsNotNone(fs)
    r = fs.getNextRecordDict()
    self.assertIsNotNone(r)

    pkl = pickle.dumps(fs)
    self.assertIsNotNone(pkl)

    fs2 = pickle.loads(pkl)
    self.assertIsNotNone(fs2)
    r = fs2.getNextRecordDict()
    r = fs2.getNextRecordDict()
    self.assertEqual(103, fs2.state["val"])


if __name__ == "__main__":
  unittest.main()
gpl-3.0
PopCap/GameIdea
Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/third_party/ply/test/lex_hedit.py
174
1141
# -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
#   nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path:
    sys.path.insert(0, "..")

import ply.lex as lex

tokens = (
    'H_EDIT_DESCRIPTOR',
)

# Tokens
t_ignore = " \t\n"


def t_H_EDIT_DESCRIPTOR(t):
    r"\d+H.*"  # This grabs all of the remaining text
    i = t.value.index('H')
    n = eval(t.value[:i])

    # Adjust the tokenizing position
    t.lexer.lexpos -= len(t.value) - (i + 1 + n)
    t.value = t.value[i + 1:i + 1 + n]
    return t


def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)


# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
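For reference, a short driver below shows what the rule above yields for the demo input; this is an illustrative sketch, not part of the original example. Because each nH descriptor keeps exactly n characters after the H, the three token values should come out as 'abc', 'abcdefghij' and 'xy'.

import ply.lex as lex

# Assumes the token rules above are defined in the calling module, so that
# lex.lex() picks them up the same way the example itself does.
lexer = lex.lex()
lexer.input("3Habc 10Habcdefghij 2Hxy")
for tok in lexer:
    print(tok.type, tok.value)  # H_EDIT_DESCRIPTOR: 'abc', 'abcdefghij', 'xy'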
bsd-2-clause
denny820909/builder
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/python/test/test_hashlib.py
27
3308
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted.python.hashlib}
"""

from twisted.trial.unittest import TestCase
from twisted.python.hashlib import md5, sha1


class HashObjectTests(TestCase):
    """
    Tests for the hash object APIs presented by L{hashlib}, C{md5} and
    C{sha1}.
    """
    def test_md5(self):
        """
        L{hashlib.md5} returns an object which can be used to compute an MD5
        hash as defined by U{RFC 1321<http://www.ietf.org/rfc/rfc1321.txt>}.
        """
        # Test the result using values from section A.5 of the RFC.
        self.assertEqual(
            md5().hexdigest(), "d41d8cd98f00b204e9800998ecf8427e")
        self.assertEqual(
            md5("a").hexdigest(), "0cc175b9c0f1b6a831c399e269772661")
        self.assertEqual(
            md5("abc").hexdigest(), "900150983cd24fb0d6963f7d28e17f72")
        self.assertEqual(
            md5("message digest").hexdigest(),
            "f96b697d7cb7938d525a2f31aaf161d0")
        self.assertEqual(
            md5("abcdefghijklmnopqrstuvwxyz").hexdigest(),
            "c3fcd3d76192e4007dfb496cca67e13b")
        self.assertEqual(
            md5("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
                "0123456789").hexdigest(),
            "d174ab98d277d9f5a5611c2c9f419d9f")
        self.assertEqual(
            md5("1234567890123456789012345678901234567890123456789012345678901"
                "2345678901234567890").hexdigest(),
            "57edf4a22be3c955ac49da2e2107b67a")

        # It should have digest and update methods, too.
        self.assertEqual(
            md5().digest().encode('hex'),
            "d41d8cd98f00b204e9800998ecf8427e")
        hash = md5()
        hash.update("a")
        self.assertEqual(
            hash.digest().encode('hex'),
            "0cc175b9c0f1b6a831c399e269772661")

        # Instances of it should have a digest_size attribute
        self.assertEqual(md5().digest_size, 16)

    def test_sha1(self):
        """
        L{hashlib.sha1} returns an object which can be used to compute a SHA1
        hash as defined by U{RFC 3174<http://tools.ietf.org/rfc/rfc3174.txt>}.
        """
        def format(s):
            return ''.join(s.split()).lower()
        # Test the result using values from section 7.3 of the RFC.
        self.assertEqual(
            sha1("abc").hexdigest(),
            format(
                "A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))
        self.assertEqual(
            sha1("abcdbcdecdefdefgefghfghighijhi"
                 "jkijkljklmklmnlmnomnopnopq").hexdigest(),
            format(
                "84 98 3E 44 1C 3B D2 6E BA AE 4A A1 F9 51 29 E5 E5 46 70 F1"))

        # It should have digest and update methods, too.
        self.assertEqual(
            sha1("abc").digest().encode('hex'),
            format(
                "A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))
        hash = sha1()
        hash.update("abc")
        self.assertEqual(
            hash.digest().encode('hex'),
            format(
                "A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))

        # Instances of it should have a digest_size attribute.
        self.assertEqual(
            sha1().digest_size, 20)
mit
TripleDogDare/RadioWCSpy
backend/env/lib/python2.7/site-packages/setuptools/archive_util.py
520
6609
"""Utilities for extracting common archive formats""" __all__ = [ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", "UnrecognizedFormat", "extraction_drivers", "unpack_directory", ] import zipfile import tarfile import os import shutil import posixpath import contextlib from pkg_resources import ensure_directory, ContextualZipFile from distutils.errors import DistutilsError class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" def default_filter(src,dst): """The default progress/filter callback; returns True for all files""" return dst def unpack_archive(filename, extract_dir, progress_filter=default_filter, drivers=None): """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order. """ for driver in drivers or extraction_drivers: try: driver(filename, extract_dir, progress_filter) except UnrecognizedFormat: continue else: return else: raise UnrecognizedFormat( "Not a recognized archive type: %s" % filename ) def unpack_directory(filename, extract_dir, progress_filter=default_filter): """"Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory """ if not os.path.isdir(filename): raise UnrecognizedFormat("%s is not a directory" % filename) paths = { filename: ('', extract_dir), } for base, dirs, files in os.walk(filename): src, dst = paths[base] for d in dirs: paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) for f in files: target = os.path.join(dst, f) target = progress_filter(src + f, target) if not target: # skip non-files continue ensure_directory(target) f = os.path.join(base, f) shutil.copyfile(f, target) shutil.copystat(f, target) def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): """Unpack zip `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ if not zipfile.is_zipfile(filename): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) with ContextualZipFile(filename) as z: for info in z.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name.split('/'): continue target = os.path.join(extract_dir, *name.split('/')) target = progress_filter(name, target) if not target: continue if name.endswith('/'): # directory ensure_directory(target) else: # file ensure_directory(target) data = z.read(info.filename) with open(target, 'wb') as f: f.write(data) unix_attributes = info.external_attr >> 16 if unix_attributes: os.chmod(target, unix_attributes) def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined by ``tarfile.open()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ try: tarobj = tarfile.open(filename) except tarfile.TarError: raise UnrecognizedFormat( "%s is not a compressed or uncompressed tar file" % (filename,) ) with contextlib.closing(tarobj): # don't do any chowning! tarobj.chown = lambda *args: None for member in tarobj: name = member.name # don't extract absolute paths or ones with .. in them if not name.startswith('/') and '..' not in name.split('/'): prelim_dst = os.path.join(extract_dir, *name.split('/')) # resolve any links and to extract the link targets as normal # files while member is not None and (member.islnk() or member.issym()): linkpath = member.linkname if member.issym(): base = posixpath.dirname(member.name) linkpath = posixpath.join(base, linkpath) linkpath = posixpath.normpath(linkpath) member = tarobj._getmember(linkpath) if member is not None and (member.isfile() or member.isdir()): final_dst = progress_filter(name, prelim_dst) if final_dst: if final_dst.endswith(os.sep): final_dst = final_dst[:-1] try: # XXX Ugh tarobj._extract_member(member, final_dst) except tarfile.ExtractError: # chown/chmod/mkfifo/mknode/makedev failed pass return True extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
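The `progress_filter` contract documented above is easiest to see in a small sketch. The following is illustrative only (the archive name and destination directory are placeholders, not part of setuptools): returning None skips an entry, and returning a path extracts the entry to that path.

from setuptools.archive_util import unpack_archive

def skip_hidden(src, dst):
    # src is the '/'-separated path inside the archive; dst is the proposed
    # filesystem destination. Returning None skips this entry entirely.
    if any(part.startswith('.') for part in src.split('/')):
        return None
    print('extracting %s -> %s' % (src, dst))
    return dst

# 'example.zip' and 'build/' are placeholder names for this sketch.
unpack_archive('example.zip', 'build/', progress_filter=skip_hidden)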
mit
bharath2020/SmartHome
temperature-sensor/temperature_sensor.py
1
1403
import Adafruit_DHT
import time
import json
import httplib
import sys

URL = 'bb-smart-home.herokuapp.com'
DEVICE_NAME = 'raspberry'


def temperature_humidity(sensor, pin):
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if humidity is not None and temperature is not None:
        return [temperature, humidity]
    else:
        return []


def upload_temp():
    points = temperature_humidity(22, 17)
    if len(points) != 0:
        record = {
            'temperature': points[0],
            'humidity': points[1],
            'device_name': DEVICE_NAME,
            'timestamp': long(time.time())
        }
        conn = httplib.HTTPConnection(URL)
        conn.request("POST", '/add', json.dumps(record), {'Content-Type': 'application/json'})
        response = conn.getresponse()
        print response.status, response.reason
        conn.close()


def upload_current_stats():
    points = temperature_humidity(22, 17)
    if len(points) != 0:
        record = {
            'cur_temperature': points[0],
            'cur_humidity': points[1],
            'device_name': DEVICE_NAME,
            'timestamp': long(time.time())
        }
        conn = httplib.HTTPConnection(URL)
        conn.request("POST", '/update_stats', json.dumps(record), {'Content-Type': 'application/json'})
        response = conn.getresponse()
        print response.status, response.reason
        conn.close()


if __name__ == '__main__':
    operation = sys.argv[1]
    print(operation)
    if operation == 'add_reading':
        upload_temp()
    elif operation == 'cur_reading':
        upload_current_stats()
apache-2.0
boundarydevices/android_external_chromium_org
build/android/gyp/get_device_configuration.py
103
2134
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Gets and writes the configurations of the attached devices.

This configuration is used by later build steps to determine which devices to
install to and what needs to be installed to those devices.
"""

import optparse
import sys

from util import build_utils
from util import build_device


def main(argv):
  parser = optparse.OptionParser()
  parser.add_option('--stamp', action='store')
  parser.add_option('--output', action='store')
  options, _ = parser.parse_args(argv)

  devices = build_device.GetAttachedDevices()

  device_configurations = []
  for d in devices:
    configuration, is_online, has_root = (
        build_device.GetConfigurationForDevice(d))

    if not is_online:
      build_utils.PrintBigWarning(
          '%s is not online. Skipping managed install for this device. '
          'Try rebooting the device to fix this warning.' % d)
      continue

    if not has_root:
      build_utils.PrintBigWarning(
          '"adb root" failed on device: %s\n'
          'Skipping managed install for this device.'
          % configuration['description'])
      continue

    device_configurations.append(configuration)

  if len(device_configurations) == 0:
    build_utils.PrintBigWarning(
        'No valid devices attached. Skipping managed install steps.')
  elif len(devices) > 1:
    # Note that this checks len(devices) and not len(device_configurations).
    # This way, any time there are multiple devices attached it is
    # explicitly stated which device we will install things to even if all but
    # one device were rejected for other reasons (e.g. two devices attached,
    # one w/o root).
    build_utils.PrintBigWarning(
        'Multiple devices attached. '
        'Installing to the preferred device: '
        '%(id)s (%(description)s)' % (device_configurations[0]))

  build_device.WriteConfigurations(device_configurations, options.output)


if __name__ == '__main__':
  sys.exit(main(sys.argv))
bsd-3-clause
Ale-/civics
apps/api/urls.py
1
1342
from django.conf.urls import url, include
from apps.models.models import Initiative
from . import views

urlpatterns = [
    url(r'^initiative$', views.initiative_service, name='get_initiative'),
    url(r'^initiatives$', views.initiatives_service, name='get_initiatives'),
    url(r'^initiatives_featured$', views.initiatives_featured_service, name='get_initiatives_featured'),
    url(r'^initiatives_xls$', views.initiatives_service_xls, name='get_initiatives_xls'),
    url(r'^initiatives_csv$', views.initiatives_service_csv, name='get_initiatives_csv'),
    url(r'^event$', views.event_service, name='get_event'),
    url(r'^event_create$', views.create_event, name='create_event'),
    url(r'^events_fb_id$', views.events_by_fb_id_service, name='events_by_fb_id'),
    url(r'^events$', views.events_service, name='get_events'),
    url(r'^events_featured$', views.events_featured_service, name='get_events_featured'),
    url(r'^events_xls$', views.events_service_xls, name='get_events_xls'),
    # NOTE: this csv route is wired to the xls view; no dedicated csv view for
    # events is referenced here, unlike the initiatives routes above.
    url(r'^events_csv$', views.events_service_xls, name='get_events_csv'),
    url(r'^autocomplete$', views.autocomplete_service, name='autocomplete'),
    url(r'^cities_with_initiatives$', views.cities_with_initiatives, name='cities_with_initiatives'),
    url(r'^cities_with_events$', views.cities_with_events, name='cities_with_events'),
]
gpl-3.0
dol-sen/gentoo-keys
gkeys/log.py
1
2035
#
#-*- coding:utf-8 -*-

"""
    Gentoo-Keys - Log.py

    Logging module, placeholder for our site-wide logging module

    @copyright: 2012 by Brian Dolbec <dol-sen> <[email protected]>
    @license: GNU GPL2, see COPYING for details.
"""

import logging
import time
import os

NAMESPACE = 'gentoo-keys'
logger = None
Console_handler = None
File_handler = None

log_levels = {
    'CRITICAL': logging.CRITICAL,
    'DEBUG': logging.DEBUG,
    'ERROR': logging.ERROR,
    'FATAL': logging.FATAL,
    'INFO': logging.INFO,
    'NOTSET': logging.NOTSET,
    'WARN': logging.WARN,
    'WARNING': logging.WARNING,
}


def set_logger(namespace=None, logpath='', level=None):
    global logger, NAMESPACE, Console_handler, File_handler
    if not namespace:
        namespace = NAMESPACE
    else:
        NAMESPACE = namespace
    logger = logging.getLogger(namespace)
    logger.setLevel(log_levels['DEBUG'])
    # create formatter and add it to the handlers
    log_format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
    formatter = logging.Formatter(log_format)
    # add the handlers to logger
    if logpath:
        logname = os.path.join(logpath,
                               '%s-%s.log' % (namespace, time.strftime('%Y%m%d-%H:%M')))
        File_handler = logging.FileHandler(logname)
        if level:
            #print "Setting cli log level", level, log_levels[level]
            File_handler.setLevel(log_levels[level])
        else:
            #print "Create file handler which logs even debug messages"
            File_handler.setLevel(log_levels['DEBUG'])
        File_handler.setFormatter(formatter)

    # create console handler with a higher log level
    Console_handler = logging.StreamHandler()
    Console_handler.setLevel(logging.ERROR)
    #Console_handler.setFormatter(formatter)

    logger.addHandler(Console_handler)
    logger.addHandler(File_handler)
    #print "File logger suppose to be initialized", logger, File_handler, Console_handler
    logger.debug("Loggers initialized")
    return logger
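A minimal usage sketch for set_logger above; the namespace, log path, and level are illustrative values, not project defaults. Note that the level string must be one of the keys of log_levels, and that it only affects the file handler (the console handler is fixed at ERROR).

from gkeys import log

logger = log.set_logger(namespace='gentoo-keys', logpath='/tmp', level='INFO')
logger.info("recorded by the file handler at INFO and above")
logger.error("errors also reach the console handler")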
gpl-2.0
ljbade/libswiftnav
python/tests/test_lambda.py
1
4184
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Copyright (C) 2007-2008 by T.TAKASU, All rights reserved.
# Contact: Bhaskar Mookerji <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.

# Taken from the lambda/mlambda integer least square tests for rtklib

import numpy as np
import swiftnav.lambda_ as l


def test_lambda1():
    m = 2
    x = np.array([1585184.171, -6716599.430, 3915742.905,
                  7627233.455, 9565990.879, 989457273.200])
    sigma = np.matrix([[0.227134, 0.112202, 0.112202, 0.112202, 0.112202, 0.103473],
                       [0.112202, 0.227134, 0.112202, 0.112202, 0.112202, 0.103473],
                       [0.112202, 0.112202, 0.227134, 0.112202, 0.112202, 0.103473],
                       [0.112202, 0.112202, 0.112202, 0.227134, 0.112202, 0.103473],
                       [0.112202, 0.112202, 0.112202, 0.112202, 0.227134, 0.103473],
                       [0.103473, 0.103473, 0.103473, 0.103473, 0.103473, 0.434339]])
    F, s = l.lambda_solution_(x, sigma, m)
    assert np.allclose(F,
                       np.array([[1.58518400e+06, 3.91574300e+06, 9.56599100e+06,
                                  1.58518400e+06, 3.91574300e+06, 9.56599100e+06],
                                 [-6.71659900e+06, 7.62723400e+06, 9.89457273e+08,
                                  -6.71660000e+06, 7.62723300e+06, 9.89457273e+08]]))
    assert np.allclose(s, np.array([3.50798444, 3.70845619]))


def test_lambda2():
    m = 2
    x = np.array([-13324172.755747, -10668894.713608, -7157225.010770, -6149367.974367,
                  -7454133.571066, -5969200.494550, 8336734.058423, 6186974.084502,
                  -17549093.883655, -13970158.922370])
    sigma = np.matrix([[0.446320, 0.223160, 0.223160, 0.223160, 0.223160, 0.572775, 0.286388, 0.286388, 0.286388, 0.286388],
                       [0.223160, 0.446320, 0.223160, 0.223160, 0.223160, 0.286388, 0.572775, 0.286388, 0.286388, 0.286388],
                       [0.223160, 0.223160, 0.446320, 0.223160, 0.223160, 0.286388, 0.286388, 0.572775, 0.286388, 0.286388],
                       [0.223160, 0.223160, 0.223160, 0.446320, 0.223160, 0.286388, 0.286388, 0.286388, 0.572775, 0.286388],
                       [0.223160, 0.223160, 0.223160, 0.223160, 0.446320, 0.286388, 0.286388, 0.286388, 0.286388, 0.572775],
                       [0.572775, 0.286388, 0.286388, 0.286388, 0.286388, 0.735063, 0.367531, 0.367531, 0.367531, 0.367531],
                       [0.286388, 0.572775, 0.286388, 0.286388, 0.286388, 0.367531, 0.735063, 0.367531, 0.367531, 0.367531],
                       [0.286388, 0.286388, 0.572775, 0.286388, 0.286388, 0.367531, 0.367531, 0.735063, 0.367531, 0.367531],
                       [0.286388, 0.286388, 0.286388, 0.572775, 0.286388, 0.367531, 0.367531, 0.367531, 0.735063, 0.367531],
                       [0.286388, 0.286388, 0.286388, 0.286388, 0.572775, 0.367531, 0.367531, 0.367531, 0.367531, 0.735063]])
    F, s = l.lambda_solution_(x, sigma, m)
    assert np.allclose(F,
                       [[-13324188., -7157236.00000014, -7454143.00000017, 8336726.00000008,
                         -17549108.00000054, -13324187.99999997, -7157236.00000013,
                         -7454143.00000013, 8336717.00000021, -17549108.00000053],
                        [-10668900.99999994, -6149379.00000041, -5969220., 6186959.99999982,
                         -13970171.00000022, -10668907.99999984, -6149379.00000041,
                         -5969219.99999996, 6186959.99999983, -13970171.00000017]])
    assert np.allclose(s, np.array([1506.43579559, 1612.81177168]))
lgpl-3.0
thumbor-community/shortener
tc_shortener/handlers/shortener.py
1
3214
# -*- coding: utf-8 -*-

# Copyright (c) 2016, thumbor-community
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.

import os.path

import tornado.gen as gen
import tornado.web
import urlparse
import json

from thumbor.handlers.imaging import ImagingHandler
from thumbor.utils import logger

from tc_shortener.shortener import Shortener
from tc_core.web import RequestParser


class UrlShortenerHandler(ImagingHandler):

    should_return_image = True

    @classmethod
    def regex(cls):
        '''
        :return: The regex used for routing.
        :rtype: string
        '''
        return r'/shortener/?(?P<key>.+)?'

    @gen.coroutine
    def get(self, **kwargs):
        shortener = Shortener(self.context)

        # Get the url from the shortener and parse the values.
        url = yield gen.maybe_future(shortener.get(kwargs['key']))
        if not url:
            raise tornado.web.HTTPError(404)

        # Patch the request uri to allow normal thumbor operations
        self.request.uri = urlparse.urlparse(url).path
        options = RequestParser.path_to_parameters(self.request.uri)

        name = os.path.basename(options.get('image', None))
        if name:
            self.set_header(
                'Content-Disposition',
                'inline; filename="{name}"'.format(
                    name=name
                )
            )

        # Call the original ImageHandler.get method to serve the image.
        super(UrlShortenerHandler, self).get(**options)

    @gen.coroutine
    def post(self, **kwargs):
        self.should_return_image = False
        content_type = self.request.headers.get("Content-Type", '')

        if 'key' in kwargs and kwargs['key']:
            url = kwargs['key']
        elif content_type.startswith("application/json"):
            data = json.loads(self.request.body)
            url = data['url'] if 'url' in data else None
        else:
            url = self.get_body_argument('url', None)

        if not url:
            logger.error("Couldn't find url param in body or key in URL...")
            raise tornado.web.HTTPError(400)

        options = RequestParser.path_to_parameters(url)
        yield self.check_image(options)

        # We check the status code; if != 200 the image is incorrect, and we shouldn't store the key
        if self.get_status() == 200:
            logger.debug("Image is checked, clearing the response before trying to store...")
            self.clear()

            try:
                shortener = Shortener(self.context)
                key = shortener.generate(url)
                shortener.put(key, url)

                self.write(json.dumps({'key': key}))
                self.set_header("Content-Type", "application/json")
            except Exception as e:
                logger.error("An error occurred while trying to store shortened URL: {error}.".format(error=e.message))
                self.set_status(500)
                self.write(json.dumps({'error': e.message}))

    @gen.coroutine
    def execute_image_operations(self):
        if self.should_return_image:
            super(UrlShortenerHandler, self).execute_image_operations()
mit
gannetson/django
django/middleware/security.py
510
1753
import re

from django.conf import settings
from django.http import HttpResponsePermanentRedirect


class SecurityMiddleware(object):
    def __init__(self):
        self.sts_seconds = settings.SECURE_HSTS_SECONDS
        self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS
        self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF
        self.xss_filter = settings.SECURE_BROWSER_XSS_FILTER
        self.redirect = settings.SECURE_SSL_REDIRECT
        self.redirect_host = settings.SECURE_SSL_HOST
        self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT]

    def process_request(self, request):
        path = request.path.lstrip("/")
        if (self.redirect and not request.is_secure() and
                not any(pattern.search(path)
                        for pattern in self.redirect_exempt)):
            host = self.redirect_host or request.get_host()
            return HttpResponsePermanentRedirect(
                "https://%s%s" % (host, request.get_full_path())
            )

    def process_response(self, request, response):
        if (self.sts_seconds and request.is_secure() and
                'strict-transport-security' not in response):
            sts_header = "max-age=%s" % self.sts_seconds
            if self.sts_include_subdomains:
                sts_header = sts_header + "; includeSubDomains"
            response["strict-transport-security"] = sts_header

        if self.content_type_nosniff and 'x-content-type-options' not in response:
            response["x-content-type-options"] = "nosniff"

        if self.xss_filter and 'x-xss-protection' not in response:
            response["x-xss-protection"] = "1; mode=block"
        return response
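For context, a hedged settings sketch that exercises every setting this middleware reads; the values below are illustrative choices, not Django defaults.

# settings.py (illustrative values only)
SECURE_HSTS_SECONDS = 3600               # emit Strict-Transport-Security on HTTPS responses
SECURE_HSTS_INCLUDE_SUBDOMAINS = True    # append "; includeSubDomains" to that header
SECURE_CONTENT_TYPE_NOSNIFF = True       # x-content-type-options: nosniff
SECURE_BROWSER_XSS_FILTER = True         # x-xss-protection: 1; mode=block
SECURE_SSL_REDIRECT = True               # permanently redirect http:// requests to https://
SECURE_SSL_HOST = None                   # or a canonical host to redirect to
SECURE_REDIRECT_EXEMPT = [r'^health/$']  # regexes matched against the path sans leading "/"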
bsd-3-clause
Lujeni/ansible
lib/ansible/modules/network/fortios/fortios_system_proxy_arp.py
13
9662
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_system_proxy_arp
short_description: Configure proxy-ARP in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify system feature and proxy_arp category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.9"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
    state:
        description:
            - Indicates whether to create or remove the object.
        type: str
        required: true
        choices:
            - present
            - absent
    system_proxy_arp:
        description:
            - Configure proxy-ARP.
        default: null
        type: dict
        suboptions:
            end_ip:
                description:
                    - End IP of IP range to be proxied.
                type: str
            id:
                description:
                    - Unique integer ID of the entry.
                required: true
                type: int
            interface:
                description:
                    - Interface acting proxy-ARP. Source system.interface.name.
                type: str
            ip:
                description:
                    - IP address or start IP to be proxied.
                type: str
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure proxy-ARP.
    fortios_system_proxy_arp:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      state: "present"
      system_proxy_arp:
        end_ip: "<your_own_value>"
        id:  "4"
        interface: "<your_own_value> (source system.interface.name)"
        ip: "<your_own_value>"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_system_proxy_arp_data(json):
    option_list = ['end_ip', 'id', 'interface',
                   'ip']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def system_proxy_arp(data, fos):
    vdom = data['vdom']
    state = data['state']
    system_proxy_arp_data = data['system_proxy_arp']
    filtered_data = underscore_to_hyphen(filter_system_proxy_arp_data(system_proxy_arp_data))

    if state == "present":
        return fos.set('system',
                       'proxy-arp',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('system',
                          'proxy-arp',
                          mkey=filtered_data['id'],
                          vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_system(data, fos):

    if data['system_proxy_arp']:
        resp = system_proxy_arp(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_proxy_arp": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "end_ip": {"required": False, "type": "str"},
                "id": {"required": True, "type": "int"},
                "interface": {"required": False, "type": "str"},
                "ip": {"required": False, "type": "str"}

            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
gpl-3.0
Lujeni/ansible
test/units/modules/cloud/amazon/test_aws_api_gateway.py
13
1943
#
# (c) 2016 Michael De La Rue
#
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys

import pytest

from ansible.module_utils.ec2 import HAS_BOTO3
from units.modules.utils import set_module_args

if not HAS_BOTO3:
    pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules")

import ansible.modules.cloud.amazon.aws_api_gateway as agw
from ansible.module_utils.aws import core


exit_return_dict = {}


def fake_exit_json(self, **kwargs):
    """ store the kwargs given to exit_json rather than putting them out to stdout"""
    global exit_return_dict
    exit_return_dict = kwargs
    sys.exit(0)


def test_upload_api(monkeypatch):

    class FakeConnection:

        def put_rest_api(self, *args, **kwargs):
            assert kwargs["body"] == "the-swagger-text-is-fake"
            return {"msg": "success!"}

    def return_fake_connection(*args, **kwargs):
        return FakeConnection()

    monkeypatch.setattr(core, "boto3_conn", return_fake_connection)
    monkeypatch.setattr(core.AnsibleAWSModule, "exit_json", fake_exit_json)

    set_module_args({
        "api_id": "fred",
        "state": "present",
        "swagger_text": "the-swagger-text-is-fake",
        "region": 'mars-north-1',
        "_ansible_tmpdir": "/tmp/ansibl-abcdef",
    })
    with pytest.raises(SystemExit):
        agw.main()
    assert exit_return_dict["changed"]


def test_warn_if_region_not_specified():
    set_module_args({
        "name": "aws_api_gateway",
        "state": "present",
        "runtime": 'python2.7',
        "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
        "handler": 'lambda_python.my_handler'})
    with pytest.raises(SystemExit):
        print(agw.main())
gpl-3.0
lorentey/swift
utils/build_swift/argparse/__init__.py
23
1658
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors


"""
Wrapper module around the standard argparse that extends the default
functionality with support for multi-destination actions, an expressive DSL
for constructing parsers and more argument types. This module exposes a strict
super-set of the argparse API and is meant to be used as a drop-in
replacement.
"""


from argparse import (ArgumentDefaultsHelpFormatter, ArgumentError,
                      ArgumentTypeError, FileType, HelpFormatter,
                      Namespace, RawDescriptionHelpFormatter,
                      RawTextHelpFormatter)
from argparse import ONE_OR_MORE, OPTIONAL, SUPPRESS, ZERO_OR_MORE

from .actions import Action, Nargs
from .parser import ArgumentParser
from .types import (BoolType, ClangVersionType, CompilerVersion, PathType,
                    RegexType, ShellSplitType, SwiftVersionType)


__all__ = [
    'Action',
    'ArgumentDefaultsHelpFormatter',
    'ArgumentError',
    'ArgumentParser',
    'ArgumentTypeError',
    'HelpFormatter',
    'Namespace',
    'Nargs',
    'RawDescriptionHelpFormatter',
    'RawTextHelpFormatter',

    'CompilerVersion',
    'BoolType',
    'FileType',
    'PathType',
    'RegexType',
    'ClangVersionType',
    'SwiftVersionType',
    'ShellSplitType',

    'SUPPRESS',
    'OPTIONAL',
    'ZERO_OR_MORE',
    'ONE_OR_MORE',
]
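Because the module is documented as a strict super-set of argparse, a plain argparse-style call site should work unchanged. A small sketch follows; the import path is assumed from the file location under utils/build_swift, and the option names are invented for illustration.

from build_swift import argparse  # import path assumed from the file location

parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--jobs', type=int, default=1)     # plain argparse usage still works
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args(['--jobs', '4'])
print(args.jobs, args.verbose)  # 4 False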
apache-2.0
acshi/osf.io
addons/github/tests/utils.py
16
9354
import mock import github3 from addons.github.api import GitHubClient from github3.repos.branch import Branch from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase from addons.github.models import GitHubProvider from addons.github.tests.factories import GitHubAccountFactory class GitHubAddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase): ADDON_SHORT_NAME = 'github' ExternalAccountFactory = GitHubAccountFactory Provider = GitHubProvider def set_node_settings(self, settings): super(GitHubAddonTestCase, self).set_node_settings(settings) settings.repo = 'abc' settings.user = 'octo-cat' settings.save() # TODO: allow changing the repo name def create_mock_github(user='octo-cat', private=False): """Factory for mock GitHub objects. Example: :: >>> github = create_mock_github(user='octocat') >>> github.branches(user='octocat', repo='hello-world') >>> [{u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230', ... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'}, ... u'name': u'dev'}, ... {u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c', ... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'}, ... u'name': u'master'}, ... {u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6', ... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'}, ... u'name': u'no-bundle'}] :param str user: Github username. :param bool private: Whether repo is private. :return: An autospecced GitHub Mock object """ github_mock = mock.create_autospec(GitHubClient) github_mock.repo.return_value = github3.repos.Repository.from_json({ u'archive_url': u'https://api.github.com/repos/{user}/mock-repo/{{archive_format}}{{/ref}}'.format(user=user), u'assignees_url': u'https://api.github.com/repos/{user}/mock-repo/assignees{{/user}}'.format(user=user), u'blobs_url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs{{/sha}}'.format(user=user), u'branches_url': u'https://api.github.com/repos/{user}/mock-repo/branches{{/bra.format(user=user)nch}}'.format(user=user), u'clone_url': u'https://github.com/{user}/mock-repo.git'.format(user=user), u'collaborators_url': u'https://api.github.com/repos/{user}/mock-repo/collaborators{{/collaborator}}'.format(user=user), u'comments_url': u'https://api.github.com/repos/{user}/mock-repo/comments{{/number}}'.format(user=user), u'commits_url': u'https://api.github.com/repos/{user}/mock-repo/commits{{/sha}}'.format(user=user), u'compare_url': u'https://api.github.com/repos/{user}/mock-repo/compare/{{base}}...{{head}}', u'contents_url': u'https://api.github.com/repos/{user}/mock-repo/contents/{{+path}}'.format(user=user), u'contributors_url': u'https://api.github.com/repos/{user}/mock-repo/contributors'.format(user=user), u'created_at': u'2013-06-30T18:29:18Z', u'default_branch': u'dev', u'description': u'Simple, Pythonic, text processing--Sentiment analysis, part-of-speech tagging, noun phrase extraction, translation, and more.', u'downloads_url': u'https://api.github.com/repos/{user}/mock-repo/downloads'.format(user=user), u'events_url': u'https://api.github.com/repos/{user}/mock-repo/events'.format(user=user), u'fork': False, u'forks': 89, u'forks_count': 89, u'forks_url': u'https://api.github.com/repos/{user}/mock-repo/forks', u'full_name': u'{user}/mock-repo', u'git_commits_url': u'https://api.github.com/repos/{user}/mock-repo/git/commits{{/sha}}'.format(user=user), 
u'git_refs_url': u'https://api.github.com/repos/{user}/mock-repo/git/refs{{/sha}}'.format(user=user), u'git_tags_url': u'https://api.github.com/repos/{user}/mock-repo/git/tags{{/sha}}'.format(user=user), u'git_url': u'git://github.com/{user}/mock-repo.git'.format(user=user), u'has_downloads': True, u'has_issues': True, u'has_wiki': True, u'homepage': u'https://mock-repo.readthedocs.org/', u'hooks_url': u'https://api.github.com/repos/{user}/mock-repo/hooks'.format(user=user), u'html_url': u'https://github.com/{user}/mock-repo'.format(user=user), u'id': 11075275, u'issue_comment_url': u'https://api.github.com/repos/{user}/mock-repo/issues/comments/{{number}}'.format(user=user), u'issue_events_url': u'https://api.github.com/repos/{user}/mock-repo/issues/events{{/number}}'.format(user=user), u'issues_url': u'https://api.github.com/repos/{user}/mock-repo/issues{{/number}}'.format(user=user), u'keys_url': u'https://api.github.com/repos/{user}/mock-repo/keys{{/key_id}}'.format(user=user), u'labels_url': u'https://api.github.com/repos/{user}/mock-repo/labels{{/name}}'.format(user=user), u'language': u'Python', u'languages_url': u'https://api.github.com/repos/{user}/mock-repo/languages'.format(user=user), u'master_branch': u'dev', u'merges_url': u'https://api.github.com/repos/{user}/mock-repo/merges'.format(user=user), u'milestones_url': u'https://api.github.com/repos/{user}/mock-repo/milestones{{/number}}'.format(user=user), u'mirror_url': None, u'name': u'mock-repo', u'network_count': 89, u'notifications_url': u'https://api.github.com/repos/{user}/mock-repo/notifications{{?since,all,participating}}'.format(user=user), u'open_issues': 2, u'open_issues_count': 2, u'owner': {u'avatar_url': u'https://gravatar.com/avatar/c74f9cfd7776305a82ede0b765d65402?d=https%3A%2F%2Fidenticons.github.com%2F3959fe3bcd263a12c28ae86a66ec75ef.png&r=x', u'events_url': u'https://api.github.com/users/{user}/events{{/privacy}}'.format(user=user), u'followers_url': u'https://api.github.com/users/{user}/followers'.format(user=user), u'following_url': u'https://api.github.com/users/{user}/following{{/other_user}}'.format(user=user), u'gists_url': u'https://api.github.com/users/{user}/gists{{/gist_id}}'.format(user=user), u'gravatar_id': u'c74f9cfd7776305a82ede0b765d65402', u'html_url': u'https://github.com/{user}'.format(user=user), u'id': 2379650, u'login': user, u'organizations_url': u'https://api.github.com/users/{user}/orgs'.format(user=user), u'received_events_url': u'https://api.github.com/users/{user}/received_events'.format(user=user), u'repos_url': u'https://api.github.com/users/{user}/repos'.format(user=user), u'site_admin': False, u'starred_url': u'https://api.github.com/users/{user}/starred{{/owner}}{{/repo}}'.format(user=user), u'subscriptions_url': u'https://api.github.com/users/{user}/subscriptions'.format(user=user), u'type': u'User', u'url': u'https://api.github.com/users/{user}'.format(user=user)}, u'private': private, u'pulls_url': u'https://api.github.com/repos/{user}/mock-repo/pulls{{/number}}'.format(user=user), u'pushed_at': u'2013-12-30T16:05:54Z', u'releases_url': u'https://api.github.com/repos/{user}/mock-repo/releases{{/id}}'.format(user=user), u'size': 8717, u'ssh_url': u'git@github.com:{user}/mock-repo.git'.format(user=user), u'stargazers_count': 1469, u'stargazers_url': u'https://api.github.com/repos/{user}/mock-repo/stargazers'.format(user=user), u'statuses_url': u'https://api.github.com/repos/{user}/mock-repo/statuses/{{sha}}'.format(user=user), u'subscribers_count': 86, u'subscribers_url': 
u'https://api.github.com/repos/{user}/mock-repo/subscribers'.format(user=user), u'subscription_url': u'https://api.github.com/repos/{user}/mock-repo/subscription'.format(user=user), u'svn_url': u'https://github.com/{user}/mock-repo'.format(user=user), u'tags_url': u'https://api.github.com/repos/{user}/mock-repo/tags'.format(user=user), u'teams_url': u'https://api.github.com/repos/{user}/mock-repo/teams'.format(user=user), u'trees_url': u'https://api.github.com/repos/{user}/mock-repo/git/trees{{/sha}}'.format(user=user), u'updated_at': u'2014-01-12T21:23:50Z', u'url': u'https://api.github.com/repos/{user}/mock-repo'.format(user=user), u'watchers': 1469, u'watchers_count': 1469, # NOTE: permissions are only available if authorized on the repo 'permissions': { 'push': True } }) github_mock.branches.return_value = [ Branch.from_json({u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230', u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'.format(user=user)}, u'name': u'dev'}), Branch.from_json({u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c', u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'.format(user=user)}, u'name': u'master'}), Branch.from_json({u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6', u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'.format(user=user)}, u'name': u'no-bundle'}) ] return github_mock
apache-2.0
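A minimal usage sketch for the factory above (my addition, not part of the repository): the autospecced mock stands in for GitHubClient so addon code can be exercised without network access. The repo(user, repo) call signature is an assumption inferred from how the factory configures return values.

def test_mock_repo_shape():
    github = create_mock_github(user='octocat', private=False)
    repo = github.repo('octocat', 'mock-repo')   # returns the canned Repository
    assert repo.name == 'mock-repo'
    assert repo.default_branch == 'dev'
    # branches() returns the three canned Branch objects in order
    assert [b.name for b in github.branches()] == ['dev', 'master', 'no-bundle']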
bonitadecker77/python-for-android
python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/cipherfactory.py
357
3177
"""Factory functions for symmetric cryptography.""" import os import Python_AES import Python_RC4 import cryptomath tripleDESPresent = False if cryptomath.m2cryptoLoaded: import OpenSSL_AES import OpenSSL_RC4 import OpenSSL_TripleDES tripleDESPresent = True if cryptomath.cryptlibpyLoaded: import Cryptlib_AES import Cryptlib_RC4 import Cryptlib_TripleDES tripleDESPresent = True if cryptomath.pycryptoLoaded: import PyCrypto_AES import PyCrypto_RC4 import PyCrypto_TripleDES tripleDESPresent = True # ************************************************************************** # Factory Functions for AES # ************************************************************************** def createAES(key, IV, implList=None): """Create a new AES object. @type key: str @param key: A 16, 24, or 32 byte string. @type IV: str @param IV: A 16 byte string @rtype: L{tlslite.utils.AES} @return: An AES object. """ if implList == None: implList = ["cryptlib", "openssl", "pycrypto", "python"] for impl in implList: if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: return Cryptlib_AES.new(key, 2, IV) elif impl == "openssl" and cryptomath.m2cryptoLoaded: return OpenSSL_AES.new(key, 2, IV) elif impl == "pycrypto" and cryptomath.pycryptoLoaded: return PyCrypto_AES.new(key, 2, IV) elif impl == "python": return Python_AES.new(key, 2, IV) raise NotImplementedError() def createRC4(key, IV, implList=None): """Create a new RC4 object. @type key: str @param key: A 16 to 32 byte string. @type IV: object @param IV: Ignored, whatever it is. @rtype: L{tlslite.utils.RC4} @return: An RC4 object. """ if implList == None: implList = ["cryptlib", "openssl", "pycrypto", "python"] if len(IV) != 0: raise AssertionError() for impl in implList: if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: return Cryptlib_RC4.new(key) elif impl == "openssl" and cryptomath.m2cryptoLoaded: return OpenSSL_RC4.new(key) elif impl == "pycrypto" and cryptomath.pycryptoLoaded: return PyCrypto_RC4.new(key) elif impl == "python": return Python_RC4.new(key) raise NotImplementedError() #Create a new TripleDES instance def createTripleDES(key, IV, implList=None): """Create a new 3DES object. @type key: str @param key: A 24 byte string. @type IV: str @param IV: An 8 byte string @rtype: L{tlslite.utils.TripleDES} @return: A 3DES object. """ if implList == None: implList = ["cryptlib", "openssl", "pycrypto"] for impl in implList: if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: return Cryptlib_TripleDES.new(key, 2, IV) elif impl == "openssl" and cryptomath.m2cryptoLoaded: return OpenSSL_TripleDES.new(key, 2, IV) elif impl == "pycrypto" and cryptomath.pycryptoLoaded: return PyCrypto_TripleDES.new(key, 2, IV) raise NotImplementedError()
apache-2.0
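A rough round-trip sketch for createAES above, assuming the returned objects expose encrypt()/decrypt() over whole 16-byte blocks as tlslite's CBC wrappers do (those method names are my assumption, not confirmed by this module):

import os

key = os.urandom(16)            # 128-bit AES key
iv = os.urandom(16)             # createAES requires a 16-byte IV
cipher = createAES(key, iv)     # first available backend in implList wins

plaintext = b"sixteen byte msg"              # exactly one CBC block
ciphertext = cipher.encrypt(plaintext)       # assumed API

decipher = createAES(key, iv)                # fresh object; CBC chaining is stateful
assert decipher.decrypt(ciphertext) == plaintext   # assumed API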
dennisss/sympy
sympy/mpmath/libmp/libmpi.py
18
27592
""" Computational functions for interval arithmetic. """ from .backend import xrange from .libmpf import ( ComplexResult, round_down, round_up, round_floor, round_ceiling, round_nearest, prec_to_dps, repr_dps, dps_to_prec, bitcount, from_float, fnan, finf, fninf, fzero, fhalf, fone, fnone, mpf_sign, mpf_lt, mpf_le, mpf_gt, mpf_ge, mpf_eq, mpf_cmp, mpf_min_max, mpf_floor, from_int, to_int, to_str, from_str, mpf_abs, mpf_neg, mpf_pos, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, mpf_div, mpf_shift, mpf_pow_int, from_man_exp, MPZ_ONE) from .libelefun import ( mpf_log, mpf_exp, mpf_sqrt, mpf_atan, mpf_atan2, mpf_pi, mod_pi2, mpf_cos_sin ) from .gammazeta import mpf_gamma, mpf_rgamma, mpf_loggamma, mpc_loggamma def mpi_str(s, prec): sa, sb = s dps = prec_to_dps(prec) + 5 return "[%s, %s]" % (to_str(sa, dps), to_str(sb, dps)) #dps = prec_to_dps(prec) #m = mpi_mid(s, prec) #d = mpf_shift(mpi_delta(s, 20), -1) #return "%s +/- %s" % (to_str(m, dps), to_str(d, 3)) mpi_zero = (fzero, fzero) mpi_one = (fone, fone) def mpi_eq(s, t): return s == t def mpi_ne(s, t): return s != t def mpi_lt(s, t): sa, sb = s ta, tb = t if mpf_lt(sb, ta): return True if mpf_ge(sa, tb): return False return None def mpi_le(s, t): sa, sb = s ta, tb = t if mpf_le(sb, ta): return True if mpf_gt(sa, tb): return False return None def mpi_gt(s, t): return mpi_lt(t, s) def mpi_ge(s, t): return mpi_le(t, s) def mpi_add(s, t, prec=0): sa, sb = s ta, tb = t a = mpf_add(sa, ta, prec, round_floor) b = mpf_add(sb, tb, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = finf return a, b def mpi_sub(s, t, prec=0): sa, sb = s ta, tb = t a = mpf_sub(sa, tb, prec, round_floor) b = mpf_sub(sb, ta, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = finf return a, b def mpi_delta(s, prec): sa, sb = s return mpf_sub(sb, sa, prec, round_up) def mpi_mid(s, prec): sa, sb = s return mpf_shift(mpf_add(sa, sb, prec, round_nearest), -1) def mpi_pos(s, prec): sa, sb = s a = mpf_pos(sa, prec, round_floor) b = mpf_pos(sb, prec, round_ceiling) return a, b def mpi_neg(s, prec=0): sa, sb = s a = mpf_neg(sb, prec, round_floor) b = mpf_neg(sa, prec, round_ceiling) return a, b def mpi_abs(s, prec=0): sa, sb = s sas = mpf_sign(sa) sbs = mpf_sign(sb) # Both points nonnegative? if sas >= 0: a = mpf_pos(sa, prec, round_floor) b = mpf_pos(sb, prec, round_ceiling) # Upper point nonnegative? elif sbs >= 0: a = fzero negsa = mpf_neg(sa) if mpf_lt(negsa, sb): b = mpf_pos(sb, prec, round_ceiling) else: b = mpf_pos(negsa, prec, round_ceiling) # Both negative? 
else: a = mpf_neg(sb, prec, round_floor) b = mpf_neg(sa, prec, round_ceiling) return a, b # TODO: optimize def mpi_mul_mpf(s, t, prec): return mpi_mul(s, (t, t), prec) def mpi_div_mpf(s, t, prec): return mpi_div(s, (t, t), prec) def mpi_mul(s, t, prec=0): sa, sb = s ta, tb = t sas = mpf_sign(sa) sbs = mpf_sign(sb) tas = mpf_sign(ta) tbs = mpf_sign(tb) if sas == sbs == 0: # Should maybe be undefined if ta == fninf or tb == finf: return fninf, finf return fzero, fzero if tas == tbs == 0: # Should maybe be undefined if sa == fninf or sb == finf: return fninf, finf return fzero, fzero if sas >= 0: # positive * positive if tas >= 0: a = mpf_mul(sa, ta, prec, round_floor) b = mpf_mul(sb, tb, prec, round_ceiling) if a == fnan: a = fzero if b == fnan: b = finf # positive * negative elif tbs <= 0: a = mpf_mul(sb, ta, prec, round_floor) b = mpf_mul(sa, tb, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = fzero # positive * both signs else: a = mpf_mul(sb, ta, prec, round_floor) b = mpf_mul(sb, tb, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = finf elif sbs <= 0: # negative * positive if tas >= 0: a = mpf_mul(sa, tb, prec, round_floor) b = mpf_mul(sb, ta, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = fzero # negative * negative elif tbs <= 0: a = mpf_mul(sb, tb, prec, round_floor) b = mpf_mul(sa, ta, prec, round_ceiling) if a == fnan: a = fzero if b == fnan: b = finf # negative * both signs else: a = mpf_mul(sa, tb, prec, round_floor) b = mpf_mul(sa, ta, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = finf else: # General case: perform all cross-multiplications and compare # Since the multiplications can be done exactly, we need only # do 4 (instead of 8: two for each rounding mode) cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)] if fnan in cases: a, b = (fninf, finf) else: a, b = mpf_min_max(cases) a = mpf_pos(a, prec, round_floor) b = mpf_pos(b, prec, round_ceiling) return a, b def mpi_square(s, prec=0): sa, sb = s if mpf_ge(sa, fzero): a = mpf_mul(sa, sa, prec, round_floor) b = mpf_mul(sb, sb, prec, round_ceiling) elif mpf_le(sb, fzero): a = mpf_mul(sb, sb, prec, round_floor) b = mpf_mul(sa, sa, prec, round_ceiling) else: sa = mpf_neg(sa) sa, sb = mpf_min_max([sa, sb]) a = fzero b = mpf_mul(sb, sb, prec, round_ceiling) return a, b def mpi_div(s, t, prec): sa, sb = s ta, tb = t sas = mpf_sign(sa) sbs = mpf_sign(sb) tas = mpf_sign(ta) tbs = mpf_sign(tb) # 0 / X if sas == sbs == 0: # 0 / <interval containing 0> if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0): return fninf, finf return fzero, fzero # Denominator contains both negative and positive numbers; # this should properly be a multi-interval, but the closest # match is the entire (extended) real line if tas < 0 and tbs > 0: return fninf, finf # Assume denominator to be nonnegative if tas < 0: return mpi_div(mpi_neg(s), mpi_neg(t), prec) # Division by zero # XXX: make sure all results make sense if tas == 0: # Numerator contains both signs? if sas < 0 and sbs > 0: return fninf, finf if tas == tbs: return fninf, finf # Numerator positive? 
if sas >= 0: a = mpf_div(sa, tb, prec, round_floor) b = finf if sbs <= 0: a = fninf b = mpf_div(sb, tb, prec, round_ceiling) # Division with positive denominator # We still have to handle nans resulting from inf/0 or inf/inf else: # Nonnegative numerator if sas >= 0: a = mpf_div(sa, tb, prec, round_floor) b = mpf_div(sb, ta, prec, round_ceiling) if a == fnan: a = fzero if b == fnan: b = finf # Nonpositive numerator elif sbs <= 0: a = mpf_div(sa, ta, prec, round_floor) b = mpf_div(sb, tb, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = fzero # Numerator contains both signs? else: a = mpf_div(sa, ta, prec, round_floor) b = mpf_div(sb, ta, prec, round_ceiling) if a == fnan: a = fninf if b == fnan: b = finf return a, b def mpi_pi(prec): a = mpf_pi(prec, round_floor) b = mpf_pi(prec, round_ceiling) return a, b def mpi_exp(s, prec): sa, sb = s # exp is monotonic a = mpf_exp(sa, prec, round_floor) b = mpf_exp(sb, prec, round_ceiling) return a, b def mpi_log(s, prec): sa, sb = s # log is monotonic a = mpf_log(sa, prec, round_floor) b = mpf_log(sb, prec, round_ceiling) return a, b def mpi_sqrt(s, prec): sa, sb = s # sqrt is monotonic a = mpf_sqrt(sa, prec, round_floor) b = mpf_sqrt(sb, prec, round_ceiling) return a, b def mpi_atan(s, prec): sa, sb = s a = mpf_atan(sa, prec, round_floor) b = mpf_atan(sb, prec, round_ceiling) return a, b def mpi_pow_int(s, n, prec): sa, sb = s if n < 0: return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec) if n == 0: return (fone, fone) if n == 1: return s if n == 2: return mpi_square(s, prec) # Odd -- signs are preserved if n & 1: a = mpf_pow_int(sa, n, prec, round_floor) b = mpf_pow_int(sb, n, prec, round_ceiling) # Even -- important to ensure positivity else: sas = mpf_sign(sa) sbs = mpf_sign(sb) # Nonnegative? if sas >= 0: a = mpf_pow_int(sa, n, prec, round_floor) b = mpf_pow_int(sb, n, prec, round_ceiling) # Nonpositive? elif sbs <= 0: a = mpf_pow_int(sb, n, prec, round_floor) b = mpf_pow_int(sa, n, prec, round_ceiling) # Mixed signs? 
else: a = fzero # max(-a,b)**n sa = mpf_neg(sa) if mpf_ge(sa, sb): b = mpf_pow_int(sa, n, prec, round_ceiling) else: b = mpf_pow_int(sb, n, prec, round_ceiling) return a, b def mpi_pow(s, t, prec): ta, tb = t if ta == tb and ta not in (finf, fninf): if ta == from_int(to_int(ta)): return mpi_pow_int(s, to_int(ta), prec) if ta == fhalf: return mpi_sqrt(s, prec) u = mpi_log(s, prec + 20) v = mpi_mul(u, t, prec + 20) return mpi_exp(v, prec) def MIN(x, y): if mpf_le(x, y): return x return y def MAX(x, y): if mpf_ge(x, y): return x return y def cos_sin_quadrant(x, wp): sign, man, exp, bc = x if x == fzero: return fone, fzero, 0 # TODO: combine evaluation code to avoid duplicate modulo c, s = mpf_cos_sin(x, wp) t, n, wp_ = mod_pi2(man, exp, exp+bc, 15) if sign: n = -1-n return c, s, n def mpi_cos_sin(x, prec): a, b = x if a == b == fzero: return (fone, fone), (fzero, fzero) # Guaranteed to contain both -1 and 1 if (finf in x) or (fninf in x): return (fnone, fone), (fnone, fone) wp = prec + 20 ca, sa, na = cos_sin_quadrant(a, wp) cb, sb, nb = cos_sin_quadrant(b, wp) ca, cb = mpf_min_max([ca, cb]) sa, sb = mpf_min_max([sa, sb]) # Both functions are monotonic within one quadrant if na == nb: pass # Guaranteed to contain both -1 and 1 elif nb - na >= 4: return (fnone, fone), (fnone, fone) else: # cos has maximum between a and b if na//4 != nb//4: cb = fone # cos has minimum if (na-2)//4 != (nb-2)//4: ca = fnone # sin has maximum if (na-1)//4 != (nb-1)//4: sb = fone # sin has minimum if (na-3)//4 != (nb-3)//4: sa = fnone # Perturb to force interval rounding more = from_man_exp((MPZ_ONE<<wp) + (MPZ_ONE<<10), -wp) less = from_man_exp((MPZ_ONE<<wp) - (MPZ_ONE<<10), -wp) def finalize(v, rounding): if bool(v[0]) == (rounding == round_floor): p = more else: p = less v = mpf_mul(v, p, prec, rounding) sign, man, exp, bc = v if exp+bc >= 1: if sign: return fnone return fone return v ca = finalize(ca, round_floor) cb = finalize(cb, round_ceiling) sa = finalize(sa, round_floor) sb = finalize(sb, round_ceiling) return (ca,cb), (sa,sb) def mpi_cos(x, prec): return mpi_cos_sin(x, prec)[0] def mpi_sin(x, prec): return mpi_cos_sin(x, prec)[1] def mpi_tan(x, prec): cos, sin = mpi_cos_sin(x, prec+20) return mpi_div(sin, cos, prec) def mpi_cot(x, prec): cos, sin = mpi_cos_sin(x, prec+20) return mpi_div(cos, sin, prec) def mpi_from_str_a_b(x, y, percent, prec): wp = prec + 20 xa = from_str(x, wp, round_floor) xb = from_str(x, wp, round_ceiling) #ya = from_str(y, wp, round_floor) y = from_str(y, wp, round_ceiling) assert mpf_ge(y, fzero) if percent: y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling) y = mpf_div(y, from_int(100), wp, round_ceiling) a = mpf_sub(xa, y, prec, round_floor) b = mpf_add(xb, y, prec, round_ceiling) return a, b def mpi_from_str(s, prec): """ Parse an interval number given as a string. Allowed forms are "-1.23e-27" Any single decimal floating-point literal. "a +- b" or "a (b)" a is the midpoint of the interval and b is the half-width "a +- b%" or "a (b%)" a is the midpoint of the interval and the half-width is b percent of a (`a \times b / 100`). "[a, b]" The interval indicated directly. "x[y,z]e" x are shared digits, y and z are unequal digits, e is the exponent. 
""" e = ValueError("Improperly formed interval number '%s'" % s) s = s.replace(" ", "") wp = prec + 20 if "+-" in s: x, y = s.split("+-") return mpi_from_str_a_b(x, y, False, prec) # case 2 elif "(" in s: # Don't confuse with a complex number (x,y) if s[0] == "(" or ")" not in s: raise e s = s.replace(")", "") percent = False if "%" in s: if s[-1] != "%": raise e percent = True s = s.replace("%", "") x, y = s.split("(") return mpi_from_str_a_b(x, y, percent, prec) elif "," in s: if ('[' not in s) or (']' not in s): raise e if s[0] == '[': # case 3 s = s.replace("[", "") s = s.replace("]", "") a, b = s.split(",") a = from_str(a, prec, round_floor) b = from_str(b, prec, round_ceiling) return a, b else: # case 4 x, y = s.split('[') y, z = y.split(',') if 'e' in s: z, e = z.split(']') else: z, e = z.rstrip(']'), '' a = from_str(x+y+e, prec, round_floor) b = from_str(x+z+e, prec, round_ceiling) return a, b else: a = from_str(s, prec, round_floor) b = from_str(s, prec, round_ceiling) return a, b def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs): """ Convert a mpi interval to a string. **Arguments** *dps* decimal places to use for printing *use_spaces* use spaces for more readable output, defaults to true *brackets* pair of strings (or two-character string) giving left and right brackets *mode* mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff' *error_dps* limit the error to *error_dps* digits (mode 'plusminus and 'percent') Additional keyword arguments are forwarded to the mpf-to-string conversion for the components of the output. **Examples** >>> from sympy.mpmath import mpi, mp >>> mp.dps = 30 >>> x = mpi(1, 2) >>> mpi_to_str(x, mode='plusminus') '1.5 +- 5.0e-1' >>> mpi_to_str(x, mode='percent') '1.5 (33.33%)' >>> mpi_to_str(x, mode='brackets') '[1.0, 2.0]' >>> mpi_to_str(x, mode='brackets' , brackets=('<', '>')) '<1.0, 2.0>' >>> x = mpi('5.2582327113062393041', '5.2582327113062749951') >>> mpi_to_str(x, mode='diff') '5.2582327113062[4, 7]' >>> mpi_to_str(mpi(0), mode='percent') '0.0 (0%)' """ prec = dps_to_prec(dps) wp = prec + 20 a, b = x mid = mpi_mid(x, prec) delta = mpi_delta(x, prec) a_str = to_str(a, dps, **kwargs) b_str = to_str(b, dps, **kwargs) mid_str = to_str(mid, dps, **kwargs) sp = "" if use_spaces: sp = " " br1, br2 = brackets if mode == 'plusminus': delta_str = to_str(mpf_shift(delta,-1), dps, **kwargs) s = mid_str + sp + "+-" + sp + delta_str elif mode == 'percent': if mid == fzero: p = fzero else: # p = 100 * delta(x) / (2*mid(x)) p = mpf_mul(delta, from_int(100)) p = mpf_div(p, mpf_mul(mid, from_int(2)), wp) s = mid_str + sp + "(" + to_str(p, error_dps) + "%)" elif mode == 'brackets': s = br1 + a_str + "," + sp + b_str + br2 elif mode == 'diff': # use more digits if str(x.a) and str(x.b) are equal if a_str == b_str: a_str = to_str(a, dps+3, **kwargs) b_str = to_str(b, dps+3, **kwargs) # separate mantissa and exponent a = a_str.split('e') if len(a) == 1: a.append('') b = b_str.split('e') if len(b) == 1: b.append('') if a[1] == b[1]: if a[0] != b[0]: for i in xrange(len(a[0]) + 1): if a[0][i] != b[0][i]: break s = (a[0][:i] + br1 + a[0][i:] + ',' + sp + b[0][i:] + br2 + 'e'*min(len(a[1]), 1) + a[1]) else: # no difference s = a[0] + br1 + br2 + 'e'*min(len(a[1]), 1) + a[1] else: s = br1 + 'e'.join(a) + ',' + sp + 'e'.join(b) + br2 else: raise ValueError("'%s' is unknown mode for printing mpi" % mode) return s def mpci_add(x, y, prec): a, b = x c, d = y return mpi_add(a, c, prec), mpi_add(b, d, prec) def 
mpci_sub(x, y, prec): a, b = x c, d = y return mpi_sub(a, c, prec), mpi_sub(b, d, prec) def mpci_neg(x, prec=0): a, b = x return mpi_neg(a, prec), mpi_neg(b, prec) def mpci_pos(x, prec): a, b = x return mpi_pos(a, prec), mpi_pos(b, prec) def mpci_mul(x, y, prec): # TODO: optimize for real/imag cases a, b = x c, d = y r1 = mpi_mul(a,c) r2 = mpi_mul(b,d) re = mpi_sub(r1,r2,prec) i1 = mpi_mul(a,d) i2 = mpi_mul(b,c) im = mpi_add(i1,i2,prec) return re, im def mpci_div(x, y, prec): # TODO: optimize for real/imag cases a, b = x c, d = y wp = prec+20 m1 = mpi_square(c) m2 = mpi_square(d) m = mpi_add(m1,m2,wp) re = mpi_add(mpi_mul(a,c), mpi_mul(b,d), wp) im = mpi_sub(mpi_mul(b,c), mpi_mul(a,d), wp) re = mpi_div(re, m, prec) im = mpi_div(im, m, prec) return re, im def mpci_exp(x, prec): a, b = x wp = prec+20 r = mpi_exp(a, wp) c, s = mpi_cos_sin(b, wp) a = mpi_mul(r, c, prec) b = mpi_mul(r, s, prec) return a, b def mpi_shift(x, n): a, b = x return mpf_shift(a,n), mpf_shift(b,n) def mpi_cosh_sinh(x, prec): # TODO: accuracy for small x wp = prec+20 e1 = mpi_exp(x, wp) e2 = mpi_div(mpi_one, e1, wp) c = mpi_add(e1, e2, prec) s = mpi_sub(e1, e2, prec) c = mpi_shift(c, -1) s = mpi_shift(s, -1) return c, s def mpci_cos(x, prec): a, b = x wp = prec+10 c, s = mpi_cos_sin(a, wp) ch, sh = mpi_cosh_sinh(b, wp) re = mpi_mul(c, ch, prec) im = mpi_mul(s, sh, prec) return re, mpi_neg(im) def mpci_sin(x, prec): a, b = x wp = prec+10 c, s = mpi_cos_sin(a, wp) ch, sh = mpi_cosh_sinh(b, wp) re = mpi_mul(s, ch, prec) im = mpi_mul(c, sh, prec) return re, im def mpci_abs(x, prec): a, b = x if a == mpi_zero: return mpi_abs(b) if b == mpi_zero: return mpi_abs(a) # Important: nonnegative a = mpi_square(a) b = mpi_square(b) t = mpi_add(a, b, prec+20) return mpi_sqrt(t, prec) def mpi_atan2(y, x, prec): ya, yb = y xa, xb = x # Constrained to the real line if ya == yb == fzero: if mpf_ge(xa, fzero): return mpi_zero return mpi_pi(prec) # Right half-plane if mpf_ge(xa, fzero): if mpf_ge(ya, fzero): a = mpf_atan2(ya, xb, prec, round_floor) else: a = mpf_atan2(ya, xa, prec, round_floor) if mpf_ge(yb, fzero): b = mpf_atan2(yb, xa, prec, round_ceiling) else: b = mpf_atan2(yb, xb, prec, round_ceiling) # Upper half-plane elif mpf_ge(ya, fzero): b = mpf_atan2(ya, xa, prec, round_ceiling) if mpf_le(xb, fzero): a = mpf_atan2(yb, xb, prec, round_floor) else: a = mpf_atan2(ya, xb, prec, round_floor) # Lower half-plane elif mpf_le(yb, fzero): a = mpf_atan2(yb, xa, prec, round_floor) if mpf_le(xb, fzero): b = mpf_atan2(ya, xb, prec, round_ceiling) else: b = mpf_atan2(yb, xb, prec, round_ceiling) # Covering the origin else: b = mpf_pi(prec, round_ceiling) a = mpf_neg(b) return a, b def mpci_arg(z, prec): x, y = z return mpi_atan2(y, x, prec) def mpci_log(z, prec): x, y = z re = mpi_log(mpci_abs(z, prec+20), prec) im = mpci_arg(z, prec) return re, im def mpci_pow(x, y, prec): # TODO: recognize/speed up real cases, integer y yre, yim = y if yim == mpi_zero: ya, yb = yre if ya == yb: sign, man, exp, bc = yb if man and exp >= 0: return mpci_pow_int(x, (-1)**sign * int(man<<exp), prec) # x^0 if yb == fzero: return mpci_pow_int(x, 0, prec) wp = prec+20 return mpci_exp(mpci_mul(y, mpci_log(x, wp), wp), prec) def mpci_square(x, prec): a, b = x # (a+bi)^2 = (a^2-b^2) + 2abi re = mpi_sub(mpi_square(a), mpi_square(b), prec) im = mpi_mul(a, b, prec) im = mpi_shift(im, 1) return re, im def mpci_pow_int(x, n, prec): if n < 0: return mpci_div((mpi_one,mpi_zero), mpci_pow_int(x, -n, prec+20), prec) if n == 0: return mpi_one, mpi_zero if n == 1: return 
mpci_pos(x, prec) if n == 2: return mpci_square(x, prec) wp = prec + 20 result = (mpi_one, mpi_zero) while n: if n & 1: result = mpci_mul(result, x, wp) n -= 1 x = mpci_square(x, wp) n >>= 1 return mpci_pos(result, prec) gamma_min_a = from_float(1.46163214496) gamma_min_b = from_float(1.46163214497) gamma_min = (gamma_min_a, gamma_min_b) gamma_mono_imag_a = from_float(-1.1) gamma_mono_imag_b = from_float(1.1) def mpi_overlap(x, y): a, b = x c, d = y if mpf_lt(d, a): return False if mpf_gt(c, b): return False return True # type = 0 -- gamma # type = 1 -- factorial # type = 2 -- 1/gamma # type = 3 -- log-gamma def mpi_gamma(z, prec, type=0): a, b = z wp = prec+20 if type == 1: return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0) # increasing if mpf_gt(a, gamma_min_b): if type == 0: c = mpf_gamma(a, prec, round_floor) d = mpf_gamma(b, prec, round_ceiling) elif type == 2: c = mpf_rgamma(b, prec, round_floor) d = mpf_rgamma(a, prec, round_ceiling) elif type == 3: c = mpf_loggamma(a, prec, round_floor) d = mpf_loggamma(b, prec, round_ceiling) # decreasing elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a): if type == 0: c = mpf_gamma(b, prec, round_floor) d = mpf_gamma(a, prec, round_ceiling) elif type == 2: c = mpf_rgamma(a, prec, round_floor) d = mpf_rgamma(b, prec, round_ceiling) elif type == 3: c = mpf_loggamma(b, prec, round_floor) d = mpf_loggamma(a, prec, round_ceiling) else: # TODO: reflection formula znew = mpi_add(z, mpi_one, wp) if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec) if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec) if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec) return c, d def mpci_gamma(z, prec, type=0): (a1,a2), (b1,b2) = z # Real case if b1 == b2 == fzero and (type != 3 or mpf_gt(a1,fzero)): return mpi_gamma(z, prec, type), mpi_zero # Estimate precision wp = prec+20 if type != 3: amag = a2[2]+a2[3] bmag = b2[2]+b2[3] if a2 != fzero: mag = max(amag, bmag) else: mag = bmag an = abs(to_int(a2)) bn = abs(to_int(b2)) absn = max(an, bn) gamma_size = max(0,absn*mag) wp += bitcount(gamma_size) # Assume type != 1 if type == 1: (a1,a2) = mpi_add((a1,a2), mpi_one, wp); z = (a1,a2), (b1,b2) type = 0 # Avoid non-monotonic region near the negative real axis if mpf_lt(a1, gamma_min_b): if mpi_overlap((b1,b2), (gamma_mono_imag_a, gamma_mono_imag_b)): # TODO: reflection formula #if mpf_lt(a2, mpf_shift(fone,-1)): # znew = mpci_sub((mpi_one,mpi_zero),z,wp) # ... 
# Recurrence: # gamma(z) = gamma(z+1)/z znew = mpi_add((a1,a2), mpi_one, wp), (b1,b2) if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec) if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec) if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z,prec+2), prec) # Use monotonicity (except for a small region close to the # origin and near poles) # upper half-plane if mpf_ge(b1, fzero): minre = mpc_loggamma((a1,b2), wp, round_floor) maxre = mpc_loggamma((a2,b1), wp, round_ceiling) minim = mpc_loggamma((a1,b1), wp, round_floor) maxim = mpc_loggamma((a2,b2), wp, round_ceiling) # lower half-plane elif mpf_le(b2, fzero): minre = mpc_loggamma((a1,b1), wp, round_floor) maxre = mpc_loggamma((a2,b2), wp, round_ceiling) minim = mpc_loggamma((a2,b1), wp, round_floor) maxim = mpc_loggamma((a1,b2), wp, round_ceiling) # crosses real axis else: maxre = mpc_loggamma((a2,fzero), wp, round_ceiling) # stretches more into the lower half-plane if mpf_gt(mpf_neg(b1), b2): minre = mpc_loggamma((a1,b1), wp, round_ceiling) else: minre = mpc_loggamma((a1,b2), wp, round_ceiling) minim = mpc_loggamma((a2,b1), wp, round_floor) maxim = mpc_loggamma((a2,b2), wp, round_floor) w = (minre[0], maxre[0]), (minim[1], maxim[1]) if type == 3: return mpi_pos(w[0], prec), mpi_pos(w[1], prec) if type == 2: w = mpci_neg(w) return mpci_exp(w, prec) def mpi_loggamma(z, prec): return mpi_gamma(z, prec, type=3) def mpci_loggamma(z, prec): return mpci_gamma(z, prec, type=3) def mpi_rgamma(z, prec): return mpi_gamma(z, prec, type=2) def mpci_rgamma(z, prec): return mpci_gamma(z, prec, type=2) def mpi_factorial(z, prec): return mpi_gamma(z, prec, type=1) def mpci_factorial(z, prec): return mpci_gamma(z, prec, type=1)
bsd-3-clause
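A small driver sketch (my addition, not part of libmpi) showing the outward-rounding guarantee: every endpoint is computed with round_floor or round_ceiling, so the exact value 0.3 is always enclosed by the interval sum.

prec = 53
x = mpi_from_str('0.1', prec)    # (0.1 rounded down, 0.1 rounded up)
y = mpi_from_str('0.2', prec)
s = mpi_add(x, y, prec)
print(mpi_str(s, prec))          # prints an interval [a, b] with a <= 0.3 <= b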
jckarter/swift
test/SourceKit/Inputs/sourcekitd_path_sanitize.py
20
1315
#!/usr/bin/env python # sourcekitd_path_sanitize.py - Cleans up paths from sourcekitd-test output # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors import re import sys SWIFTMODULE_BUNDLE_RE = re.compile( r'key.filepath: ".*[/\\](.*)\.swiftmodule[/\\].*\.swiftmodule"') SWIFTMODULE_RE = re.compile(r'key.filepath: ".*[/\\](.*)\.swiftmodule"') SWIFT_RE = re.compile(r'key.filepath: ".*[/\\](.*)\.swift"') PCM_RE = re.compile(r'key.filepath: ".*[/\\](.*)-[0-9A-Z]*\.pcm"') HEADER_RE = re.compile(r' file=\\".*[/\\](.*)\.h\\"') try: for line in sys.stdin.readlines(): line = re.sub(SWIFTMODULE_BUNDLE_RE, r'key.filepath: \1.swiftmodule', line) line = re.sub(SWIFTMODULE_RE, r'key.filepath: \1.swiftmodule', line) line = re.sub(SWIFT_RE, r'key.filepath: \1.swift', line) line = re.sub(PCM_RE, r'key.filepath: \1.pcm', line) line = re.sub(HEADER_RE, r' file=\1.h', line) sys.stdout.write(line) except KeyboardInterrupt: sys.stdout.flush()
apache-2.0
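An illustrative one-liner for the script above (assumed input, not from the test suite), showing what one rule does. Note the replacement does not re-emit the quotes, because the pattern consumes them along with the directory prefix.

import re
line = 'key.filepath: "/tmp/build/Foo.swift"'
print(re.sub(SWIFT_RE, r'key.filepath: \1.swift', line))
# -> key.filepath: Foo.swift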
rajadhva/servo
tests/wpt/harness/wptrunner/browsers/b2g.py
117
8016
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. import os import tempfile import shutil import subprocess import fxos_appgen import gaiatest import mozdevice import moznetwork import mozrunner from marionette import expected from marionette.by import By from marionette.wait import Wait from mozprofile import FirefoxProfile, Preferences from .base import get_free_port, BrowserError, Browser, ExecutorBrowser from ..executors.executormarionette import MarionetteTestharnessExecutor from ..hosts import HostsFile, HostsLine from ..environment import hostnames here = os.path.split(__file__)[0] __wptrunner__ = {"product": "b2g", "check_args": "check_args", "browser": "B2GBrowser", "executor": {"testharness": "B2GMarionetteTestharnessExecutor"}, "browser_kwargs": "browser_kwargs", "executor_kwargs": "executor_kwargs", "env_options": "env_options"} def check_args(**kwargs): pass def browser_kwargs(test_environment, **kwargs): return {"prefs_root": kwargs["prefs_root"], "no_backup": kwargs.get("b2g_no_backup", False)} def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs): timeout_multiplier = kwargs["timeout_multiplier"] if timeout_multiplier is None: timeout_multiplier = 2 executor_kwargs = {"server_config": server_config, "timeout_multiplier": timeout_multiplier, "close_after_done": False} if test_type == "reftest": executor_kwargs["cache_manager"] = cache_manager return executor_kwargs def env_options(): return {"host": "web-platform.test", "bind_hostname": "false", "test_server_port": False} class B2GBrowser(Browser): used_ports = set() init_timeout = 180 def __init__(self, logger, prefs_root, no_backup=False): Browser.__init__(self, logger) logger.info("Waiting for device") subprocess.call(["adb", "wait-for-device"]) self.device = mozdevice.DeviceManagerADB() self.marionette_port = get_free_port(2828, exclude=self.used_ports) self.used_ports.add(self.marionette_port) self.cert_test_app = None self.runner = None self.prefs_root = prefs_root self.no_backup = no_backup self.backup_path = None self.backup_paths = [] self.backup_dirs = [] def setup(self): self.logger.info("Running B2G setup") self.backup_path = tempfile.mkdtemp() self.logger.debug("Backing up device to %s" % (self.backup_path,)) if not self.no_backup: self.backup_dirs = [("/data/local", os.path.join(self.backup_path, "local")), ("/data/b2g/mozilla", os.path.join(self.backup_path, "profile"))] self.backup_paths = [("/system/etc/hosts", os.path.join(self.backup_path, "hosts"))] for remote, local in self.backup_dirs: self.device.getDirectory(remote, local) for remote, local in self.backup_paths: self.device.getFile(remote, local) self.setup_hosts() def start(self): profile = FirefoxProfile() profile.set_preferences({"dom.disable_open_during_load": False, "marionette.defaultPrefs.enabled": True}) self.logger.debug("Creating device runner") self.runner = mozrunner.B2GDeviceRunner(profile=profile) self.logger.debug("Starting device runner") self.runner.start() self.logger.debug("Device runner started") def setup_hosts(self): host_ip = moznetwork.get_ip() temp_dir = tempfile.mkdtemp() hosts_path = os.path.join(temp_dir, "hosts") remote_path = "/system/etc/hosts" try: self.device.getFile("/system/etc/hosts", hosts_path) with open(hosts_path) as f: hosts_file = HostsFile.from_file(f) for canonical_hostname in hostnames: hosts_file.set_host(HostsLine(host_ip, 
canonical_hostname)) with open(hosts_path, "w") as f: hosts_file.to_file(f) self.logger.info("Installing hosts file") self.device.remount() self.device.removeFile(remote_path) self.device.pushFile(hosts_path, remote_path) finally: os.unlink(hosts_path) os.rmdir(temp_dir) def load_prefs(self): prefs_path = os.path.join(self.prefs_root, "prefs_general.js") if os.path.exists(prefs_path): preferences = Preferences.read_prefs(prefs_path) else: self.logger.warning("Failed to find base prefs file in %s" % prefs_path) preferences = [] return preferences def stop(self): pass def on_output(self): raise NotImplementedError def cleanup(self): self.logger.debug("Running browser cleanup steps") self.device.remount() for remote, local in self.backup_dirs: self.device.removeDir(remote) self.device.pushDir(local, remote) for remote, local in self.backup_paths: self.device.removeFile(remote) self.device.pushFile(local, remote) shutil.rmtree(self.backup_path) self.device.reboot(wait=True) def pid(self): return None def is_alive(self): return True def executor_browser(self): return B2GExecutorBrowser, {"marionette_port": self.marionette_port} class B2GExecutorBrowser(ExecutorBrowser): # The following methods are called from a different process def __init__(self, *args, **kwargs): ExecutorBrowser.__init__(self, *args, **kwargs) import sys, subprocess self.device = mozdevice.ADBB2G() self.device.forward("tcp:%s" % self.marionette_port, "tcp:2828") self.executor = None self.marionette = None self.gaia_device = None self.gaia_apps = None def after_connect(self, executor): self.executor = executor self.marionette = executor.marionette self.executor.logger.debug("Running browser.after_connect steps") self.gaia_apps = gaiatest.GaiaApps(marionette=executor.marionette) self.executor.logger.debug("Waiting for homescreen to load") # Moved out of gaia_test temporarily self.executor.logger.info("Waiting for B2G to be ready") self.wait_for_homescreen(timeout=60) self.install_cert_app() self.use_cert_app() def install_cert_app(self): """Install the container app used to run the tests""" if fxos_appgen.is_installed("CertTest App"): self.executor.logger.info("CertTest App is already installed") return self.executor.logger.info("Installing CertTest App") app_path = os.path.join(here, "b2g_setup", "certtest_app.zip") fxos_appgen.install_app("CertTest App", app_path, marionette=self.marionette) self.executor.logger.debug("Install complete") def use_cert_app(self): """Start the app used to run the tests""" self.executor.logger.info("Homescreen loaded") self.gaia_apps.launch("CertTest App") def wait_for_homescreen(self, timeout): self.executor.logger.info("Waiting for home screen to load") Wait(self.marionette, timeout).until(expected.element_present( By.CSS_SELECTOR, '#homescreen[loading-state=false]')) class B2GMarionetteTestharnessExecutor(MarionetteTestharnessExecutor): def after_connect(self): self.browser.after_connect(self) MarionetteTestharnessExecutor.after_connect(self)
mpl-2.0
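A quick sketch of the timeout default in executor_kwargs above (the call is mine, mirroring how wptrunner invokes this hook): when no multiplier is given, B2G doubles the standard timeouts.

kw = executor_kwargs("testharness", server_config={}, cache_manager=None,
                     run_info_data={}, timeout_multiplier=None)
assert kw == {"server_config": {},
              "timeout_multiplier": 2,
              "close_after_done": False}   # reftests would also carry cache_manager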
javiplx/debian-devel
cobbler/config.py
3
7320
""" Config.py is a repository of the Cobbler object model Copyright 2006-2008, Red Hat, Inc Michael DeHaan <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import os import weakref import time import random import string import binascii import item_distro as distro import item_profile as profile import item_system as system import item_repo as repo import item_image as image import item_network as network import collection_distros as distros import collection_profiles as profiles import collection_systems as systems import collection_repos as repos import collection_images as images import collection_networks as networks import modules.serializer_yaml as serializer_yaml import settings import serializer from utils import _ from cexceptions import * class Config: has_loaded = False __shared_state = {} def __init__(self,api): """ Constructor. Manages a definitive copy of all data collections with weakrefs pointing back into the class so they can understand each other's contents """ self.__dict__ = Config.__shared_state if not Config.has_loaded: self.__load(api) def __load(self,api): Config.has_loaded = True self.init_time = time.time() self.current_id = 0 self.api = api self._distros = distros.Distros(weakref.proxy(self)) self._repos = repos.Repos(weakref.proxy(self)) self._profiles = profiles.Profiles(weakref.proxy(self)) self._systems = systems.Systems(weakref.proxy(self)) self._images = images.Images(weakref.proxy(self)) self._networks = networks.Networks(weakref.proxy(self)) self._settings = settings.Settings() # not a true collection def generate_uid(self): """ Cobbler itself does not use this GUID's though they are provided to allow for easier API linkage with other applications. 
Cobbler uses unique names in each collection as the object id aka primary key """ data = "%s%s" % (time.time(), random.uniform(1,9999999)) return binascii.b2a_base64(data).replace("=","").strip() def generate_random_id(self,length=8): """ Return a random string using ASCII 0..9 and A..z """ return string.join(random.Random().sample(string.letters+string.digits, length),'') def __cmp(self,a,b): return cmp(a.name,b.name) def distros(self): """ Return the definitive copy of the Distros collection """ return self._distros def profiles(self): """ Return the definitive copy of the Profiles collection """ return self._profiles def systems(self): """ Return the definitive copy of the Systems collection """ return self._systems def settings(self): """ Return the definitive copy of the application settings """ return self._settings def repos(self): """ Return the definitive copy of the Repos collection """ return self._repos def images(self): """ Return the definitive copy of the Images collection """ return self._images def networks(self): """ Return the definitive copy of the Networks collection """ return self._networks def new_distro(self,is_subobject=False): """ Create a new distro object with a backreference to this object """ return distro.Distro(weakref.proxy(self),is_subobject=is_subobject) def new_system(self,is_subobject=False): """ Create a new system with a backreference to this object """ return system.System(weakref.proxy(self),is_subobject=is_subobject) def new_profile(self,is_subobject=False): """ Create a new profile with a backreference to this object """ return profile.Profile(weakref.proxy(self),is_subobject=is_subobject) def new_repo(self,is_subobject=False): """ Create a new mirror to keep track of... """ return repo.Repo(weakref.proxy(self),is_subobject=is_subobject) def new_image(self,is_subobject=False): """ Create a new image object... """ return image.Image(weakref.proxy(self),is_subobject=is_subobject) def new_network(self,is_subobject=False): """ Create a new network object... """ return network.Network(weakref.proxy(self),is_subobject=is_subobject) def clear(self): """ Forget about all loaded configuration data """ self._distros.clear(), self._repos.clear(), self._profiles.clear(), self._images.clear() self._systems.clear(), self._networks.clear(), return True def serialize(self): """ Save the object hierarchy to disk, using the filenames referenced in each object. """ serializer.serialize(self._distros) serializer.serialize(self._repos) serializer.serialize(self._profiles) serializer.serialize(self._images) serializer.serialize(self._systems) serializer.serialize(self._networks) return True def serialize_item(self,collection,item): """ Save item in the collection, resaving the whole collection if needed, but ideally just saving the item. """ return serializer.serialize_item(collection,item) def serialize_delete(self,collection,item): """ Erase item from a storage file, if neccessary rewritting the file. """ return serializer.serialize_delete(collection,item) def deserialize(self): """ Load the object hierachy from disk, using the filenames referenced in each object. 
""" try: serializer.deserialize(self._settings) except: raise CX("/etc/cobbler/settings is not a valid YAML file") serializer.deserialize(self._distros) serializer.deserialize(self._repos) serializer.deserialize(self._profiles) serializer.deserialize(self._images) serializer.deserialize(self._systems) serializer.deserialize(self._networks) return True def deserialize_raw(self,collection_type): """ Get object data from disk, not objects. """ return serializer.deserialize_raw(collection_type) def deserialize_item_raw(self,collection_type,obj_name): """ Get a raw single object. """ return serializer.deserialize_item_raw(collection_type,obj_name)
gpl-2.0
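Config's `self.__dict__ = Config.__shared_state` assignment above is the Borg (shared-state) idiom; here is a standalone sketch of just that mechanism, separate from Cobbler itself:

class Borg(object):
    __shared_state = {}
    def __init__(self):
        # Every instance aliases the same attribute dict, so collections
        # loaded through one instance are visible through all others.
        self.__dict__ = self.__shared_state

first = Borg()
first.loaded = True
second = Borg()
assert second.loaded                       # new instance, same state
assert first.__dict__ is second.__dict__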
Javantea/satispy
satispy/cnf.py
1
5607
cnfClass = None class Variable(object): def __init__(self, name, inverted=False): self.name = name self.inverted = inverted def __neg__(self): v = Variable(self.name) v.inverted = not self.inverted return v def __and__(self, other): c = cnfClass.create_from(self) return c & other def __or__(self, other): c = cnfClass.create_from(self) return c | other def __xor__(self, other): c = cnfClass.create_from(self) return c ^ other def __rshift__(self, other): # implies c = cnfClass.create_from(self) return -c | other def __str__(self): return ("-" if self.inverted else "") + str(self.name) def __eq__(self, other): return self.name == other.name and self.inverted == other.inverted def __hash__(self): return hash(self.name) ^ hash(self.inverted) def __cmp__(self, other): if self == other: return 0 if (self.name, self.inverted) < (other.name, other.inverted): return -1 else: return 1 class NaiveCnf(object): def __init__(self): self.dis = [] @classmethod def create_from(cls, x): if isinstance(x, Variable): cnf = NaiveCnf() cnf.dis = [frozenset([x])] return cnf elif isinstance(x, cls): return x else: raise Exception("Could not create a Cnf object from %s" % str(type(x))) def __and__(self, other): other = NaiveCnf.create_from(other) result = NaiveCnf() result.dis = self.dis + other.dis return result def __or__(self, other): other = NaiveCnf.create_from(other) if len(self.dis) > 0 and len(other.dis) > 0: new_dis = [] for d1, d2 in [(d1,d2) for d1 in self.dis for d2 in other.dis]: d3 = d1 | d2 new_dis.append(d3) elif len(self.dis) == 0: new_dis = other.dis else: new_dis = self.dis c = NaiveCnf() c.dis = new_dis return c def __xor__(self, other): return (self | other) & (-self | -other) def __neg__(self): cnfs = [] for d in self.dis: c = NaiveCnf() for v in d: c.dis.append(frozenset([-v])) cnfs.append(c) ret = NaiveCnf() for cnf in cnfs: ret |= cnf return ret def __rshift__(self, other): # implies return -self | other def __str__(self): ret = [] for d in self.dis: ret.append(" | ".join(map(str,d))) return "(" + ") & (".join(ret) + ")" def __eq__(self, other): return self.dis == other.dis def __hash__(self): return hash(self.dis) def reduceCnf(cnf): """ I just found a remarkably large bug in my SAT solver and found an interesting solution. Remove all b | -b (-b | b) & (b | -a) & (-b | a) & (a | -a) becomes (b | -a) & (-b | a) Remove all (-e) & (-e) (-e | a) & (-e | a) & (-e | a) & (-e | a) becomes (-e | a) (-b | b | c) becomes nothing, not (c) """ output = Cnf() for x in cnf.dis: dont_add = False for y in x: for z in x: if z == -y: dont_add = True break if dont_add: break if dont_add: continue # TODO: Is this necessary anymore? Probably not. Do statistical analysis. 
if x not in output.dis: output.dis.append(x) return output #end def reduceCnf(cnf) class Cnf(object): def __init__(self): self.dis = [] @classmethod def create_from(cls, x): if isinstance(x, Variable): cnf = Cnf() cnf.dis = [frozenset([x])] return cnf elif isinstance(x, cls): return x else: raise Exception("Could not create a Cnf object from %s" % str(type(x))) def __and__(self, other): other = Cnf.create_from(other) result = Cnf() result.dis = self.dis + other.dis return result def __or__(self, other): other = Cnf.create_from(other) if len(self.dis) > 0 and len(other.dis) > 0: new_dis = [] for d1, d2 in [(d1,d2) for d1 in self.dis for d2 in other.dis]: d3 = d1 | d2 if d3 not in new_dis: new_dis.append(d3) elif len(self.dis) == 0: new_dis = other.dis else: new_dis = self.dis c = Cnf() c.dis = new_dis return reduceCnf(c) def __xor__(self, other): return reduceCnf((self | other) & (-self | -other)) def __neg__(self): cnfs = [] for d in self.dis: c = Cnf() for v in d: c.dis.append(frozenset([-v])) x = reduceCnf(c) if x not in cnfs: cnfs.append(x) ret = Cnf() for cnf in cnfs: ret |= cnf return ret def __rshift__(self, other): # implies return -self | other def __str__(self): ret = [] for d in self.dis: ret.append(" | ".join(map(str,d))) return "(" + ") & (".join(ret) + ")" def __eq__(self, other): return self.dis == other.dis def __hash__(self): return hash(self.dis) # Change this to NaiveCnf if you want. cnfClass = Cnf
bsd-3-clause
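A short usage sketch for the operator overloads above (my example, in the style satispy documents elsewhere); clause ordering in the printed CNF may vary because clauses are stored as frozensets:

v1 = Variable('v1')
v2 = Variable('v2')
exp = (v1 | -v2) & (v2 >> v1)   # the implication expands to (-v2 | v1)
print(exp)                      # e.g. (v1 | -v2) & (-v2 | v1)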
RouxRC/weboob
modules/happn/module.py
4
12367
# -*- coding: utf-8 -*- # Copyright(C) 2014 Roger Philibert # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. import datetime from dateutil.parser import parse as parse_date from dateutil.relativedelta import relativedelta from dateutil.tz import tzlocal from random import randint from weboob.capabilities.base import NotAvailable from weboob.capabilities.messages import CapMessages, CapMessagesPost, Thread, Message from weboob.capabilities.dating import CapDating, Optimization from weboob.capabilities.contact import CapContact, Contact, ProfileNode from weboob.exceptions import BrowserHTTPError from weboob.tools.backend import Module, BackendConfig from weboob.tools.date import local2utc from weboob.tools.value import Value, ValueBackendPassword from weboob.tools.ordereddict import OrderedDict from weboob.tools.log import getLogger from .browser import HappnBrowser, FacebookBrowser __all__ = ['HappnModule'] class ProfilesWalker(Optimization): def __init__(self, sched, storage, browser): super(ProfilesWalker, self).__init__() self._sched = sched self._storage = storage self._browser = browser self._logger = getLogger('walker', browser.logger) self._last_position_update = None self._view_cron = None def start(self): self._view_cron = self._sched.schedule(1, self.view_profile) return True def stop(self): self._sched.cancel(self._view_cron) self._view_cron = None return True def set_config(self, params): pass def is_running(self): return self._view_cron is not None INTERVALS = [(48896403, 2303976), (48820992, 2414698)] def view_profile(self): try: n = 0 for user in self._browser.find_users(): if user['notifier']['my_relation'] > 0: continue self._browser.accept(user['notifier']['id']) self._logger.info('Liked %s %s (%s at %s): https://www.facebook.com/profile.php?id=%s&fref=ufi&pnref=story', user['notifier']['first_name'], user['notifier']['last_name'], user['notifier']['job'], user['notifier']['workplace'], user['notifier']['fb_id']) n += 1 if n > 10: break if n == 0 and (self._last_position_update is None or self._last_position_update + datetime.timedelta(minutes=20) < datetime.datetime.now()): self._logger.info('No more new profiles, updating position...') lat = randint(self.INTERVALS[1][0], self.INTERVALS[0][0])/1000000.0 lng = randint(self.INTERVALS[0][1], self.INTERVALS[1][1])/1000000.0 try: self._browser.set_position(lat, lng) except BrowserHTTPError: self._logger.warning('Unable to update position for now, it will be retried later.') self._logger.warning('NB: don\'t be afraid, happn only allows to update position every 20 minutes.') else: self._logger.info('You are now here: https://www.google.com/maps/place//@%s,%s,17z', lat, lng) self._last_position_update = datetime.datetime.now() for thread in self._browser.get_threads(): other_name = '' for user in thread['participants']: if user['user']['id'] != self._browser.my_id: other_name = user['user']['display_name'] if 
len(thread['messages']) == 0 and parse_date(thread['creation_date']) < (datetime.datetime.now(tzlocal()) - relativedelta(hours=1)): self._browser.post_message(thread['id'], u'Coucou %s :)' % other_name) self._logger.info(u'Welcome message sent to %s' % other_name) finally: if self._view_cron is not None: self._view_cron = self._sched.schedule(60, self.view_profile) class HappnContact(Contact): def set_profile(self, *args): section = self.profile for arg in args[:-2]: try: s = section[arg] except KeyError: s = section[arg] = ProfileNode(arg, arg.capitalize().replace('_', ' '), OrderedDict(), flags=ProfileNode.SECTION) section = s.value key = args[-2] value = args[-1] section[key] = ProfileNode(key, key.capitalize().replace('_', ' '), value) def __init__(self, info): status = Contact.STATUS_OFFLINE last_seen = parse_date(info['modification_date']) if last_seen >= datetime.datetime.now(tzlocal()) - datetime.timedelta(minutes=30): status = Contact.STATUS_ONLINE super(HappnContact, self).__init__(info['id'], info['display_name'], status) self.summary = info['about'] for photo in info['profiles']: self.set_photo(photo['id'], url=photo['url']) self.status_msg = u'Last seen at %s' % last_seen.strftime('%Y-%m-%d %H:%M:%S') self.url = NotAvailable self.profile = OrderedDict() self.set_profile('info', 'id', info['id']) self.set_profile('info', 'full_name', ' '.join([info['first_name'] or '', info['last_name'] or '']).strip()) self.set_profile('info', 'login', info['login']) if info['fb_id'] is not None: self.set_profile('info', 'facebook', 'https://www.facebook.com/profile.php?id=%s&fref=ufi&pnref=story' % info['fb_id']) if info['twitter_id'] is not None: self.set_profile('info', 'twitter', info['twitter_id']) self.set_profile('stats', 'accepted', info['is_accepted']) self.set_profile('stats', 'charmed', info['is_charmed']) self.set_profile('stats', 'unread_conversations', info['unread_conversations']) self.set_profile('stats', 'credits', info['credits']) if info['last_meet_position'] is not None: self.set_profile('geoloc', 'last_meet', 'https://www.google.com/maps/place//@%s,%s,17z' % (info['last_meet_position']['lat'], info['last_meet_position']['lon'])) if info['distance'] is not None: self.set_profile('geoloc', 'distance', '%.2f km' % (info['distance']/1000.0)) self.set_profile('details', 'gender', info['gender']) self.set_profile('details', 'age', '%s yo' % info['age']) self.set_profile('details', 'birthday', info['birth_date']) self.set_profile('details', 'job', info['job']) self.set_profile('details', 'company', info['workplace']) self.set_profile('details', 'school', info['school']) if info['matching_preferences'] is not None: self.set_profile('settings', 'age_min', '%s yo' % info['matching_preferences']['age_min']) self.set_profile('settings', 'age_max', '%s yo' % info['matching_preferences']['age_max']) self.set_profile('settings', 'distance', '%s m' % info['matching_preferences']['distance']) self.set_profile('settings', 'female', info['matching_preferences']['female']) self.set_profile('settings', 'male', info['matching_preferences']['male']) class HappnModule(Module, CapMessages, CapMessagesPost, CapDating, CapContact): NAME = 'happn' DESCRIPTION = u'Happn dating mobile application' MAINTAINER = u'Roger Philibert' EMAIL = '[email protected]' LICENSE = 'AGPLv3+' VERSION = '1.1' CONFIG = BackendConfig(Value('username', label='Facebook email'), ValueBackendPassword('password', label='Facebook password')) BROWSER = HappnBrowser STORAGE = {'contacts': {}, } def create_default_browser(self): 
facebook = FacebookBrowser() facebook.login(self.config['username'].get(), self.config['password'].get()) return HappnBrowser(facebook) # ---- CapDating methods ----------------------- def init_optimizations(self): self.add_optimization('PROFILE_WALKER', ProfilesWalker(self.weboob.scheduler, self.storage, self.browser)) # ---- CapMessages methods --------------------- def fill_thread(self, thread, fields): return self.get_thread(thread) def iter_threads(self): for thread in self.browser.get_threads(): t = Thread(thread['id']) t.flags = Thread.IS_DISCUSSION for user in thread['participants']: if user['user']['id'] != self.browser.my_id: t.title = u'Discussion with %s' % user['user']['display_name'] t.date = local2utc(parse_date(thread['modification_date'])) yield t def get_thread(self, thread): if not isinstance(thread, Thread): thread = Thread(thread) thread.flags = Thread.IS_DISCUSSION info = self.browser.get_thread(thread.id) for user in info['participants']: if user['user']['id'] == self.browser.my_id: me = HappnContact(user['user']) else: other = HappnContact(user['user']) thread.title = u'Discussion with %s' % other.name contact = self.storage.get('contacts', thread.id, default={'lastmsg': 0}) child = None for msg in info['messages']: flags = 0 if int(contact['lastmsg']) < int(msg['id']): flags = Message.IS_UNREAD if msg['sender']['id'] == me.id: sender = me receiver = other else: sender = other receiver = me msg = Message(thread=thread, id=msg['id'], title=thread.title, sender=sender.name, receivers=[receiver.name], date=local2utc(parse_date(msg['creation_date'])), content=msg['message'], children=[], parent=None, signature=sender.get_text(), flags=flags) if child: msg.children.append(child) child.parent = msg child = msg thread.root = child return thread def iter_unread_messages(self): for thread in self.iter_threads(): thread = self.get_thread(thread) for message in thread.iter_all_messages(): if message.flags & message.IS_UNREAD: yield message def set_message_read(self, message): contact = self.storage.get('contacts', message.thread.id, default={'lastmsg': 0}) if int(contact['lastmsg']) < int(message.id): contact['lastmsg'] = int(message.id) self.storage.set('contacts', message.thread.id, contact) self.storage.save() # ---- CapMessagesPost methods --------------------- def post_message(self, message): self.browser.post_message(message.thread.id, message.content) # ---- CapContact methods --------------------- def get_contact(self, contact_id): if isinstance(contact_id, Contact): contact_id = contact_id.id info = self.browser.get_contact(contact_id) return HappnContact(info) OBJECTS = {Thread: fill_thread, }
agpl-3.0
geminy/aidear
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/tools/auto_bisect/ttest_test.py
58
4977
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for ttest module.""" import unittest import ttest # This test case accesses private functions of the ttest module. # pylint: disable=W0212 class TTestTest(unittest.TestCase): """Tests for the t-test functions.""" def testWelchsFormula(self): """Tests calculation of the t value.""" # Results can be verified by directly plugging variables into Welch's # equation (e.g. using a calculator or the Python interpreter). self.assertEqual( -0.2796823595120407, ttest._TValue(0.299, 0.307, 0.05, 0.08, 150, 165)) # Note that a negative t value is obtained when the first sample has a # smaller mean than the second, otherwise a positive value is returned. self.assertEqual( 0.2796823595120407, ttest._TValue(0.307, 0.299, 0.08, 0.05, 165, 150)) def testWelchSatterthwaiteFormula(self): """Tests calculation of estimated degrees of freedom.""" # Note that since the Welch-Satterthwaite equation gives an estimate of # degrees of freedom, the result may not be an integer. self.assertEqual( 307.1987997516727, ttest._DegreesOfFreedom(0.05, 0.08, 150, 165)) def testWelchsTTest(self): """Tests the t value and degrees of freedom output of Welch's t-test.""" # The t-value can be checked with scipy.stats.ttest_ind(equal_var=False). t, df, _ = ttest.WelchsTTest([2, 3, 2, 3, 2, 3], [4, 5, 4, 5, 4, 5]) self.assertAlmostEqual(10.0, df) # The t-value produced by scipy.stats.ttest_ind is -6.32455532034. # Our function produces slightly different results. # Possibly due to differences in rounding error? self.assertAlmostEqual(-6.325, t, delta=1.0) def testTTestEqualSamples(self): """Checks that t = 0 and p = 1 when the samples are the same.""" t, _, p = ttest.WelchsTTest([1, 2, 3], [1, 2, 3]) self.assertEqual(0, t) self.assertEqual(1, p) t, _, p = ttest.WelchsTTest([1, 2], [1, 2]) self.assertEqual(0, t) self.assertEqual(1, p) def testTTestVeryDifferentSamples(self): """Checks that p is very low when the samples are clearly different.""" t, _, p = ttest.WelchsTTest( [100, 101, 100, 101, 100], [1, 2, 1, 2, 1, 2, 1, 2]) self.assertGreaterEqual(t, 250) self.assertLessEqual(p, 0.01) def testTTestVariance(self): """Verifies that higher variance -> higher p value.""" _, _, p_low_var = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5]) _, _, p_high_var = ttest.WelchsTTest([1, 4, 1, 4], [3, 6, 3, 6]) self.assertLess(p_low_var, p_high_var) def testTTestSampleSize(self): """Verifies that smaller sample size -> higher p value.""" _, _, p_larger_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5]) _, _, p_smaller_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5]) self.assertLess(p_larger_sample, p_smaller_sample) def testTTestMeanDifference(self): """Verifies that smaller difference between means -> higher p value.""" _, _, p_far_means = ttest.WelchsTTest([2, 3, 2, 3], [5, 6, 5, 6]) _, _, p_near_means = ttest.WelchsTTest([2, 3, 2, 3], [3, 4, 3, 4]) self.assertLess(p_far_means, p_near_means) class LookupTableTest(unittest.TestCase): """Tests for functionality related to lookup of p-values in a table.""" def setUp(self): self.original_TWO_TAIL = ttest.TWO_TAIL self.original_TABLE = ttest.TABLE ttest.TWO_TAIL = [1, 0.2, 0.1, 0.05, 0.02, 0.01] ttest.TABLE = { 1: [0, 6.314, 12.71, 31.82, 63.66, 318.31], 2: [0, 2.920, 4.303, 6.965, 9.925, 22.327], 3: [0, 2.353, 3.182, 4.541, 5.841, 10.215], 4: [0, 2.132, 2.776, 3.747, 4.604, 7.173], } def tearDown(self): ttest.TWO_TAIL = 
self.original_TWO_TAIL ttest.TABLE = self.original_TABLE def testLookupExactMatch(self): """Tests a lookup when there is an exact match.""" self.assertEqual(0.1, ttest._LookupPValue(3.182, 3)) self.assertEqual(0.1, ttest._LookupPValue(-3.182, 3)) def testLookupAbove(self): """Tests a lookup when the given value is above an entry in the table.""" self.assertEqual(0.2, ttest._LookupPValue(3.1, 2)) self.assertEqual(0.2, ttest._LookupPValue(-3.1, 2)) def testLookupLargeTValue(self): """Tests a lookup when the given t-value is very large.""" self.assertEqual(0.01, ttest._LookupPValue(500.0, 1)) self.assertEqual(0.01, ttest._LookupPValue(-500.0, 1)) def testLookupZeroTValue(self): """Tests a lookup when the given t-value is zero.""" self.assertEqual(1, ttest._LookupPValue(0.0, 1)) self.assertEqual(1, ttest._LookupPValue(0.0, 2)) def testLookupLargeDF(self): """Tests a lookup when the given degrees of freedom is large.""" self.assertEqual(0.02, ttest._LookupPValue(5.0, 50)) if __name__ == '__main__': unittest.main()
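# For reference, the expected values exercised above follow directly from
# Welch's formulas. This is an illustrative re-derivation, not part of the
# ttest module under test:

def _welch_by_hand(mean1, mean2, var1, var2, n1, n2):
    """Return (t, df) from Welch's t formula and the Welch-Satterthwaite
    equation. _welch_by_hand(0.299, 0.307, 0.05, 0.08, 150, 165) yields
    t ~= -0.2796823595 and df ~= 307.1987997517, matching TTestTest."""
    import math
    se1 = var1 / float(n1)  # squared standard error of sample 1
    se2 = var2 / float(n2)  # squared standard error of sample 2
    t = (mean1 - mean2) / math.sqrt(se1 + se2)
    df = (se1 + se2) ** 2 / (se1 ** 2 / (n1 - 1) + se2 ** 2 / (n2 - 1))
    return t, df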
gpl-3.0
popcornmix/xbmc
lib/gtest/test/gtest_xml_test_utils.py
1815
8876
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for gtest_xml_output""" __author__ = '[email protected] (Sean Mcafee)' import re from xml.dom import minidom, Node import gtest_test_utils GTEST_OUTPUT_FLAG = '--gtest_output' GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' class GTestXMLTestCase(gtest_test_utils.TestCase): """ Base class for tests of Google Test's XML output functionality. """ def AssertEquivalentNodes(self, expected_node, actual_node): """ Asserts that actual_node (a DOM node object) is equivalent to expected_node (another DOM node object), in that either both of them are CDATA nodes and have the same value, or both are DOM elements and actual_node meets all of the following conditions: * It has the same tag name as expected_node. * It has the same set of attributes as expected_node, each with the same value as the corresponding attribute of expected_node. Exceptions are any attribute named "time", which needs only be convertible to a floating-point number and any attribute named "type_param" which only has to be non-empty. * It has an equivalent set of child nodes (including elements and CDATA sections) as expected_node. Note that we ignore the order of the children as they are not guaranteed to be in any particular order. 
""" if expected_node.nodeType == Node.CDATA_SECTION_NODE: self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType) self.assertEquals(expected_node.nodeValue, actual_node.nodeValue) return self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType) self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType) self.assertEquals(expected_node.tagName, actual_node.tagName) expected_attributes = expected_node.attributes actual_attributes = actual_node .attributes self.assertEquals( expected_attributes.length, actual_attributes.length, 'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % ( actual_node.tagName, expected_attributes.keys(), actual_attributes.keys())) for i in range(expected_attributes.length): expected_attr = expected_attributes.item(i) actual_attr = actual_attributes.get(expected_attr.name) self.assert_( actual_attr is not None, 'expected attribute %s not found in element %s' % (expected_attr.name, actual_node.tagName)) self.assertEquals( expected_attr.value, actual_attr.value, ' values of attribute %s in element %s differ: %s vs %s' % (expected_attr.name, actual_node.tagName, expected_attr.value, actual_attr.value)) expected_children = self._GetChildren(expected_node) actual_children = self._GetChildren(actual_node) self.assertEquals( len(expected_children), len(actual_children), 'number of child elements differ in element ' + actual_node.tagName) for child_id, child in expected_children.iteritems(): self.assert_(child_id in actual_children, '<%s> is not in <%s> (in element %s)' % (child_id, actual_children, actual_node.tagName)) self.AssertEquivalentNodes(child, actual_children[child_id]) identifying_attribute = { 'testsuites': 'name', 'testsuite': 'name', 'testcase': 'name', 'failure': 'message', } def _GetChildren(self, element): """ Fetches all of the child nodes of element, a DOM Element object. Returns them as the values of a dictionary keyed by the IDs of the children. For <testsuites>, <testsuite> and <testcase> elements, the ID is the value of their "name" attribute; for <failure> elements, it is the value of the "message" attribute; CDATA sections and non-whitespace text nodes are concatenated into a single CDATA section with ID "detail". An exception is raised if any element other than the above four is encountered, if two child elements with the same identifying attributes are encountered, or if any other type of node is encountered. """ children = {} for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.assert_(child.tagName in self.identifying_attribute, 'Encountered unknown element <%s>' % child.tagName) childID = child.getAttribute(self.identifying_attribute[child.tagName]) self.assert_(childID not in children) children[childID] = child elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]: if 'detail' not in children: if (child.nodeType == Node.CDATA_SECTION_NODE or not child.nodeValue.isspace()): children['detail'] = child.ownerDocument.createCDATASection( child.nodeValue) else: children['detail'].nodeValue += child.nodeValue else: self.fail('Encountered unexpected node type %d' % child.nodeType) return children def NormalizeXml(self, element): """ Normalizes Google Test's XML output to eliminate references to transient information that may change from run to run. * The "time" attribute of <testsuites>, <testsuite> and <testcase> elements is replaced with a single asterisk, if it contains only digit characters. 
* The "timestamp" attribute of <testsuites> elements is replaced with a single asterisk, if it contains a valid ISO8601 datetime value. * The "type_param" attribute of <testcase> elements is replaced with a single asterisk (if it sn non-empty) as it is the type name returned by the compiler and is platform dependent. * The line info reported in the first line of the "message" attribute and CDATA section of <failure> elements is replaced with the file's basename and a single asterisk for the line number. * The directory names in file paths are removed. * The stack traces are removed. """ if element.tagName == 'testsuites': timestamp = element.getAttributeNode('timestamp') timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', '*', timestamp.value) if element.tagName in ('testsuites', 'testsuite', 'testcase'): time = element.getAttributeNode('time') time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value) type_param = element.getAttributeNode('type_param') if type_param and type_param.value: type_param.value = '*' elif element.tagName == 'failure': source_line_pat = r'^.*[/\\](.*:)\d+\n' # Replaces the source line information with a normalized form. message = element.getAttributeNode('message') message.value = re.sub(source_line_pat, '\\1*\n', message.value) for child in element.childNodes: if child.nodeType == Node.CDATA_SECTION_NODE: # Replaces the source line information with a normalized form. cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue) # Removes the actual stack trace. child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*', '', cdata) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.NormalizeXml(child)
gpl-2.0
mrunge/horizon
openstack_dashboard/dashboards/admin/hypervisors/tabs.py
59
1512
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import tabs from openstack_dashboard.api import nova from openstack_dashboard.dashboards.admin.hypervisors.compute \ import tabs as cmp_tabs from openstack_dashboard.dashboards.admin.hypervisors import tables class HypervisorTab(tabs.TableTab): table_classes = (tables.AdminHypervisorsTable,) name = _("Hypervisor") slug = "hypervisor" template_name = "horizon/common/_detail_table.html" def get_hypervisors_data(self): hypervisors = [] try: hypervisors = nova.hypervisor_list(self.request) except Exception: exceptions.handle(self.request, _('Unable to retrieve hypervisor information.')) return hypervisors class HypervisorHostTabs(tabs.TabGroup): slug = "hypervisor_info" tabs = (HypervisorTab, cmp_tabs.ComputeHostTab) sticky = True
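# A minimal sketch of how this tab group is typically wired into a Horizon
# view (the view and template names here are illustrative, not part of
# this module):
#
#     from horizon import tabs as horizon_tabs
#
#     class AdminIndexView(horizon_tabs.TabbedTableView):
#         tab_group_class = HypervisorHostTabs
#         template_name = 'admin/hypervisors/index.html'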
apache-2.0
garretlh/nimbus
nimbus-drivers/src/main/python/nimbusdrivers/wmr9x8.py
3
25529
# # Copyright (c) 2012 Will Page <[email protected]> # See the file LICENSE.txt for your full rights. # # Derivative of vantage.py and wmr100.py, credit to Tom Keffer """Classes and functions for interfacing with Oregon Scientific WM-918, WMR9x8 and WMR-968 weather stations See http://wx200.planetfall.com/wx200.txt or http://www.qsl.net/zl1vfo/wx200/wx200.txt or http://ed.toton.org/projects/weather/station-protocol.txt for documentation on the WM-918 / WX-200 serial protocol See http://www.netsky.org/WMR/Protocol.htm for documentation on the WMR9x8 serial protocol, and http://code.google.com/p/wmr968/source/browse/trunk/src/edu/washington/apl/weather/packet/ for sample (java) code. """ import time import operator import logging import serial from nimbusdrivers import * from math import exp DRIVER_NAME = 'WMR9x8' DRIVER_VERSION = "3.0" def loader(config_dict): return WMR9x8(**config_dict[DRIVER_NAME]) DEFAULT_PORT = '/dev/ttyS0' class WMR9x8ProtocolError(WeeWxIOError): """Used to signal a protocol error condition""" def channel_decoder(chan): if 1 <= chan <= 2: outchan = chan elif chan == 4: outchan = 3 else: raise WMR9x8ProtocolError("Bad channel number %d" % chan) return outchan # Dictionary that maps a measurement code, to a function that can decode it: # packet_type_decoder_map and packet_type_size_map are filled out using the @<type>_registerpackettype # decorator below wmr9x8_packet_type_decoder_map = {} wmr9x8_packet_type_size_map = {} wm918_packet_type_decoder_map = {} wm918_packet_type_size_map = {} def wmr9x8_registerpackettype(typecode, size): """ Function decorator that registers the function as a handler for a particular packet type. Parameters to the decorator are typecode and size (in bytes). """ def wrap(dispatcher): wmr9x8_packet_type_decoder_map[typecode] = dispatcher wmr9x8_packet_type_size_map[typecode] = size return wrap def wm918_registerpackettype(typecode, size): """ Function decorator that registers the function as a handler for a particular packet type. Parameters to the decorator are typecode and size (in bytes). """ def wrap(dispatcher): wm918_packet_type_decoder_map[typecode] = dispatcher wm918_packet_type_size_map[typecode] = size return wrap class SerialWrapper(object): """Wraps a serial connection returned from package serial""" def __init__(self, port): self.port = port # WMR9x8 specific settings self.serialconfig = { "bytesize": serial.EIGHTBITS, "parity": serial.PARITY_NONE, "stopbits": serial.STOPBITS_ONE, "timeout": None, "rtscts": 1 } def flush_input(self): self.serial_port.flushInput() def queued_bytes(self): return self.serial_port.inWaiting() def read(self, chars=1): _buffer = self.serial_port.read(chars) N = len(_buffer) if N != chars: raise WeeWxIOError("Expected to read %d chars; got %d instead" % (chars, N)) return _buffer def openPort(self): # Open up the port and store it self.serial_port = serial.Serial(self.port, **self.serialconfig) logging.debug("wmr9x8: Opened up serial port %s" % self.port) def closePort(self): self.serial_port.close() #============================================================================== # Class WMR9x8 #============================================================================== class WMR9x8(AbstractDevice): """Class that represents a connection to a Oregon Scientific WMR9x8 console. The connection to the console will be open after initialization""" def __init__(self, **stn_dict): """Initialize an object of type WMR9x8. NAMED ARGUMENTS: model: Which station model is this? [Optional. 
Default is 'WMR968'] port: The serial port of the WM918/WMR918/WMR968. [Required if serial communication] baudrate: Baudrate of the port. [Optional. Default 9600] timeout: How long to wait before giving up on a response from the serial port. [Optional. Default is 5] """ self.model = stn_dict.get('model', 'WMR968') self.last_totalRain = None # Create the specified port self.port = WMR9x8._port_factory(stn_dict) # Open it up: self.port.openPort() @property def hardware_name(self): return self.model def openPort(self): """Open up the connection to the console""" self.port.openPort() def closePort(self): """Close the connection to the console. """ self.port.closePort() def genLoopPackets(self): """Generator function that continuously returns loop packets""" buf = [] # We keep a buffer the size of the largest supported packet wmr9x8max = max(wmr9x8_packet_type_size_map.items(), key=operator.itemgetter(1))[1] wm918max = max(wm918_packet_type_size_map.items(), key=operator.itemgetter(1))[1] preBufferSize = max(wmr9x8max, wm918max) while True: buf.extend(map(ord, self.port.read(preBufferSize - len(buf)))) # WMR-9x8/968 packets are framed by 0xFF characters if buf[0] == 0xFF and buf[1] == 0xFF and buf[2] in wmr9x8_packet_type_size_map: # Look up packet type, the expected size of this packet type ptype = buf[2] psize = wmr9x8_packet_type_size_map[ptype] # Capture only the data belonging to this packet pdata = buf[0:psize] self.log_packet(pdata) # Validate the checksum sent_checksum = pdata[-1] calc_checksum = reduce(operator.add, pdata[0:-1]) & 0xFF if sent_checksum == calc_checksum: logging.debug("wmr9x8: Received WMR9x8 data packet.") payload = pdata[2:-1] _record = wmr9x8_packet_type_decoder_map[ptype](self, payload) if _record is not None: yield _record # Eliminate all packet data from the buffer buf = buf[psize:] else: logging.debug("wmr9x8: Invalid data packet (%s)." % pdata) # Drop the first byte of the buffer and start scanning again buf.pop(0) # WM-918 packets have no framing elif buf[0] in wm918_packet_type_size_map: # Look up packet type, the expected size of this packet type ptype = buf[0] psize = wm918_packet_type_size_map[ptype] # Capture only the data belonging to this packet pdata = buf[0:psize] # Validate the checksum sent_checksum = pdata[-1] calc_checksum = reduce(operator.add, pdata[0:-1]) & 0xFF if sent_checksum == calc_checksum: logging.debug("wmr9x8: Received WM-918 data packet.") payload = pdata[0:-1] #send all of packet but crc _record = wm918_packet_type_decoder_map[ptype](self, payload) if _record is not None: yield _record # Eliminate all packet data from the buffer buf = buf[psize:] else: logging.debug("wmr9x8: Invalid data packet (%s)." % pdata) # Drop the first byte of the buffer and start scanning again buf.pop(0) else: logging.debug("wmr9x8: Advancing buffer by one for the next potential packet") buf.pop(0) #========================================================================== # Oregon Scientific WMR9x8 utility functions #========================================================================== @staticmethod def _port_factory(stn_dict): """Produce a serial port object""" # Get the connection type. 
If it is not specified, assume 'serial': connection_type = stn_dict.get('type', 'serial').lower() if connection_type == "serial": port = stn_dict['port'] return SerialWrapper(port) raise UnsupportedFeature(stn_dict['type']) @staticmethod def _get_nibble_data(packet): nibbles = bytearray() for byte in packet: nibbles.extend([(byte & 0x0F), (byte & 0xF0) >> 4]) return nibbles def log_packet(self, packet): packet_str = ','.join(["x%x" % v for v in packet]) logging.debug(packet_str) @wmr9x8_registerpackettype(typecode=0x00, size=11) def _wmr9x8_wind_packet(self, packet): """Decode a wind packet. Wind speed will be in kph""" null, status, dir1, dir10, dir100, gust10th, gust1, gust10, avg10th, avg1, avg10, chillstatus, chill1, chill10 = self._get_nibble_data(packet[1:]) # @UnusedVariable battery = (status & 0x04) >> 2 # The console returns wind speeds in m/s. Our metric system requires kph, # so the result needs to be multiplied by 3.6 _record = { 'windBatteryStatus' : battery, 'windSpeed' : ((avg10th / 10.0) + avg1 + (avg10 * 10)) * 3.6, 'windDir' : dir1 + (dir10 * 10) + (dir100 * 100), 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } # Sometimes the station emits a wind gust that is less than the average wind. # Ignore it if this is the case. windGustSpeed = ((gust10th / 10.0) + gust1 + (gust10 * 10)) * 3.6 if windGustSpeed >= _record['windSpeed']: _record['windGust'] = windGustSpeed # Bit 1 of chillstatus is on if there is no wind chill data; # Bit 2 is on if it has overflowed. Check them both: if chillstatus & 0x6 == 0: chill = chill1 + (10 * chill10) if chillstatus & 0x8: chill = -chill _record['windchill'] = chill else: _record['windchill'] = None return _record @wmr9x8_registerpackettype(typecode=0x01, size=16) def _wmr9x8_rain_packet(self, packet): null, status, cur1, cur10, cur100, tot10th, tot1, tot10, tot100, tot1000, yest1, yest10, yest100, yest1000, totstartmin1, totstartmin10, totstarthr1, totstarthr10, totstartday1, totstartday10, totstartmonth1, totstartmonth10, totstartyear1, totstartyear10 = self._get_nibble_data(packet[1:]) # @UnusedVariable battery = (status & 0x04) >> 2 # station units are mm and mm/hr while the internal metric units are cm and cm/hr # It is reported that total rainfall is biased by +0.5 mm _record = { 'rainBatteryStatus' : battery, 'rainRate' : (cur1 + (cur10 * 10) + (cur100 * 100)) / 10.0, 'yesterdayRain' : (yest1 + (yest10 * 10) + (yest100 * 100) + (yest1000 * 1000)) / 10.0, 'totalRain' : (tot10th / 10.0 + tot1 + 10.0 * tot10 + 100.0 * tot100 + 1000.0 * tot1000) / 10.0, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } # Because the WMR does not offer anything like bucket tips, we must # calculate it by looking for the change in total rain. Of course, this # won't work for the very first rain packet. 
_record['rain'] = (_record['totalRain'] - self.last_totalRain) if self.last_totalRain is not None else None self.last_totalRain = _record['totalRain'] return _record @wmr9x8_registerpackettype(typecode=0x02, size=9) def _wmr9x8_thermohygro_packet(self, packet): chan, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10 = self._get_nibble_data(packet[1:]) chan = channel_decoder(chan) battery = (status & 0x04) >> 2 _record = { 'dateTime' : int(time.time() + 0.5), 'units' : METRIC, 'batteryStatusTH%d' % chan : battery } _record['extraHumid%d' % chan] = hum1 + (hum10 * 10) tempoverunder = temp100etc & 0x04 if not tempoverunder: temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100) if temp100etc & 0x08: temp = -temp _record['extraTemp%d' % chan] = temp else: _record['extraTemp%d' % chan] = None dewunder = bool(status & 0x01) # If dew point is valid, save it. if not dewunder: _record['dewpoint%d' % chan] = dew1 + (dew10 * 10) return _record @wmr9x8_registerpackettype(typecode=0x03, size=9) def _wmr9x8_mushroom_packet(self, packet): _, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10 = self._get_nibble_data(packet[1:]) battery = (status & 0x04) >> 2 _record = { 'dateTime' : int(time.time() + 0.5), 'units' : METRIC, 'outTempBatteryStatus' : battery, 'outHumidity' : hum1 + (hum10 * 10) } tempoverunder = temp100etc & 0x04 if not tempoverunder: temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100) if temp100etc & 0x08: temp = -temp _record['outTemp'] = temp else: _record['outTemp'] = None dewunder = bool(status & 0x01) # If dew point is valid, save it. if not dewunder: _record['dewpoint'] = dew1 + (dew10 * 10) return _record @wmr9x8_registerpackettype(typecode=0x04, size=7) def _wmr9x8_therm_packet(self, packet): chan, status, temp10th, temp1, temp10, temp100etc = self._get_nibble_data(packet[1:]) chan = channel_decoder(chan) battery = (status & 0x04) >> 2 _record = {'dateTime' : int(time.time() + 0.5), 'units' : METRIC, 'batteryStatusT%d' % chan : battery} temp = temp10th / 10.0 + temp1 + 10.0 * temp10 + 100.0 * (temp100etc & 0x03) if temp100etc & 0x08: temp = -temp tempoverunder = temp100etc & 0x04 _record['extraTemp%d' % chan] = temp if not tempoverunder else None return _record @wmr9x8_registerpackettype(typecode=0x05, size=13) def _wmr9x8_in_thermohygrobaro_packet(self, packet): null, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10, baro1, baro10, wstatus, null2, slpoff10th, slpoff1, slpoff10, slpoff100 = self._get_nibble_data(packet[1:]) # @UnusedVariable battery = (status & 0x04) >> 2 hum = hum1 + (hum10 * 10) tempoverunder = bool(temp100etc & 0x04) if not tempoverunder: temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100) if temp100etc & 0x08: temp = -temp else: temp = None dewunder = bool(status & 0x01) if not dewunder: dew = dew1 + (dew10 * 10) else: dew = None rawsp = ((baro10 & 0xF) << 4) | baro1 sp = rawsp + 795 pre_slpoff = (slpoff10th / 10.0) + slpoff1 + (slpoff10 * 10) + (slpoff100 * 100) slpoff = (1000 + pre_slpoff) if pre_slpoff < 400.0 else pre_slpoff _record = { 'inTempBatteryStatus' : battery, 'inHumidity' : hum, 'inTemp' : temp, 'dewpoint' : dew, 'barometer' : rawsp + slpoff, 'pressure' : sp, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } return _record @wmr9x8_registerpackettype(typecode=0x06, size=14) def _wmr9x8_in_ext_thermohygrobaro_packet(self, packet): null, status, temp10th, temp1, temp10, temp100etc, hum1, hum10, dew1, dew10, 
baro1, baro10, baro100, wstatus, null2, slpoff10th, slpoff1, slpoff10, slpoff100, slpoff1000 = self._get_nibble_data(packet[1:]) # @UnusedVariable battery = (status & 0x04) >> 2 hum = hum1 + (hum10 * 10) tempoverunder = bool(temp100etc & 0x04) if not tempoverunder: temp = (temp10th / 10.0) + temp1 + (temp10 * 10) + ((temp100etc & 0x03) * 100) if temp100etc & 0x08: temp = -temp else: temp = None dewunder = bool(status & 0x01) if not dewunder: dew = dew1 + (dew10 * 10) else: dew = None rawsp = ((baro100 & 0x01) << 8) | ((baro10 & 0xF) << 4) | baro1 sp = rawsp + 600 slpoff = (slpoff10th / 10.0) + slpoff1 + (slpoff10 * 10) + (slpoff100 * 100) + (slpoff1000 * 1000) _record = { 'inTempBatteryStatus' : battery, 'inHumidity' : hum, 'inTemp' : temp, 'inDewpoint' : dew, 'barometer' : rawsp+slpoff, 'pressure' : sp, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } return _record @wmr9x8_registerpackettype(typecode=0x0e, size=5) def _wmr9x8_time_packet(self, packet): """The (partial) time packet is not used by weewx. However, the last time is saved in case getTime() is called.""" min1, min10 = self._get_nibble_data(packet[1:]) minutes = min1 + ((min10 & 0x07) * 10) cur = time.gmtime() self.last_time = time.mktime( (cur.tm_year, cur.tm_mon, cur.tm_mday, cur.tm_hour, minutes, 0, cur.tm_wday, cur.tm_yday, cur.tm_isdst)) return None @wmr9x8_registerpackettype(typecode=0x0f, size=9) def _wmr9x8_clock_packet(self, packet): """The clock packet is not used by weewx. However, the last time is saved in case getTime() is called.""" min1, min10, hour1, hour10, day1, day10, month1, month10, year1, year10 = self._get_nibble_data(packet[1:]) year = year1 + (year10 * 10) # The station initializes itself to "1999" as the first year # Thus 99 = 1999, 00 = 2000, 01 = 2001, etc. year += 1900 if year == 99 else 2000 month = month1 + (month10 * 10) day = day1 + (day10 * 10) hour = hour1 + (hour10 * 10) minutes = min1 + ((min10 & 0x07) * 10) cur = time.gmtime() # TODO: not sure if using tm_isdst is correct here self.last_time = time.mktime( (year, month, day, hour, minutes, 0, cur.tm_wday, cur.tm_yday, cur.tm_isdst)) return None @wm918_registerpackettype(typecode=0xcf, size=27) def _wm918_wind_packet(self, packet): """Decode a wind packet. Wind speed will be in m/s""" gust10th, gust1, gust10, dir1, dir10, dir100, avg10th, avg1, avg10, avgdir1, avgdir10, avgdir100 = self._get_nibble_data(packet[1:7]) _chill10, _chill1 = self._get_nibble_data(packet[16:17]) # The console returns wind speeds in m/s. Our metric system requires kph, # so the result needs to be multiplied by 3.6 _record = { 'windSpeed' : ((avg10th / 10.0) + avg1 + (avg10*10)) * 3.6, 'windDir' : avgdir1 + (avgdir10 * 10) + (avgdir100 * 100), 'windGust' : ((gust10th / 10.0) + gust1 + (gust10 * 10)) * 3.6, 'windGustDir' : dir1 + (dir10 * 10) + (dir100 * 100), 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } # Sometimes the station emits a wind gust that is less than the average wind. # Ignore it if this is the case. 
if _record['windGust'] < _record['windSpeed']: _record['windGust'] = _record['windSpeed'] # Save the windspeed to be used for windchill and apparent temperature self.last_windSpeed = _record['windSpeed'] return _record @wm918_registerpackettype(typecode=0xbf, size=14) def _wm918_rain_packet(self, packet): cur1, cur10, cur100, _stat, yest1, yest10, yest100, yest1000, tot1, tot10, tot100, tot1000 = self._get_nibble_data(packet[1:7]) # It is reported that total rainfall is biased by +0.5 mm _record = { 'rainRate' : (cur1 + (cur10 * 10) + (cur100 * 100)) / 10.0, 'yesterdayRain' : (yest1 + (yest10 * 10) + (yest100 * 100) + (yest1000 * 1000)) / 10.0, 'totalRain' : (tot1 + (tot10 * 10) + (tot100 * 100) + (tot1000 * 1000)) / 10.0, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } # Because the WM does not offer anything like bucket tips, we must # calculate it by looking for the change in total rain. Of course, this # won't work for the very first rain packet. # the WM reports rain rate as rain_rate, rain yesterday (updated by wm at midnight) and total rain since last reset # weewx needs rain since last packet we need to divide by 10 to mimic Vantage reading _record['rain'] = (_record['totalRain'] - self.last_totalRain) if self.last_totalRain is not None else None self.last_totalRain = _record['totalRain'] return _record @wm918_registerpackettype(typecode=0x8f, size=35) def _wm918_humidity_packet(self, packet): hum1, hum10 = self._get_nibble_data(packet[8:9]) humout1, humout10 = self._get_nibble_data(packet[20:21]) hum = hum1 + (hum10 * 10) humout = humout1 + (humout10 * 10) _record = { 'outHumidity' : humout, 'inHumidity' : hum, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } self.last_outHumidity = _record['outHumidity'] # save the humidity for the heat index and apparent temp calculation return _record @wm918_registerpackettype(typecode=0x9f, size=34) def _wm918_therm_packet(self, packet): temp10th, temp1, temp10, null = self._get_nibble_data(packet[1:3]) # @UnusedVariable tempout10th, tempout1, tempout10, null = self._get_nibble_data(packet[16:18]) # @UnusedVariable temp = (temp10th / 10.0) + temp1 + ((temp10 & 0x7) * 10) temp *= -1 if (temp10 & 0x08) else 1 tempout = (tempout10th / 10.0) + tempout1 + ((tempout10 & 0x7) * 10) tempout *= -1 if (tempout10 & 0x08) else 1 _record = { 'inTemp' : temp, 'outTemp' : tempout } try: _record['apparentTemp'] = tempout + 0.33 * ((self.last_outHumidity / 100.0) * 6.105 * exp(17.27 * tempout / (237.7 + tempout))) -0.70 * (self.last_windSpeed / 3.6) - 4.00 except AttributeError: _record['apparentTemp'] = None _record['dateTime'] = int(time.time() + 0.5) _record['units'] = METRIC return _record @wm918_registerpackettype(typecode=0xaf, size=31) def _wm918_baro_dew_packet(self, packet): baro1, baro10, baro100, baro1000, slp10th, slp1, slp10, slp100, slp1000, fmt, prediction, trend, dewin1, dewin10 = self._get_nibble_data(packet[1:8]) # @UnusedVariable dewout1, dewout10 = self._get_nibble_data(packet[18:19]) # @UnusedVariable #dew = dewin1 + (dewin10 * 10) #dewout = dewout1 + (dewout10 *10) sp = baro1 + (baro10 * 10) + (baro100 * 100) + (baro1000 * 1000) slp = (slp10th / 10.0) + slp1 + (slp10 * 10) + (slp100 * 100) + (slp1000 * 1000) _record = { 'barometer' : slp, 'pressure' : sp, #'inDewpoint' : dew, #'outDewpoint' : dewout, #'dewpoint' : dewout, 'dateTime' : int(time.time() + 0.5), 'units' : METRIC } return _record # Define a main entry point for basic testing without the weewx engine. 
# Invoke this as follows from the weewx root dir: # # PYTHONPATH=bin python bin/weewx/drivers/wmr9x8.py if __name__ == '__main__': import optparse usage = """Usage: %prog --help %prog --version %prog --gen-packets [--port=PORT]""" parser = optparse.OptionParser(usage=usage) parser.add_option('--version', dest='version', action='store_true', help='Display driver version') parser.add_option('--port', dest='port', metavar='PORT', help='The port to use. Default is %s' % DEFAULT_PORT, default=DEFAULT_PORT) parser.add_option('--gen-packets', dest='gen_packets', action='store_true', help="Generate packets indefinitely") (options, args) = parser.parse_args() if options.version: print "WMR9x8 driver version %s" % DRIVER_VERSION exit(0) if options.gen_packets: logging.debug("wmr9x8: Running genLoopPackets()") stn_dict={'port': options.port} stn = WMR9x8(**stn_dict) for packet in stn.genLoopPackets(): print packet
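# Illustrative helper (not part of the driver): concrete behavior of the
# nibble decoding and additive checksum used by genLoopPackets() above.

def _demo_nibbles_and_checksum():
    """_get_nibble_data emits the low nibble of each byte first, so a
    single byte 0xAB becomes bytearray([0x0B, 0x0A]); a packet's checksum
    is the byte-sum of everything before the trailing checksum byte,
    masked to 8 bits."""
    nibbles = WMR9x8._get_nibble_data([0xAB])        # bytearray([11, 10])
    packet = [0xFF, 0xFF, 0x0E, 0x05, 0x11]          # made-up frame
    calc = reduce(operator.add, packet[:-1]) & 0xFF  # (0xFF+0xFF+0x0E+0x05) & 0xFF == 0x11
    return nibbles, calc == packet[-1]               # True for this frame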
gpl-3.0
lmyrefelt/CouchPotatoServer
libs/requests/packages/charade/sbcsgroupprober.py
2936
3291
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetgroupprober import CharSetGroupProber from .sbcharsetprober import SingleByteCharSetProber from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model) from .langgreekmodel import Latin7GreekModel, Win1253GreekModel from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel from .langthaimodel import TIS620ThaiModel from .langhebrewmodel import Win1255HebrewModel from .hebrewprober import HebrewProber class SBCSGroupProber(CharSetGroupProber): def __init__(self): CharSetGroupProber.__init__(self) self._mProbers = [ SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), SingleByteCharSetProber(Latin5CyrillicModel), SingleByteCharSetProber(MacCyrillicModel), SingleByteCharSetProber(Ibm866Model), SingleByteCharSetProber(Ibm855Model), SingleByteCharSetProber(Latin7GreekModel), SingleByteCharSetProber(Win1253GreekModel), SingleByteCharSetProber(Latin5BulgarianModel), SingleByteCharSetProber(Win1251BulgarianModel), SingleByteCharSetProber(Latin2HungarianModel), SingleByteCharSetProber(Win1250HungarianModel), SingleByteCharSetProber(TIS620ThaiModel), ] hebrewProber = HebrewProber() logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, False, hebrewProber) visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True, hebrewProber) hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber) self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber]) self.reset()
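# A minimal usage sketch (method names follow the CharSetGroupProber base
# class from this package's chardet/charade lineage; verify them against
# the local base classes before relying on this):
#
#     prober = SBCSGroupProber()
#     prober.feed(raw_bytes)
#     print prober.get_charset_name(), prober.get_confidence()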
gpl-3.0
yitian134/chromium
media/tools/layout_tests/layouttest_analyzer_runner.py
27
8716
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Main function to run the layout test analyzer. The purpose of this script is to run the layout test analyzer for various teams based on the run configuration file in CSV format. The CSV file is based on https://sites.google.com/a/chromium.org/dev/developers/testing/ webkit-layout-tests/layout-test-stats-1. """ import csv import optparse import os import shutil from subprocess import Popen import sys DEFAULT_RUNNER_CONFIG_FILE = os.path.join('runner_config', 'runner_config.csv') # Predefined result/graph directory. DEFAULT_RESULT_DIR = 'result' DEFAULT_GRAPH_DIR = 'graph' DEFAULT_ANNO_DIR = 'anno' def ParseOption(): """Parse command-line options using OptionParser. Returns: an object containing all command-line option information. """ option_parser = optparse.OptionParser() option_parser.add_option('-c', '--runner-config-file-location', dest='runner_config_file_location', help=('Location of the bug annotation file; ' 'file is expected to be in CSV format ' '(default to %default)'), default=DEFAULT_RUNNER_CONFIG_FILE) option_parser.add_option('-x', '--test-group-name', dest='test_group_name', help='A name of test group.') option_parser.add_option('-d', '--result-directory-location', dest='result_directory_location', help=('Name of result directory location ' '(default to %default)'), default=DEFAULT_RESULT_DIR) option_parser.add_option('-p', '--graph-directory-location', dest='graph_directory_location', help=('Name of graph directory location ' '(default to %default)'), default=DEFAULT_GRAPH_DIR) option_parser.add_option('-a', '--anno-directory-location', dest='annotation_directory_location', help=('Name of annotation directory location; ' 'each annotation file should be the same ' 'as test group name with replacement of "/"' 'with "_" (default to %default)'), default=DEFAULT_ANNO_DIR) option_parser.add_option('-b', '--email-appended-text-file-location', dest='email_appended_text_file_location', help=('File location of the email appended text. ' 'The text is appended in the status email. ' '(default to %default and no text is ' 'appended in that case.)'), default=None) option_parser.add_option('-e', '--email-only-change-mode', dest='email_only_change_mode', help=('With this mode, email is sent out ' 'only when there is a change in the ' 'analyzer result compared to the previous ' 'result (off by default)'), action='store_true', default=False) option_parser.add_option('-z', '--issue-detail-mode', dest='issue_detail_mode', help=('With this mode, email includes issue details' ' including links to the flakiness dashboard' ' (off by default)'), action='store_true', default=False) return option_parser.parse_args()[0] def GenerateDashboardHTMLFile(file_name, test_group_list): """Generate dashboard HTML file. Currently, it is simple table that shows all the analyzer results. Args: file_name: the file name of the dashboard. test_group_list: a list of test group names such as 'media' or 'composite'. 
""" file_object = open(file_name, 'wb') legend_txt = """ <style type="text/css"> th { width: 30px; overflow: hidden; } tr.d0 td { background-color: #CC9999; color: black; text-align: right; width: 30px; overflow: hidden; } tr.d1 td { background-color: #9999CC; color: black; text-align: right; width: 30px; overflow: hidden; } </style> <h2>Chromium Layout Test Analyzer Result</h2> Legend: <ul> <li>#Tests: the number of tests for the given test group <li>#Skipped Tests: the number of tests that are skipped in the <a href='http://svn.webkit.org/repository/webkit/trunk/LayoutTests/platform/\ chromium/test_expectations.txt'>test expectaion file</a> (e.g., BUGWK60877 SKIP : loader/navigation-while-deferring-loads.html = FAIL) <li>#Non-Skipped Failing Tests: the number of tests that appeared in the test expectation file and were not skipped. <li>Failing rate: #NonSkippedFailing / (#Tests - #Skipped) <li>Passing rate: 100 - (Failing rate) </ul> """ file_object.write(legend_txt) file_object.write('<table border="1">') file_object.write('<tr><th>Base Directory</th>') file_object.write('<th>Trend Graph</th>') file_object.write('<th>#Tests</th>') file_object.write('<th>#Skipped Tests</th>') file_object.write('<th>#Non-Skipped Failing Tests</th>') file_object.write('<th>Failing Rate</th>') file_object.write('<th>Passing Rate</th>') file_object.write('<th>Last Revision Number</th>') file_object.write('<th>Last Revision Date</th>') file_object.write('<th>Owner Email</th>') file_object.write('<th>Bug Information</th></tr>\n') test_group_list.sort() for i, test_group in enumerate(test_group_list): file_object.write('<tr class="d' + str(i % 2) + '">\n') file_object.write('<td>' + test_group + '</td>\n') file_object.write('</tr>\n') file_object.write('</table>') file_object.close() def main(): """A main function for the analyzer runner.""" options = ParseOption() run_config_map = {} try: file_object = open(options.runner_config_file_location) except IOError: print 'cannot open runner configuration file %s. Exiting.' % ( options.runner_config_file_location) sys.exit() data = csv.reader(file_object) # Skip the first row since it is a comment/header line. data.next() for row in data: run_config_map[row[0]] = (row[1], row[2]) file_object.close() if options.test_group_name: test_group_list = [options.test_group_name] else: test_group_list = run_config_map.keys() dashboard_file_location = os.path.join(options.graph_directory_location, 'index.html') if not os.path.exists(dashboard_file_location): GenerateDashboardHTMLFile(dashboard_file_location, test_group_list) for test_group in test_group_list: # Prepare the result if it does not exist. # The directory name should be changed to avoid collision # with the file separator. test_group_name_for_data = test_group.replace('/', '_') result_dir = os.path.join(options.result_directory_location, test_group_name_for_data) if not os.path.exists(result_dir): os.mkdir(result_dir) graph_file = os.path.join(options.graph_directory_location, test_group_name_for_data + '.html') if not os.path.exists(graph_file): # Copy the template file. 
shutil.copy(os.path.join('graph', 'graph.html'), graph_file) os.chmod(graph_file, 0744) anno_file = os.path.join(options.annotation_directory_location, test_group_name_for_data + '.csv') cmd = ('python layouttest_analyzer.py -x %s -d %s -t %s' ' -q %s -a %s ') % ( test_group, result_dir, graph_file, dashboard_file_location, anno_file) if run_config_map[test_group][0]: cmd += '-n ' + run_config_map[test_group][0] + ' ' if run_config_map[test_group][1]: cmd += '-r ' + run_config_map[test_group][1] + ' ' if options.email_appended_text_file_location: cmd += ' -b ' + options.email_appended_text_file_location if options.email_only_change_mode: cmd += ' -c ' if options.issue_detail_mode: cmd += ' -z ' print 'Running ' + cmd proc = Popen(cmd, shell=True) proc.communicate() if '__main__' == __name__: main()
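# For reference, the runner configuration CSV consumed by main() has one
# comment/header row followed by rows of the form (values illustrative):
#
#     test_group_name,option1,option2
#     media,VALUE_FOR_N,VALUE_FOR_R
#
# row[0] names the test group; row[1] and row[2], when non-empty, are
# passed straight through to layouttest_analyzer.py as its -n and -r
# arguments.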
bsd-3-clause
adamlwgriffiths/Pyglet
doc/ext/autosummary/__init__.py
5
17735
# -*- coding: utf-8 -*- """ sphinx.ext.autosummary ~~~~~~~~~~~~~~~~~~~~~~ Sphinx extension that adds an autosummary:: directive, which can be used to generate function/method/attribute/etc. summary lists, similar to those output eg. by Epydoc and other API doc generation tools. An :autolink: role is also provided. autosummary directive --------------------- The autosummary directive has the form:: .. autosummary:: :nosignatures: :toctree: generated/ module.function_1 module.function_2 ... and it generates an output table (containing signatures, optionally) ======================== ============================================= module.function_1(args) Summary line from the docstring of function_1 module.function_2(args) Summary line from the docstring ... ======================== ============================================= If the :toctree: option is specified, files matching the function names are inserted to the toctree with the given prefix: generated/module.function_1 generated/module.function_2 ... Note: The file names contain the module:: or currentmodule:: prefixes. .. seealso:: autosummary_generate.py autolink role ------------- The autolink role functions as ``:obj:`` when the name referred can be resolved to a Python object, and otherwise it becomes simple emphasis. This can be used as the default role to make links 'smart'. :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re import sys import inspect import posixpath from docutils.parsers.rst import directives from docutils.statemachine import ViewList from docutils import nodes from sphinx import addnodes from sphinx.util.compat import Directive # -- autosummary_toc node ------------------------------------------------------ class autosummary_toc(nodes.comment): pass def process_autosummary_toc(app, doctree): """Insert items described in autosummary:: to the TOC tree, but do not generate the toctree:: list. """ env = app.builder.env crawled = {} def crawl_toc(node, depth=1): crawled[node] = True for j, subnode in enumerate(node): try: if (isinstance(subnode, autosummary_toc) and isinstance(subnode[0], addnodes.toctree)): env.note_toctree(env.docname, subnode[0]) continue except IndexError: continue if not isinstance(subnode, nodes.section): continue if subnode not in crawled: crawl_toc(subnode, depth+1) crawl_toc(doctree) def autosummary_toc_visit_html(self, node): """Hide autosummary toctree list in HTML output.""" raise nodes.SkipNode def autosummary_noop(self, node): pass # -- autosummary_table node ---------------------------------------------------- class autosummary_table(nodes.comment): pass def autosummary_table_visit_html(self, node): """Make the first column of the table non-breaking.""" try: tbody = node[0][0][-1] for row in tbody: col1_entry = row[0] par = col1_entry[0] for j, subnode in enumerate(list(par)): if isinstance(subnode, nodes.Text): new_text = unicode(subnode.astext()) new_text = new_text.replace(u" ", u"\u00a0") par[j] = nodes.Text(new_text) except IndexError: pass # -- autodoc integration ------------------------------------------------------- class FakeDirective: env = {} genopt = {} def get_documenter(obj, parent): """Get an autodoc.Documenter class suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is an another Python object (e.g. a module or a class) to which *obj* belongs to. 
""" from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \ ModuleDocumenter if inspect.ismodule(obj): # ModuleDocumenter.can_document_member always returns False return ModuleDocumenter # Construct a fake documenter for *parent* if parent is not None: parent_doc_cls = get_documenter(parent, None) else: parent_doc_cls = ModuleDocumenter if hasattr(parent, '__name__'): parent_doc = parent_doc_cls(FakeDirective(), parent.__name__) else: parent_doc = parent_doc_cls(FakeDirective(), "") # Get the corrent documenter class for *obj* classes = [cls for cls in AutoDirective._registry.values() if cls.can_document_member(obj, '', False, parent_doc)] if classes: classes.sort(key=lambda cls: cls.priority) return classes[-1] else: return DataDocumenter # -- .. autosummary:: ---------------------------------------------------------- class Autosummary(Directive): """ Pretty table containing short signatures and summaries of functions etc. autosummary can also optionally generate a hidden toctree:: node. """ required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False has_content = True option_spec = { 'toctree': directives.unchanged, 'nosignatures': directives.flag, 'template': directives.unchanged, } def warn(self, msg): self.warnings.append(self.state.document.reporter.warning( msg, line=self.lineno)) def run(self): self.env = env = self.state.document.settings.env self.genopt = {} self.warnings = [] names = [x.strip().split()[0] for x in self.content if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])] items = self.get_items(names) nodes = self.get_table(items) if 'toctree' in self.options: suffix = env.config.source_suffix dirname = posixpath.dirname(env.docname) tree_prefix = self.options['toctree'].strip() docnames = [] for name, sig, summary, real_name in items: docname = posixpath.join(tree_prefix, real_name) if docname.endswith(suffix): docname = docname[:-len(suffix)] docname = posixpath.normpath(posixpath.join(dirname, docname)) if docname not in env.found_docs: self.warn('toctree references unknown document %r' % docname) docnames.append(docname) tocnode = addnodes.toctree() tocnode['includefiles'] = docnames tocnode['entries'] = [(None, docname) for docname in docnames] tocnode['maxdepth'] = -1 tocnode['glob'] = None tocnode = autosummary_toc('', '', tocnode) nodes.append(tocnode) return self.warnings + nodes def get_items(self, names): """Try to import the given names, and return a list of ``[(name, signature, summary_string, real_name), ...]``. """ env = self.state.document.settings.env prefixes = get_import_prefixes_from_env(env) items = [] max_item_chars = 50 for name in names: display_name = name if name.startswith('~'): name = name[1:] display_name = name.split('.')[-1] try: real_name, obj, parent = import_by_name(name, prefixes=prefixes) except ImportError: self.warn('failed to import %s' % name) items.append((name, '', '', name)) continue # NB. 
using real_name here is important, since Documenters # handle module prefixes slightly differently documenter = get_documenter(obj, parent)(self, real_name) if not documenter.parse_name(): self.warn('failed to parse name %s' % real_name) items.append((display_name, '', '', real_name)) continue if not documenter.import_object(): self.warn('failed to import object %s' % real_name) items.append((display_name, '', '', real_name)) continue # -- Grab the signature sig = documenter.format_signature() if not sig: sig = '' else: max_chars = max(10, max_item_chars - len(display_name)) sig = mangle_signature(sig, max_chars=max_chars) sig = sig.replace('*', r'\*') # -- Grab the summary doc = list(documenter.process_doc(documenter.get_doc())) while doc and not doc[0].strip(): doc.pop(0) m = re.search(r"^([A-Z][^A-Z]*?\.\s)", " ".join(doc).strip()) if m: summary = m.group(1).strip() elif doc: summary = doc[0].strip() else: summary = '' items.append((display_name, sig, summary, real_name)) return items def get_table(self, items): """Generate a proper list of table nodes for autosummary:: directive. *items* is a list produced by :meth:`get_items`. """ table_spec = addnodes.tabular_col_spec() table_spec['spec'] = 'll' table = autosummary_table('') real_table = nodes.table('', classes=['longtable']) table.append(real_table) group = nodes.tgroup('', cols=2) real_table.append(group) group.append(nodes.colspec('', colwidth=10)) group.append(nodes.colspec('', colwidth=90)) body = nodes.tbody('') group.append(body) def append_row(*column_texts): row = nodes.row('') for text in column_texts: node = nodes.paragraph('') vl = ViewList() vl.append(text, '<autosummary>') self.state.nested_parse(vl, 0, node) try: if isinstance(node[0], nodes.paragraph): node = node[0] except IndexError: pass row.append(nodes.entry('', node)) body.append(row) for name, sig, summary, real_name in items: qualifier = 'obj' if 'nosignatures' not in self.options: col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, sig) else: col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name) col2 = summary append_row(col1, col2) return [table_spec, table] def mangle_signature(sig, max_chars=30): """Reformat a function signature to a more compact form.""" s = re.sub(r"^\((.*)\)$", r"\1", sig).strip() # Strip strings (which can contain things that confuse the code below) s = re.sub(r"\\\\", "", s) s = re.sub(r"\\'", "", s) s = re.sub(r"'[^']*'", "", s) # Parse the signature to arguments + options args = [] opts = [] opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=") while s: m = opt_re.search(s) if not m: # The rest are arguments args = s.split(', ') break opts.insert(0, m.group(2)) s = m.group(1)[:-2] # Produce a more compact signature sig = limited_join(", ", args, max_chars=max_chars-2) if opts: if not sig: sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4) elif len(sig) < max_chars - 4 - 2 - 3: sig += "[, %s]" % limited_join(", ", opts, max_chars=max_chars-len(sig)-4-2) return u"(%s)" % sig def limited_join(sep, items, max_chars=30, overflow_marker="..."): """Join a number of strings to one, limiting the length to *max_chars*. If the string overflows this limit, replace the last fitting item by *overflow_marker*. 
Returns: joined_string """ full_str = sep.join(items) if len(full_str) < max_chars: return full_str n_chars = 0 n_items = 0 for j, item in enumerate(items): n_chars += len(item) + len(sep) if n_chars < max_chars - len(overflow_marker): n_items += 1 else: break return sep.join(list(items[:n_items]) + [overflow_marker]) # -- Importing items ----------------------------------------------------------- def get_import_prefixes_from_env(env): """ Obtain current Python import prefixes (for `import_by_name`) from ``document.env`` """ prefixes = [None] currmodule = env.temp_data.get('py:module') if currmodule: prefixes.insert(0, currmodule) currclass = env.temp_data.get('py:class') if currclass: if currmodule: prefixes.insert(0, currmodule + "." + currclass) else: prefixes.insert(0, currclass) return prefixes def import_by_name(name, prefixes=[None]): """Import a Python object that has the given *name*, under one of the *prefixes*. The first name that succeeds is used. """ tried = [] for prefix in prefixes: try: if prefix: prefixed_name = '.'.join([prefix, name]) else: prefixed_name = name obj, parent = _import_by_name(prefixed_name) return prefixed_name, obj, parent except ImportError: tried.append(prefixed_name) raise ImportError('no module named %s' % ' or '.join(tried)) def _import_by_name(name): """Import a Python object given its full name.""" try: name_parts = name.split('.') # try first interpret `name` as MODNAME.OBJ modname = '.'.join(name_parts[:-1]) if modname: try: __import__(modname) mod = sys.modules[modname] return getattr(mod, name_parts[-1]), mod except (ImportError, IndexError, AttributeError): pass # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... last_j = 0 modname = None for j in reversed(range(1, len(name_parts)+1)): last_j = j modname = '.'.join(name_parts[:j]) try: __import__(modname) except:# ImportError: continue if modname in sys.modules: break if last_j < len(name_parts): parent = None obj = sys.modules[modname] for obj_name in name_parts[last_j:]: parent = obj obj = getattr(obj, obj_name) return obj, parent else: return sys.modules[modname], None except (ValueError, ImportError, AttributeError, KeyError), e: raise ImportError(*e.args) # -- :autolink: (smart default role) ------------------------------------------- def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]): """Smart linking role. Expands to ':obj:`text`' if `text` is an object that can be imported; otherwise expands to '*text*'. 
""" env = inliner.document.settings.env r = env.get_domain('py').role('obj')( 'obj', rawtext, etext, lineno, inliner, options, content) pnode = r[0][0] prefixes = get_import_prefixes_from_env(env) try: name, obj, parent = import_by_name(pnode['reftarget'], prefixes) except ImportError: content = pnode[0] r[0][0] = nodes.emphasis(rawtext, content[0].astext(), classes=content['classes']) return r def process_generate_options(app): genfiles = app.config.autosummary_generate ext = app.config.source_suffix if genfiles and not hasattr(genfiles, '__len__'): env = app.builder.env genfiles = [x + ext for x in env.found_docs if os.path.isfile(env.doc2path(x))] if not genfiles: return from generate import generate_autosummary_docs genfiles = [genfile + (not genfile.endswith(ext) and ext or '') for genfile in genfiles] generate_autosummary_docs(genfiles, builder=app.builder, warn=app.warn, info=app.info, suffix=ext, base_path=app.srcdir) def setup(app): # I need autodoc app.setup_extension('sphinx.ext.autodoc') app.add_node(autosummary_toc, html=(autosummary_toc_visit_html, autosummary_noop), latex=(autosummary_noop, autosummary_noop), text=(autosummary_noop, autosummary_noop), man=(autosummary_noop, autosummary_noop), texinfo=(autosummary_noop, autosummary_noop)) app.add_node(autosummary_table, html=(autosummary_table_visit_html, autosummary_noop), latex=(autosummary_noop, autosummary_noop), text=(autosummary_noop, autosummary_noop), man=(autosummary_noop, autosummary_noop), texinfo=(autosummary_noop, autosummary_noop)) app.add_directive('autosummary', Autosummary) app.add_role('autolink', autolink_role) app.connect('doctree-read', process_autosummary_toc) app.connect('builder-inited', process_generate_options) app.add_config_value('autosummary_generate', [], True)
bsd-3-clause
tmxdyf/CouchPotatoServer
libs/pyutil/cache.py
106
27000
# Copyright (c) 2002-2010 Zooko "Zooko" Wilcox-O'Hearn """ This module offers three implementations of an LRUCache, which is a dict that drops items according to a Least-Recently-Used policy if the dict exceeds a fixed maximum size. Warning: if -O optimizations are not turned on then LRUCache performs extensive self-analysis in every function call, which can take minutes and minutes for a large cache. Turn on -O, or comment out ``assert self._assert_invariants()`` """ import operator from assertutil import _assert, precondition from humanreadable import hr class LRUCache: """ An efficient least-recently-used cache. It keeps an LRU queue, and when the number of items in the cache reaches maxsize, it removes the least recently used item. "Looking" at an item, key, or value such as with "has_key()" makes that item become the most recently used item. You can also use "freshen()" to explicitly make an item become the most recently used item. Adding an item that is already in the dict *does* make it the most- recently-used item although it does not change the state of the dict itself. See also SmallLRUCache (below), which is faster in some cases. """ class ItemIterator: def __init__(self, c): self.c = c self.i = c.d[c.hs][2] def __iter__(self): return self def next(self): if self.i is self.c.ts: raise StopIteration k = self.i precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c) (v, p, n,) = self.c.d[k] self.i = n return (k, v,) class KeyIterator: def __init__(self, c): self.c = c self.i = c.d[c.hs][2] def __iter__(self): return self def next(self): if self.i is self.c.ts: raise StopIteration k = self.i precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c) (v, p, n,) = self.c.d[k] self.i = n return k class ValIterator: def __init__(self, c): self.c = c self.i = c.d[c.hs][2] def __iter__(self): return self def next(self): if self.i is self.c.ts: raise StopIteration precondition(self.c.d.has_key(self.i), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c) (v, p, n,) = self.c.d[self.i] self.i = n return v class Sentinel: def __init__(self, msg): self.msg = msg def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.msg,) def __init__(self, initialdata={}, maxsize=128): precondition(maxsize > 0) self.m = maxsize+2 # The +2 is for the head and tail nodes. self.d = {} # k: k, v: [v, prev, next,] # the dict self.hs = LRUCache.Sentinel("hs") self.ts = LRUCache.Sentinel("ts") self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes. self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
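# Note on the layout (added for clarity): self.d maps each key to a
# 3-element list [value, prev_key, next_key]. The hs/ts sentinels are
# stored in self.d like ordinary nodes, which is why an "empty" cache
# still has len(self.d) == 2 and why maxsize gets the +2 above.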
self.update(initialdata) assert self._assert_invariants() def __repr_n__(self, n=None): s = ["{",] try: iter = self.iteritems() x = iter.next() s.append(str(x[0])); s.append(": "); s.append(str(x[1])) i = 1 while (n is None) or (i < n): x = iter.next() s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1])) i += 1 except StopIteration: pass s.append("}") return ''.join(s) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),) def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),) def _assert_invariants(self): _assert(len(self.d) <= self.m, "Size is required to be <= maxsize.", len(self.d), self.m) _assert((len(self.d) > 2) == (self.d[self.hs][2] is not self.ts) == (self.d[self.ts][1] is not self.hs), "Head and tail point to something other than each other if and only if there is at least one element in the dictionary.", self.hs, self.ts, len(self.d)) foundprevsentinel = 0 foundnextsentinel = 0 for (k, (v, p, n,)) in self.d.iteritems(): _assert(v not in (self.hs, self.ts,)) _assert(p is not self.ts, "A reference to the tail sentinel may not appear in prev.", k, v, p, n) _assert(n is not self.hs, "A reference to the head sentinel may not appear in next.", k, v, p, n) _assert(p in self.d, "Each prev is required to appear as a key in the dict.", k, v, p, n) _assert(n in self.d, "Each next is required to appear as a key in the dict.", k, v, p, n) if p is self.hs: foundprevsentinel += 1 _assert(foundprevsentinel <= 2, "No more than two references to the head sentinel may appear as a prev.", k, v, p, n) if n is self.ts: foundnextsentinel += 1 _assert(foundnextsentinel <= 2, "No more than two references to the tail sentinel may appear as a next.", k, v, p, n) _assert(foundprevsentinel == 2, "A reference to the head sentinel is required to appear as a prev (plus a self-referential reference).") _assert(foundnextsentinel == 2, "A reference to the tail sentinel is required to appear as a next (plus a self-referential reference).") count = 0 for (k, v,) in self.iteritems(): _assert(k not in (self.hs, self.ts,)) count += 1 _assert(count == len(self.d)-2, count, len(self.d)) # -2 for the sentinels return True def freshen(self, k, strictkey=False): assert self._assert_invariants() if not self.d.has_key(k): if strictkey: raise KeyError, k return node = self.d[k] # relink self.d[node[1]][2] = node[2] self.d[node[2]][1] = node[1] # move to front hnode = self.d[self.hs] node[1] = self.hs node[2] = hnode[2] hnode[2] = k self.d[node[2]][1] = k assert self._assert_invariants() def iteritems(self): return LRUCache.ItemIterator(self) def itervalues(self): return LRUCache.ValIterator(self) def iterkeys(self): return self.__iter__() def __iter__(self): return LRUCache.KeyIterator(self) def __getitem__(self, key, default=None, strictkey=True): node = self.d.get(key) if not node: if strictkey: raise KeyError, key return default self.freshen(key) return node[0] def __setitem__(self, k, v=None): assert self._assert_invariants() node = self.d.get(k) if node: node[0] = v self.freshen(k) return if len(self.d) == self.m: # If this insert is going to increase the size of the cache to # bigger than maxsize.
self.pop() hnode = self.d[self.hs] n = hnode[2] self.d[k] = [v, self.hs, n,] hnode[2] = k self.d[n][1] = k assert self._assert_invariants() return v def __delitem__(self, key, default=None, strictkey=True): """ @param strictkey: True if you want a KeyError in the case that key is not there, False if you want a reference to default in the case that key is not there @param default: the object to return if key is not there; This is ignored if strictkey. @return: the value removed or default if there is no item by that key and strictkey is False """ assert self._assert_invariants() if self.d.has_key(key): node = self.d[key] # relink self.d[node[1]][2] = node[2] self.d[node[2]][1] = node[1] del self.d[key] assert self._assert_invariants() return node[0] elif strictkey: assert self._assert_invariants() raise KeyError, key else: assert self._assert_invariants() return default def has_key(self, key): assert self._assert_invariants() if self.d.has_key(key): self.freshen(key) assert self._assert_invariants() return True else: assert self._assert_invariants() return False def clear(self): assert self._assert_invariants() self.d.clear() self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes. self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes. assert self._assert_invariants() def update(self, otherdict): """ @return: self """ assert self._assert_invariants() if len(otherdict) >= (self.m-2): # -2 for the sentinel nodes # optimization self.clear() assert self._assert_invariants() i = otherdict.iteritems() try: while len(self.d) < self.m: (k, v,) = i.next() assert self._assert_invariants() self[k] = v assert self._assert_invariants() return self except StopIteration: _assert(False, "Internal error -- this should never have happened since the while loop should have terminated first.") return self for (k, v,) in otherdict.iteritems(): assert self._assert_invariants() self[k] = v assert self._assert_invariants() def pop(self): assert self._assert_invariants() if len(self.d) <= 2: # == 2 means empty: the two sentinels are always present raise KeyError, 'pop(): dictionary is empty' k = self.d[self.ts][1] self.remove(k) assert self._assert_invariants() return k def popitem(self): assert self._assert_invariants() if len(self.d) <= 2: # == 2 means empty: the two sentinels are always present raise KeyError, 'popitem(): dictionary is empty' k = self.d[self.ts][1] val = self.remove(k) assert self._assert_invariants() return (k, val,) def keys_unsorted(self): assert self._assert_invariants() t = self.d.copy() del t[self.hs] del t[self.ts] assert self._assert_invariants() return t.keys() def keys(self): res = [None] * len(self) i = 0 for k in self.iterkeys(): res[i] = k i += 1 return res def values_unsorted(self): assert self._assert_invariants() t = self.d.copy() del t[self.hs] del t[self.ts] assert self._assert_invariants() return map(operator.__getitem__, t.values(), [0]*len(t)) def values(self): res = [None] * len(self) i = 0 for v in self.itervalues(): res[i] = v i += 1 return res def items(self): res = [None] * len(self) i = 0 for it in self.iteritems(): res[i] = it i += 1 return res def __len__(self): return len(self.d) - 2 def insert(self, key, val=None): assert self._assert_invariants() result = self.__setitem__(key, val) assert self._assert_invariants() return result def setdefault(self, key, default=None): assert self._assert_invariants() if not self.has_key(key): self[key] = default assert self._assert_invariants() return self[key] def get(self, key, default=None): return
self.__getitem__(key, default, strictkey=False) def remove(self, key, default=None, strictkey=True): assert self._assert_invariants() result = self.__delitem__(key, default, strictkey) assert self._assert_invariants() return result class SmallLRUCache(dict): """ SmallLRUCache is faster than LRUCache for small sets. How small? That depends on your machine and which operations you use most often. Use performance profiling to determine whether the cache class that you are using makes any difference to the performance of your program, and if it does, then run "quick_bench()" in test/test_cache.py to see which cache implementation is faster for the size of your datasets. A simple least-recently-used cache. It keeps an LRU queue, and when the number of items in the cache reaches maxsize, it removes the least recently used item. "Looking" at an item or a key such as with "has_key()" makes that item become the most recently used item. You can also use "refresh()" to explicitly make an item become the most recently used item. Adding an item that is already in the dict *does* make it the most- recently-used item although it does not change the state of the dict itself. """ class ItemIterator: def __init__(self, c): self.c = c self.i = 0 def __iter__(self): return self def next(self): precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c) if self.i == len(self.c._lru): raise StopIteration k = self.c._lru[self.i] # the key itself, not the index into the LRU queue precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c) self.i += 1 return (k, dict.__getitem__(self.c, k),) class KeyIterator: def __init__(self, c): self.c = c self.i = 0 def __iter__(self): return self def next(self): precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c) if self.i == len(self.c._lru): raise StopIteration k = self.c._lru[self.i] # the key itself, not the index into the LRU queue precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c) self.i += 1 return k class ValueIterator: def __init__(self, c): self.c = c self.i = 0 def __iter__(self): return self def next(self): precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c) if self.i == len(self.c._lru): raise StopIteration k = self.c._lru[self.i] # the key itself, not the index into the LRU queue precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c) self.i += 1 return dict.__getitem__(self.c, k) def __init__(self, initialdata={}, maxsize=128): dict.__init__(self, initialdata) self._lru = initialdata.keys() # contains keys self._maxsize = maxsize over = len(self) - self._maxsize if over > 0: map(dict.__delitem__, [self]*over, self._lru[:over]) del self._lru[:over] assert self._assert_invariants() def _assert_invariants(self): _assert(len(self._lru) <= self._maxsize, "Size is required to be <= maxsize.") _assert(len(filter(lambda x: dict.has_key(self, x), self._lru)) == len(self._lru), "Each key in self._lru is required to be in dict.", filter(lambda x: not dict.has_key(self, x), self._lru), len(self._lru), self._lru, len(self), self) _assert(len(filter(lambda x: x in self._lru, self.keys())) == len(self), "Each key in dict is required to be in self._lru.", filter(lambda x: x not in self._lru, self.keys()), len(self._lru), self._lru, len(self), self) _assert(len(self._lru) == len(self), "internal consistency", filter(lambda x: x not in self.keys(), self._lru), len(self._lru), self._lru, len(self), self) _assert(len(self._lru) <= self._maxsize, "internal consistency", len(self._lru), self._lru, self._maxsize) return True def insert(self, key, item=None): assert self._assert_invariants() result = self.__setitem__(key, item) assert self._assert_invariants() return result def setdefault(self, key, default=None): assert self._assert_invariants() if not self.has_key(key): self[key] = default assert self._assert_invariants() return self[key] def __setitem__(self, key, item=None): assert self._assert_invariants() if dict.has_key(self, key): self._lru.remove(key) else: if len(self._lru) == self._maxsize: # If this insert is going to increase the size of the cache to bigger than maxsize: killkey = self._lru.pop(0) dict.__delitem__(self, killkey) dict.__setitem__(self, key, item) self._lru.append(key) assert self._assert_invariants() return item def remove(self, key, default=None, strictkey=True): assert self._assert_invariants() result = self.__delitem__(key, default, strictkey) assert self._assert_invariants() return result def __delitem__(self, key, default=None, strictkey=True): """ @param strictkey: True if you want a KeyError in the case that key is not there, False if you want a reference to default in the case that key is not there @param default: the object to return if key is not there; This is ignored if strictkey. @return: the object removed or default if there is no item by that key and strictkey is False """ assert self._assert_invariants() if dict.has_key(self, key): val = dict.__getitem__(self, key) dict.__delitem__(self, key) self._lru.remove(key) assert self._assert_invariants() return val elif strictkey: assert self._assert_invariants() raise KeyError, key else: assert self._assert_invariants() return default def clear(self): assert self._assert_invariants() dict.clear(self) self._lru = [] assert self._assert_invariants() def update(self, otherdict): """ @return: self """ assert self._assert_invariants() if len(otherdict) > self._maxsize: # Handling this special case here makes it possible to implement the # other more common cases faster below.
dict.clear(self) self._lru = [] if self._maxsize > (len(otherdict) - self._maxsize): dict.update(self, otherdict) while len(self) > self._maxsize: dict.popitem(self) else: for k, v, in otherdict.iteritems(): if len(self) == self._maxsize: break dict.__setitem__(self, k, v) self._lru = dict.keys(self) assert self._assert_invariants() return self for k in otherdict.iterkeys(): if dict.has_key(self, k): self._lru.remove(k) self._lru.extend(otherdict.keys()) dict.update(self, otherdict) over = len(self) - self._maxsize if over > 0: map(dict.__delitem__, [self]*over, self._lru[:over]) del self._lru[:over] assert self._assert_invariants() return self def has_key(self, key): assert self._assert_invariants() if dict.has_key(self, key): assert key in self._lru, "key: %s, self._lru: %s" % tuple(map(hr, (key, self._lru,))) self._lru.remove(key) self._lru.append(key) assert self._assert_invariants() return True else: assert self._assert_invariants() return False def refresh(self, key, strictkey=True): """ @param strictkey: raise a KeyError exception if key isn't present """ assert self._assert_invariants() if not dict.has_key(self, key): if strictkey: raise KeyError, key return self._lru.remove(key) self._lru.append(key) def popitem(self): if not self._lru: raise KeyError, 'popitem(): dictionary is empty' k = self._lru[-1] obj = self.remove(k) return (k, obj,) class LinkedListLRUCache: """ This is slower and less featureful than LRUCache. It is included here for comparison purposes. Implementation of a length-limited O(1) LRU queue. Built for and used by PyPE: http://pype.sourceforge.net original Copyright 2003 Josiah Carlson. useful methods and _assert_invariant added by Zooko for testing and benchmarking purposes """ class Node: def __init__(self, prev, me): self.prev = prev self.me = me self.next = None def __init__(self, initialdata={}, maxsize=128): self._maxsize = max(maxsize, 1) self.d = {} self.first = None self.last = None for key, value in initialdata.iteritems(): self[key] = value def clear(self): self.d = {} self.first = None self.last = None def update(self, otherdict): for (k, v,) in otherdict.iteritems(): self[k] = v def setdefault(self, key, default=None): if not self.has_key(key): self[key] = default return self[key] def _assert_invariants(self): def lliterkeys(self): cur = self.first while cur != None: cur2 = cur.next yield cur.me[0] cur = cur2 def lllen(self): # Ugh. acc = 0 for x in lliterkeys(self): acc += 1 return acc def llhaskey(self, key): # Ugh. 
for x in lliterkeys(self): if x is key: return True return False for k in lliterkeys(self): _assert(self.d.has_key(k), "Each key in the linked list is required to be in the dict.", k) for k in self.d.iterkeys(): _assert(llhaskey(self, k), "Each key in the dict is required to be in the linked list.", k) _assert(lllen(self) == len(self.d), "internal consistency", self, self.d) _assert(len(self.d) <= self._maxsize, "Size is required to be <= maxsize.") return True def __contains__(self, obj): return obj in self.d def has_key(self, key): return self.__contains__(key) def __getitem__(self, obj): a = self.d[obj].me self[a[0]] = a[1] return a[1] def get(self, key, default=None, strictkey=False): if not self.has_key(key) and strictkey: raise KeyError, key if self.has_key(key): return self.__getitem__(key) else: return default def __setitem__(self, obj, val): if obj in self.d: del self[obj] nobj = self.Node(self.last, (obj, val)) if self.first is None: self.first = nobj if self.last: self.last.next = nobj self.last = nobj self.d[obj] = nobj if len(self.d) > self._maxsize: if self.first == self.last: self.first = None self.last = None return a = self.first a.next.prev = None self.first = a.next a.next = None del self.d[a.me[0]] del a def insert(self, key, item=None): return self.__setitem__(key, item) def __delitem__(self, obj, default=None, strictkey=True): if self.d.has_key(obj): nobj = self.d[obj] if nobj.prev: nobj.prev.next = nobj.next else: self.first = nobj.next if nobj.next: nobj.next.prev = nobj.prev else: self.last = nobj.prev val = self.d[obj] del self.d[obj] return val.me[1] elif strictkey: raise KeyError, obj else: return default def remove(self, obj, default=None, strictkey=True): return self.__delitem__(obj, default=default, strictkey=strictkey) def __iter__(self): cur = self.first while cur != None: cur2 = cur.next yield cur.me[1] cur = cur2 def iteritems(self): cur = self.first while cur != None: cur2 = cur.next yield cur.me cur = cur2 def iterkeys(self): return iter(self.d) def itervalues(self): for i,j in self.iteritems(): yield j def values(self): l = [] for v in self.itervalues(): l.append(v) return l def keys(self): return self.d.keys() def __len__(self): return self.d.__len__() def popitem(self): i = self.last.me obj = self.remove(i[0]) return obj
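# Minimal usage sketch for LRUCache (added illustration; assumes Python 2,
# like the rest of this module):
#
# c = LRUCache(maxsize=3)
# c[1] = 'a'; c[2] = 'b'; c[3] = 'c'
# c.freshen(1)   # key 1 becomes the most recently used
# c[4] = 'd'     # evicts key 2, the least recently used
# assert c.keys() == [4, 1, 3]   # keys() runs from most to least recent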
gpl-3.0
lihui7115/ChromiumGStreamerBackend
components/test/data/password_manager/automated_tests/environment.py
33
11064
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The testing Environment class. It holds the WebsiteTest instances, provides them with credentials, provides clean browser environment, runs the tests, and gathers the results. """ import os import shutil import time from xml.etree import ElementTree from selenium import webdriver from selenium.webdriver.chrome.options import Options # Message strings to look for in chrome://password-manager-internals. MESSAGE_ASK = "Message: Decision: ASK the user" MESSAGE_SAVE = "Message: Decision: SAVE the password" INTERNALS_PAGE_URL = "chrome://password-manager-internals/" class Environment: """Sets up the testing Environment. """ def __init__(self, chrome_path, chromedriver_path, profile_path, passwords_path, enable_automatic_password_saving): """Creates a new testing Environment, starts Chromedriver. Args: chrome_path: The chrome binary file. chromedriver_path: The chromedriver binary file. profile_path: The chrome testing profile folder. passwords_path: The usernames and passwords file. enable_automatic_password_saving: If True, the passwords are going to be saved without showing the prompt. Raises: IOError: When the passwords file cannot be accessed. ParseError: When the passwords file cannot be parsed. Exception: An exception is raised if |profile_path| folder could not be removed. """ # Cleaning the chrome testing profile folder. if os.path.exists(profile_path): shutil.rmtree(profile_path) options = Options() if enable_automatic_password_saving: options.add_argument("enable-automatic-password-saving") # TODO(vabr): show_prompt is used in WebsiteTest for asserting that # Chrome set-up corresponds to the test type. Remove that knowledge # about Environment from the WebsiteTest. self.show_prompt = not enable_automatic_password_saving options.binary_location = chrome_path options.add_argument("user-data-dir=%s" % profile_path) # The webdriver. It's possible to choose the port the service is going to # run on. If it's left to 0, a free port will be found. self.driver = webdriver.Chrome(chromedriver_path, 0, options) # Password internals page tab/window handle. self.internals_window = self.driver.current_window_handle # An xml tree filled with logins and passwords. self.passwords_tree = ElementTree.parse(passwords_path).getroot() self.website_window = self._OpenNewTab() self.websitetests = [] # Map messages to the number of their appearance in the log. self.message_count = { MESSAGE_ASK: 0, MESSAGE_SAVE: 0 } # A list of (test_name, test_type, test_success, failure_log). self.tests_results = [] def AddWebsiteTest(self, websitetest): """Adds a WebsiteTest to the testing Environment. TODO(vabr): Currently, this is only called at most once for each Environment instance. That is because to run all tests efficiently in parallel, each test gets its own process spawned (outside of Python). That makes sense, but then we should flatten the hierarchy of calls and consider making the 1:1 relation of environment to tests more explicit. Args: websitetest: The WebsiteTest instance to be added. """ websitetest.environment = self # TODO(vabr): Make driver a property of WebsiteTest. 
websitetest.driver = self.driver if not websitetest.username: username_tag = (self.passwords_tree.find( ".//*[@name='%s']/username" % websitetest.name)) websitetest.username = username_tag.text if not websitetest.password: password_tag = (self.passwords_tree.find( ".//*[@name='%s']/password" % websitetest.name)) websitetest.password = password_tag.text self.websitetests.append(websitetest) def _ClearBrowserDataInit(self): """Opens and resets the chrome://settings/clearBrowserData dialog. It unchecks all checkboxes, and sets the time range to the "beginning of time". """ self.driver.get("chrome://settings-frame/clearBrowserData") time_range_selector = "#clear-browser-data-time-period" # TODO(vabr): Wait until time_range_selector is displayed instead. time.sleep(2) set_time_range = ( "var range = document.querySelector('{0}');".format( time_range_selector) + "range.value = 4" # 4 == the beginning of time ) self.driver.execute_script(set_time_range) all_cboxes_selector = ( "#clear-data-checkboxes [type=\"checkbox\"]") uncheck_all = ( "var checkboxes = document.querySelectorAll('{0}');".format( all_cboxes_selector ) + "for (var i = 0; i < checkboxes.length; ++i) {" " checkboxes[i].checked = false;" "}" ) self.driver.execute_script(uncheck_all) def _ClearDataForCheckbox(self, selector): """Causes the data associated with |selector| to be cleared. Opens chrome://settings/clearBrowserData, unchecks all checkboxes, then checks the one described by |selector|, then clears the corresponding browsing data for the full time range. Args: selector: describes the checkbox through which to delete the data. """ self._ClearBrowserDataInit() check_cookies_and_submit = ( "document.querySelector('{0}').checked = true;".format(selector) + "document.querySelector('#clear-browser-data-commit').click();" ) self.driver.execute_script(check_cookies_and_submit) def _EnablePasswordSaving(self): """Make sure that password manager is enabled.""" # TODO(melandory): We should check why it's off in a first place. # TODO(melandory): Investigate, maybe there is no need to enable it that # often. self.driver.get("chrome://settings-frame") script = "document.getElementById('advanced-settings-expander').click();" self.driver.execute_script(script) # TODO(vabr): Wait until element is displayed instead. time.sleep(2) script = ( "if (!document.querySelector('#password-manager-enabled').checked) {" " document.querySelector('#password-manager-enabled').click();" "}") self.driver.execute_script(script) time.sleep(2) def _OpenNewTab(self): """Open a new tab, and loads the internals page in the old tab. Returns: A handle to the new tab. """ number_old_tabs = len(self.driver.window_handles) # There is no straightforward way to open a new tab with chromedriver. # One work-around is to go to a website, insert a link that is going # to be opened in a new tab, and click on it. self.driver.get("about:blank") a = self.driver.execute_script( "var a = document.createElement('a');" "a.target = '_blank';" "a.href = 'about:blank';" "a.innerHTML = '.';" "document.body.appendChild(a);" "return a;") a.click() while number_old_tabs == len(self.driver.window_handles): time.sleep(1) # Wait until the new tab is opened. new_tab = self.driver.window_handles[-1] self.driver.get(INTERNALS_PAGE_URL) self.driver.switch_to_window(new_tab) return new_tab def _DidStringAppearUntilTimeout(self, strings, timeout): """Checks whether some of |strings| appeared in the current page. 
Waits for up to |timeout| seconds until at least one of |strings| is shown in the current page. Updates self.message_count with the current number of occurrences of the shown string. Assumes that at most one of |strings| is newly shown. Args: strings: A list of strings to look for. timeout: If any such string does not appear within the first |timeout| seconds, it is considered a no-show. Returns: True if one of |strings| is observed until |timeout|, False otherwise. """ log = self.driver.find_element_by_css_selector("#log-entries") while timeout: for string in strings: count = log.text.count(string) if count > self.message_count[string]: self.message_count[string] = count return True time.sleep(1) timeout -= 1 return False def CheckForNewString(self, strings, string_should_show_up, error): """Checks that |strings| show up on the internals page as they should. Switches to the internals page and looks for new instances of |strings| showing up there. It checks that |string_should_show_up| is true if and only if at least one string from |strings| shows up, and throws an Exception if that check fails. Args: strings: A list of strings to look for in the internals page. string_should_show_up: Whether or not at least one string from |strings| is expected to be shown. error: Error message for the exception. Raises: Exception: (See above.) """ self.driver.switch_to_window(self.internals_window) try: if (self._DidStringAppearUntilTimeout(strings, 15) != string_should_show_up): raise Exception(error) finally: self.driver.switch_to_window(self.website_window) def DeleteCookies(self): """Deletes cookies via the settings page.""" self._ClearDataForCheckbox("#delete-cookies-checkbox") def RunTestsOnSites(self, test_case_name): """Runs the specified test on the known websites. Also saves the test results in the environment. Note that test types differ in their requirements on whether the save password prompt should be displayed. Make sure that such requirements are consistent with the enable_automatic_password_saving argument passed to |self| on construction. Args: test_case_name: A test name which is a method of WebsiteTest. """ self.DeleteCookies() self._ClearDataForCheckbox("#delete-passwords-checkbox") self._EnablePasswordSaving() for websitetest in self.websitetests: successful = True error = "" try: # TODO(melandory): Implement a decorator for WebsiteTest methods # which allows marking them as test cases. And then add a check if # test_case_name is a valid test case. getattr(websitetest, test_case_name)() except Exception as e: successful = False # httplib.CannotSendRequest doesn't define a message, # so type(e).__name__ will at least log the exception name as a reason. # TODO(melandory): logging.exception(e) produces a meaningful result # for httplib.CannotSendRequest, so we can try to propagate the # information that the reason is an exception to the logging phase. error = "Exception %s %s" % (type(e).__name__, e) self.tests_results.append( (websitetest.name, test_case_name, successful, error)) def Quit(self): """Shuts down the driver.""" self.driver.quit()
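# Typical driver-script sketch (added illustration; the paths and the test
# case name "test_successful_login" are placeholders, not part of this
# module -- any WebsiteTest method name can be passed to RunTestsOnSites):
#
# env = Environment(chrome_path="/path/to/chrome",
#                   chromedriver_path="/path/to/chromedriver",
#                   profile_path="/tmp/chrome-test-profile",
#                   passwords_path="passwords.xml",
#                   enable_automatic_password_saving=False)
# env.AddWebsiteTest(my_website_test)
# env.RunTestsOnSites("test_successful_login")
# print env.tests_results
# env.Quit()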
bsd-3-clause
dklyle/trove-dashboard
trove_dashboard/content/database_clusters/panel.py
4
1044
# Copyright (c) 2014 eBay Software Foundation # Copyright 2015 HP Software, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon from openstack_dashboard.dashboards.project import dashboard class Clusters(horizon.Panel): name = _("Clusters") slug = 'database_clusters' permissions = ('openstack.services.database', 'openstack.services.object-store',) dashboard.Project.register(Clusters)
apache-2.0
classgrade/classgrade
classgrade/gradapp/tasks.py
1
8924
# coding=utf-8 import csv import logging from random import shuffle from django.core.mail import send_mail from django.core.exceptions import ObjectDoesNotExist from django.contrib.auth.models import User from xkcdpass import xkcd_password as xp from unidecode import unidecode from classgrade import settings from gradapp.models import Assignmentype, Evalassignment, Evalquestion from gradapp.models import Student, Assignment logger = logging.getLogger(__name__) def get_students(csv_file): """ :param csv_file: csv file with list of students.\ Each row contains: first_name, last_name, email :type csv_file: str :rtype: 2 lists existing_students and new_students [[username, email], ..] """ with open(csv_file) as ff: reader = csv.reader(ff, delimiter=',') existing_students = [] new_students = [] for i, row in enumerate(reader): row = [unidecode(x.strip()) for x in row[:3]] username = "_".join(row[:2]) username = username.replace(" ", "_") email = row[2] try: u = User.objects.get(username=username) Student.objects.get(user=u) existing_students.append([u.username, u.email]) except ObjectDoesNotExist: new_students.append([username, email]) return existing_students, new_students def create_assignment(assignmentype_pk, existing_students, new_students): """ For an assignmentype, create new students; and for new+existing students create their assignment row. :param assignmentype_pk: id of the assignmentype :param existing_students: list with existing students, each element is a list with student username and email. :param new_students: list with new students, each element is a list with student username and email. :type assignmentype_pk: integer :type existing_students: list of list of 2 strings :type new_students: list of list of 2 strings """ words = xp.locate_wordfile() mywords = xp.generate_wordlist(wordfile=words, min_length=4, max_length=6) assignmentype = Assignmentype.objects.get(id=assignmentype_pk) for assignment in assignmentype.assignment_set.all(): assignment.delete() for st in existing_students: student = Student.objects.get(user__username=st[0]) # Get an existing assignment or create it Assignment.objects.get_or_create(student=student, assignmentype=assignmentype) for st in new_students: password = xp.generate_xkcdpassword(mywords, numwords=4) u = User.objects.create_user(st[0], st[1], password) student = Student.objects.create(user=u) # Send email try: email_new_student(u.email, u.username, password) except Exception as e: if hasattr(e, 'traceback'): message = str(e.traceback) else: message = repr(e) logger.error('Not possible to email new student %s: %s' % (u.username, message)) # Create the assignment Assignment.objects.create(student=student, assignmentype=assignmentype) # Create associated Evalassignment log = create_evalassignment(assignmentype.title) logger.info(log) def create_evalassignment(assignmentype_title): """ Create gradapp.models.evalassignment for a assignmentype (given its title) """ try: assignmentype = Assignmentype.objects.\ filter(title=assignmentype_title).first() nb_grading = assignmentype.nb_grading nb_questions = assignmentype.nb_questions assignments = [a for a in assignmentype.assignment_set.all()] shuffle(assignments) nb_assignments = len(assignments) for i, assignment in enumerate(assignments): for igrade in range(nb_grading): to_be_evaluated = assignments[(i + 1 + igrade) % nb_assignments] if to_be_evaluated.student == assignment.student: return 'Oups... 
too few students compared to the number '\ 'of assignments each student must evaluate' e = Evalassignment.objects.create(assignment=to_be_evaluated, evaluator=assignment.student. user) for iq in range(nb_questions): Evalquestion.objects.create(evalassignment=e, question=(iq + 1)) return 'Evalassignments created for assignmentype %s' % assignmentype_title except Exception as e: return 'Oups... ' + str(e) def email_new_student(student_email, student_login, student_password): """ Send an email when creating a new student. This email contains his/her login and password, which the student should then reset """ subject = 'Peergrade Telecom' message = (u'Bonjour et bienvenu sur Peergrade Telecom.\n\n' u'Pour vous connecter à Peergrade Telecom, votre login et votre ' u'mot de passe sont:\n %s\n %s\nPour plus de sécurité, ' u'modifier votre mot de passe:%s\n\nAdresse du site:%s\n' u'Bon travail,\n' u"L'équipe enseignante." % (student_login, student_password, settings.SITE_URL + 'accounts-reset/recover/', settings.SITE_URL)) send_mail(subject, message, settings.EMAIL_HOST_USER, [student_email], fail_silently=False) def email_confirm_upload_assignment(student_email, assignmentype_title, assignment_filename, deadline_submission): """ Send an email when a student uploads a new assignment file """ subject = u'%s: Votre devoir a bien été soumis' % assignmentype_title message = (u'Bien reçu votre devoir %s.\nSi vous le souhaitez, ' u"vous pouvez resoumettre une nouvelle version jusqu'au %s.\n\n" u"Bonne journée,\nL'équipe enseignante" % (assignment_filename, deadline_submission)) send_mail(subject, message, settings.EMAIL_HOST_USER, [student_email], fail_silently=False) def compute_grades_assignmentype(assignmentype_id): """ Compute grades of an assignmentype if question coefficients have been set """ try: assignmentype = Assignmentype.objects.get(id=assignmentype_id) if assignmentype.questions_coeff: questions_coeff = assignmentype.questions_coeff for assignment in assignmentype.assignment_set.all(): for evalassignment in assignment.evalassignment_set.all(): if evalassignment.is_questions_graded: questions_grade = [evalq.grade for evalq in evalassignment. evalquestion_set.all(). order_by('question')] evalassignment.grade_assignment =\ compute_grade(questions_coeff, questions_grade) evalassignment.save() else: return 'Question coeff are not defined' except Exception as e: return 'Oups... ' + str(e) def compute_grade_evalassignment(evalassignment_id): """ Compute evalassignment.grade_assignment of evalassignment(id=evalassignment_id) if question coeff of associated assignmentype have been defined """ try: evalassignment = Evalassignment.objects.get(id=evalassignment_id) questions_coeff = evalassignment.assignment.assignmentype.\ questions_coeff if questions_coeff: questions_grade = [evalq.grade for evalq in evalassignment. evalquestion_set.all().order_by('question')] evalassignment.grade_assignment = compute_grade(questions_coeff, questions_grade) evalassignment.save() else: return 'Question coeff are not defined' except Exception as e: return 'Oups... ' + str(e) def compute_grade(questions_coeff, questions_grade): """ Compute a grade from the per-question grades (each out of 2) and their associated coefficients :param questions_coeff: list of coefficients for each question :param questions_grade: list of grades for each question """ assign_grade = 0 sum_coeff = 0 for coeff, grade in zip(questions_coeff, questions_grade): assign_grade += coeff * grade sum_coeff += coeff return int(assign_grade * 10 * 100 / sum_coeff) / 100
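# Worked example for compute_grade (added illustration; assumes Python 3
# division semantics, which the int()/100 truncation above relies on):
#
# compute_grade([1, 2], [2, 1])
# -> assign_grade = 1*2 + 2*1 = 4, sum_coeff = 3
# -> int(4 * 10 * 100 / 3) / 100 = int(1333.33...) / 100 = 13.33
#
# i.e. per-question grades out of 2 are scaled to a mark out of 20,
# truncated to two decimal places.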
mit
linearregression/phoenix-1
bin/phoenix_utils.py
9
6567
#!/usr/bin/env python ############################################################################ # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################ import os import fnmatch def find(pattern, classPaths): paths = classPaths.split(os.pathsep) # for each class path for path in paths: # remove * if it's at the end of path if ((path is not None) and (len(path) > 0) and (path[-1] == '*')) : path = path[:-1] for root, dirs, files in os.walk(path): # sort the file names so *-client always precedes *-thin-client files.sort() for name in files: if fnmatch.fnmatch(name, pattern): return os.path.join(root, name) return "" def findFileInPathWithoutRecursion(pattern, path): if not os.path.exists(path): return "" files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))] # sort the file names so *-client always precedes *-thin-client files.sort() for name in files: if fnmatch.fnmatch(name, pattern): return os.path.join(path, name) return "" def setPath(): PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar" PHOENIX_THIN_CLIENT_JAR_PATTERN = "phoenix-*-thin-client.jar" PHOENIX_QUERYSERVER_JAR_PATTERN = "phoenix-server-*-runnable.jar" PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar" # Backward support old env variable PHOENIX_LIB_DIR replaced by PHOENIX_CLASS_PATH global phoenix_class_path phoenix_class_path = os.getenv('PHOENIX_LIB_DIR','') if phoenix_class_path == "": phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','') global hbase_conf_dir # if HBASE_CONF_DIR set explicitly, use that hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH')) if not hbase_conf_dir: # else fall back to HBASE_HOME if os.getenv('HBASE_HOME'): hbase_conf_dir = os.path.join(os.getenv('HBASE_HOME'), "conf") else: # default to pwd hbase_conf_dir = '.' 
global hbase_conf_path # keep conf_path around for backward compatibility hbase_conf_path = hbase_conf_dir global current_dir current_dir = os.path.dirname(os.path.abspath(__file__)) global phoenix_jar_path phoenix_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*") global phoenix_client_jar phoenix_client_jar = find("phoenix-*-client.jar", phoenix_jar_path) if phoenix_client_jar == "": phoenix_client_jar = findFileInPathWithoutRecursion(PHOENIX_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..")) if phoenix_client_jar == "": phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_class_path) global phoenix_test_jar_path phoenix_test_jar_path = os.path.join(current_dir, "..", "phoenix-core", "target","*") global hadoop_common_jar_path hadoop_common_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*") global hadoop_common_jar hadoop_common_jar = find("hadoop-common*.jar", hadoop_common_jar_path) global hadoop_hdfs_jar_path hadoop_hdfs_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*") global hadoop_hdfs_jar hadoop_hdfs_jar = find("hadoop-hdfs*.jar", hadoop_hdfs_jar_path) global testjar testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_test_jar_path) if testjar == "": testjar = findFileInPathWithoutRecursion(PHOENIX_TESTS_JAR_PATTERN, os.path.join(current_dir, "..")) if testjar == "": testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_class_path) global phoenix_queryserver_jar phoenix_queryserver_jar = find(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server", "target", "*")) if phoenix_queryserver_jar == "": phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib")) if phoenix_queryserver_jar == "": phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..")) global phoenix_thin_client_jar phoenix_thin_client_jar = find(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server-client", "target", "*")) if phoenix_thin_client_jar == "": phoenix_thin_client_jar = findFileInPathWithoutRecursion(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..")) return "" def shell_quote(args): """ Return the platform specific shell quoted string. Handles Windows and *nix platforms. :param args: array of shell arguments :return: shell quoted string """ if os.name == 'nt': import subprocess return subprocess.list2cmdline(args) else: # pipes module isn't available on Windows import pipes return " ".join([pipes.quote(v) for v in args]) if __name__ == "__main__": setPath() print "phoenix_class_path:", phoenix_class_path print "hbase_conf_dir:", hbase_conf_dir print "hbase_conf_path:", hbase_conf_path print "current_dir:", current_dir print "phoenix_jar_path:", phoenix_jar_path print "phoenix_client_jar:", phoenix_client_jar print "phoenix_test_jar_path:", phoenix_test_jar_path print "hadoop_common_jar_path:", hadoop_common_jar_path print "hadoop_common_jar:", hadoop_common_jar print "hadoop_hdfs_jar_path:", hadoop_hdfs_jar_path print "hadoop_hdfs_jar:", hadoop_hdfs_jar print "testjar:", testjar print "phoenix_queryserver_jar:", phoenix_queryserver_jar print "phoenix_thin_client_jar:", phoenix_thin_client_jar
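# shell_quote example (added illustration; POSIX branch):
# shell_quote(["ls", "my file"]) -> "ls 'my file'"
# On Windows, subprocess.list2cmdline produces 'ls "my file"' instead.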
apache-2.0
hehongliang/tensorflow
tensorflow/python/data/experimental/kernel_tests/serialization/csv_dataset_serialization_test.py
21
2558
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the CsvDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import readers from tensorflow.python.platform import test class CsvDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def setUp(self): self._num_cols = 7 self._num_rows = 10 self._num_epochs = 14 self._num_outputs = self._num_rows * self._num_epochs inputs = [ ",".join(str(self._num_cols * j + i) for i in range(self._num_cols)) for j in range(self._num_rows) ] contents = "\n".join(inputs).encode("utf-8") self._filename = os.path.join(self.get_temp_dir(), "file.csv") self._compressed = os.path.join(self.get_temp_dir(), "comp.csv") # GZip compressed with open(self._filename, "wb") as f: f.write(contents) with gzip.GzipFile(self._compressed, "wb") as f: f.write(contents) def ds_func(self, **kwargs): compression_type = kwargs.get("compression_type", None) if compression_type == "GZIP": filename = self._compressed elif compression_type is None: filename = self._filename else: raise ValueError("Invalid compression type:", compression_type) return readers.CsvDataset(filename, **kwargs).repeat(self._num_epochs) def testSerializationCore(self): defs = [[0]] * self._num_cols self.run_core_tests( lambda: self.ds_func(record_defaults=defs, buffer_size=2), lambda: self.ds_func(record_defaults=defs, buffer_size=12), self._num_outputs) if __name__ == "__main__": test.main()
apache-2.0
glatard/nipype
nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py
9
1165
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.converters import OrientScalarVolume def test_OrientScalarVolume_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', position=-2, ), orientation=dict(argstr='--orientation %s', ), outputVolume=dict(argstr='%s', hash_files=False, position=-1, ), terminal_output=dict(nohash=True, ), ) inputs = OrientScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_OrientScalarVolume_outputs(): output_map = dict(outputVolume=dict(position=-1, ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
A-HostMobile/MobileApp
node_modules/node-gyp/gyp/pylib/gyp/__init__.py
1524
22178
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import gyp.input import optparse import os.path import re import shlex import sys import traceback from gyp.common import GypError # Default debug modes for GYP debug = {} # List of "official" debug modes, but you can use anything you like. DEBUG_GENERAL = 'general' DEBUG_VARIABLES = 'variables' DEBUG_INCLUDES = 'includes' def DebugOutput(mode, message, *args): if 'all' in gyp.debug or mode in gyp.debug: ctx = ('unknown', 0, 'unknown') try: f = traceback.extract_stack(limit=2) if f: ctx = f[0][:3] except: pass if args: message %= args print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]), ctx[1], ctx[2], message) def FindBuildFiles(): extension = '.gyp' files = os.listdir(os.getcwd()) build_files = [] for file in files: if file.endswith(extension): build_files.append(file) return build_files def Load(build_files, format, default_variables={}, includes=[], depth='.', params=None, check=False, circular_check=True, duplicate_basename_check=True): """ Loads one or more specified build files. default_variables and includes will be copied before use. Returns the generator for the specified format and the data returned by loading the specified build files. """ if params is None: params = {} if '-' in format: format, params['flavor'] = format.split('-', 1) default_variables = copy.copy(default_variables) # Default variables provided by this program and its modules should be # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace, # avoiding collisions with user and automatic variables. default_variables['GENERATOR'] = format default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '') # Format can be a custom python file, or by default the name of a module # within gyp.generator. if format.endswith('.py'): generator_name = os.path.splitext(format)[0] path, generator_name = os.path.split(generator_name) # Make sure the path to the custom generator is in sys.path # Don't worry about removing it once we are done. Keeping the path # to each generator that is used in sys.path is likely harmless and # arguably a good idea. path = os.path.abspath(path) if path not in sys.path: sys.path.insert(0, path) else: generator_name = 'gyp.generator.' + format # These parameters are passed in order (as opposed to by key) # because ActivePython cannot handle key parameters to __import__. generator = __import__(generator_name, globals(), locals(), generator_name) for (key, val) in generator.generator_default_variables.items(): default_variables.setdefault(key, val) # Give the generator the opportunity to set additional variables based on # the params it will receive in the output phase. if getattr(generator, 'CalculateVariables', None): generator.CalculateVariables(default_variables, params) # Give the generator the opportunity to set generator_input_info based on # the params it will receive in the output phase. if getattr(generator, 'CalculateGeneratorInputInfo', None): generator.CalculateGeneratorInputInfo(params) # Fetch the generator specific info that gets fed to input, we use getattr # so we can default things and the generators only have to provide what # they need. 
generator_input_info = { 'non_configuration_keys': getattr(generator, 'generator_additional_non_configuration_keys', []), 'path_sections': getattr(generator, 'generator_additional_path_sections', []), 'extra_sources_for_rules': getattr(generator, 'generator_extra_sources_for_rules', []), 'generator_supports_multiple_toolsets': getattr(generator, 'generator_supports_multiple_toolsets', False), 'generator_wants_static_library_dependencies_adjusted': getattr(generator, 'generator_wants_static_library_dependencies_adjusted', True), 'generator_wants_sorted_dependencies': getattr(generator, 'generator_wants_sorted_dependencies', False), 'generator_filelist_paths': getattr(generator, 'generator_filelist_paths', None), } # Process the input specific to this generator. result = gyp.input.Load(build_files, default_variables, includes[:], depth, generator_input_info, check, circular_check, duplicate_basename_check, params['parallel'], params['root_targets']) return [generator] + result def NameValueListToDict(name_value_list): """ Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary of the pairs. If a string is simply NAME, then the value in the dictionary is set to True. If VALUE can be converted to an integer, it is. """ result = { } for item in name_value_list: tokens = item.split('=', 1) if len(tokens) == 2: # If we can make it an int, use that, otherwise, use the string. try: token_value = int(tokens[1]) except ValueError: token_value = tokens[1] # Set the variable to the supplied value. result[tokens[0]] = token_value else: # No value supplied, treat it as a boolean and set it. result[tokens[0]] = True return result def ShlexEnv(env_name): flags = os.environ.get(env_name, []) if flags: flags = shlex.split(flags) return flags def FormatOpt(opt, value): if opt.startswith('--'): return '%s=%s' % (opt, value) return opt + value def RegenerateAppendFlag(flag, values, predicate, env_name, options): """Regenerate a list of command line flags, for an option of action='append'. The |env_name|, if given, is checked in the environment and used to generate an initial list of options, then the options that were specified on the command line (given in |values|) are appended. This matches the handling of environment variables and command line flags where command line flags override the environment, while not requiring the environment to be set when the flags are used again. """ flags = [] if options.use_environment and env_name: for flag_value in ShlexEnv(env_name): value = FormatOpt(flag, predicate(flag_value)) if value in flags: flags.remove(value) flags.append(value) if values: for flag_value in values: flags.append(FormatOpt(flag, predicate(flag_value))) return flags def RegenerateFlags(options): """Given a parsed options object, and taking the environment variables into account, returns a list of flags that should regenerate an equivalent options object (even in the absence of the environment variables.) Any path options will be normalized relative to depth. The format flag is not included, as it is assumed the calling generator will set that as appropriate. """ def FixPath(path): path = gyp.common.FixIfRelativePath(path, options.depth) if not path: return os.path.curdir return path def Noop(value): return value # We always want to ignore the environment when regenerating, to avoid # duplicate or changed flags in the environment at the time of regeneration. 
flags = ['--ignore-environment'] for name, metadata in options._regeneration_metadata.iteritems(): opt = metadata['opt'] value = getattr(options, name) value_predicate = metadata['type'] == 'path' and FixPath or Noop action = metadata['action'] env_name = metadata['env_name'] if action == 'append': flags.extend(RegenerateAppendFlag(opt, value, value_predicate, env_name, options)) elif action in ('store', None): # None is a synonym for 'store'. if value: flags.append(FormatOpt(opt, value_predicate(value))) elif options.use_environment and env_name and os.environ.get(env_name): flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name)))) elif action in ('store_true', 'store_false'): if ((action == 'store_true' and value) or (action == 'store_false' and not value)): flags.append(opt) elif options.use_environment and env_name: print >>sys.stderr, ('Warning: environment regeneration unimplemented ' 'for %s flag %r env_name %r' % (action, opt, env_name)) else: print >>sys.stderr, ('Warning: regeneration unimplemented for action %r ' 'flag %r' % (action, opt)) return flags class RegeneratableOptionParser(optparse.OptionParser): def __init__(self): self.__regeneratable_options = {} optparse.OptionParser.__init__(self) def add_option(self, *args, **kw): """Add an option to the parser. This accepts the same arguments as OptionParser.add_option, plus the following: regenerate: can be set to False to prevent this option from being included in regeneration. env_name: name of environment variable that additional values for this option come from. type: adds type='path', to tell the regenerator that the values of this option need to be made relative to options.depth """ env_name = kw.pop('env_name', None) if 'dest' in kw and kw.pop('regenerate', True): dest = kw['dest'] # The path type is needed for regenerating, for optparse we can just treat # it as a string. type = kw.get('type') if type == 'path': kw['type'] = 'string' self.__regeneratable_options[dest] = { 'action': kw.get('action'), 'type': type, 'env_name': env_name, 'opt': args[0], } optparse.OptionParser.add_option(self, *args, **kw) def parse_args(self, *args): values, args = optparse.OptionParser.parse_args(self, *args) values._regeneration_metadata = self.__regeneratable_options return values, args def gyp_main(args): my_name = os.path.basename(sys.argv[0]) parser = RegeneratableOptionParser() usage = 'usage: %s [options ...] [build_file ...]' parser.set_usage(usage.replace('%s', '%prog')) parser.add_option('--build', dest='configs', action='append', help='configuration for build after project generation') parser.add_option('--check', dest='check', action='store_true', help='check format of gyp files') parser.add_option('--config-dir', dest='config_dir', action='store', env_name='GYP_CONFIG_DIR', default=None, help='The location for configuration files like ' 'include.gypi.') parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE', action='append', default=[], help='turn on a debugging ' 'mode for debugging GYP. 
Supported modes are "variables", ' '"includes" and "general" or "all" for all of them.') parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL', env_name='GYP_DEFINES', help='sets variable VAR to value VAL') parser.add_option('--depth', dest='depth', metavar='PATH', type='path', help='set DEPTH gyp variable to a relative path to PATH') parser.add_option('-f', '--format', dest='formats', action='append', env_name='GYP_GENERATORS', regenerate=False, help='output formats to generate') parser.add_option('-G', dest='generator_flags', action='append', default=[], metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS', help='sets generator flag FLAG to VAL') parser.add_option('--generator-output', dest='generator_output', action='store', default=None, metavar='DIR', type='path', env_name='GYP_GENERATOR_OUTPUT', help='puts generated build files under DIR') parser.add_option('--ignore-environment', dest='use_environment', action='store_false', default=True, regenerate=False, help='do not read options from environment variables') parser.add_option('-I', '--include', dest='includes', action='append', metavar='INCLUDE', type='path', help='files to include in all loaded .gyp files') # --no-circular-check disables the check for circular relationships between # .gyp files. These relationships should not exist, but they've only been # observed to be harmful with the Xcode generator. Chromium's .gyp files # currently have some circular relationships on non-Mac platforms, so this # option allows the strict behavior to be used on Macs and the lenient # behavior to be used elsewhere. # TODO(mark): Remove this option when http://crbug.com/35878 is fixed. parser.add_option('--no-circular-check', dest='circular_check', action='store_false', default=True, regenerate=False, help="don't check for circular relationships between files") # --no-duplicate-basename-check disables the check for duplicate basenames # in a static_library/shared_library project. Visual C++ 2008 generator # doesn't support this configuration. Libtool on Mac also generates warnings # when duplicate basenames are passed into Make generator on Mac. # TODO(yukawa): Remove this option when these legacy generators are # deprecated. 
parser.add_option('--no-duplicate-basename-check', dest='duplicate_basename_check', action='store_false', default=True, regenerate=False, help="don't check for duplicate basenames") parser.add_option('--no-parallel', action='store_true', default=False, help='Disable multiprocessing') parser.add_option('-S', '--suffix', dest='suffix', default='', help='suffix to add to generated files') parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store', default=None, metavar='DIR', type='path', help='directory to use as the root of the source tree') parser.add_option('-R', '--root-target', dest='root_targets', action='append', metavar='TARGET', help='include only TARGET and its deep dependencies') options, build_files_arg = parser.parse_args(args) build_files = build_files_arg # Set up the configuration directory (defaults to ~/.gyp) if not options.config_dir: home = None home_dot_gyp = None if options.use_environment: home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None) if home_dot_gyp: home_dot_gyp = os.path.expanduser(home_dot_gyp) if not home_dot_gyp: home_vars = ['HOME'] if sys.platform in ('cygwin', 'win32'): home_vars.append('USERPROFILE') for home_var in home_vars: home = os.getenv(home_var) if home != None: home_dot_gyp = os.path.join(home, '.gyp') if not os.path.exists(home_dot_gyp): home_dot_gyp = None else: break else: home_dot_gyp = os.path.expanduser(options.config_dir) if home_dot_gyp and not os.path.exists(home_dot_gyp): home_dot_gyp = None if not options.formats: # If no format was given on the command line, then check the env variable. generate_formats = [] if options.use_environment: generate_formats = os.environ.get('GYP_GENERATORS', []) if generate_formats: generate_formats = re.split(r'[\s,]', generate_formats) if generate_formats: options.formats = generate_formats else: # Nothing in the variable, default based on platform. if sys.platform == 'darwin': options.formats = ['xcode'] elif sys.platform in ('win32', 'cygwin'): options.formats = ['msvs'] else: options.formats = ['make'] if not options.generator_output and options.use_environment: g_o = os.environ.get('GYP_GENERATOR_OUTPUT') if g_o: options.generator_output = g_o options.parallel = not options.no_parallel for mode in options.debug: gyp.debug[mode] = 1 # Do an extra check to avoid work when we're not debugging. if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, 'running with these options:') for option, value in sorted(options.__dict__.items()): if option[0] == '_': continue if isinstance(value, basestring): DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value) else: DebugOutput(DEBUG_GENERAL, " %s: %s", option, value) if not build_files: build_files = FindBuildFiles() if not build_files: raise GypError((usage + '\n\n%s: error: no build_file') % (my_name, my_name)) # TODO(mark): Chromium-specific hack! # For Chromium, the gyp "depth" variable should always be a relative path # to Chromium's top-level "src" directory. If no depth variable was set # on the command line, try to find a "src" directory by looking at the # absolute path to each build file's directory. The first "src" component # found will be treated as though it were the path used for --depth. 
if not options.depth: for build_file in build_files: build_file_dir = os.path.abspath(os.path.dirname(build_file)) build_file_dir_components = build_file_dir.split(os.path.sep) components_len = len(build_file_dir_components) for index in xrange(components_len - 1, -1, -1): if build_file_dir_components[index] == 'src': options.depth = os.path.sep.join(build_file_dir_components) break del build_file_dir_components[index] # If the inner loop found something, break without advancing to another # build file. if options.depth: break if not options.depth: raise GypError('Could not automatically locate src directory. This is ' 'a temporary Chromium feature that will be removed. Use ' '--depth as a workaround.') # If toplevel-dir is not set, we assume that depth is the root of our source # tree. if not options.toplevel_dir: options.toplevel_dir = options.depth # -D on the command line sets variable defaults - D isn't just for define, # it's for default. Perhaps there should be a way to force (-F?) a # variable's value so that it can't be overridden by anything else. cmdline_default_variables = {} defines = [] if options.use_environment: defines += ShlexEnv('GYP_DEFINES') if options.defines: defines += options.defines cmdline_default_variables = NameValueListToDict(defines) if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, "cmdline_default_variables: %s", cmdline_default_variables) # Set up includes. includes = [] # If ~/.gyp/include.gypi exists, it'll be forcibly included into every # .gyp file that's loaded, before anything else is included. if home_dot_gyp is not None: default_include = os.path.join(home_dot_gyp, 'include.gypi') if os.path.exists(default_include): print 'Using overrides found in ' + default_include includes.append(default_include) # Command-line --include files come after the default include. if options.includes: includes.extend(options.includes) # Generator flags should be prefixed with the target generator since they # are global across all generator runs. gen_flags = [] if options.use_environment: gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS') if options.generator_flags: gen_flags += options.generator_flags generator_flags = NameValueListToDict(gen_flags) if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags) # Generate all requested formats (use a set in case we got one format request # twice) for format in set(options.formats): params = {'options': options, 'build_files': build_files, 'generator_flags': generator_flags, 'cwd': os.getcwd(), 'build_files_arg': build_files_arg, 'gyp_binary': sys.argv[0], 'home_dot_gyp': home_dot_gyp, 'parallel': options.parallel, 'root_targets': options.root_targets, 'target_arch': cmdline_default_variables.get('target_arch', '')} # Start with the default variables from the command line. [generator, flat_list, targets, data] = Load( build_files, format, cmdline_default_variables, includes, options.depth, params, options.check, options.circular_check, options.duplicate_basename_check) # TODO(mark): Pass |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. # NOTE: flat_list is the flattened dependency graph specifying the order # that targets may be built. Build systems that operate serially or that # need to have dependencies defined before dependents reference them should # generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params) if options.configs: valid_configs = targets[flat_list[0]]['configurations'].keys() for conf in options.configs: if conf not in valid_configs: raise GypError('Invalid config specified via --build: %s' % conf) generator.PerformBuild(data, options.configs, params) # Done return 0 def main(args): try: return gyp_main(args) except GypError, e: sys.stderr.write("gyp: %s\n" % e) return 1 # NOTE: setuptools generated console_scripts calls function with no arguments def script_main(): return main(sys.argv[1:]) if __name__ == '__main__': sys.exit(script_main())
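A quick illustration of the NAME=VALUE parsing that backs the -D and -G flags above; the define strings are made-up examples, and the behavior follows directly from NameValueListToDict (Python 2, like the rest of this module):

import gyp

# 'clang=1' becomes the integer 1, 'werror=' keeps the empty string,
# and a bare 'DEBUG' (no '=') becomes the boolean True.
print gyp.NameValueListToDict(['OS=linux', 'werror=', 'clang=1', 'DEBUG'])
# -> {'OS': 'linux', 'werror': '', 'clang': 1, 'DEBUG': True}  (key order arbitrary)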
apache-2.0
jzoldak/edx-platform
common/djangoapps/student/tests/test_auto_auth.py
9
11047
from django.test import TestCase from django.test.client import Client from django.contrib.auth.models import User from django.conf import settings from django_comment_common.models import ( Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_STUDENT) from django_comment_common.utils import seed_permissions_roles from student.models import anonymous_id_for_user, CourseEnrollment, UserProfile from util.testing import UrlResetMixin from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locator import CourseLocator from mock import patch import ddt import json class AutoAuthTestCase(UrlResetMixin, TestCase): """ Base class for AutoAuth Tests that properly resets the urls.py """ URLCONF_MODULES = ['student.urls'] @ddt.ddt class AutoAuthEnabledTestCase(AutoAuthTestCase): """ Tests for the Auto auth view that we have for load testing. """ COURSE_ID_MONGO = 'edX/Test101/2014_Spring' COURSE_ID_SPLIT = 'course-v1:edX+Test101+2014_Spring' COURSE_IDS_DDT = ( (COURSE_ID_MONGO, SlashSeparatedCourseKey.from_deprecated_string(COURSE_ID_MONGO)), (COURSE_ID_SPLIT, SlashSeparatedCourseKey.from_deprecated_string(COURSE_ID_SPLIT)), (COURSE_ID_MONGO, CourseLocator.from_string(COURSE_ID_MONGO)), (COURSE_ID_SPLIT, CourseLocator.from_string(COURSE_ID_SPLIT)), ) @patch.dict("django.conf.settings.FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": True}) def setUp(self): # Patching the settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] # value affects the contents of urls.py, # so we need to call super.setUp() which reloads urls.py (because # of the UrlResetMixin) super(AutoAuthEnabledTestCase, self).setUp() self.url = '/auto_auth' self.client = Client() def test_create_user(self): """ Test that user gets created when visiting the page. """ self._auto_auth() self.assertEqual(User.objects.count(), 1) user = User.objects.all()[0] self.assertTrue(user.is_active) self.assertFalse(user.profile.requires_parental_consent()) def test_create_same_user(self): self._auto_auth({'username': 'test'}) self._auto_auth({'username': 'test'}) self.assertEqual(User.objects.count(), 1) def test_create_multiple_users(self): """ Test to make sure multiple users are created. """ self._auto_auth() self.client.logout() self._auto_auth() self.assertEqual(User.objects.all().count(), 2) def test_create_defined_user(self): """ Test that the user gets created with the correct attributes when they are passed as parameters on the auto-auth page. 
""" self._auto_auth({ 'username': 'robot', 'password': 'test', 'email': '[email protected]', 'full_name': "Robot Name" }) # Check that the user has the correct info user = User.objects.get(username='robot') self.assertEqual(user.username, 'robot') self.assertTrue(user.check_password('test')) self.assertEqual(user.email, '[email protected]') # Check that the user has a profile user_profile = UserProfile.objects.get(user=user) self.assertEqual(user_profile.name, "Robot Name") # By default, the user should not be global staff self.assertFalse(user.is_staff) def test_create_staff_user(self): # Create a staff user self._auto_auth({'username': 'test', 'staff': 'true'}) user = User.objects.get(username='test') self.assertTrue(user.is_staff) # Revoke staff privileges self._auto_auth({'username': 'test', 'staff': 'false'}) user = User.objects.get(username='test') self.assertFalse(user.is_staff) @ddt.data(*COURSE_IDS_DDT) @ddt.unpack def test_course_enrollment(self, course_id, course_key): # Create a user and enroll in a course self._auto_auth({'username': 'test', 'course_id': course_id}) # Check that a course enrollment was created for the user self.assertEqual(CourseEnrollment.objects.count(), 1) enrollment = CourseEnrollment.objects.get(course_id=course_key) self.assertEqual(enrollment.user.username, "test") @ddt.data(*COURSE_IDS_DDT) @ddt.unpack def test_double_enrollment(self, course_id, course_key): # Create a user and enroll in a course self._auto_auth({'username': 'test', 'course_id': course_id}) # Make the same call again, re-enrolling the student in the same course self._auto_auth({'username': 'test', 'course_id': course_id}) # Check that only one course enrollment was created for the user self.assertEqual(CourseEnrollment.objects.count(), 1) enrollment = CourseEnrollment.objects.get(course_id=course_key) self.assertEqual(enrollment.user.username, "test") @ddt.data(*COURSE_IDS_DDT) @ddt.unpack def test_set_roles(self, course_id, course_key): seed_permissions_roles(course_key) course_roles = dict((r.name, r) for r in Role.objects.filter(course_id=course_key)) self.assertEqual(len(course_roles), 4) # sanity check # Student role is assigned by default on course enrollment. self._auto_auth({'username': 'a_student', 'course_id': course_id}) user = User.objects.get(username='a_student') user_roles = user.roles.all() self.assertEqual(len(user_roles), 1) self.assertEqual(user_roles[0], course_roles[FORUM_ROLE_STUDENT]) self.client.logout() self._auto_auth({'username': 'a_moderator', 'course_id': course_id, 'roles': 'Moderator'}) user = User.objects.get(username='a_moderator') user_roles = user.roles.all() self.assertEqual( set(user_roles), set([course_roles[FORUM_ROLE_STUDENT], course_roles[FORUM_ROLE_MODERATOR]])) # check multiple roles work. 
self.client.logout() self._auto_auth({ 'username': 'an_admin', 'course_id': course_id, 'roles': '{},{}'.format(FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR) }) user = User.objects.get(username='an_admin') user_roles = user.roles.all() self.assertEqual( set(user_roles), set([course_roles[FORUM_ROLE_STUDENT], course_roles[FORUM_ROLE_MODERATOR], course_roles[FORUM_ROLE_ADMINISTRATOR]])) @ddt.data(*COURSE_IDS_DDT) @ddt.unpack def test_json_response(self, course_id, course_key): # pylint: disable=unused-argument """Verify that we can get JSON back from the auto_auth page.""" response = self._auto_auth(HTTP_ACCEPT='application/json') response_data = json.loads(response.content) for key in ['created_status', 'username', 'email', 'password', 'user_id', 'anonymous_id']: self.assertIn(key, response_data) user = User.objects.get(username=response_data['username']) self.assertDictContainsSubset( { 'created_status': "Logged in", 'anonymous_id': anonymous_id_for_user(user, None), }, response_data ) @ddt.data(*COURSE_IDS_DDT) @ddt.unpack def test_redirect_to_course(self, course_id, course_key): # Create a user and enroll in a course response = self._auto_auth({ 'username': 'test', 'course_id': course_id, 'redirect': True, 'staff': 'true', }, status_code=302) # Check that a course enrollment was created for the user self.assertEqual(CourseEnrollment.objects.count(), 1) enrollment = CourseEnrollment.objects.get(course_id=course_key) self.assertEqual(enrollment.user.username, "test") # Check that the redirect was to the course info/outline page if settings.ROOT_URLCONF == 'lms.urls': url_pattern = '/info' else: url_pattern = '/course/{}'.format(unicode(course_key)) self.assertTrue(response.url.endswith(url_pattern)) # pylint: disable=no-member def test_redirect_to_main(self): # Create user and redirect to 'home' (cms) or 'dashboard' (lms) response = self._auto_auth({ 'username': 'test', 'redirect': True, 'staff': 'true', }, status_code=302) # Check that the redirect was to either /dashboard or /home if settings.ROOT_URLCONF == 'lms.urls': url_pattern = '/dashboard' else: url_pattern = '/home' self.assertTrue(response.url.endswith(url_pattern)) # pylint: disable=no-member def test_redirect_to_specified(self): # Create user and redirect to specified url url_pattern = '/u/test#about_me' response = self._auto_auth({ 'username': 'test', 'redirect_to': url_pattern, 'staff': 'true', }, status_code=302) self.assertTrue(response.url.endswith(url_pattern)) # pylint: disable=no-member def _auto_auth(self, params=None, status_code=None, **kwargs): """ Make a request to the auto-auth end-point and check that the response is successful. Arguments: params (dict): Dict of params to pass to the auto_auth view kwargs: Passed directly to the test client's get method. Returns response: The response object for the auto_auth page. 
""" params = params or {} response = self.client.get(self.url, params, **kwargs) expected_status_code = status_code if status_code else 200 self.assertEqual(response.status_code, expected_status_code) # Check that session and CSRF are set in the response for cookie in ['csrftoken', 'sessionid']: self.assertIn(cookie, response.cookies) # pylint: disable=maybe-no-member self.assertTrue(response.cookies[cookie].value) # pylint: disable=maybe-no-member return response class AutoAuthDisabledTestCase(AutoAuthTestCase): """ Test that the page is inaccessible with default settings """ @patch.dict("django.conf.settings.FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": False}) def setUp(self): # Patching the settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] # value affects the contents of urls.py, # so we need to call super.setUp() which reloads urls.py (because # of the UrlResetMixin) super(AutoAuthDisabledTestCase, self).setUp() self.url = '/auto_auth' self.client = Client() def test_auto_auth_disabled(self): """ Make sure automatic authentication is disabled. """ response = self.client.get(self.url) self.assertEqual(response.status_code, 404)
agpl-3.0
drhee/toxoMine
intermine/python/tests/test_templates.py
2
7133
import unittest from test import WebserviceTest from intermine.webservice import * from intermine.query import Template from intermine.constraints import TemplateConstraint class TestTemplates(WebserviceTest): # pragma: no cover def setUp(self): self.service = Service(self.get_test_root()) def testGetTemplate(self): """Should be able to get a template from the webservice, if it exists, and get its results""" self.assertEqual(len(self.service.templates), 12) t = self.service.get_template("MultiValueConstraints") self.assertTrue(isinstance(t, Template)) expected = "[<TemplateMultiConstraint: Employee.name ONE OF [u'Dick', u'Jane', u'Timmy, the Loyal German-Shepherd'] (editable, locked)>]" self.assertEqual(t.editable_constraints.__repr__(), expected) expected = [[u'foo', u'bar', u'baz'], [123, 1.23, -1.23], [True, False, None]] def do_tests(error=None, attempts=0): if attempts < 5: try: self.assertEqual(t.get_results_list("list"), expected) except IOError, e: do_tests(e, attempts + 1) else: raise RuntimeError("Error connecting to " + self.service.root, error) do_tests() try: self.service.get_template("Non_Existant") self.fail("No ServiceError raised by non-existant template") except ServiceError, ex: self.assertEqual(ex.message, "There is no template called 'Non_Existant' at this service") def testIrrelevantSO(self): """Should fix up bad sort orders and logic when parsing from xml""" model = self.service.model xml = '''<template name="bad_so"><query name="bad_so" model="testmodel" view="Employee.name Employee.age" sortOrder="Employee.fullTime ASC"/></template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_sort_order()), "Employee.name asc") xml = '''<template name="bad_so"><query name="bad_so" model="testmodel" view="Employee.name Employee.age" sortOrder="Employee.fullTime"/></template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_sort_order()), "Employee.name asc") def testIrrelevantConstraintLogic(self): """Should fix up bad logic""" model = self.service.model xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A and B and C"/></template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A and B or (D and E) and C"/></template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A or B or (D and E) and C"> <constraint path="Employee.name" op="IS NULL"/><constraint path="Employee.age" op="IS NOT NULL"/> </query> </template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "A or B") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A or B or (D and E) and C"> <constraint path="Employee.name" op="IS NULL"/><constraint path="Employee.age" op="IS NOT NULL"/><constraint path="Employee.fullTime" op="=" value="true"/> </query> </template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "(A or B) and C") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A or B or (D and E) or C"> <constraint path="Employee.name" op="IS NULL"/><constraint path="Employee.age" op="IS NOT NULL"/><constraint path="Employee.fullTime"
op="=" value="true"/> </query> </template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "A or B or C") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A or B and (D and E) or C"> <constraint path="Employee.name" op="IS NULL"/><constraint path="Employee.age" op="IS NOT NULL"/><constraint path="Employee.fullTime" op="=" value="true"/> </query> </template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "(A or B) and C") xml = '''<template name="bad_cl"><query name="bad_cl" model="testmodel" view="Employee.name Employee.age" constraintLogic="A or B or (D and E) and C"> <constraint path="Employee.name" op="IS NULL"/><constraint path="Employee.age" op="IS NOT NULL"/> <constraint path="Employee.fullTime" op="=" value="true"/><constraint path="Employee.name" op="IS NULL"/> </query> </template>''' t = Template.from_xml(xml, model) self.assertEqual(str(t.get_logic()), "(A or B or D) and C") def testTemplateConstraintParsing(self): """Should be able to parse template constraints""" t = self.service.get_template("UneditableConstraints") self.assertEqual(len(t.constraints), 2) self.assertEqual(len(t.editable_constraints), 1) expected = '[<TemplateBinaryConstraint: Company.name = Woolies (editable, locked)>]' self.assertEqual(expected, repr(t.editable_constraints)) self.assertEqual('<TemplateBinaryConstraint: Company.departments.name = Farm Supplies (non-editable, locked)>', repr(t.get_constraint("B"))) t2 = self.service.get_template("SwitchableConstraints") self.assertEqual(len(t2.editable_constraints), 3) con = t2.get_constraint("A") self.assertTrue(con.editable and con.required and con.switched_on) con = t2.get_constraint("B") self.assertTrue(con.editable and con.optional and con.switched_on) self.assertEqual('<TemplateBinaryConstraint: Company.departments.name = Farm Supplies (editable, on)>', repr(con)) con.switch_off() self.assertTrue(con.editable and con.optional and con.switched_off) self.assertEqual('<TemplateBinaryConstraint: Company.departments.name = Farm Supplies (editable, off)>', repr(con)) con.switch_on() self.assertTrue(con.editable and con.optional and con.switched_on) con = t2.get_constraint("C") self.assertTrue(con.editable and con.optional and con.switched_off) self.assertRaises(ValueError, lambda: t2.get_constraint("A").switch_off()) self.assertRaises(ValueError, lambda: t2.get_constraint("A").switch_on()) def testBadTemplateConstraint(self): self.assertRaises(TypeError, lambda: TemplateConstraint(True, "BAD_VALUE"))
lgpl-2.1
abloomston/sympy
sympy/matrices/expressions/matadd.py
65
3328
from __future__ import print_function, division from sympy.core.compatibility import reduce from operator import add from sympy.core import Add, Basic, sympify from sympy.functions import adjoint from sympy.matrices.matrices import MatrixBase from sympy.matrices.expressions.transpose import transpose from sympy.strategies import (rm_id, unpack, flatten, sort, condition, exhaust, do_one, glom) from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError, ZeroMatrix from sympy.utilities import default_sort_key, sift class MatAdd(MatrixExpr): """A Sum of Matrix Expressions MatAdd inherits from and operates like SymPy Add >>> from sympy import MatAdd, MatrixSymbol >>> A = MatrixSymbol('A', 5, 5) >>> B = MatrixSymbol('B', 5, 5) >>> C = MatrixSymbol('C', 5, 5) >>> MatAdd(A, B, C) A + B + C """ is_MatAdd = True def __new__(cls, *args, **kwargs): args = list(map(sympify, args)) check = kwargs.get('check', True) obj = Basic.__new__(cls, *args) if check: validate(*args) return obj @property def shape(self): return self.args[0].shape def _entry(self, i, j): return Add(*[arg._entry(i, j) for arg in self.args]) def _eval_transpose(self): return MatAdd(*[transpose(arg) for arg in self.args]).doit() def _eval_adjoint(self): return MatAdd(*[adjoint(arg) for arg in self.args]).doit() def _eval_trace(self): from .trace import trace return Add(*[trace(arg) for arg in self.args]).doit() def doit(self, **kwargs): deep = kwargs.get('deep', True) if deep: args = [arg.doit(**kwargs) for arg in self.args] else: args = self.args return canonicalize(MatAdd(*args)) def validate(*args): if not all(arg.is_Matrix for arg in args): raise TypeError("Mix of Matrix and Scalar symbols") A = args[0] for B in args[1:]: if A.shape != B.shape: raise ShapeError("Matrices %s and %s are not aligned"%(A, B)) factor_of = lambda arg: arg.as_coeff_mmul()[0] matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1]) def combine(cnt, mat): if cnt == 1: return mat else: return cnt * mat def merge_explicit(matadd): """ Merge explicit MatrixBase arguments >>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint >>> from sympy.matrices.expressions.matadd import merge_explicit >>> A = MatrixSymbol('A', 2, 2) >>> B = eye(2) >>> C = Matrix([[1, 2], [3, 4]]) >>> X = MatAdd(A, B, C) >>> pprint(X) A + [1 0] + [1 2] [ ] [ ] [0 1] [3 4] >>> pprint(merge_explicit(X)) A + [2 2] [ ] [3 5] """ groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase)) if len(groups[True]) > 1: return MatAdd(*(groups[False] + [reduce(add, groups[True])])) else: return matadd rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)), unpack, flatten, glom(matrix_of, factor_of, combine), merge_explicit, sort(default_sort_key)) canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd), do_one(*rules)))
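A small sketch of the canonicalization pipeline above in action: doit() runs the rule list, so merge_explicit folds the two explicit matrices into one while the MatrixSymbol is left symbolic (rm_id would likewise strip ZeroMatrix terms):

from sympy import Matrix, MatrixSymbol, eye
from sympy.matrices.expressions.matadd import MatAdd

A = MatrixSymbol('A', 2, 2)
expr = MatAdd(A, eye(2), Matrix([[1, 2], [3, 4]]))
# eye(2) + Matrix([[1, 2], [3, 4]]) collapses to Matrix([[2, 2], [3, 5]]);
# the printed argument order may vary with default_sort_key.
print(expr.doit())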
bsd-3-clause
CapOM/ChromiumGStreamerBackend
tools/telemetry/third_party/webpagereplay/third_party/dns/reversename.py
248
2931
# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS Reverse Map Names. @var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa. @type ipv4_reverse_domain: dns.name.Name object @var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa. @type ipv6_reverse_domain: dns.name.Name object """ import dns.exception import dns.name import dns.ipv6 import dns.ipv4 ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.') ipv6_reverse_domain = dns.name.from_text('ip6.arpa.') def from_address(text): """Convert an IPv4 or IPv6 address in textual form into a Name object whose value is the reverse-map domain name of the address. @param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1', '::1') @type text: str @rtype: dns.name.Name object """ try: parts = list(dns.ipv6.inet_aton(text).encode('hex_codec')) origin = ipv6_reverse_domain except: parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)] origin = ipv4_reverse_domain parts.reverse() return dns.name.from_text('.'.join(parts), origin=origin) def to_address(name): """Convert a reverse map domain name into textual address form. @param name: an IPv4 or IPv6 address in reverse-map form. @type name: dns.name.Name object @rtype: str """ if name.is_subdomain(ipv4_reverse_domain): name = name.relativize(ipv4_reverse_domain) labels = list(name.labels) labels.reverse() text = '.'.join(labels) # run through inet_aton() to check syntax and make pretty. return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text)) elif name.is_subdomain(ipv6_reverse_domain): name = name.relativize(ipv6_reverse_domain) labels = list(name.labels) labels.reverse() parts = [] i = 0 l = len(labels) while i < l: parts.append(''.join(labels[i:i+4])) i += 4 text = ':'.join(parts) # run through inet_aton() to check syntax and make pretty. return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text)) else: raise dns.exception.SyntaxError('unknown reverse-map address family')
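A round-trip sketch with the two helpers above, assuming the vendored dns package is importable (Python 2, like the module itself):

import dns.reversename

n = dns.reversename.from_address("127.0.0.1")
print n                               # 1.0.0.127.in-addr.arpa.
print dns.reversename.to_address(n)   # 127.0.0.1
n6 = dns.reversename.from_address("2001:db8::1")
print dns.reversename.to_address(n6)  # 2001:db8::1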
bsd-3-clause
spbguru/repo1
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py
69
22336
""" Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF driver library. """ from __future__ import division try: import pyemf except ImportError: raise ImportError('You must first install pyemf from http://pyemf.sf.net') import os,sys,math,re from matplotlib import verbose, __version__, rcParams from matplotlib._pylab_helpers import Gcf from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureManagerBase, FigureCanvasBase from matplotlib.figure import Figure from matplotlib.transforms import Bbox from matplotlib.font_manager import findfont, FontProperties from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED # Font handling stuff snarfed from backend_ps, but only using TTF fonts _fontd = {} # Debug print stuff debugHandle = False debugPrint = False debugText = False # Hashable font properties class. In EMF, angle of rotation is a part # of the font properties, so a handle to a new font must be obtained # if the rotation changes. class EMFFontProperties(FontProperties): def __init__(self,other,angle): FontProperties.__init__(self,other.get_family(), other.get_style(), other.get_variant(), other.get_weight(), other.get_stretch(), other.get_size()) self.__angle=angle def __hash__(self): return hash( (FontProperties.__hash__(self), self.__angle)) def __str__(self): return str( (FontProperties.__str__(self), self.__angle)) def set_angle(self,angle): self.__angle=angle # Hashable pen (line style) properties. class EMFPen: def __init__(self,emf,gc): self.emf=emf self.gc=gc r,g,b=gc.get_rgb() self.r=int(r*255) self.g=int(g*255) self.b=int(b*255) self.width=int(gc.get_linewidth()) self.style=0 self.set_linestyle() if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b) def __hash__(self): return hash((self.style,self.width,self.r,self.g,self.b)) def set_linestyle(self): # Hack. Negative width lines will not get drawn. if self.width<0: self.style=pyemf.PS_NULL else: styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH, 'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT} #style=styles.get(self.gc.get_linestyle('solid')) style=self.gc.get_linestyle('solid') if debugHandle: print "EMFPen: style=%d" % style if style in styles: self.style=styles[style] else: self.style=pyemf.PS_SOLID def get_handle(self): handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b)) return handle # Hashable brush (fill style) properties. class EMFBrush: def __init__(self,emf,rgb): self.emf=emf r,g,b=rgb self.r=int(r*255) self.g=int(g*255) self.b=int(b*255) if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b) def __hash__(self): return hash((self.r,self.g,self.b)) def get_handle(self): handle=self.emf.CreateSolidBrush((self.r,self.g,self.b)) return handle class RendererEMF(RendererBase): """ The renderer handles drawing/rendering operations through a pyemf.EMF instance. 
""" def __init__(self, outfile, width, height, dpi): "Initialize the renderer with a gd image instance" self.outfile = outfile # a map from get_color args to colors self._cached = {} # dict of hashed properties to already created font handles self._fontHandle = {} self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1} self.emf=pyemf.EMF(width,height,dpi,'in') self.width=int(width*dpi) self.height=int(height*dpi) self.dpi = dpi self.pointstodpi = dpi/72.0 self.hackPointsForMathExponent = 2.0 # set background transparent for text self.emf.SetBkMode(pyemf.TRANSPARENT) # set baseline for text to be bottom left corner self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT) if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi) def save(self): self.emf.save(self.outfile) def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation): """ Draw an arc using GraphicsContext instance gcEdge, centered at x,y, with width and height and angles from 0.0 to 360.0 0 degrees is at 3-o'clock positive angles are anti-clockwise If the color rgbFace is not None, fill the arc with it. """ if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height) pen=self.select_pen(gcEdge) brush=self.select_brush(rgbFace) # This algorithm doesn't work very well on small circles # because of rounding error. This shows up most obviously on # legends where the circles are small anyway, and it is # compounded by the fact that it puts several circles right # next to each other so the differences are obvious. hw=width/2 hh=height/2 x1=int(x-width/2) y1=int(y-height/2) if brush: self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh))) else: self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh))) def draw_image(self, x, y, im, bbox): """ Draw the Image instance into the current axes; x is the distance in pixels from the left hand side of the canvas. y is the distance from the origin. That is, if origin is upper, y is the distance from top. If origin is lower, y is the distance from bottom bbox is a matplotlib.transforms.BBox instance for clipping, or None """ # pyemf2 currently doesn't support bitmaps. pass def draw_line(self, gc, x1, y1, x2, y2): """ Draw a single line from x1,y1 to x2,y2 """ if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2) if self.select_pen(gc): self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))]) else: if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2) def draw_lines(self, gc, x, y): """ x and y are equal length arrays, draw lines connecting each point in x, y """ if debugPrint: print "draw_lines: %d points" % len(str(x)) # optimize away anything that won't actually be drawn. Edge # style must not be PS_NULL for it to appear on screen. 
if self.select_pen(gc): points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))] self.emf.Polyline(points) def draw_point(self, gc, x, y): """ Draw a single point at x,y Where 'point' is a device-unit point (or pixel), not a matplotlib point """ if debugPrint: print "draw_point: (%f,%f)" % (x,y) # don't cache this pen pen=EMFPen(self.emf,gc) self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b)) def draw_polygon(self, gcEdge, rgbFace, points): """ Draw a polygon using the GraphicsContext instance gc. points is a len vertices tuple, each element giving the x,y coords a vertex If the color rgbFace is not None, fill the polygon with it """ if debugPrint: print "draw_polygon: %d points" % len(points) # optimize away anything that won't actually draw. Either a # face color or edge style must be defined pen=self.select_pen(gcEdge) brush=self.select_brush(rgbFace) if pen or brush: points = [(long(x), long(self.height-y)) for x,y in points] self.emf.Polygon(points) else: points = [(long(x), long(self.height-y)) for x,y in points] if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points)) def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height): """ Draw a non-filled rectangle using the GraphicsContext instance gcEdge, with lower left at x,y with width and height. If rgbFace is not None, fill the rectangle with it. """ if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height) # optimize away anything that won't actually draw. Either a # face color or edge style must be defined pen=self.select_pen(gcEdge) brush=self.select_brush(rgbFace) if pen or brush: self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height)) else: if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height) def draw_text(self, gc, x, y, s, prop, angle, ismath=False): """ Draw the text.Text instance s at x,y (display coords) with font properties instance prop at angle in degrees, using GraphicsContext gc **backend implementers note** When you are trying to determine if you have gotten your bounding box right (which is what enables the text layout/alignment to work properly), it helps to change the line in text.py if 0: bbox_artist(self, renderer) to if 1, and then the actual bounding box will be blotted along with your text. """ if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s) if ismath: self.draw_math_text(gc,x,y,s,prop,angle) else: self.draw_plain_text(gc,x,y,s,prop,angle) def draw_plain_text(self, gc, x, y, s, prop, angle): """ Draw a text string verbatim; no conversion is done. """ if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s) if debugText: print " properties:\n"+str(prop) self.select_font(prop,angle) # haxor follows! The subtleties of text placement in EMF # still elude me a bit. It always seems to be too high on the # page, about 10 pixels too high on a 300dpi resolution image. # So, I'm adding this hack for the moment: hackoffsetper300dpi=10 xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0 yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0 self.emf.TextOut(long(x+xhack),long(y+yhack),s) def draw_math_text(self, gc, x, y, s, prop, angle): """ Draw a subset of TeX, currently handles exponents only. Since pyemf doesn't have any raster functionality yet, the texmanager.get_rgba won't help. 
""" if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s) s = s[1:-1] # strip the $ from front and back match=re.match("10\^\{(.+)\}",s) if match: exp=match.group(1) if debugText: print " exponent=%s" % exp font = self._get_font_ttf(prop) font.set_text("10", 0.0) w, h = font.get_width_height() w /= 64.0 # convert from subpixels h /= 64.0 self.draw_plain_text(gc,x,y,"10",prop,angle) propexp=prop.copy() propexp.set_size(prop.get_size_in_points()*.8) self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle) else: # if it isn't an exponent, then render the raw TeX string. self.draw_plain_text(gc,x,y,s,prop,angle) def get_math_text_width_height(self, s, prop): """ get the width and height in display coords of the string s with FontPropertry prop, ripped right out of backend_ps. This method must be kept in sync with draw_math_text. """ if debugText: print "get_math_text_width_height:" s = s[1:-1] # strip the $ from front and back match=re.match("10\^\{(.+)\}",s) if match: exp=match.group(1) if debugText: print " exponent=%s" % exp font = self._get_font_ttf(prop) font.set_text("10", 0.0) w1, h1 = font.get_width_height() propexp=prop.copy() propexp.set_size(prop.get_size_in_points()*.8) fontexp=self._get_font_ttf(propexp) fontexp.set_text(exp, 0.0) w2, h2 = fontexp.get_width_height() w=w1+w2 h=h1+(h2/2) w /= 64.0 # convert from subpixels h /= 64.0 w+=self.points_to_pixels(self.hackPointsForMathExponent) if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h) else: w,h=self.get_text_width_height(s,prop,False) return w, h def flipy(self): """return true if y small numbers are top for renderer Is used for drawing text (text.py) and images (image.py) only """ return True def get_canvas_width_height(self): """ return the canvas width and height in display coords """ return self.width,self.height def set_handle(self,type,handle): """ Update the EMF file with the current handle, but only if it isn't the same as the last one. Don't want to flood the file with duplicate info. """ if self.lastHandle[type] != handle: self.emf.SelectObject(handle) self.lastHandle[type]=handle def get_font_handle(self, prop, angle): """ Look up the handle for the font based on the dict of properties *and* the rotation angle, since in EMF the font rotation is a part of the font definition. """ prop=EMFFontProperties(prop,angle) size=int(prop.get_size_in_points()*self.pointstodpi) face=prop.get_name() key = hash(prop) handle = self._fontHandle.get(key) if handle is None: handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10, pyemf.FW_NORMAL, 0, 0, 0, pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS, pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY, pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face); if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size) self._fontHandle[key]=handle if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size) self.set_handle("font",handle) return handle def select_font(self,prop,angle): handle=self.get_font_handle(prop,angle) self.set_handle("font",handle) def select_pen(self, gc): """ Select a pen that includes the color, line width and line style. Return the pen if it will draw a line, or None if the pen won't produce any output (i.e. 
the style is PS_NULL) """ pen=EMFPen(self.emf,gc) key=hash(pen) handle=self._fontHandle.get(key) if handle is None: handle=pen.get_handle() self._fontHandle[key]=handle if debugHandle: print " found pen handle %d" % handle self.set_handle("pen",handle) if pen.style != pyemf.PS_NULL: return pen else: return None def select_brush(self, rgb): """ Select a fill color, and return the brush if the color is valid or None if this won't produce a fill operation. """ if rgb is not None: brush=EMFBrush(self.emf,rgb) key=hash(brush) handle=self._fontHandle.get(key) if handle is None: handle=brush.get_handle() self._fontHandle[key]=handle if debugHandle: print " found brush handle %d" % handle self.set_handle("brush",handle) return brush else: return None def _get_font_ttf(self, prop): """ get the true type font properties, used because EMFs on windows will use true type fonts. """ key = hash(prop) font = _fontd.get(key) if font is None: fname = findfont(prop) if debugText: print "_get_font_ttf: name=%s" % fname font = FT2Font(str(fname)) _fontd[key] = font font.clear() size = prop.get_size_in_points() font.set_size(size, self.dpi) return font def get_text_width_height(self, s, prop, ismath): """ get the width and height in display coords of the string s with FontPropertry prop, ripped right out of backend_ps """ if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop)) if ismath: if debugText: print " MATH TEXT! = %s" % str(ismath) w,h = self.get_math_text_width_height(s, prop) return w,h font = self._get_font_ttf(prop) font.set_text(s, 0.0) w, h = font.get_width_height() w /= 64.0 # convert from subpixels h /= 64.0 if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h) return w, h def new_gc(self): return GraphicsContextEMF() def points_to_pixels(self, points): # if backend doesn't have dpi, eg, postscript or svg #return points # elif backend assumes a value for pixels_per_inch #return points/72.0 * self.dpi.get() * pixels_per_inch/72.0 # else return points/72.0 * self.dpi class GraphicsContextEMF(GraphicsContextBase): """ The graphics context provides the color, line styles, etc... See the gtk and postscript backends for examples of mapping the graphics context attributes (cap styles, join styles, line widths, colors) to a particular backend. In GTK this is done by wrapping a gtk.gdk.GC object and forwarding the appropriate calls to it using a dictionary mapping styles to gdk constants. In Postscript, all the work is done by the renderer, mapping line styles to postscript calls. If it's more appropriate to do the mapping at the renderer level (as in the postscript backend), you don't need to override any of the GC methods. If it's more appropriate to wrap an instance (as in the GTK backend) and do the mapping here, you'll need to override several of the setter methods. The base GraphicsContext stores colors as a RGB tuple on the unit interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors appropriate for your backend. """ pass ######################################################################## # # The following functions and classes are for pylab and implement # window/figure managers, etc... 
# ######################################################################## def draw_if_interactive(): """ For image backends - is not required For GUI backends - this should be overriden if drawing should be done in interactive python mode """ pass def show(): """ For image backends - is not required For GUI backends - show() is usually the last line of a pylab script and tells the backend that it is time to draw. In interactive mode, this may be a do nothing func. See the GTK backend for an example of how to handle interactive versus batch mode """ for manager in Gcf.get_all_fig_managers(): # do something to display the GUI pass def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ # if a main-level app must be created, this is the usual place to # do it -- see backend_wx, backend_wxagg and backend_tkagg for # examples. Not all GUIs require explicit instantiation of a # main-level app (egg backend_gtk, backend_gtkagg) for pylab FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) canvas = FigureCanvasEMF(thisFig) manager = FigureManagerEMF(canvas, num) return manager class FigureCanvasEMF(FigureCanvasBase): """ The canvas the figure renders into. Calls the draw and print fig methods, creates the renderers, etc... Public attribute figure - A Figure instance """ def draw(self): """ Draw the figure using the renderer """ pass filetypes = {'emf': 'Enhanced Metafile'} def print_emf(self, filename, dpi=300, **kwargs): width, height = self.figure.get_size_inches() renderer = RendererEMF(filename,width,height,dpi) self.figure.draw(renderer) renderer.save() def get_default_filetype(self): return 'emf' class FigureManagerEMF(FigureManagerBase): """ Wrap everything up into a window for the pylab interface For non interactive backends, the base class does all the work """ pass ######################################################################## # # Now just provide the standard names that backend.__init__ is expecting # ######################################################################## FigureManager = FigureManagerEMF
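Hypothetical end-to-end use of this backend, assuming pyemf is installed and a matplotlib version old enough to still ship backend_emf:

import matplotlib
matplotlib.use('EMF')            # must run before pyplot is imported
import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4])
# savefig dispatches on the .emf extension to FigureCanvasEMF.print_emf,
# which builds a RendererEMF and calls renderer.save().
plt.savefig('figure.emf', dpi=300)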
gpl-3.0
CTSRD-SOAAP/chromium-42.0.2311.135
third_party/yasm/source/patched-yasm/modules/arch/x86/gen_x86_insn.py
18
292359
#! /usr/bin/env python # x86 instructions and prefixes data and code generation # # Copyright (C) 2002-2007 Peter Johnson # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND OTHER CONTRIBUTORS ``AS IS'' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR OTHER CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # NOTE: operands are arranged in NASM / Intel order (e.g. dest, src) import os import sys from sys import stdout, version_info scriptname = "gen_x86_insn.py" scriptrev = "HEAD" ordered_cpus = [ "086", "186", "286", "386", "486", "586", "686", "K6", "Athlon", "P3", "P4", "IA64", "Hammer"] ordered_cpu_features = [ "FPU", "Cyrix", "AMD", "MMX", "3DNow", "SMM", "SSE", "SSE2", "SSE3", "SVM", "PadLock", "SSSE3", "SSE41", "SSE42", "SSE4a", "SSE5", "AVX", "FMA", "AES", "CLMUL", "MOVBE", "XOP", "FMA4", "F16C", "FSGSBASE", "RDRAND", "XSAVEOPT", "EPTVPID", "SMX", "AVX2", "BMI1", "BMI2", "INVPCID", "LZCNT"] unordered_cpu_features = ["Priv", "Prot", "Undoc", "Obs"] # Predefined VEX prefix field values VEXW0 = 0xC0 VEXW1 = 0xC8 VEXL0 = 0xC0 VEXL1 = 0xC4 VEXpp = 0xC0 # OR with value # Predefined XOP prefix field values XOPW0 = 0x80 XOPW1 = 0x88 XOPL0 = 0x80 XOPL1 = 0x84 XOPpp = 0x80 # OR with value def lprint(s, f = stdout, e = '\n') : f.write(s + e) def cpu_lcd(cpu1, cpu2): """Find the lowest common denominator of two CPU sets.""" retval = set() # CPU cpu1cpus = set(ordered_cpus) & set(cpu1) if not cpu1cpus: cpu1cpus.add("086") cpu1mincpu = min(ordered_cpus.index(x) for x in cpu1cpus) cpu2cpus = set(ordered_cpus) & set(cpu2) if not cpu2cpus: cpu2cpus.add("086") cpu2mincpu = min(ordered_cpus.index(x) for x in cpu2cpus) cpumin = ordered_cpus[min(cpu1mincpu, cpu2mincpu)] if cpumin == "086": cpumin = "Any" if cpumin != "Any": retval.add(cpumin) # Feature cpu1features = set(ordered_cpu_features) & set(cpu1) if not cpu1features: cpu1minfeature = -1 else: cpu1minfeature = min(ordered_cpu_features.index(x) for x in cpu1features) cpu2features = set(ordered_cpu_features) & set(cpu2) if not cpu2features: cpu2minfeature = -1 else: cpu2minfeature = min(ordered_cpu_features.index(x) for x in cpu2features) if cpu1minfeature != -1 and cpu2minfeature != -1: featuremin = ordered_cpu_features[min(cpu1minfeature, cpu2minfeature)] retval.add(featuremin) # Unordered features for feature in set(unordered_cpu_features) & set(cpu1) & set(cpu2): retval.add(feature) return retval class Operand(object): def __init__(self, **kwargs): self.type = kwargs.pop("type")
self.size = kwargs.pop("size", "Any") self.relaxed = kwargs.pop("relaxed", False) self.dest = kwargs.pop("dest", None) self.tmod = kwargs.pop("tmod", None) self.opt = kwargs.pop("opt", None) if kwargs: for arg in kwargs: lprint("Warning: unrecognized arg %s" % arg) def __str__(self): return "{"+ ", ".join(["OPT_%s" % self.type, "OPS_%s" % self.size, "%d" % self.relaxed, self.dest == "EA64" and "1" or "0", "OPTM_%s" % self.tmod, "OPA_%s" % (self.dest == "EA64" and "EA" or self.dest), "OPAP_%s" % self.opt]) + "}" def __eq__(self, other): return (self.type == other.type and self.size == other.size and self.relaxed == other.relaxed and self.dest == other.dest and self.tmod == other.tmod and self.opt == other.opt) def __ne__(self, other): return (self.type != other.type or self.size != other.size or self.relaxed != other.relaxed or self.dest != other.dest or self.tmod != other.tmod or self.opt != other.opt) class GroupForm(object): def __init__(self, **kwargs): # Parsers self.parsers = set(kwargs.pop("parsers", ["gas", "nasm"])) # CPU feature flags initialization self.cpu = set(kwargs.pop("cpu", [])) # Misc flags self.misc_flags = set(kwargs.pop("misc_flags", [])) if kwargs.pop("only64", False): self.misc_flags.add("ONLY_64") if kwargs.pop("not64", False): self.misc_flags.add("NOT_64") if kwargs.pop("onlyavx", False): self.misc_flags.add("ONLY_AVX") if kwargs.pop("notavx", False): self.misc_flags.add("NOT_AVX") # Operation size self.opersize = kwargs.pop("opersize", 0) if self.opersize == 8: self.opersize = 0 if self.opersize == 64: self.misc_flags.add("ONLY_64") elif self.opersize == 32 and "ONLY_64" not in self.misc_flags: self.cpu.add("386") # Default operation size in 64-bit mode self.def_opersize_64 = kwargs.pop("def_opersize_64", 0) # GAS suffix self.gen_suffix = kwargs.pop("gen_suffix", True) self.suffixes = kwargs.pop("suffixes", None) suffix = kwargs.pop("suffix", None) if suffix is not None: self.suffixes = [suffix] req_suffix = kwargs.pop("req_suffix", False) if not req_suffix: if self.suffixes is None: self.suffixes = ["Z"] else: self.suffixes.append("Z") if self.suffixes is not None: self.suffixes = set(x.upper() for x in self.suffixes) # Special instruction prefix self.special_prefix = "0" if "prefix" in kwargs: self.special_prefix = "0x%02X" % kwargs.pop("prefix") # VEX prefix if "vex" in kwargs: self.misc_flags.add("ONLY_AVX") vexW = kwargs.pop("vexw", 0) if vexW not in [0, 1]: raise ValueError("VEX.W must be 0 or 1") vexL = kwargs.pop("vex") if vexL == 128 or vexL == 0: vexL = 0 elif vexL == 256: vexL = 1 else: raise ValueError("VEX.L must be 128 or 256") if self.special_prefix in ["0", "0x00"]: vexpp = 0 elif self.special_prefix == "0x66": vexpp = 1 elif self.special_prefix == "0xF3": vexpp = 2 elif self.special_prefix == "0xF2": vexpp = 3 else: raise ValueError("Cannot combine VEX and special prefix %s" % self.special_prefix) self.special_prefix = "0x%02X" % (0xC0 + vexW*8 + vexL*4 + vexpp) # XOP prefix if "xop" in kwargs: xopW = kwargs.pop("xopw", 0) if xopW not in [0, 1]: raise ValueError("XOP.W must be 0 or 1") xopL = kwargs.pop("xop") if xopL == 128 or xopL == 0: xopL = 0 elif xopL == 256: xopL = 1 else: raise ValueError("XOP.L must be 128 or 256") # XOPpp is currently reserved (0) xoppp = 0 if self.special_prefix not in ["0", "0x00"]: raise ValueError("Cannot combine XOP and special prefix %s" % self.special_prefix) self.special_prefix = "0x%02X" % (0x80 + xopW*8 + xopL*4 + xoppp) # Spare value self.spare = kwargs.pop("spare", 0) # Build opcodes string (C array 
initializer)
        if "opcode" in kwargs:
            # Usual case, just a single opcode
            self.opcode = kwargs.pop("opcode")
            self.opcode_len = len(self.opcode)
        elif "opcode1" in kwargs and "opcode2" in kwargs:
            # Two opcode case; the first opcode is the "optimized" opcode,
            # the second is the "relaxed" opcode.  For this to work, an
            # opt="foo" must be set for one of the operands.
            self.opcode1 = kwargs.pop("opcode1")
            self.opcode2 = kwargs.pop("opcode2")
            self.opcode_len = len(self.opcode1)
        else:
            raise KeyError("missing opcode")

        # Build operands string (C array initializer)
        self.operands = kwargs.pop("operands")
        for op in self.operands:
            if op.type in ["Reg", "RM", "Areg", "Creg", "Dreg"]:
                if op.size == 64:
                    self.misc_flags.add("ONLY_64")
                elif op.size == 32 and "ONLY_64" not in self.misc_flags:
                    self.cpu.add("386")
            if op.type in ["Imm", "ImmNotSegOff"]:
                if op.size == 64:
                    self.misc_flags.add("ONLY_64")
                elif op.size == 32 and "ONLY_64" not in self.misc_flags:
                    self.cpu.add("386")
            if op.type in ["FS", "GS"] and "ONLY_64" not in self.misc_flags:
                self.cpu.add("386")
            if op.type in ["CR4"] and "ONLY_64" not in self.misc_flags:
                self.cpu.add("586")
            if op.dest == "EA64":
                self.misc_flags.add("ONLY_64")

        # Modifiers
        self.modifiers = kwargs.pop("modifiers", [])

        # GAS flags
        self.gas_only = ("nasm" not in self.parsers)
        self.gas_illegal = ("gas" not in self.parsers)
        self.gas_no_rev = (kwargs.pop("gas_no_reverse", False) or
                           kwargs.pop("gas_no_rev", False))

        # CPU feature flags finalization
        # Remove redundancies
        maxcpu = -1
        maxcpu_set = self.cpu & set(ordered_cpus)
        if maxcpu_set:
            maxcpu = max(ordered_cpus.index(x) for x in maxcpu_set)
        if maxcpu != -1:
            for cpu in ordered_cpus[0:maxcpu]:
                self.cpu.discard(cpu)

        if kwargs:
            for arg in kwargs:
                lprint("Warning: unrecognized arg %s" % arg)

    def __str__(self):
        if hasattr(self, "opcode"):
            opcodes_str = ["0x%02X" % x for x in self.opcode]
        elif hasattr(self, "opcode1") and hasattr(self, "opcode2"):
            opcodes_str = ["0x%02X" % x for x in self.opcode1]
            opcodes_str.extend("0x%02X" % x for x in self.opcode2)
        # Ensure opcodes initializer string is 3 long
        opcodes_str.extend(["0", "0", "0"])
        opcodes_str = "{" + ', '.join(opcodes_str[0:3]) + "}"

        if len(self.modifiers) > 3:
            raise ValueError("too many modifiers: %s" % (self.modifiers,))

        cpus_str = []
        if self.cpu is not None:
            if len(self.cpu) > 3:
                raise ValueError("too many CPUs: %s" % (self.cpu,))
            cpus_str.extend("CPU_%s" % x for x in sorted(self.cpu))

        # Ensure cpus initializer string is at least 3 long; 0=CPU_Any
        cpus_str.extend(["0", "0", "0"])

        mods = ["MOD_%s" % x for x in self.modifiers]
        # Ensure mods initializer string is 3 long
        mods.extend(["0", "0", "0"])
        mod_str = "{" + ', '.join(mods[0:3]) + "}"

        gas_flags = []
        if self.gas_only:
            gas_flags.append("GAS_ONLY")
        if self.gas_illegal:
            gas_flags.append("GAS_ILLEGAL")
        if self.gas_no_rev:
            gas_flags.append("GAS_NO_REV")
        if self.suffixes:
            gas_flags.extend("SUF_%s" % x for x in sorted(self.suffixes))
        gas_flags = "|".join(gas_flags)

        # Build instruction info structure initializer
        return "{ " + ", ".join([gas_flags or "0",
                                 "|".join(self.misc_flags) or "0",
                                 cpus_str[0],
                                 cpus_str[1],
                                 cpus_str[2],
                                 mod_str,
                                 "%d" % (self.opersize or 0),
                                 "%d" % (self.def_opersize_64 or 0),
                                 self.special_prefix or "0",
                                 "%d" % self.opcode_len,
                                 opcodes_str,
                                 "%d" % (self.spare or 0),
                                 "%d" % len(self.operands),
                                 "%d" % self.all_operands_index]) + " }"

groups = {}
groupnames_ordered = []
def add_group(name, **kwargs):
    forms = groups.setdefault(name,
[]) forms.append(GroupForm(**kwargs)) groupnames_ordered.append(name) class Insn(object): def __init__(self, groupname, suffix=None, parser=None, modifiers=None, cpu=None, misc_flags=None, only64=False, not64=False, avx=False): self.groupname = groupname if suffix is None: self.suffix = None else: self.suffix = suffix.upper() self.parsers = None if suffix is not None: self.parsers = set(["gas"]) if parser is not None: self.parsers = set([parser]) if modifiers is None: self.modifiers = [] else: self.modifiers = modifiers if cpu is None: self.cpu = None else: self.cpu = set(cpu) if misc_flags is None: self.misc_flags = None else: self.misc_flags = set([x for x in misc_flags]) if only64: if self.misc_flags is None: self.misc_flags = set() self.misc_flags.add("ONLY_64") if not64: if self.misc_flags is None: self.misc_flags = set() self.misc_flags.add("NOT_64") if avx: if self.misc_flags is None: self.misc_flags = set() self.misc_flags.add("ONLY_AVX") if self.cpu is None: self.cpu = set(["AVX"]) def auto_cpu(self, parser): """Determine lowest common denominator CPU from group and suffix. Does nothing if CPU is already set.""" if self.cpu is not None: return # Scan through group, matching parser and suffix for form in groups[self.groupname]: if parser not in form.parsers: continue if (self.suffix is not None and len(self.suffix) == 1 and (form.suffixes is None or self.suffix not in form.suffixes)): continue if self.cpu is None: self.cpu = set(form.cpu) else: self.cpu = cpu_lcd(self.cpu, form.cpu) def auto_misc_flags(self, parser): """Determine lowest common denominator flags from group and suffix. Does nothing if flags is already set.""" if self.misc_flags is not None: return # Scan through group, matching parser and suffix for form in groups[self.groupname]: if parser not in form.parsers: continue if (self.suffix is not None and len(self.suffix) == 1 and (form.suffixes is None or self.suffix not in form.suffixes)): continue if self.misc_flags is None: self.misc_flags = set(form.misc_flags) else: self.misc_flags &= form.misc_flags def copy(self): """Return a shallow copy.""" return Insn(self.groupname, suffix=self.suffix, modifiers=self.modifiers, cpu=self.cpu, misc_flags=self.misc_flags) def __str__(self): if self.suffix is None: suffix_str = "SUF_Z" elif len(self.suffix) == 1: suffix_str = "SUF_" + self.suffix else: suffix_str = self.suffix cpus_str = [] if self.cpu is not None: if len(self.cpu) > 3: raise ValueError("too many CPUs: %s" % (self.cpu,)) cpus_str.extend("CPU_%s" % x for x in sorted(self.cpu)) # Ensure cpus initializer string is 3 long cpus_str.extend(["0", "0", "0"]) if len(self.modifiers) > 3: raise ValueError("too many modifiers") mods_str = ["0x%02X" % x for x in self.modifiers] # Ensure modifiers is at least 3 long mods_str.extend(["0", "0", "0"]) return ",\t".join(["%s_insn" % self.groupname, "%d" % len(groups[self.groupname]), suffix_str, mods_str[0], mods_str[1], mods_str[2], "|".join(self.misc_flags or []) or "0", cpus_str[0], cpus_str[1], cpus_str[2]]) insns = {} def add_insn(name, groupname, **kwargs): opts = insns.setdefault(name, []) opts.append(Insn(groupname, **kwargs)) class Prefix(object): def __init__(self, groupname, value, only64=False): self.groupname = groupname self.value = value self.only64 = only64 def __str__(self): return ",\t".join(["NULL", "X86_%s>>8" % self.groupname, "0x%02X" % self.value, "0", "0", "0", self.only64 and "ONLY_64" or "0", "0", "0", "0"]) gas_insns = {} nasm_insns = {} def add_prefix(name, groupname, value, parser=None, **kwargs): 
prefix = Prefix(groupname, value, **kwargs) if parser is None or parser == "gas": gas_insns[name] = prefix if parser is None or parser == "nasm": nasm_insns[name] = prefix def finalize_insns(): unused_groups = set(groups.keys()) for name in insns: for insn in insns[name]: group = groups[insn.groupname] unused_groups.discard(insn.groupname) parsers = set() for form in group: parsers |= form.parsers if insn.parsers is not None: parsers &= insn.parsers if "gas" in parsers: suffixes = set() if insn.suffix is None: for form in group: if form.gen_suffix and form.suffixes is not None: suffixes |= form.suffixes if not suffixes: suffixes.add("Z") for suffix in suffixes: if suffix == "Z": keyword = name else: keyword = name+suffix keyword = keyword.lower() if keyword in gas_insns: raise ValueError("duplicate gas instruction %s" % keyword) newinsn = insn.copy() if insn.suffix is None: newinsn.suffix = suffix newinsn.auto_cpu("gas") newinsn.auto_misc_flags("gas") gas_insns[keyword] = newinsn if "nasm" in parsers: keyword = name if keyword in nasm_insns: raise ValueError("duplicate nasm instruction %s" % keyword) newinsn = insn.copy() newinsn.auto_cpu("nasm") newinsn.auto_misc_flags("nasm") nasm_insns[keyword] = newinsn unused_groups.discard("empty") unused_groups.discard("not64") if unused_groups: lprint("warning: unused groups: %s" % ", ".join(unused_groups)) def output_insns(f, parser, insns): lprint("/* Generated by %s r%s, do not edit */" % \ (scriptname, scriptrev), f) lprint("""%%ignore-case %%language=ANSI-C %%compare-strncmp %%readonly-tables %%enum %%struct-type %%define hash-function-name insnprefix_%s_hash %%define lookup-function-name insnprefix_%s_find struct insnprefix_parse_data; %%%%""" % (parser, parser), f) for keyword in sorted(insns): lprint("%s,\t%s" % (keyword.lower(), insns[keyword]), f) def output_gas_insns(f): output_insns(f, "gas", gas_insns) def output_nasm_insns(f): output_insns(f, "nasm", nasm_insns) def output_groups(f): # Merge all operand lists into single list # Sort by number of operands to shorten output all_operands = [] if version_info[0] == 2: gi = groups.itervalues() else: gi = groups.values() for form in sorted((form for g in gi for form in g), key=lambda x:len(x.operands), reverse=True): num_operands = len(form.operands) for i in range(len(all_operands)): if all_operands[i:i+num_operands] == form.operands: form.all_operands_index = i break else: form.all_operands_index = len(all_operands) all_operands.extend(form.operands) # Output operands list lprint("/* Generated by %s r%s, do not edit */" % \ (scriptname, scriptrev), f) lprint("static const x86_info_operand insn_operands[] = {", f) lprint(" ", f, '') lprint(",\n ".join(str(x) for x in all_operands), f) lprint("};\n", f) # Output groups seen = set() for name in groupnames_ordered: if name in seen: continue seen.add(name) lprint("static const x86_insn_info %s_insn[] = {" % name, f) lprint(" ", f, '') lprint(",\n ".join(str(x) for x in groups[name]), f) lprint("};\n", f) ##################################################################### # General instruction groupings ##################################################################### # # Empty instruction # add_group("empty", opcode=[], operands=[]) # # Placeholder for instructions invalid in 64-bit mode # add_group("not64", opcode=[], operands=[], not64=True) # # One byte opcode instructions with no operands # add_group("onebyte", modifiers=["Op0Add", "OpSizeR", "DOpS64R"], opcode=[0x00], operands=[]) # # One byte opcode instructions with "special" 
prefix with no operands # add_group("onebyte_prefix", modifiers=["PreAdd", "Op0Add"], prefix=0x00, opcode=[0x00], operands=[]) # # Two byte opcode instructions with no operands # add_group("twobyte", gen_suffix=False, suffixes=["l", "q"], modifiers=["Op0Add", "Op1Add"], opcode=[0x00, 0x00], operands=[]) # # Three byte opcode instructions with no operands # add_group("threebyte", modifiers=["Op0Add", "Op1Add", "Op2Add"], opcode=[0x00, 0x00, 0x00], operands=[]) # # One byte opcode instructions with general memory operand # add_group("onebytemem", gen_suffix=False, suffixes=["l", "q", "s"], modifiers=["SpAdd", "Op0Add"], opcode=[0x00], spare=0, operands=[Operand(type="Mem", dest="EA")]) # # Two byte opcode instructions with general memory operand # add_group("twobytemem", gen_suffix=False, suffixes=["w", "l", "q", "s"], modifiers=["SpAdd", "Op0Add", "Op1Add"], opcode=[0x00, 0x00], spare=0, operands=[Operand(type="Mem", relaxed=True, dest="EA")]) # # mov # # Absolute forms for non-64-bit mode for sfx, sz in zip("bwl", [8, 16, 32]): add_group("mov", suffix=sfx, not64=True, opersize=sz, opcode=[0xA0+(sz!=8)], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="MemOffs", size=sz, relaxed=True, dest="EA")]) for sfx, sz in zip("bwl", [8, 16, 32]): add_group("mov", suffix=sfx, not64=True, opersize=sz, opcode=[0xA2+(sz!=8)], operands=[Operand(type="MemOffs", size=sz, relaxed=True, dest="EA"), Operand(type="Areg", size=sz, dest=None)]) # 64-bit absolute forms for 64-bit mode. Disabled for GAS, see movabs for sz in (8, 16, 32, 64): add_group("mov", opersize=sz, opcode=[0xA0+(sz!=8)], only64=True, operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="MemOffs", size=sz, relaxed=True, dest="EA64")]) for sz in (8, 16, 32, 64): add_group("mov", only64=True, opersize=sz, opcode=[0xA2+(sz!=8)], operands=[Operand(type="MemOffs", size=sz, relaxed=True, dest="EA64"), Operand(type="Areg", size=sz, dest=None)]) # General 32-bit forms using Areg / short absolute option for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("mov", suffix=sfx, opersize=sz, opcode1=[0x88+(sz!=8)], opcode2=[0xA2+(sz!=8)], operands=[ Operand(type="RM", size=sz, relaxed=True, dest="EA", opt="ShortMov"), Operand(type="Areg", size=sz, dest="Spare")]) # General 32-bit forms for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0x88+(sz!=8)], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) # General 32-bit forms using Areg / short absolute option for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("mov", suffix=sfx, opersize=sz, opcode1=[0x8A+(sz!=8)], opcode2=[0xA0+(sz!=8)], operands=[Operand(type="Areg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA", opt="ShortMov")]) # General 32-bit forms for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0x8A+(sz!=8)], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) # Segment register forms add_group("mov", suffix="w", opcode=[0x8C], operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA"), Operand(type="SegReg", size=16, relaxed=True, dest="Spare")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0x8C], operands=[ Operand(type="Reg", size=sz, dest="EA"), Operand(type="SegReg", size=16, relaxed=True, dest="Spare")]) add_group("mov", suffix="w", opcode=[0x8E], operands=[Operand(type="SegReg", 
size=16, relaxed=True, dest="Spare"), Operand(type="RM", size=16, relaxed=True, dest="EA")]) for sfx, sz in zip("lq", [32, 64]): add_group("mov", suffix=sfx, opcode=[0x8E], operands=[ Operand(type="SegReg", size=16, relaxed=True, dest="Spare"), Operand(type="Reg", size=sz, dest="EA")]) # Immediate forms add_group("mov", suffix="b", opcode=[0xB0], operands=[Operand(type="Reg", size=8, dest="Op0Add"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for sfx, sz in zip("wl", [16, 32]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0xB8], operands=[Operand(type="Reg", size=sz, dest="Op0Add"), Operand(type="Imm", size=sz, relaxed=True, dest="Imm")]) # 64-bit forced size form add_group("mov", parsers=["nasm"], opersize=64, opcode=[0xB8], operands=[Operand(type="Reg", size=64, dest="Op0Add"), Operand(type="Imm", size=64, dest="Imm")]) add_group("mov", suffix="q", opersize=64, opcode1=[0xB8], opcode2=[0xC7], operands=[Operand(type="Reg", size=64, dest="Op0Add"), Operand(type="Imm", size=64, relaxed=True, dest="Imm", opt="SImm32Avail")]) # Need two sets here, one for strictness on left side, one for right. for sfx, sz, immsz in zip("bwlq", [8, 16, 32, 64], [8, 16, 32, 32]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0xC6+(sz!=8)], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=immsz, dest="Imm")]) for sfx, sz, immsz in zip("bwlq", [8, 16, 32, 64], [8, 16, 32, 32]): add_group("mov", suffix=sfx, opersize=sz, opcode=[0xC6+(sz!=8)], operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Imm", size=immsz, relaxed=True, dest="Imm")]) # CR forms add_group("mov", suffix="l", not64=True, cpu=["Priv"], opcode=[0x0F, 0x22], operands=[Operand(type="CR4", size=32, dest="Spare"), Operand(type="Reg", size=32, dest="EA")]) add_group("mov", suffix="l", not64=True, cpu=["Priv"], opcode=[0x0F, 0x22], operands=[Operand(type="CRReg", size=32, dest="Spare"), Operand(type="Reg", size=32, dest="EA")]) add_group("mov", suffix="q", cpu=["Priv"], opcode=[0x0F, 0x22], operands=[Operand(type="CRReg", size=32, dest="Spare"), Operand(type="Reg", size=64, dest="EA")]) add_group("mov", suffix="l", not64=True, cpu=["Priv"], opcode=[0x0F, 0x20], operands=[Operand(type="Reg", size=32, dest="EA"), Operand(type="CR4", size=32, dest="Spare")]) add_group("mov", suffix="l", cpu=["Priv"], not64=True, opcode=[0x0F, 0x20], operands=[Operand(type="Reg", size=32, dest="EA"), Operand(type="CRReg", size=32, dest="Spare")]) add_group("mov", suffix="q", cpu=["Priv"], opcode=[0x0F, 0x20], operands=[Operand(type="Reg", size=64, dest="EA"), Operand(type="CRReg", size=32, dest="Spare")]) # DR forms add_group("mov", suffix="l", not64=True, cpu=["Priv"], opcode=[0x0F, 0x23], operands=[Operand(type="DRReg", size=32, dest="Spare"), Operand(type="Reg", size=32, dest="EA")]) add_group("mov", suffix="q", cpu=["Priv"], opcode=[0x0F, 0x23], operands=[Operand(type="DRReg", size=32, dest="Spare"), Operand(type="Reg", size=64, dest="EA")]) add_group("mov", suffix="l", not64=True, cpu=["Priv"], opcode=[0x0F, 0x21], operands=[Operand(type="Reg", size=32, dest="EA"), Operand(type="DRReg", size=32, dest="Spare")]) add_group("mov", suffix="q", cpu=["Priv"], opcode=[0x0F, 0x21], operands=[Operand(type="Reg", size=64, dest="EA"), Operand(type="DRReg", size=32, dest="Spare")]) # MMX forms for GAS parser (copied from movq) add_group("mov", suffix="q", cpu=["MMX"], parsers=["gas"], opcode=[0x0F, 0x6F], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, 
dest="EA")]) add_group("mov", suffix="q", cpu=["MMX"], parsers=["gas"], opersize=64, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("mov", suffix="q", cpu=["MMX"], parsers=["gas"], opcode=[0x0F, 0x7F], operands=[Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) add_group("mov", suffix="q", cpu=["MMX"], parsers=["gas"], opersize=64, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) # SSE2 forms for GAS parser (copied from movq) add_group("mov", suffix="q", cpu=["SSE2"], parsers=["gas"], prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("mov", suffix="q", cpu=["SSE2"], parsers=["gas"], prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("mov", suffix="q", cpu=["SSE2"], parsers=["gas"], opersize=64, prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("mov", suffix="q", cpu=["SSE2"], parsers=["gas"], prefix=0x66, opcode=[0x0F, 0xD6], operands=[Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("mov", suffix="q", cpu=["SSE2"], parsers=["gas"], opersize=64, prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("mov", "mov") # # 64-bit absolute move (for GAS). # These are disabled for GAS for normal mov above. 
# add_group("movabs", suffix="b", only64=True, opcode=[0xA0], operands=[Operand(type="Areg", size=8, dest=None), Operand(type="MemOffs", size=8, relaxed=True, dest="EA64")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("movabs", only64=True, suffix=sfx, opersize=sz, opcode=[0xA1], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="MemOffs", size=sz, relaxed=True, dest="EA64")]) add_group("movabs", suffix="b", only64=True, opcode=[0xA2], operands=[Operand(type="MemOffs", size=8, relaxed=True, dest="EA64"), Operand(type="Areg", size=8, dest=None)]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("movabs", suffix=sfx, only64=True, opersize=sz, opcode=[0xA3], operands=[Operand(type="MemOffs", size=sz, relaxed=True, dest="EA64"), Operand(type="Areg", size=sz, dest=None)]) # 64-bit immediate form add_group("movabs", suffix="q", opersize=64, opcode=[0xB8], operands=[Operand(type="Reg", size=64, dest="Op0Add"), Operand(type="Imm", size=64, relaxed=True, dest="Imm")]) add_insn("movabs", "movabs", parser="gas") # # Move with sign/zero extend # add_group("movszx", suffix="b", cpu=["386"], modifiers=["Op1Add"], opersize=16, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=16, dest="Spare"), Operand(type="RM", size=8, relaxed=True, dest="EA")]) add_group("movszx", suffix="b", cpu=["386"], modifiers=["Op1Add"], opersize=32, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=8, dest="EA")]) add_group("movszx", suffix="b", modifiers=["Op1Add"], opersize=64, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=8, dest="EA")]) add_group("movszx", suffix="w", cpu=["386"], modifiers=["Op1Add"], opersize=32, opcode=[0x0F, 0x01], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=16, dest="EA")]) add_group("movszx", suffix="w", modifiers=["Op1Add"], opersize=64, opcode=[0x0F, 0x01], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=16, dest="EA")]) add_insn("movsbw", "movszx", suffix="b", modifiers=[0xBE]) add_insn("movsbl", "movszx", suffix="b", modifiers=[0xBE]) add_insn("movswl", "movszx", suffix="w", modifiers=[0xBE]) add_insn("movsbq", "movszx", suffix="b", modifiers=[0xBE], only64=True) add_insn("movswq", "movszx", suffix="w", modifiers=[0xBE], only64=True) add_insn("movsx", "movszx", modifiers=[0xBE]) add_insn("movzbw", "movszx", suffix="b", modifiers=[0xB6]) add_insn("movzbl", "movszx", suffix="b", modifiers=[0xB6]) add_insn("movzwl", "movszx", suffix="w", modifiers=[0xB6]) add_insn("movzbq", "movszx", suffix="b", modifiers=[0xB6], only64=True) add_insn("movzwq", "movszx", suffix="w", modifiers=[0xB6], only64=True) add_insn("movzx", "movszx", modifiers=[0xB6]) # # Move with sign-extend doubleword (64-bit mode only) # add_group("movsxd", suffix="l", opersize=64, opcode=[0x63], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=32, dest="EA")]) add_insn("movslq", "movsxd", suffix="l") add_insn("movsxd", "movsxd", parser="nasm") # # Push instructions # add_group("push", def_opersize_64=64, opcode=[0x50], operands=[Operand(type="Reg", size="BITS", dest="Op0Add")]) add_group("push", suffix="w", opersize=16, def_opersize_64=64, opcode=[0x50], operands=[Operand(type="Reg", size=16, dest="Op0Add")]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0x50], operands=[Operand(type="Reg", size=32, dest="Op0Add")]) add_group("push", suffix="q", only64=True, def_opersize_64=64, opcode=[0x50], 
operands=[Operand(type="Reg", size=64, dest="Op0Add")]) add_group("push", def_opersize_64=64, opcode=[0xFF], spare=6, operands=[Operand(type="RM", size="BITS", dest="EA")]) add_group("push", suffix="w", opersize=16, def_opersize_64=64, opcode=[0xFF], spare=6, operands=[Operand(type="RM", size=16, dest="EA")]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0xFF], spare=6, operands=[Operand(type="RM", size=32, dest="EA")]) add_group("push", suffix="q", only64=True, def_opersize_64=64, opcode=[0xFF], spare=6, operands=[Operand(type="RM", size=64, dest="EA")]) add_group("push", cpu=["186"], parsers=["nasm"], def_opersize_64=64, opcode=[0x6A], operands=[Operand(type="Imm", size=8, dest="SImm")]) add_group("push", cpu=["186"], parsers=["gas"], def_opersize_64=64, opcode=[0x6A], operands=[Operand(type="Imm", size=8, relaxed=True, dest="SImm")]) add_group("push", suffix="q", only64=True, opersize=64, def_opersize_64=64, opcode1=[0x6A], opcode2=[0x68], operands=[Operand(type="Imm", size=32, relaxed=True, dest="SImm", opt="SImm8")]) add_group("push", not64=True, cpu=["186"], parsers=["nasm"], opcode1=[0x6A], opcode2=[0x68], operands=[Operand(type="Imm", size="BITS", relaxed=True, dest="Imm", opt="SImm8")]) add_group("push", suffix="w", cpu=["186"], opersize=16, def_opersize_64=64, opcode1=[0x6A], opcode2=[0x68], operands=[Operand(type="Imm", size=16, relaxed=True, dest="Imm", opt="SImm8")]) add_group("push", suffix="l", not64=True, opersize=32, opcode1=[0x6A], opcode2=[0x68], operands=[Operand(type="Imm", size=32, relaxed=True, dest="Imm", opt="SImm8")]) # Need these when we don't match the BITS size, but they need to be # below the above line so the optimizer can kick in by default. add_group("push", cpu=["186"], parsers=["nasm"], opersize=16, def_opersize_64=64, opcode=[0x68], operands=[Operand(type="Imm", size=16, dest="Imm")]) add_group("push", not64=True, parsers=["nasm"], opersize=32, opcode=[0x68], operands=[Operand(type="Imm", size=32, dest="Imm")]) add_group("push", only64=True, parsers=["nasm"], opersize=64, def_opersize_64=64, opcode=[0x68], operands=[Operand(type="Imm", size=32, dest="SImm")]) add_group("push", not64=True, opcode=[0x0E], operands=[Operand(type="CS", dest=None)]) add_group("push", suffix="w", not64=True, opersize=16, opcode=[0x0E], operands=[Operand(type="CS", size=16, dest=None)]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0x0E], operands=[Operand(type="CS", size=32, dest=None)]) add_group("push", not64=True, opcode=[0x16], operands=[Operand(type="SS", dest=None)]) add_group("push", suffix="w", not64=True, opersize=16, opcode=[0x16], operands=[Operand(type="SS", size=16, dest=None)]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0x16], operands=[Operand(type="SS", size=32, dest=None)]) add_group("push", not64=True, opcode=[0x1E], operands=[Operand(type="DS", dest=None)]) add_group("push", suffix="w", not64=True, opersize=16, opcode=[0x1E], operands=[Operand(type="DS", size=16, dest=None)]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0x1E], operands=[Operand(type="DS", size=32, dest=None)]) add_group("push", not64=True, opcode=[0x06], operands=[Operand(type="ES", dest=None)]) add_group("push", suffix="w", not64=True, opersize=16, opcode=[0x06], operands=[Operand(type="ES", size=16, dest=None)]) add_group("push", suffix="l", not64=True, opersize=32, opcode=[0x06], operands=[Operand(type="ES", size=32, dest=None)]) add_group("push", opcode=[0x0F, 0xA0], operands=[Operand(type="FS", dest=None)]) 
add_group("push", suffix="w", opersize=16, opcode=[0x0F, 0xA0], operands=[Operand(type="FS", size=16, dest=None)]) add_group("push", suffix="l", opersize=32, opcode=[0x0F, 0xA0], operands=[Operand(type="FS", size=32, dest=None)]) add_group("push", opcode=[0x0F, 0xA8], operands=[Operand(type="GS", dest=None)]) add_group("push", suffix="w", opersize=16, opcode=[0x0F, 0xA8], operands=[Operand(type="GS", size=16, dest=None)]) add_group("push", suffix="l", opersize=32, opcode=[0x0F, 0xA8], operands=[Operand(type="GS", size=32, dest=None)]) add_insn("push", "push") add_insn("pusha", "onebyte", modifiers=[0x60, 0], cpu=["186"], not64=True) add_insn("pushad", "onebyte", parser="nasm", modifiers=[0x60, 32], cpu=["386"], not64=True) add_insn("pushal", "onebyte", parser="gas", modifiers=[0x60, 32], cpu=["386"], not64=True) add_insn("pushaw", "onebyte", modifiers=[0x60, 16], cpu=["186"], not64=True) # # Pop instructions # add_group("pop", def_opersize_64=64, opcode=[0x58], operands=[Operand(type="Reg", size="BITS", dest="Op0Add")]) add_group("pop", suffix="w", opersize=16, def_opersize_64=64, opcode=[0x58], operands=[Operand(type="Reg", size=16, dest="Op0Add")]) add_group("pop", suffix="l", not64=True, opersize=32, opcode=[0x58], operands=[Operand(type="Reg", size=32, dest="Op0Add")]) add_group("pop", suffix="q", only64=True, def_opersize_64=64, opcode=[0x58], operands=[Operand(type="Reg", size=64, dest="Op0Add")]) add_group("pop", def_opersize_64=64, opcode=[0x8F], operands=[Operand(type="RM", size="BITS", dest="EA")]) add_group("pop", suffix="w", opersize=16, def_opersize_64=64, opcode=[0x8F], operands=[Operand(type="RM", size=16, dest="EA")]) add_group("pop", suffix="l", not64=True, opersize=32, opcode=[0x8F], operands=[Operand(type="RM", size=32, dest="EA")]) add_group("pop", suffix="q", only64=True, def_opersize_64=64, opcode=[0x8F], operands=[Operand(type="RM", size=64, dest="EA")]) # POP CS is debateably valid on the 8086, if obsolete and undocumented. # We don't include it because it's VERY unlikely it will ever be used # anywhere. If someone really wants it they can db 0x0F it. 
#add_group("pop", # cpu=["Undoc", "Obs"], # opcode=[0x0F], # operands=[Operand(type="CS", dest=None)]) add_group("pop", not64=True, opcode=[0x17], operands=[Operand(type="SS", dest=None)]) add_group("pop", not64=True, opersize=16, opcode=[0x17], operands=[Operand(type="SS", size=16, dest=None)]) add_group("pop", not64=True, opersize=32, opcode=[0x17], operands=[Operand(type="SS", size=32, dest=None)]) add_group("pop", not64=True, opcode=[0x1F], operands=[Operand(type="DS", dest=None)]) add_group("pop", not64=True, opersize=16, opcode=[0x1F], operands=[Operand(type="DS", size=16, dest=None)]) add_group("pop", not64=True, opersize=32, opcode=[0x1F], operands=[Operand(type="DS", size=32, dest=None)]) add_group("pop", not64=True, opcode=[0x07], operands=[Operand(type="ES", dest=None)]) add_group("pop", not64=True, opersize=16, opcode=[0x07], operands=[Operand(type="ES", size=16, dest=None)]) add_group("pop", not64=True, opersize=32, opcode=[0x07], operands=[Operand(type="ES", size=32, dest=None)]) add_group("pop", opcode=[0x0F, 0xA1], operands=[Operand(type="FS", dest=None)]) add_group("pop", opersize=16, opcode=[0x0F, 0xA1], operands=[Operand(type="FS", size=16, dest=None)]) add_group("pop", opersize=32, opcode=[0x0F, 0xA1], operands=[Operand(type="FS", size=32, dest=None)]) add_group("pop", opcode=[0x0F, 0xA9], operands=[Operand(type="GS", dest=None)]) add_group("pop", opersize=16, opcode=[0x0F, 0xA9], operands=[Operand(type="GS", size=16, dest=None)]) add_group("pop", opersize=32, opcode=[0x0F, 0xA9], operands=[Operand(type="GS", size=32, dest=None)]) add_insn("pop", "pop") add_insn("popa", "onebyte", modifiers=[0x61, 0], cpu=["186"], not64=True) add_insn("popad", "onebyte", parser="nasm", modifiers=[0x61, 32], cpu=["386"], not64=True) add_insn("popal", "onebyte", parser="gas", modifiers=[0x61, 32], cpu=["386"], not64=True) add_insn("popaw", "onebyte", modifiers=[0x61, 16], cpu=["186"], not64=True) # # Exchange instructions # add_group("xchg", suffix="b", opcode=[0x86], operands=[Operand(type="RM", size=8, relaxed=True, dest="EA"), Operand(type="Reg", size=8, dest="Spare")]) add_group("xchg", suffix="b", opcode=[0x86], operands=[Operand(type="Reg", size=8, dest="Spare"), Operand(type="RM", size=8, relaxed=True, dest="EA")]) # We could be extra-efficient in the 64-bit mode case here. # XCHG AX, AX in 64-bit mode is a NOP, as it doesn't clear the # high 48 bits of RAX. Thus we don't need the operand-size prefix. # But this feels too clever, and probably not what the user really # expects in the generated code, so we don't do it. #add_group("xchg", # suffix="w", # only64=True, # opcode=[0x90], # operands=[Operand(type="Areg", size=16, dest=None), # Operand(type="AReg", size=16, dest="Op0Add")]) add_group("xchg", suffix="w", opersize=16, opcode=[0x90], operands=[Operand(type="Areg", size=16, dest=None), Operand(type="Reg", size=16, dest="Op0Add")]) add_group("xchg", suffix="w", opersize=16, opcode=[0x90], operands=[Operand(type="Reg", size=16, dest="Op0Add"), Operand(type="Areg", size=16, dest=None)]) add_group("xchg", suffix="w", opersize=16, opcode=[0x87], operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), Operand(type="Reg", size=16, dest="Spare")]) add_group("xchg", suffix="w", opersize=16, opcode=[0x87], operands=[Operand(type="Reg", size=16, dest="Spare"), Operand(type="RM", size=16, relaxed=True, dest="EA")]) # Be careful with XCHG EAX, EAX in 64-bit mode. This needs to use # the long form rather than the NOP form, as the long form clears # the high 32 bits of RAX. 
This makes all 32-bit forms in 64-bit # mode have consistent operation. # # FIXME: due to a hard-to-fix bug in how we handle generating gas suffix CPU # rules, this causes xchgl to be CPU_Any instead of CPU_386. A hacky patch # could fix it, but it's doubtful anyone will ever notice, so leave it. add_group("xchg", suffix="l", only64=True, opersize=32, opcode=[0x87], operands=[Operand(type="Areg", size=32, dest="EA"), Operand(type="Areg", size=32, dest="Spare")]) add_group("xchg", suffix="l", opersize=32, opcode=[0x90], operands=[Operand(type="Areg", size=32, dest=None), Operand(type="Reg", size=32, dest="Op0Add")]) add_group("xchg", suffix="l", opersize=32, opcode=[0x90], operands=[Operand(type="Reg", size=32, dest="Op0Add"), Operand(type="Areg", size=32, dest=None)]) add_group("xchg", suffix="l", opersize=32, opcode=[0x87], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Reg", size=32, dest="Spare")]) add_group("xchg", suffix="l", opersize=32, opcode=[0x87], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) # Be efficient with XCHG RAX, RAX. # This is a NOP and thus doesn't need the REX prefix. add_group("xchg", suffix="q", only64=True, opcode=[0x90], operands=[Operand(type="Areg", size=64, dest=None), Operand(type="Areg", size=64, dest="Op0Add")]) add_group("xchg", suffix="q", opersize=64, opcode=[0x90], operands=[Operand(type="Areg", size=64, dest=None), Operand(type="Reg", size=64, dest="Op0Add")]) add_group("xchg", suffix="q", opersize=64, opcode=[0x90], operands=[Operand(type="Reg", size=64, dest="Op0Add"), Operand(type="Areg", size=64, dest=None)]) add_group("xchg", suffix="q", opersize=64, opcode=[0x87], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="Reg", size=64, dest="Spare")]) add_group("xchg", suffix="q", opersize=64, opcode=[0x87], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_insn("xchg", "xchg") ##################################################################### # In/out from ports ##################################################################### add_group("in", suffix="b", opcode=[0xE4], operands=[Operand(type="Areg", size=8, dest=None), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for sfx, sz in zip("wl", [16, 32]): add_group("in", suffix=sfx, opersize=sz, opcode=[0xE5], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("in", suffix="b", opcode=[0xEC], operands=[Operand(type="Areg", size=8, dest=None), Operand(type="Dreg", size=16, dest=None)]) for sfx, sz in zip("wl", [16, 32]): add_group("in", suffix=sfx, opersize=sz, opcode=[0xED], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="Dreg", size=16, dest=None)]) # GAS-only variants (implicit accumulator register) add_group("in", suffix="b", parsers=["gas"], opcode=[0xE4], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for sfx, sz in zip("wl", [16, 32]): add_group("in", suffix=sfx, parsers=["gas"], opersize=sz, opcode=[0xE5], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("in", suffix="b", parsers=["gas"], opcode=[0xEC], operands=[Operand(type="Dreg", size=16, dest=None)]) add_group("in", suffix="w", parsers=["gas"], opersize=16, opcode=[0xED], operands=[Operand(type="Dreg", size=16, dest=None)]) add_group("in", suffix="l", cpu=["386"], parsers=["gas"], opersize=32, opcode=[0xED], 
operands=[Operand(type="Dreg", size=16, dest=None)]) add_insn("in", "in") add_group("out", suffix="b", opcode=[0xE6], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm"), Operand(type="Areg", size=8, dest=None)]) for sfx, sz in zip("wl", [16, 32]): add_group("out", suffix=sfx, opersize=sz, opcode=[0xE7], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm"), Operand(type="Areg", size=sz, dest=None)]) add_group("out", suffix="b", opcode=[0xEE], operands=[Operand(type="Dreg", size=16, dest=None), Operand(type="Areg", size=8, dest=None)]) for sfx, sz in zip("wl", [16, 32]): add_group("out", suffix=sfx, opersize=sz, opcode=[0xEF], operands=[Operand(type="Dreg", size=16, dest=None), Operand(type="Areg", size=sz, dest=None)]) # GAS-only variants (implicit accumulator register) add_group("out", suffix="b", parsers=["gas"], opcode=[0xE6], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("out", suffix="w", parsers=["gas"], opersize=16, opcode=[0xE7], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("out", suffix="l", cpu=["386"], parsers=["gas"], opersize=32, opcode=[0xE7], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("out", suffix="b", parsers=["gas"], opcode=[0xEE], operands=[Operand(type="Dreg", size=16, dest=None)]) add_group("out", suffix="w", parsers=["gas"], opersize=16, opcode=[0xEF], operands=[Operand(type="Dreg", size=16, dest=None)]) add_group("out", suffix="l", cpu=["386"], parsers=["gas"], opersize=32, opcode=[0xEF], operands=[Operand(type="Dreg", size=16, dest=None)]) add_insn("out", "out") # # Load effective address # for sfx, sz in zip("wlq", [16, 32, 64]): add_group("lea", suffix=sfx, opersize=sz, opcode=[0x8D], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Mem", relaxed=True, dest="EA")]) add_insn("lea", "lea") # # Load segment registers from memory # for sfx, sz in zip("wl", [16, 32]): add_group("ldes", suffix=sfx, not64=True, modifiers=["Op0Add"], opersize=sz, opcode=[0x00], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Mem", relaxed=True, dest="EA")]) add_insn("lds", "ldes", modifiers=[0xC5]) add_insn("les", "ldes", modifiers=[0xC4]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("lfgss", suffix=sfx, cpu=["386"], modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Mem", relaxed=True, dest="EA")]) add_insn("lfs", "lfgss", modifiers=[0xB4]) add_insn("lgs", "lfgss", modifiers=[0xB5]) add_insn("lss", "lfgss", modifiers=[0xB2]) # # Flags registers instructions # add_insn("clc", "onebyte", modifiers=[0xF8]) add_insn("cld", "onebyte", modifiers=[0xFC]) add_insn("cli", "onebyte", modifiers=[0xFA]) add_insn("clts", "twobyte", modifiers=[0x0F, 0x06], cpu=["286", "Priv"]) add_insn("cmc", "onebyte", modifiers=[0xF5]) add_insn("lahf", "onebyte", modifiers=[0x9F]) add_insn("sahf", "onebyte", modifiers=[0x9E]) add_insn("pushf", "onebyte", modifiers=[0x9C, 0, 64]) add_insn("pushfd", "onebyte", parser="nasm", modifiers=[0x9C, 32], cpu=["386"], not64=True) add_insn("pushfl", "onebyte", parser="gas", modifiers=[0x9C, 32], cpu=["386"], not64=True) add_insn("pushfw", "onebyte", modifiers=[0x9C, 16, 64]) add_insn("pushfq", "onebyte", modifiers=[0x9C, 64, 64], only64=True) add_insn("popf", "onebyte", modifiers=[0x9D, 0, 64]) add_insn("popfd", "onebyte", parser="nasm", modifiers=[0x9D, 32], cpu=["386"], not64=True) add_insn("popfl", "onebyte", parser="gas", modifiers=[0x9D, 32], 
cpu=["386"], not64=True) add_insn("popfw", "onebyte", modifiers=[0x9D, 16, 64]) add_insn("popfq", "onebyte", modifiers=[0x9D, 64, 64], only64=True) add_insn("stc", "onebyte", modifiers=[0xF9]) add_insn("std", "onebyte", modifiers=[0xFD]) add_insn("sti", "onebyte", modifiers=[0xFB]) # # Arithmetic - general # add_group("arith", suffix="b", modifiers=["Op0Add"], opcode=[0x04], operands=[Operand(type="Areg", size=8, dest=None), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for sfx, sz, immsz in zip("wlq", [16, 32, 64], [16, 32, 32]): add_group("arith", suffix=sfx, modifiers=["Op2Add", "Op1AddSp"], opersize=sz, opcode1=[0x83, 0xC0], opcode2=[0x05], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="Imm", size=immsz, relaxed=True, dest="Imm", opt="SImm8")]) add_group("arith", suffix="b", modifiers=["Gap", "SpAdd"], opcode=[0x80], spare=0, operands=[Operand(type="RM", size=8, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("arith", suffix="b", modifiers=["Gap", "SpAdd"], opcode=[0x80], spare=0, operands=[Operand(type="RM", size=8, relaxed=True, dest="EA"), Operand(type="Imm", size=8, dest="Imm")]) add_group("arith", suffix="w", modifiers=["Gap", "SpAdd"], opersize=16, opcode=[0x83], spare=0, operands=[Operand(type="RM", size=16, dest="EA"), Operand(type="Imm", size=8, dest="SImm")]) add_group("arith", parsers=["nasm"], modifiers=["Gap", "SpAdd"], opersize=16, opcode1=[0x83], opcode2=[0x81], spare=0, operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), Operand(type="Imm", size=16, dest="Imm", opt="SImm8")]) add_group("arith", suffix="w", modifiers=["Gap", "SpAdd"], opersize=16, opcode1=[0x83], opcode2=[0x81], spare=0, operands=[ Operand(type="RM", size=16, dest="EA"), Operand(type="Imm", size=16, relaxed=True, dest="Imm", opt="SImm8")]) add_group("arith", suffix="l", modifiers=["Gap", "SpAdd"], opersize=32, opcode=[0x83], spare=0, operands=[Operand(type="RM", size=32, dest="EA"), Operand(type="Imm", size=8, dest="SImm")]) # Not64 because we can't tell if add [], dword in 64-bit mode is supposed # to be a qword destination or a dword destination. add_group("arith", not64=True, parsers=["nasm"], modifiers=["Gap", "SpAdd"], opersize=32, opcode1=[0x83], opcode2=[0x81], spare=0, operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=32, dest="Imm", opt="SImm8")]) add_group("arith", suffix="l", modifiers=["Gap", "SpAdd"], opersize=32, opcode1=[0x83], opcode2=[0x81], spare=0, operands=[ Operand(type="RM", size=32, dest="EA"), Operand(type="Imm", size=32, relaxed=True, dest="Imm", opt="SImm8")]) # No relaxed-RM mode for 64-bit destinations; see above Not64 comment. 
add_group("arith", suffix="q", modifiers=["Gap", "SpAdd"], opersize=64, opcode=[0x83], spare=0, operands=[Operand(type="RM", size=64, dest="EA"), Operand(type="Imm", size=8, dest="SImm")]) add_group("arith", suffix="q", modifiers=["Gap", "SpAdd"], opersize=64, opcode1=[0x83], opcode2=[0x81], spare=0, operands=[ Operand(type="RM", size=64, dest="EA"), Operand(type="Imm", size=32, relaxed=True, dest="Imm", opt="SImm8")]) for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("arith", suffix=sfx, modifiers=["Op0Add"], opersize=sz, opcode=[0x00+(sz!=8)], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("arith", suffix=sfx, modifiers=["Op0Add"], opersize=sz, opcode=[0x02+(sz!=8)], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) add_insn("add", "arith", modifiers=[0x00, 0]) add_insn("or", "arith", modifiers=[0x08, 1]) add_insn("adc", "arith", modifiers=[0x10, 2]) add_insn("sbb", "arith", modifiers=[0x18, 3]) add_insn("and", "arith", modifiers=[0x20, 4]) add_insn("sub", "arith", modifiers=[0x28, 5]) add_insn("xor", "arith", modifiers=[0x30, 6]) add_insn("cmp", "arith", modifiers=[0x38, 7]) # # Arithmetic - inc/dec # add_group("incdec", suffix="b", modifiers=["Gap", "SpAdd"], opcode=[0xFE], spare=0, operands=[Operand(type="RM", size=8, dest="EA")]) for sfx, sz in zip("wl", [16, 32]): add_group("incdec", suffix=sfx, not64=True, modifiers=["Op0Add"], opersize=sz, opcode=[0x00], operands=[Operand(type="Reg", size=sz, dest="Op0Add")]) add_group("incdec", suffix=sfx, modifiers=["Gap", "SpAdd"], opersize=sz, opcode=[0xFF], spare=0, operands=[Operand(type="RM", size=sz, dest="EA")]) add_group("incdec", suffix="q", modifiers=["Gap", "SpAdd"], opersize=64, opcode=[0xFF], spare=0, operands=[Operand(type="RM", size=64, dest="EA")]) add_insn("inc", "incdec", modifiers=[0x40, 0]) add_insn("dec", "incdec", modifiers=[0x48, 1]) # # Arithmetic - mul/neg/not F6 opcodes # for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("f6", suffix=sfx, modifiers=["SpAdd"], opersize=sz, opcode=[0xF6+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA")]) add_insn("not", "f6", modifiers=[2]) add_insn("neg", "f6", modifiers=[3]) add_insn("mul", "f6", modifiers=[4]) # # Arithmetic - div/idiv F6 opcodes # These allow explicit accumulator in GAS mode. 
# for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("div", suffix=sfx, modifiers=["SpAdd"], opersize=sz, opcode=[0xF6+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA")]) # Versions with explicit accumulator for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("div", suffix=sfx, modifiers=["SpAdd"], opersize=sz, opcode=[0xF6+(sz!=8)], spare=0, operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="RM", size=sz, dest="EA")]) add_insn("div", "div", modifiers=[6]) add_insn("idiv", "div", modifiers=[7]) # # Arithmetic - test instruction # for sfx, sz, immsz in zip("bwlq", [8, 16, 32, 64], [8, 16, 32, 32]): add_group("test", suffix=sfx, opersize=sz, opcode=[0xA8+(sz!=8)], operands=[Operand(type="Areg", size=sz, dest=None), Operand(type="Imm", size=immsz, relaxed=True, dest="Imm")]) for sfx, sz, immsz in zip("bwlq", [8, 16, 32, 64], [8, 16, 32, 32]): add_group("test", suffix=sfx, opersize=sz, opcode=[0xF6+(sz!=8)], operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Imm", size=immsz, relaxed=True, dest="Imm")]) add_group("test", suffix=sfx, opersize=sz, opcode=[0xF6+(sz!=8)], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=immsz, dest="Imm")]) for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("test", suffix=sfx, opersize=sz, opcode=[0x84+(sz!=8)], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("test", suffix=sfx, opersize=sz, opcode=[0x84+(sz!=8)], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) add_insn("test", "test") # # Arithmetic - aad/aam # add_group("aadm", modifiers=["Op0Add"], opcode=[0xD4, 0x0A], operands=[]) add_group("aadm", modifiers=["Op0Add"], opcode=[0xD4], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("aaa", "onebyte", modifiers=[0x37], not64=True) add_insn("aas", "onebyte", modifiers=[0x3F], not64=True) add_insn("daa", "onebyte", modifiers=[0x27], not64=True) add_insn("das", "onebyte", modifiers=[0x2F], not64=True) add_insn("aad", "aadm", modifiers=[0x01], not64=True) add_insn("aam", "aadm", modifiers=[0x00], not64=True) # # Conversion instructions # add_insn("cbw", "onebyte", modifiers=[0x98, 16]) add_insn("cwde", "onebyte", modifiers=[0x98, 32], cpu=["386"]) add_insn("cdqe", "onebyte", modifiers=[0x98, 64], only64=True) add_insn("cwd", "onebyte", modifiers=[0x99, 16]) add_insn("cdq", "onebyte", modifiers=[0x99, 32], cpu=["386"]) add_insn("cqo", "onebyte", modifiers=[0x99, 64], only64=True) # # Conversion instructions - GAS / AT&T naming # add_insn("cbtw", "onebyte", parser="gas", modifiers=[0x98, 16]) add_insn("cwtl", "onebyte", parser="gas", modifiers=[0x98, 32], cpu=["386"]) add_insn("cltq", "onebyte", parser="gas", modifiers=[0x98, 64], only64=True) add_insn("cwtd", "onebyte", parser="gas", modifiers=[0x99, 16]) add_insn("cltd", "onebyte", parser="gas", modifiers=[0x99, 32], cpu=["386"]) add_insn("cqto", "onebyte", parser="gas", modifiers=[0x99, 64], only64=True) # # Arithmetic - imul # for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("imul", suffix=sfx, opersize=sz, opcode=[0xF6+(sz!=8)], spare=5, operands=[Operand(type="RM", size=sz, dest="EA")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("imul", suffix=sfx, cpu=["386"], opersize=sz, opcode=[0x0F, 0xAF], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) 
for sfx, sz in zip("wlq", [16, 32, 64]): add_group("imul", suffix=sfx, cpu=["186"], opersize=sz, opcode=[0x6B], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=8, dest="SImm")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("imul", suffix=sfx, cpu=["186"], opersize=sz, opcode=[0x6B], operands=[Operand(type="Reg", size=sz, dest="SpareEA"), Operand(type="Imm", size=8, dest="SImm")]) for sfx, sz, immsz in zip("wlq", [16, 32, 64], [16, 32, 32]): add_group("imul", suffix=sfx, cpu=["186"], opersize=sz, opcode1=[0x6B], opcode2=[0x69], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=immsz, relaxed=True, dest="SImm", opt="SImm8")]) for sfx, sz, immsz in zip("wlq", [16, 32, 64], [16, 32, 32]): add_group("imul", suffix=sfx, cpu=["186"], opersize=sz, opcode1=[0x6B], opcode2=[0x69], operands=[Operand(type="Reg", size=sz, dest="SpareEA"), Operand(type="Imm", size=immsz, relaxed=True, dest="SImm", opt="SImm8")]) add_insn("imul", "imul") # # Shifts - standard # for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("shift", suffix=sfx, modifiers=["SpAdd"], opersize=sz, opcode=[0xD2+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Creg", size=8, dest=None)]) add_group("shift", suffix=sfx, modifiers=["SpAdd"], opersize=sz, opcode=[0xD0+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Imm1", size=8, relaxed=True, dest=None)]) add_group("shift", suffix=sfx, cpu=["186"], modifiers=["SpAdd"], opersize=sz, opcode=[0xC0+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # In GAS mode, single operands are equivalent to shifting by 1 forms for sfx, sz in zip("bwlq", [8, 16, 32, 64]): add_group("shift", suffix=sfx, parsers=["gas"], modifiers=["SpAdd"], opersize=sz, opcode=[0xD0+(sz!=8)], spare=0, operands=[Operand(type="RM", size=sz, dest="EA")]) add_insn("rol", "shift", modifiers=[0]) add_insn("ror", "shift", modifiers=[1]) add_insn("rcl", "shift", modifiers=[2]) add_insn("rcr", "shift", modifiers=[3]) add_insn("sal", "shift", modifiers=[4]) add_insn("shl", "shift", modifiers=[4]) add_insn("shr", "shift", modifiers=[5]) add_insn("sar", "shift", modifiers=[7]) # # Shifts - doubleword # for sfx, sz in zip("wlq", [16, 32, 64]): add_group("shlrd", suffix=sfx, cpu=["386"], modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("shlrd", suffix=sfx, cpu=["386"], modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x01], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Creg", size=8, dest=None)]) # GAS parser supports two-operand form for shift with CL count for sfx, sz in zip("wlq", [16, 32, 64]): add_group("shlrd", suffix=sfx, cpu=["386"], parsers=["gas"], modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x01], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) add_insn("shld", "shlrd", modifiers=[0xA4]) add_insn("shrd", "shlrd", modifiers=[0xAC]) ##################################################################### # Control transfer instructions (unconditional) 
##################################################################### # # call # add_group("call", opcode=[], operands=[Operand(type="ImmNotSegOff", dest="JmpRel")]) add_group("call", suffix="w", opersize=16, opcode=[], operands=[Operand(type="ImmNotSegOff", size=16, dest="JmpRel")]) add_group("call", suffix="l", not64=True, opersize=32, opcode=[], operands=[Operand(type="ImmNotSegOff", size=32, dest="JmpRel")]) add_group("call", suffixes=["l", "q"], only64=True, opersize=64, opcode=[], operands=[Operand(type="ImmNotSegOff", size=32, dest="JmpRel")]) add_group("call", opersize=16, def_opersize_64=64, opcode=[0xE8], operands=[Operand(type="Imm", size=16, tmod="Near", dest="JmpRel")]) add_group("call", not64=True, opersize=32, opcode=[0xE8], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("call", only64=True, opersize=64, def_opersize_64=64, opcode=[0xE8], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("call", def_opersize_64=64, opcode=[0xE8], operands=[Operand(type="Imm", tmod="Near", dest="JmpRel")]) add_group("call", suffix="w", req_suffix=True, opersize=16, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=16, dest="EA")]) add_group("call", suffix="l", req_suffix=True, not64=True, opersize=32, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=32, dest="EA")]) add_group("call", suffix="q", req_suffix=True, opersize=64, def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=64, dest="EA")]) add_group("call", parsers=["gas"], def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="Reg", size="BITS", dest="EA")]) add_group("call", def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="Mem", dest="EA")]) add_group("call", parsers=["nasm"], opersize=16, def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=16, tmod="Near", dest="EA")]) add_group("call", parsers=["nasm"], not64=True, opersize=32, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=32, tmod="Near", dest="EA")]) add_group("call", parsers=["nasm"], opersize=64, def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="RM", size=64, tmod="Near", dest="EA")]) add_group("call", parsers=["nasm"], def_opersize_64=64, opcode=[0xFF], spare=2, operands=[Operand(type="Mem", tmod="Near", dest="EA")]) # Far indirect (through memory). Needs explicit FAR override (NASM only) for sz in [16, 32, 64]: add_group("call", parsers=["nasm"], opersize=sz, opcode=[0xFF], spare=3, operands=[Operand(type="Mem", size=sz, tmod="Far", dest="EA")]) add_group("call", parsers=["nasm"], opcode=[0xFF], spare=3, operands=[Operand(type="Mem", tmod="Far", dest="EA")]) # With explicit FAR override for sz in [16, 32]: add_group("call", parsers=["nasm"], not64=True, opersize=sz, opcode=[0x9A], operands=[Operand(type="Imm", size=sz, tmod="Far", dest="JmpFar")]) add_group("call", parsers=["nasm"], not64=True, opcode=[0x9A], operands=[Operand(type="Imm", tmod="Far", dest="JmpFar")]) # Since not caught by first ImmNotSegOff group, implicitly FAR (in NASM). 
for sz in [16, 32]: add_group("call", parsers=["nasm"], not64=True, opersize=sz, opcode=[0x9A], operands=[Operand(type="Imm", size=sz, dest="JmpFar")]) add_group("call", parsers=["nasm"], not64=True, opcode=[0x9A], operands=[Operand(type="Imm", dest="JmpFar")]) # Two-operand FAR (GAS only) for sfx, sz in zip("wl", [16, 32]): add_group("call", suffix=sfx, req_suffix=True, parsers=["gas"], not64=True, gas_no_reverse=True, opersize=sz, opcode=[0x9A], operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"), Operand(type="Imm", size=sz, relaxed=True, dest="JmpFar")]) add_group("call", parsers=["gas"], not64=True, gas_no_reverse=True, opcode=[0x9A], operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"), Operand(type="Imm", size="BITS", relaxed=True, dest="JmpFar")]) add_insn("call", "call") # # jmp # add_group("jmp", opcode=[], operands=[Operand(type="ImmNotSegOff", dest="JmpRel")]) add_group("jmp", suffix="w", opersize=16, opcode=[], operands=[Operand(type="ImmNotSegOff", size=16, dest="JmpRel")]) add_group("jmp", suffix="l", not64=True, opersize=32, opcode=[0x00], operands=[Operand(type="ImmNotSegOff", size=32, dest="JmpRel")]) add_group("jmp", suffixes=["l", "q"], only64=True, opersize=64, opcode=[0x00], operands=[Operand(type="ImmNotSegOff", size=32, dest="JmpRel")]) add_group("jmp", def_opersize_64=64, opcode=[0xEB], operands=[Operand(type="Imm", tmod="Short", dest="JmpRel")]) add_group("jmp", opersize=16, def_opersize_64=64, opcode=[0xE9], operands=[Operand(type="Imm", size=16, tmod="Near", dest="JmpRel")]) add_group("jmp", not64=True, cpu=["386"], opersize=32, opcode=[0xE9], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("jmp", only64=True, opersize=64, def_opersize_64=64, opcode=[0xE9], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("jmp", def_opersize_64=64, opcode=[0xE9], operands=[Operand(type="Imm", tmod="Near", dest="JmpRel")]) add_group("jmp", suffix="w", req_suffix=True, opersize=16, def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=16, dest="EA")]) add_group("jmp", suffix="l", req_suffix=True, not64=True, opersize=32, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=32, dest="EA")]) add_group("jmp", suffix="q", req_suffix=True, opersize=64, def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=64, dest="EA")]) add_group("jmp", parsers=["gas"], def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="Reg", size="BITS", dest="EA")]) add_group("jmp", def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="Mem", dest="EA")]) add_group("jmp", parsers=["nasm"], opersize=16, def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=16, tmod="Near", dest="EA")]) add_group("jmp", parsers=["nasm"], not64=True, cpu=["386"], opersize=32, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=32, tmod="Near", dest="EA")]) add_group("jmp", parsers=["nasm"], opersize=64, def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="RM", size=64, tmod="Near", dest="EA")]) add_group("jmp", parsers=["nasm"], def_opersize_64=64, opcode=[0xFF], spare=4, operands=[Operand(type="Mem", tmod="Near", dest="EA")]) # Far indirect (through memory). Needs explicit FAR override. 
for sz in [16, 32, 64]:
    add_group("jmp", opersize=sz, opcode=[0xFF], spare=5,
              operands=[Operand(type="Mem", size=sz, tmod="Far", dest="EA")])
add_group("jmp", opcode=[0xFF], spare=5,
          operands=[Operand(type="Mem", tmod="Far", dest="EA")])

# With explicit FAR override
for sz in [16, 32]:
    add_group("jmp", not64=True, opersize=sz, opcode=[0xEA],
              operands=[Operand(type="Imm", size=sz, tmod="Far", dest="JmpFar")])
add_group("jmp", not64=True, opcode=[0xEA],
          operands=[Operand(type="Imm", tmod="Far", dest="JmpFar")])

# Since not caught by first ImmNotSegOff group, implicitly FAR (in NASM).
for sz in [16, 32]:
    add_group("jmp", parsers=["nasm"], not64=True, opersize=sz, opcode=[0xEA],
              operands=[Operand(type="Imm", size=sz, dest="JmpFar")])
add_group("jmp", parsers=["nasm"], not64=True, opcode=[0xEA],
          operands=[Operand(type="Imm", dest="JmpFar")])

# Two-operand FAR (GAS only)
for sfx, sz in zip("wl", [16, 32]):
    add_group("jmp", parsers=["gas"], suffix=sfx, req_suffix=True, not64=True,
              gas_no_reverse=True, opersize=sz, opcode=[0xEA],
              operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"),
                        Operand(type="Imm", size=sz, relaxed=True, dest="JmpFar")])
add_group("jmp", parsers=["gas"], not64=True, gas_no_reverse=True,
          opcode=[0xEA],
          operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"),
                    Operand(type="Imm", size="BITS", relaxed=True, dest="JmpFar")])

add_insn("jmp", "jmp")

#
# GAS far calls/jumps
#

# Far indirect (through memory)
for sfx, sz in zip("wlq", [16, 32, 64]):
    add_group("ljmpcall", suffix=sfx, req_suffix=True, opersize=sz,
              modifiers=["SpAdd"], opcode=[0xFF], spare=0,
              operands=[Operand(type="Mem", size=sz, relaxed=True, dest="EA")])
add_group("ljmpcall", modifiers=["SpAdd"], opcode=[0xFF], spare=0,
          operands=[Operand(type="Mem", size="BITS", relaxed=True, dest="EA")])

# Two-operand far
for sfx, sz in zip("wl", [16, 32]):
    add_group("ljmpcall", not64=True, gas_no_reverse=True, suffix=sfx,
              req_suffix=True, opersize=sz, modifiers=["Gap", "Op0Add"],
              opcode=[0x00],
              operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"),
                        Operand(type="Imm", size=sz, relaxed=True, dest="JmpFar")])
add_group("ljmpcall", not64=True, gas_no_reverse=True,
          modifiers=["Gap", "Op0Add"], opcode=[0x00],
          operands=[Operand(type="Imm", size=16, relaxed=True, dest="JmpFar"),
                    Operand(type="Imm", size="BITS", relaxed=True, dest="JmpFar")])

add_insn("ljmp", "ljmpcall", parser="gas", modifiers=[5, 0xEA])
add_insn("lcall", "ljmpcall", parser="gas", modifiers=[3, 0x9A])

#
# ret
#
add_group("retnf", not64=True, modifiers=["Op0Add"], opcode=[0x01],
          operands=[])
add_group("retnf", not64=True, modifiers=["Op0Add"], opcode=[0x00],
          operands=[Operand(type="Imm", size=16, relaxed=True, dest="Imm")])
add_group("retnf", only64=True, modifiers=["Op0Add", "OpSizeR"], opcode=[0x01],
          operands=[])
add_group("retnf", only64=True, modifiers=["Op0Add", "OpSizeR"], opcode=[0x00],
          operands=[Operand(type="Imm", size=16, relaxed=True, dest="Imm")])
add_group("retnf", gen_suffix=False, suffixes=["w", "l", "q"],
          modifiers=["Op0Add", "OpSizeR"], opcode=[0x01], operands=[])
# GAS suffix versions
add_group("retnf", gen_suffix=False, suffixes=["w", "l", "q"],
          modifiers=["Op0Add", "OpSizeR"], opcode=[0x00],
          operands=[Operand(type="Imm", size=16, relaxed=True, dest="Imm")])
add_insn("ret", "retnf", modifiers=[0xC2])
add_insn("retw", "retnf", parser="gas", modifiers=[0xC2, 16])
add_insn("retl", "retnf", parser="gas", modifiers=[0xC2], not64=True)
add_insn("retq", "retnf", parser="gas", modifiers=[0xC2], only64=True)
add_insn("retn",
"retnf", parser="nasm", modifiers=[0xC2]) add_insn("retf", "retnf", parser="nasm", modifiers=[0xCA, 64]) add_insn("lret", "retnf", parser="gas", modifiers=[0xCA], suffix="z") add_insn("lretw", "retnf", parser="gas", modifiers=[0xCA, 16], suffix="w") add_insn("lretl", "retnf", parser="gas", modifiers=[0xCA], suffix="l") add_insn("lretq", "retnf", parser="gas", modifiers=[0xCA, 64], only64=True, suffix="q") # # enter # add_group("enter", suffix="l", not64=True, cpu=["186"], gas_no_reverse=True, opcode=[0xC8], operands=[ Operand(type="Imm", size=16, relaxed=True, dest="EA", opt="A16"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("enter", suffix="q", only64=True, cpu=["186"], gas_no_reverse=True, opersize=64, def_opersize_64=64, opcode=[0xC8], operands=[ Operand(type="Imm", size=16, relaxed=True, dest="EA", opt="A16"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # GAS suffix version add_group("enter", suffix="w", cpu=["186"], parsers=["gas"], gas_no_reverse=True, opersize=16, opcode=[0xC8], operands=[ Operand(type="Imm", size=16, relaxed=True, dest="EA", opt="A16"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("enter", "enter") # # leave # add_insn("leave", "onebyte", modifiers=[0xC9, 0, 64], cpu=["186"]) add_insn("leavew", "onebyte", parser="gas", modifiers=[0xC9, 16, 0], cpu=["186"]) add_insn("leavel", "onebyte", parser="gas", modifiers=[0xC9, 0, 64], cpu=["186"]) add_insn("leaveq", "onebyte", parser="gas", modifiers=[0xC9, 0, 64], only64=True) ##################################################################### # Conditional jumps ##################################################################### add_group("jcc", opcode=[], operands=[Operand(type="Imm", dest="JmpRel")]) add_group("jcc", opersize=16, opcode=[], operands=[Operand(type="Imm", size=16, dest="JmpRel")]) add_group("jcc", not64=True, opersize=32, opcode=[], operands=[Operand(type="Imm", size=32, dest="JmpRel")]) add_group("jcc", only64=True, opersize=64, opcode=[], operands=[Operand(type="Imm", size=32, dest="JmpRel")]) add_group("jcc", modifiers=["Op0Add"], def_opersize_64=64, opcode=[0x70], operands=[Operand(type="Imm", tmod="Short", dest="JmpRel")]) add_group("jcc", cpu=["186"], modifiers=["Op1Add"], opersize=16, def_opersize_64=64, opcode=[0x0F, 0x80], operands=[Operand(type="Imm", size=16, tmod="Near", dest="JmpRel")]) add_group("jcc", not64=True, cpu=["386"], modifiers=["Op1Add"], opersize=32, opcode=[0x0F, 0x80], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("jcc", only64=True, modifiers=["Op1Add"], opersize=64, def_opersize_64=64, opcode=[0x0F, 0x80], operands=[Operand(type="Imm", size=32, tmod="Near", dest="JmpRel")]) add_group("jcc", cpu=["186"], modifiers=["Op1Add"], def_opersize_64=64, opcode=[0x0F, 0x80], operands=[Operand(type="Imm", tmod="Near", dest="JmpRel")]) add_insn("jo", "jcc", modifiers=[0x00]) add_insn("jno", "jcc", modifiers=[0x01]) add_insn("jb", "jcc", modifiers=[0x02]) add_insn("jc", "jcc", modifiers=[0x02]) add_insn("jnae", "jcc", modifiers=[0x02]) add_insn("jnb", "jcc", modifiers=[0x03]) add_insn("jnc", "jcc", modifiers=[0x03]) add_insn("jae", "jcc", modifiers=[0x03]) add_insn("je", "jcc", modifiers=[0x04]) add_insn("jz", "jcc", modifiers=[0x04]) add_insn("jne", "jcc", modifiers=[0x05]) add_insn("jnz", "jcc", modifiers=[0x05]) add_insn("jbe", "jcc", modifiers=[0x06]) add_insn("jna", "jcc", modifiers=[0x06]) add_insn("jnbe", "jcc", modifiers=[0x07]) add_insn("ja", "jcc", modifiers=[0x07]) add_insn("js", "jcc", 
         modifiers=[0x08])
add_insn("jns", "jcc", modifiers=[0x09])
add_insn("jp", "jcc", modifiers=[0x0A])
add_insn("jpe", "jcc", modifiers=[0x0A])
add_insn("jnp", "jcc", modifiers=[0x0B])
add_insn("jpo", "jcc", modifiers=[0x0B])
add_insn("jl", "jcc", modifiers=[0x0C])
add_insn("jnge", "jcc", modifiers=[0x0C])
add_insn("jnl", "jcc", modifiers=[0x0D])
add_insn("jge", "jcc", modifiers=[0x0D])
add_insn("jle", "jcc", modifiers=[0x0E])
add_insn("jng", "jcc", modifiers=[0x0E])
add_insn("jnle", "jcc", modifiers=[0x0F])
add_insn("jg", "jcc", modifiers=[0x0F])

#
# jcxz
#
add_group("jcxz", modifiers=["AdSizeR"], opcode=[],
          operands=[Operand(type="Imm", dest="JmpRel")])
add_group("jcxz", modifiers=["AdSizeR"], def_opersize_64=64, opcode=[0xE3],
          operands=[Operand(type="Imm", tmod="Short", dest="JmpRel")])
add_insn("jcxz", "jcxz", modifiers=[16])
add_insn("jecxz", "jcxz", modifiers=[32], cpu=["386"])
add_insn("jrcxz", "jcxz", modifiers=[64], only64=True)

#####################################################################
# Loop instructions
#####################################################################
add_group("loop", opcode=[], operands=[Operand(type="Imm", dest="JmpRel")])
add_group("loop", not64=True, opcode=[],
          operands=[Operand(type="Imm", dest="JmpRel"),
                    Operand(type="Creg", size=16, dest="AdSizeR")])
add_group("loop", def_opersize_64=64, opcode=[],
          operands=[Operand(type="Imm", dest="JmpRel"),
                    Operand(type="Creg", size=32, dest="AdSizeR")])
add_group("loop", def_opersize_64=64, opcode=[],
          operands=[Operand(type="Imm", dest="JmpRel"),
                    Operand(type="Creg", size=64, dest="AdSizeR")])
add_group("loop", not64=True, modifiers=["Op0Add"], opcode=[0xE0],
          operands=[Operand(type="Imm", tmod="Short", dest="JmpRel")])
for sz in [16, 32, 64]:
    add_group("loop", modifiers=["Op0Add"], def_opersize_64=64, opcode=[0xE0],
              operands=[Operand(type="Imm", tmod="Short", dest="JmpRel"),
                        Operand(type="Creg", size=sz, dest="AdSizeR")])
add_insn("loop", "loop", modifiers=[2])
add_insn("loopz", "loop", modifiers=[1])
add_insn("loope", "loop", modifiers=[1])
add_insn("loopnz", "loop", modifiers=[0])
add_insn("loopne", "loop", modifiers=[0])

# GAS w/l/q suffixes have to set addrsize via modifiers
for sfx, sz in zip("wlq", [16, 32, 64]):
    add_group("loop"+sfx, not64=(sz == 16), only64=(sz == 64),
              modifiers=["Gap", "AdSizeR"], def_opersize_64=64, opcode=[],
              operands=[Operand(type="Imm", dest="JmpRel")])
    add_group("loop"+sfx, not64=(sz == 16), only64=(sz == 64),
              modifiers=["Op0Add", "AdSizeR"], def_opersize_64=64,
              opcode=[0xE0],
              operands=[Operand(type="Imm", tmod="Short", dest="JmpRel")])
    add_group("loop"+sfx, not64=(sz == 16), only64=(sz == 64),
              def_opersize_64=64, opcode=[],
              operands=[Operand(type="Imm", dest="JmpRel"),
                        Operand(type="Creg", size=sz, dest="AdSizeR")])
    add_group("loop"+sfx, not64=(sz == 16), only64=(sz == 64),
              modifiers=["Op0Add"], def_opersize_64=64, opcode=[0xE0],
              operands=[Operand(type="Imm", tmod="Short", dest="JmpRel"),
                        Operand(type="Creg", size=sz, dest="AdSizeR")])
    add_insn("loop"+sfx, "loop"+sfx, parser="gas", modifiers=[2, sz])
    add_insn("loopz"+sfx, "loop"+sfx, parser="gas", modifiers=[1, sz])
    add_insn("loope"+sfx, "loop"+sfx, parser="gas", modifiers=[1, sz])
    add_insn("loopnz"+sfx, "loop"+sfx, parser="gas", modifiers=[0, sz])
    add_insn("loopne"+sfx, "loop"+sfx, parser="gas", modifiers=[0, sz])

#####################################################################
# Set byte on flag instructions
#####################################################################
add_group("setcc", suffix="b",
cpu=["386"], modifiers=["Op1Add"], opcode=[0x0F, 0x90], spare=2, operands=[Operand(type="RM", size=8, relaxed=True, dest="EA")]) add_insn("seto", "setcc", modifiers=[0x00]) add_insn("setno", "setcc", modifiers=[0x01]) add_insn("setb", "setcc", modifiers=[0x02]) add_insn("setc", "setcc", modifiers=[0x02]) add_insn("setnae", "setcc", modifiers=[0x02]) add_insn("setnb", "setcc", modifiers=[0x03]) add_insn("setnc", "setcc", modifiers=[0x03]) add_insn("setae", "setcc", modifiers=[0x03]) add_insn("sete", "setcc", modifiers=[0x04]) add_insn("setz", "setcc", modifiers=[0x04]) add_insn("setne", "setcc", modifiers=[0x05]) add_insn("setnz", "setcc", modifiers=[0x05]) add_insn("setbe", "setcc", modifiers=[0x06]) add_insn("setna", "setcc", modifiers=[0x06]) add_insn("setnbe", "setcc", modifiers=[0x07]) add_insn("seta", "setcc", modifiers=[0x07]) add_insn("sets", "setcc", modifiers=[0x08]) add_insn("setns", "setcc", modifiers=[0x09]) add_insn("setp", "setcc", modifiers=[0x0A]) add_insn("setpe", "setcc", modifiers=[0x0A]) add_insn("setnp", "setcc", modifiers=[0x0B]) add_insn("setpo", "setcc", modifiers=[0x0B]) add_insn("setl", "setcc", modifiers=[0x0C]) add_insn("setnge", "setcc", modifiers=[0x0C]) add_insn("setnl", "setcc", modifiers=[0x0D]) add_insn("setge", "setcc", modifiers=[0x0D]) add_insn("setle", "setcc", modifiers=[0x0E]) add_insn("setng", "setcc", modifiers=[0x0E]) add_insn("setnle", "setcc", modifiers=[0x0F]) add_insn("setg", "setcc", modifiers=[0x0F]) ##################################################################### # String instructions ##################################################################### add_insn("cmpsb", "onebyte", modifiers=[0xA6, 0]) add_insn("cmpsw", "onebyte", modifiers=[0xA7, 16]) # cmpsd has to be non-onebyte for SSE2 forms below add_group("cmpsd", parsers=["nasm"], notavx=True, opersize=32, opcode=[0xA7], operands=[]) add_insn("cmpsd", "cmpsd", cpu=[]) add_insn("cmpsl", "onebyte", parser="gas", modifiers=[0xA7, 32], cpu=["386"]) add_insn("cmpsq", "onebyte", modifiers=[0xA7, 64], only64=True) add_insn("insb", "onebyte", modifiers=[0x6C, 0]) add_insn("insw", "onebyte", modifiers=[0x6D, 16]) add_insn("insd", "onebyte", parser="nasm", modifiers=[0x6D, 32], cpu=["386"]) add_insn("insl", "onebyte", parser="gas", modifiers=[0x6D, 32], cpu=["386"]) add_insn("outsb", "onebyte", modifiers=[0x6E, 0]) add_insn("outsw", "onebyte", modifiers=[0x6F, 16]) add_insn("outsd", "onebyte", parser="nasm", modifiers=[0x6F, 32], cpu=["386"]) add_insn("outsl", "onebyte", parser="gas", modifiers=[0x6F, 32], cpu=["386"]) add_insn("lodsb", "onebyte", modifiers=[0xAC, 0]) add_insn("lodsw", "onebyte", modifiers=[0xAD, 16]) add_insn("lodsd", "onebyte", parser="nasm", modifiers=[0xAD, 32], cpu=["386"]) add_insn("lodsl", "onebyte", parser="gas", modifiers=[0xAD, 32], cpu=["386"]) add_insn("lodsq", "onebyte", modifiers=[0xAD, 64], only64=True) add_insn("movsb", "onebyte", modifiers=[0xA4, 0]) add_insn("movsw", "onebyte", modifiers=[0xA5, 16]) # movsd has to be non-onebyte for SSE2 forms below add_group("movsd", parsers=["nasm", "gas"], notavx=True, opersize=32, opcode=[0xA5], operands=[]) add_insn("movsd", "movsd", cpu=["386"]) add_insn("movsl", "onebyte", parser="gas", modifiers=[0xA5, 32], cpu=["386"]) add_insn("movsq", "onebyte", modifiers=[0xA5, 64], only64=True) # smov alias for movs in GAS mode add_insn("smovb", "onebyte", parser="gas", modifiers=[0xA4, 0]) add_insn("smovw", "onebyte", parser="gas", modifiers=[0xA5, 16]) add_insn("smovl", "onebyte", parser="gas", modifiers=[0xA5, 32], 
cpu=["386"]) add_insn("smovq", "onebyte", parser="gas", modifiers=[0xA5, 64], only64=True) add_insn("scasb", "onebyte", modifiers=[0xAE, 0]) add_insn("scasw", "onebyte", modifiers=[0xAF, 16]) add_insn("scasd", "onebyte", parser="nasm", modifiers=[0xAF, 32], cpu=["386"]) add_insn("scasl", "onebyte", parser="gas", modifiers=[0xAF, 32], cpu=["386"]) add_insn("scasq", "onebyte", modifiers=[0xAF, 64], only64=True) # ssca alias for scas in GAS mode add_insn("sscab", "onebyte", parser="gas", modifiers=[0xAE, 0]) add_insn("sscaw", "onebyte", parser="gas", modifiers=[0xAF, 16]) add_insn("sscal", "onebyte", parser="gas", modifiers=[0xAF, 32], cpu=["386"]) add_insn("sscaq", "onebyte", parser="gas", modifiers=[0xAF, 64], only64=True) add_insn("stosb", "onebyte", modifiers=[0xAA, 0]) add_insn("stosw", "onebyte", modifiers=[0xAB, 16]) add_insn("stosd", "onebyte", parser="nasm", modifiers=[0xAB, 32], cpu=["386"]) add_insn("stosl", "onebyte", parser="gas", modifiers=[0xAB, 32], cpu=["386"]) add_insn("stosq", "onebyte", modifiers=[0xAB, 64], only64=True) add_insn("xlatb", "onebyte", modifiers=[0xD7, 0]) ##################################################################### # Bit manipulation ##################################################################### # # bit tests # for sfx, sz in zip("wlq", [16, 32, 64]): add_group("bittest", suffix=sfx, cpu=["386"], modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("bittest", suffix=sfx, cpu=["386"], modifiers=["Gap", "SpAdd"], opersize=sz, opcode=[0x0F, 0xBA], spare=0, operands=[Operand(type="RM", size=sz, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("bt", "bittest", modifiers=[0xA3, 4]) add_insn("bts", "bittest", modifiers=[0xAB, 5]) add_insn("btr", "bittest", modifiers=[0xB3, 6]) add_insn("btc", "bittest", modifiers=[0xBB, 7]) # # bit scans - also used for lar/lsl # for sfx, sz in zip("wlq", [16, 32, 64]): add_group("bsfr", suffix=sfx, modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) add_insn("bsf", "bsfr", modifiers=[0xBC], cpu=["386"]) add_insn("bsr", "bsfr", modifiers=[0xBD], cpu=["386"]) ##################################################################### # Interrupts and operating system instructions ##################################################################### add_group("int", opcode=[0xCD], operands=[Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("int", "int") add_insn("int3", "onebyte", modifiers=[0xCC]) add_insn("int03", "onebyte", parser="nasm", modifiers=[0xCC]) add_insn("into", "onebyte", modifiers=[0xCE], not64=True) add_insn("iret", "onebyte", modifiers=[0xCF]) add_insn("iretw", "onebyte", modifiers=[0xCF, 16]) add_insn("iretd", "onebyte", parser="nasm", modifiers=[0xCF, 32], cpu=["386"]) add_insn("iretl", "onebyte", parser="gas", modifiers=[0xCF, 32], cpu=["386"]) add_insn("iretq", "onebyte", modifiers=[0xCF, 64], only64=True) add_insn("rsm", "twobyte", modifiers=[0x0F, 0xAA], cpu=["586", "SMM"]) for sfx, sz in zip("wl", [16, 32]): add_group("bound", suffix=sfx, cpu=["186"], not64=True, opersize=sz, opcode=[0x62], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) add_insn("bound", "bound") add_insn("hlt", "onebyte", modifiers=[0xF4], cpu=["Priv"]) 
add_insn("nop", "onebyte", modifiers=[0x90]) # # Protection control # for sfx, sz, sz2 in zip("wlq", [16, 32, 64], [16, 32, 32]): add_group("larlsl", suffix=sfx, modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Reg", size=sz2, dest="EA")]) add_group("larlsl", suffix=sfx, modifiers=["Op1Add"], opersize=sz, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=16, relaxed=True, dest="EA")]) add_insn("lar", "larlsl", modifiers=[0x02], cpu=["286", "Prot"]) add_insn("lsl", "larlsl", modifiers=[0x03], cpu=["286", "Prot"]) add_group("arpl", suffix="w", cpu=["Prot", "286"], not64=True, opcode=[0x63], operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), Operand(type="Reg", size=16, dest="Spare")]) add_insn("arpl", "arpl") for sfx in [None, "w", "l", "q"]: add_insn("lgdt"+(sfx or ""), "twobytemem", suffix=sfx, modifiers=[2, 0x0F, 0x01], cpu=["286", "Priv"]) add_insn("lidt"+(sfx or ""), "twobytemem", suffix=sfx, modifiers=[3, 0x0F, 0x01], cpu=["286", "Priv"]) add_insn("sgdt"+(sfx or ""), "twobytemem", suffix=sfx, modifiers=[0, 0x0F, 0x01], cpu=["286", "Priv"]) add_insn("sidt"+(sfx or ""), "twobytemem", suffix=sfx, modifiers=[1, 0x0F, 0x01], cpu=["286", "Priv"]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("str", suffix=sfx, cpu=["Prot", "286"], opersize=sz, opcode=[0x0F, 0x00], spare=1, operands=[Operand(type="Reg", size=sz, dest="EA")]) add_group("str", suffixes=["w", "l"], cpu=["Prot", "286"], opcode=[0x0F, 0x00], spare=1, operands=[Operand(type="RM", size=16, relaxed=True, dest="EA")]) add_insn("str", "str") add_group("prot286", suffix="w", cpu=["286"], modifiers=["SpAdd", "Op1Add"], opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="RM", size=16, relaxed=True, dest="EA")]) add_insn("lldt", "prot286", modifiers=[2, 0], cpu=["286", "Prot", "Priv"]) add_insn("ltr", "prot286", modifiers=[3, 0], cpu=["286", "Prot", "Priv"]) add_insn("verr", "prot286", modifiers=[4, 0], cpu=["286", "Prot"]) add_insn("verw", "prot286", modifiers=[5, 0], cpu=["286", "Prot"]) add_insn("lmsw", "prot286", modifiers=[6, 1], cpu=["286", "Priv"]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("sldtmsw", suffix=sfx, only64=(sz==64), cpu=[(sz==32) and "386" or "286"], modifiers=["SpAdd", "Op1Add"], opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("sldtmsw", suffix=sfx, cpu=["286"], modifiers=["SpAdd", "Op1Add"], opersize=sz, opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="Reg", size=sz, dest="EA")]) add_insn("sldt", "sldtmsw", modifiers=[0, 0]) add_insn("smsw", "sldtmsw", modifiers=[4, 1]) ##################################################################### # Floating point instructions ##################################################################### add_insn("fcompp", "twobyte", modifiers=[0xDE, 0xD9], cpu=["FPU"]) add_insn("fucompp", "twobyte", modifiers=[0xDA, 0xE9], cpu=["286", "FPU"]) add_insn("ftst", "twobyte", modifiers=[0xD9, 0xE4], cpu=["FPU"]) add_insn("fxam", "twobyte", modifiers=[0xD9, 0xE5], cpu=["FPU"]) add_insn("fld1", "twobyte", modifiers=[0xD9, 0xE8], cpu=["FPU"]) add_insn("fldl2t", "twobyte", modifiers=[0xD9, 0xE9], cpu=["FPU"]) add_insn("fldl2e", "twobyte", modifiers=[0xD9, 0xEA], cpu=["FPU"]) add_insn("fldpi", "twobyte", modifiers=[0xD9, 0xEB], cpu=["FPU"]) add_insn("fldlg2", "twobyte", modifiers=[0xD9, 0xEC], cpu=["FPU"]) add_insn("fldln2", "twobyte", 
         modifiers=[0xD9, 0xED], cpu=["FPU"])
add_insn("fldz", "twobyte", modifiers=[0xD9, 0xEE], cpu=["FPU"])
add_insn("f2xm1", "twobyte", modifiers=[0xD9, 0xF0], cpu=["FPU"])
add_insn("fyl2x", "twobyte", modifiers=[0xD9, 0xF1], cpu=["FPU"])
add_insn("fptan", "twobyte", modifiers=[0xD9, 0xF2], cpu=["FPU"])
add_insn("fpatan", "twobyte", modifiers=[0xD9, 0xF3], cpu=["FPU"])
add_insn("fxtract", "twobyte", modifiers=[0xD9, 0xF4], cpu=["FPU"])
add_insn("fprem1", "twobyte", modifiers=[0xD9, 0xF5], cpu=["286", "FPU"])
add_insn("fdecstp", "twobyte", modifiers=[0xD9, 0xF6], cpu=["FPU"])
add_insn("fincstp", "twobyte", modifiers=[0xD9, 0xF7], cpu=["FPU"])
add_insn("fprem", "twobyte", modifiers=[0xD9, 0xF8], cpu=["FPU"])
add_insn("fyl2xp1", "twobyte", modifiers=[0xD9, 0xF9], cpu=["FPU"])
add_insn("fsqrt", "twobyte", modifiers=[0xD9, 0xFA], cpu=["FPU"])
add_insn("fsincos", "twobyte", modifiers=[0xD9, 0xFB], cpu=["286", "FPU"])
add_insn("frndint", "twobyte", modifiers=[0xD9, 0xFC], cpu=["FPU"])
add_insn("fscale", "twobyte", modifiers=[0xD9, 0xFD], cpu=["FPU"])
add_insn("fsin", "twobyte", modifiers=[0xD9, 0xFE], cpu=["286", "FPU"])
add_insn("fcos", "twobyte", modifiers=[0xD9, 0xFF], cpu=["286", "FPU"])
add_insn("fchs", "twobyte", modifiers=[0xD9, 0xE0], cpu=["FPU"])
add_insn("fabs", "twobyte", modifiers=[0xD9, 0xE1], cpu=["FPU"])
add_insn("fninit", "twobyte", modifiers=[0xDB, 0xE3], cpu=["FPU"])
add_insn("finit", "threebyte", modifiers=[0x9B, 0xDB, 0xE3], cpu=["FPU"])
add_insn("fnclex", "twobyte", modifiers=[0xDB, 0xE2], cpu=["FPU"])
add_insn("fclex", "threebyte", modifiers=[0x9B, 0xDB, 0xE2], cpu=["FPU"])
for sfx in [None, "l", "s"]:
    add_insn("fnstenv"+(sfx or ""), "onebytemem", suffix=sfx,
             modifiers=[6, 0xD9], cpu=["FPU"])
    add_insn("fstenv"+(sfx or ""), "twobytemem", suffix=sfx,
             modifiers=[6, 0x9B, 0xD9], cpu=["FPU"])
    add_insn("fldenv"+(sfx or ""), "onebytemem", suffix=sfx,
             modifiers=[4, 0xD9], cpu=["FPU"])
    add_insn("fnsave"+(sfx or ""), "onebytemem", suffix=sfx,
             modifiers=[6, 0xDD], cpu=["FPU"])
    add_insn("fsave"+(sfx or ""), "twobytemem", suffix=sfx,
             modifiers=[6, 0x9B, 0xDD], cpu=["FPU"])
    add_insn("frstor"+(sfx or ""), "onebytemem", suffix=sfx,
             modifiers=[4, 0xDD], cpu=["FPU"])
add_insn("fnop", "twobyte", modifiers=[0xD9, 0xD0], cpu=["FPU"])
add_insn("fwait", "onebyte", modifiers=[0x9B], cpu=["FPU"])
# Prefixes; should the others be here too? should wait be a prefix?
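# (Related: the fn*/f* pairs above differ only in a leading 0x9B FWAIT byte
# -- compare fnstenv's modifiers=[6, 0xD9] with fstenv's [6, 0x9B, 0xD9] --
# so the "waiting" forms simply route through the twobytemem/threebyte
# templates with 0x9B prepended.)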
add_insn("wait", "onebyte", modifiers=[0x9B]) # # load/store with pop (integer and normal) # add_group("fld", suffix="s", cpu=["FPU"], opcode=[0xD9], operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("fld", suffix="l", cpu=["FPU"], opcode=[0xDD], operands=[Operand(type="Mem", size=64, dest="EA")]) add_group("fld", cpu=["FPU"], opcode=[0xDB], spare=5, operands=[Operand(type="Mem", size=80, dest="EA")]) add_group("fld", cpu=["FPU"], opcode=[0xD9, 0xC0], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_insn("fld", "fld") add_group("fstp", suffix="s", cpu=["FPU"], opcode=[0xD9], spare=3, operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("fstp", suffix="l", cpu=["FPU"], opcode=[0xDD], spare=3, operands=[Operand(type="Mem", size=64, dest="EA")]) add_group("fstp", cpu=["FPU"], opcode=[0xDB], spare=7, operands=[Operand(type="Mem", size=80, dest="EA")]) add_group("fstp", cpu=["FPU"], opcode=[0xDD, 0xD8], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_insn("fstp", "fstp") # # Long memory version of floating point load/store for GAS # add_group("fldstpt", cpu=["FPU"], modifiers=["SpAdd"], opcode=[0xDB], spare=0, operands=[Operand(type="Mem", size=80, relaxed=True, dest="EA")]) add_insn("fldt", "fldstpt", modifiers=[5]) add_insn("fstpt", "fldstpt", modifiers=[7]) add_group("fildstp", suffix="s", cpu=["FPU"], modifiers=["SpAdd"], opcode=[0xDF], spare=0, operands=[Operand(type="Mem", size=16, dest="EA")]) add_group("fildstp", suffix="l", cpu=["FPU"], modifiers=["SpAdd"], opcode=[0xDB], spare=0, operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("fildstp", suffix="q", cpu=["FPU"], modifiers=["Gap", "Op0Add", "SpAdd"], opcode=[0xDD], spare=0, operands=[Operand(type="Mem", size=64, dest="EA")]) # No-suffix alias for memory for GAS compat -> "s" version generated add_group("fildstp", cpu=["FPU"], parsers=["gas"], modifiers=["SpAdd"], opcode=[0xDF], spare=0, operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA")]) add_insn("fild", "fildstp", modifiers=[0, 2, 5]) add_insn("fistp", "fildstp", modifiers=[3, 2, 7]) add_group("fbldstp", cpu=["FPU"], modifiers=["SpAdd"], opcode=[0xDF], spare=0, operands=[Operand(type="Mem", size=80, relaxed=True, dest="EA")]) add_insn("fbld", "fbldstp", modifiers=[4]) add_insn("fildll", "fbldstp", parser="gas", modifiers=[5]) add_insn("fbstp", "fbldstp", modifiers=[6]) add_insn("fistpll", "fbldstp", parser="gas", modifiers=[7]) # # store (normal) # add_group("fst", suffix="s", cpu=["FPU"], opcode=[0xD9], spare=2, operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("fst", suffix="l", cpu=["FPU"], opcode=[0xDD], spare=2, operands=[Operand(type="Mem", size=64, dest="EA")]) add_group("fst", cpu=["FPU"], opcode=[0xDD, 0xD0], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_insn("fst", "fst") # # exchange (with ST0) # add_group("fxch", cpu=["FPU"], opcode=[0xD9, 0xC8], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_group("fxch", cpu=["FPU"], opcode=[0xD9, 0xC8], operands=[Operand(type="ST0", size=80, dest=None), Operand(type="Reg", size=80, dest="Op1Add")]) add_group("fxch", cpu=["FPU"], opcode=[0xD9, 0xC8], operands=[Operand(type="Reg", size=80, dest="Op1Add"), Operand(type="ST0", size=80, dest=None)]) add_group("fxch", cpu=["FPU"], opcode=[0xD9, 0xC9], operands=[]) add_insn("fxch", "fxch") # # comparisons # add_group("fcom", suffix="s", cpu=["FPU"], modifiers=["Gap", "SpAdd"], opcode=[0xD8], spare=0, operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("fcom", suffix="l", 
cpu=["FPU"], modifiers=["Gap", "SpAdd"], opcode=[0xDC], spare=0, operands=[Operand(type="Mem", size=64, dest="EA")]) add_group("fcom", cpu=["FPU"], modifiers=["Op1Add"], opcode=[0xD8, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) # No-suffix alias for memory for GAS compat -> "s" version generated add_group("fcom", cpu=["FPU"], parsers=["gas"], modifiers=["Gap", "SpAdd"], opcode=[0xD8], spare=0, operands=[Operand(type="Mem", size=32, relaxed=True, dest="EA")]) # Alias for fcom %st(1) for GAS compat add_group("fcom", cpu=["FPU"], parsers=["gas"], modifiers=["Op1Add"], opcode=[0xD8, 0x01], operands=[]) add_group("fcom", cpu=["FPU"], parsers=["nasm"], modifiers=["Op1Add"], opcode=[0xD8, 0x00], operands=[Operand(type="ST0", size=80, dest=None), Operand(type="Reg", size=80, dest="Op1Add")]) add_insn("fcom", "fcom", modifiers=[0xD0, 2]) add_insn("fcomp", "fcom", modifiers=[0xD8, 3]) # # extended comparisons # add_group("fcom2", cpu=["FPU", "286"], modifiers=["Op0Add", "Op1Add"], opcode=[0x00, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_group("fcom2", cpu=["FPU", "286"], modifiers=["Op0Add", "Op1Add"], opcode=[0x00, 0x00], operands=[Operand(type="ST0", size=80, dest=None), Operand(type="Reg", size=80, dest="Op1Add")]) add_insn("fucom", "fcom2", modifiers=[0xDD, 0xE0]) add_insn("fucomp", "fcom2", modifiers=[0xDD, 0xE8]) # # arithmetic # add_group("farith", suffix="s", cpu=["FPU"], modifiers=["Gap", "Gap", "SpAdd"], opcode=[0xD8], spare=0, operands=[Operand(type="Mem", size=32, dest="EA")]) add_group("farith", suffix="l", cpu=["FPU"], modifiers=["Gap", "Gap", "SpAdd"], opcode=[0xDC], spare=0, operands=[Operand(type="Mem", size=64, dest="EA")]) add_group("farith", cpu=["FPU"], modifiers=["Gap", "Op1Add"], opcode=[0xD8, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_group("farith", cpu=["FPU"], modifiers=["Gap", "Op1Add"], opcode=[0xD8, 0x00], operands=[Operand(type="ST0", size=80, dest=None), Operand(type="Reg", size=80, dest="Op1Add")]) add_group("farith", cpu=["FPU"], modifiers=["Op1Add"], opcode=[0xDC, 0x00], operands=[Operand(type="Reg", size=80, tmod="To", dest="Op1Add")]) add_group("farith", cpu=["FPU"], parsers=["nasm"], modifiers=["Op1Add"], opcode=[0xDC, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add"), Operand(type="ST0", size=80, dest=None)]) add_group("farith", cpu=["FPU"], parsers=["gas"], modifiers=["Gap", "Op1Add"], opcode=[0xDC, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add"), Operand(type="ST0", size=80, dest=None)]) add_insn("fadd", "farith", modifiers=[0xC0, 0xC0, 0]) add_insn("fsub", "farith", modifiers=[0xE8, 0xE0, 4]) add_insn("fsubr", "farith", modifiers=[0xE0, 0xE8, 5]) add_insn("fmul", "farith", modifiers=[0xC8, 0xC8, 1]) add_insn("fdiv", "farith", modifiers=[0xF8, 0xF0, 6]) add_insn("fdivr", "farith", modifiers=[0xF0, 0xF8, 7]) add_group("farithp", cpu=["FPU"], modifiers=["Op1Add"], opcode=[0xDE, 0x01], operands=[]) add_group("farithp", cpu=["FPU"], modifiers=["Op1Add"], opcode=[0xDE, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add")]) add_group("farithp", cpu=["FPU"], modifiers=["Op1Add"], opcode=[0xDE, 0x00], operands=[Operand(type="Reg", size=80, dest="Op1Add"), Operand(type="ST0", size=80, dest=None)]) add_insn("faddp", "farithp", modifiers=[0xC0]) add_insn("fsubp", "farithp", parser="nasm", modifiers=[0xE8]) add_insn("fsubp", "farithp", parser="gas", modifiers=[0xE0]) add_insn("fsubrp", "farithp", parser="nasm", modifiers=[0xE0]) add_insn("fsubrp", "farithp", parser="gas", 
         modifiers=[0xE8])
add_insn("fmulp", "farithp", modifiers=[0xC8])
add_insn("fdivp", "farithp", parser="nasm", modifiers=[0xF8])
add_insn("fdivp", "farithp", parser="gas", modifiers=[0xF0])
add_insn("fdivrp", "farithp", parser="nasm", modifiers=[0xF0])
add_insn("fdivrp", "farithp", parser="gas", modifiers=[0xF8])

#
# integer arith/store wo pop/compare
#
add_group("fiarith", suffix="s", cpu=["FPU"], modifiers=["SpAdd", "Op0Add"],
          opcode=[0x04], spare=0,
          operands=[Operand(type="Mem", size=16, dest="EA")])
add_group("fiarith", suffix="l", cpu=["FPU"], modifiers=["SpAdd", "Op0Add"],
          opcode=[0x00], spare=0,
          operands=[Operand(type="Mem", size=32, dest="EA")])
add_insn("fist", "fiarith", modifiers=[2, 0xDB])
add_insn("ficom", "fiarith", modifiers=[2, 0xDA])
add_insn("ficomp", "fiarith", modifiers=[3, 0xDA])
add_insn("fiadd", "fiarith", modifiers=[0, 0xDA])
add_insn("fisub", "fiarith", modifiers=[4, 0xDA])
add_insn("fisubr", "fiarith", modifiers=[5, 0xDA])
add_insn("fimul", "fiarith", modifiers=[1, 0xDA])
add_insn("fidiv", "fiarith", modifiers=[6, 0xDA])
add_insn("fidivr", "fiarith", modifiers=[7, 0xDA])

#
# processor control
#
add_group("fldnstcw", suffix="w", cpu=["FPU"], modifiers=["SpAdd"],
          opcode=[0xD9], spare=0,
          operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA")])
add_insn("fldcw", "fldnstcw", modifiers=[5])
add_insn("fnstcw", "fldnstcw", modifiers=[7])

add_group("fstcw", suffix="w", cpu=["FPU"], opcode=[0x9B, 0xD9], spare=7,
          operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA")])
add_insn("fstcw", "fstcw")

add_group("fnstsw", suffix="w", cpu=["FPU"], opcode=[0xDD], spare=7,
          operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA")])
add_group("fnstsw", suffix="w", cpu=["FPU"], opcode=[0xDF, 0xE0],
          operands=[Operand(type="Areg", size=16, dest=None)])
add_insn("fnstsw", "fnstsw")

add_group("fstsw", suffix="w", cpu=["FPU"], opcode=[0x9B, 0xDD], spare=7,
          operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA")])
add_group("fstsw", suffix="w", cpu=["FPU"], opcode=[0x9B, 0xDF, 0xE0],
          operands=[Operand(type="Areg", size=16, dest=None)])
add_insn("fstsw", "fstsw")

add_group("ffree", cpu=["FPU"], modifiers=["Op0Add"], opcode=[0x00, 0xC0],
          operands=[Operand(type="Reg", size=80, dest="Op1Add")])
add_insn("ffree", "ffree", modifiers=[0xDD])
add_insn("ffreep", "ffree", modifiers=[0xDF], cpu=["686", "FPU", "Undoc"])

#####################################################################
# 486 extensions
#####################################################################
add_group("bswap", suffix="l", cpu=["486"], opersize=32, opcode=[0x0F, 0xC8],
          operands=[Operand(type="Reg", size=32, dest="Op1Add")])
add_group("bswap", suffix="q", opersize=64, opcode=[0x0F, 0xC8],
          operands=[Operand(type="Reg", size=64, dest="Op1Add")])
add_insn("bswap", "bswap")

for sfx, sz in zip("bwlq", [8, 16, 32, 64]):
    add_group("cmpxchgxadd", suffix=sfx, cpu=["486"], modifiers=["Op1Add"],
              opersize=sz, opcode=[0x0F, 0x00+(sz!=8)],
              operands=[Operand(type="RM", size=sz, relaxed=True, dest="EA"),
                        Operand(type="Reg", size=sz, dest="Spare")])
add_insn("xadd", "cmpxchgxadd", modifiers=[0xC0])
add_insn("cmpxchg", "cmpxchgxadd", modifiers=[0xB0])
add_insn("cmpxchg486", "cmpxchgxadd", parser="nasm", modifiers=[0xA6],
         cpu=["486", "Undoc"])

add_insn("invd", "twobyte", modifiers=[0x0F, 0x08], cpu=["486", "Priv"])
add_insn("wbinvd", "twobyte", modifiers=[0x0F, 0x09], cpu=["486", "Priv"])
add_insn("invlpg", "twobytemem", modifiers=[7, 0x0F, 0x01], cpu=["486", "Priv"])
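# A note on the 0x00+(sz!=8) idiom in "cmpxchgxadd" above: Python coerces
# the boolean to an int, so the byte-sized form keeps opcode base 0x00 and
# the 16/32/64-bit forms get 0x01, mirroring the x86 convention that bit 0
# of the opcode selects byte vs. full-size operands.  Combined with the
# Op1Add values, xadd encodes as 0F C0 /r (r/m8) or 0F C1 /r
# (r/m16/32/64), and cmpxchg as 0F B0 /r / 0F B1 /r.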
#####################################################################
# 586+ and late 486 extensions
#####################################################################
add_insn("cpuid", "twobyte", modifiers=[0x0F, 0xA2], cpu=["486"])

#####################################################################
# Pentium extensions
#####################################################################
add_insn("wrmsr", "twobyte", modifiers=[0x0F, 0x30], cpu=["586", "Priv"])
add_insn("rdtsc", "twobyte", modifiers=[0x0F, 0x31], cpu=["586"])
add_insn("rdmsr", "twobyte", modifiers=[0x0F, 0x32], cpu=["586", "Priv"])

add_group("cmpxchg8b", suffix="q", cpu=["586"], opcode=[0x0F, 0xC7], spare=1,
          operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA")])
add_insn("cmpxchg8b", "cmpxchg8b")

#####################################################################
# Pentium II/Pentium Pro extensions
#####################################################################
add_insn("sysenter", "twobyte", modifiers=[0x0F, 0x34], cpu=["686"],
         not64=True)
add_insn("sysexit", "twobyte", modifiers=[0x0F, 0x35], cpu=["686", "Priv"],
         not64=True)
for sfx in [None, "q"]:
    add_insn("fxsave"+(sfx or ""), "twobytemem", suffix=sfx,
             modifiers=[0, 0x0F, 0xAE], cpu=["686", "FPU"])
    add_insn("fxrstor"+(sfx or ""), "twobytemem", suffix=sfx,
             modifiers=[1, 0x0F, 0xAE], cpu=["686", "FPU"])
add_insn("rdpmc", "twobyte", modifiers=[0x0F, 0x33], cpu=["686"])
add_insn("ud2", "twobyte", modifiers=[0x0F, 0x0B], cpu=["286"])
add_insn("ud1", "twobyte", modifiers=[0x0F, 0xB9], cpu=["286", "Undoc"])

for sfx, sz in zip("wlq", [16, 32, 64]):
    add_group("cmovcc", suffix=sfx, cpu=["686"], modifiers=["Op1Add"],
              opersize=sz, opcode=[0x0F, 0x40],
              operands=[Operand(type="Reg", size=sz, dest="Spare"),
                        Operand(type="RM", size=sz, relaxed=True, dest="EA")])
add_insn("cmovo", "cmovcc", modifiers=[0x00])
add_insn("cmovno", "cmovcc", modifiers=[0x01])
add_insn("cmovb", "cmovcc", modifiers=[0x02])
add_insn("cmovc", "cmovcc", modifiers=[0x02])
add_insn("cmovnae", "cmovcc", modifiers=[0x02])
add_insn("cmovnb", "cmovcc", modifiers=[0x03])
add_insn("cmovnc", "cmovcc", modifiers=[0x03])
add_insn("cmovae", "cmovcc", modifiers=[0x03])
add_insn("cmove", "cmovcc", modifiers=[0x04])
add_insn("cmovz", "cmovcc", modifiers=[0x04])
add_insn("cmovne", "cmovcc", modifiers=[0x05])
add_insn("cmovnz", "cmovcc", modifiers=[0x05])
add_insn("cmovbe", "cmovcc", modifiers=[0x06])
add_insn("cmovna", "cmovcc", modifiers=[0x06])
add_insn("cmovnbe", "cmovcc", modifiers=[0x07])
add_insn("cmova", "cmovcc", modifiers=[0x07])
add_insn("cmovs", "cmovcc", modifiers=[0x08])
add_insn("cmovns", "cmovcc", modifiers=[0x09])
add_insn("cmovp", "cmovcc", modifiers=[0x0A])
add_insn("cmovpe", "cmovcc", modifiers=[0x0A])
add_insn("cmovnp", "cmovcc", modifiers=[0x0B])
add_insn("cmovpo", "cmovcc", modifiers=[0x0B])
add_insn("cmovl", "cmovcc", modifiers=[0x0C])
add_insn("cmovnge", "cmovcc", modifiers=[0x0C])
add_insn("cmovnl", "cmovcc", modifiers=[0x0D])
add_insn("cmovge", "cmovcc", modifiers=[0x0D])
add_insn("cmovle", "cmovcc", modifiers=[0x0E])
add_insn("cmovng", "cmovcc", modifiers=[0x0E])
add_insn("cmovnle", "cmovcc", modifiers=[0x0F])
add_insn("cmovg", "cmovcc", modifiers=[0x0F])

add_group("fcmovcc", cpu=["FPU", "686"], modifiers=["Op0Add", "Op1Add"],
          opcode=[0x00, 0x00],
          operands=[Operand(type="ST0", size=80, dest=None),
                    Operand(type="Reg", size=80, dest="Op1Add")])
add_insn("fcmovb", "fcmovcc", modifiers=[0xDA, 0xC0])
add_insn("fcmove", "fcmovcc", modifiers=[0xDA, 0xC8])
add_insn("fcmovbe",
"fcmovcc", modifiers=[0xDA, 0xD0]) add_insn("fcmovu", "fcmovcc", modifiers=[0xDA, 0xD8]) add_insn("fcmovnb", "fcmovcc", modifiers=[0xDB, 0xC0]) add_insn("fcmovne", "fcmovcc", modifiers=[0xDB, 0xC8]) add_insn("fcmovnbe", "fcmovcc", modifiers=[0xDB, 0xD0]) add_insn("fcmovnu", "fcmovcc", modifiers=[0xDB, 0xD8]) add_insn("fcomi", "fcom2", modifiers=[0xDB, 0xF0], cpu=["686", "FPU"]) add_insn("fucomi", "fcom2", modifiers=[0xDB, 0xE8], cpu=["686", "FPU"]) add_insn("fcomip", "fcom2", modifiers=[0xDF, 0xF0], cpu=["686", "FPU"]) add_insn("fucomip", "fcom2", modifiers=[0xDF, 0xE8], cpu=["686", "FPU"]) ##################################################################### # Pentium4 extensions ##################################################################### add_group("movnti", suffix="l", cpu=["P4"], opcode=[0x0F, 0xC3], operands=[Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="Reg", size=32, dest="Spare")]) add_group("movnti", suffix="q", cpu=["P4"], opersize=64, opcode=[0x0F, 0xC3], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="Reg", size=64, dest="Spare")]) add_insn("movnti", "movnti") add_group("clflush", cpu=["P3"], opcode=[0x0F, 0xAE], spare=7, operands=[Operand(type="Mem", size=8, relaxed=True, dest="EA")]) add_insn("clflush", "clflush") add_insn("lfence", "threebyte", modifiers=[0x0F, 0xAE, 0xE8], cpu=["P3"]) add_insn("mfence", "threebyte", modifiers=[0x0F, 0xAE, 0xF0], cpu=["P3"]) add_insn("pause", "onebyte_prefix", modifiers=[0xF3, 0x90], cpu=["P4"]) ##################################################################### # MMX/SSE2 instructions ##################################################################### add_insn("emms", "twobyte", modifiers=[0x0F, 0x77], cpu=["MMX"]) # # movd # add_group("movd", cpu=["MMX"], opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_group("movd", cpu=["MMX"], opersize=64, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("movd", cpu=["MMX"], opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) add_group("movd", cpu=["MMX"], opersize=64, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) add_group("movd", cpu=["SSE2"], prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_group("movd", cpu=["SSE2"], opersize=64, prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("movd", cpu=["SSE2"], prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movd", cpu=["SSE2"], opersize=64, prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("movd", "movd") # # movq # # MMX forms add_group("movq", cpu=["MMX"], parsers=["nasm"], opcode=[0x0F, 0x6F], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("movq", cpu=["MMX"], parsers=["nasm"], opersize=64, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", 
size=64, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("movq", cpu=["MMX"], parsers=["nasm"], opcode=[0x0F, 0x7F], operands=[Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) add_group("movq", cpu=["MMX"], parsers=["nasm"], opersize=64, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) # SSE2 forms add_group("movq", cpu=["SSE2"], parsers=["nasm"], prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movq", cpu=["SSE2"], parsers=["nasm"], prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("movq", cpu=["SSE2"], parsers=["nasm"], opersize=64, prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("movq", cpu=["SSE2"], parsers=["nasm"], prefix=0x66, opcode=[0x0F, 0xD6], operands=[Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movq", cpu=["SSE2"], parsers=["nasm"], opersize=64, prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("movq", "movq") add_group("mmxsse2", cpu=["MMX"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("mmxsse2", cpu=["SSE2"], modifiers=["Op1Add"], prefix=0x66, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("packssdw", "mmxsse2", modifiers=[0x6B]) add_insn("packsswb", "mmxsse2", modifiers=[0x63]) add_insn("packuswb", "mmxsse2", modifiers=[0x67]) add_insn("paddb", "mmxsse2", modifiers=[0xFC]) add_insn("paddw", "mmxsse2", modifiers=[0xFD]) add_insn("paddd", "mmxsse2", modifiers=[0xFE]) add_insn("paddq", "mmxsse2", modifiers=[0xD4]) add_insn("paddsb", "mmxsse2", modifiers=[0xEC]) add_insn("paddsw", "mmxsse2", modifiers=[0xED]) add_insn("paddusb", "mmxsse2", modifiers=[0xDC]) add_insn("paddusw", "mmxsse2", modifiers=[0xDD]) add_insn("pand", "mmxsse2", modifiers=[0xDB]) add_insn("pandn", "mmxsse2", modifiers=[0xDF]) add_insn("pcmpeqb", "mmxsse2", modifiers=[0x74]) add_insn("pcmpeqw", "mmxsse2", modifiers=[0x75]) add_insn("pcmpeqd", "mmxsse2", modifiers=[0x76]) add_insn("pcmpgtb", "mmxsse2", modifiers=[0x64]) add_insn("pcmpgtw", "mmxsse2", modifiers=[0x65]) add_insn("pcmpgtd", "mmxsse2", modifiers=[0x66]) add_insn("pmaddwd", "mmxsse2", modifiers=[0xF5]) add_insn("pmulhw", "mmxsse2", modifiers=[0xE5]) add_insn("pmullw", "mmxsse2", modifiers=[0xD5]) add_insn("por", "mmxsse2", modifiers=[0xEB]) add_insn("psubb", "mmxsse2", modifiers=[0xF8]) add_insn("psubw", "mmxsse2", modifiers=[0xF9]) add_insn("psubd", "mmxsse2", modifiers=[0xFA]) add_insn("psubq", "mmxsse2", modifiers=[0xFB]) add_insn("psubsb", "mmxsse2", modifiers=[0xE8]) add_insn("psubsw", "mmxsse2", modifiers=[0xE9]) add_insn("psubusb", "mmxsse2", modifiers=[0xD8]) add_insn("psubusw", "mmxsse2", modifiers=[0xD9]) add_insn("punpckhbw", "mmxsse2", modifiers=[0x68]) add_insn("punpckhwd", "mmxsse2", modifiers=[0x69]) add_insn("punpckhdq", "mmxsse2", 
         modifiers=[0x6A])
add_insn("punpcklbw", "mmxsse2", modifiers=[0x60])
add_insn("punpcklwd", "mmxsse2", modifiers=[0x61])
add_insn("punpckldq", "mmxsse2", modifiers=[0x62])
add_insn("pxor", "mmxsse2", modifiers=[0xEF])

# AVX versions don't support the MMX registers
add_insn("vpackssdw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6B, VEXL0], avx=True)
add_insn("vpacksswb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x63, VEXL0], avx=True)
add_insn("vpackuswb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x67, VEXL0], avx=True)
add_insn("vpaddb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFC, VEXL0], avx=True)
add_insn("vpaddw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFD, VEXL0], avx=True)
add_insn("vpaddd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFE, VEXL0], avx=True)
add_insn("vpaddq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD4, VEXL0], avx=True)
add_insn("vpaddsb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEC, VEXL0], avx=True)
add_insn("vpaddsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xED, VEXL0], avx=True)
add_insn("vpaddusb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDC, VEXL0], avx=True)
add_insn("vpaddusw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDD, VEXL0], avx=True)
add_insn("vpand", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDB, VEXL0], avx=True)
add_insn("vpandn", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDF, VEXL0], avx=True)
add_insn("vpcmpeqb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x74, VEXL0], avx=True)
add_insn("vpcmpeqw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x75, VEXL0], avx=True)
add_insn("vpcmpeqd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x76, VEXL0], avx=True)
add_insn("vpcmpgtb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x64, VEXL0], avx=True)
add_insn("vpcmpgtw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x65, VEXL0], avx=True)
add_insn("vpcmpgtd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x66, VEXL0], avx=True)
add_insn("vpmaddwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF5, VEXL0], avx=True)
add_insn("vpmulhw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE5, VEXL0], avx=True)
add_insn("vpmullw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD5, VEXL0], avx=True)
add_insn("vpor", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEB, VEXL0], avx=True)
add_insn("vpsubb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF8, VEXL0], avx=True)
add_insn("vpsubw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF9, VEXL0], avx=True)
add_insn("vpsubd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFA, VEXL0], avx=True)
add_insn("vpsubq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFB, VEXL0], avx=True)
add_insn("vpsubsb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE8, VEXL0], avx=True)
add_insn("vpsubsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE9, VEXL0], avx=True)
add_insn("vpsubusb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD8, VEXL0], avx=True)
add_insn("vpsubusw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD9, VEXL0], avx=True)
add_insn("vpunpckhbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x68, VEXL0], avx=True)
add_insn("vpunpckhwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x69, VEXL0], avx=True)
add_insn("vpunpckhdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6A, VEXL0], avx=True)
add_insn("vpunpcklbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x60, VEXL0], avx=True)
add_insn("vpunpcklwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x61, VEXL0], avx=True)
add_insn("vpunpckldq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x62, VEXL0], avx=True)
add_insn("vpxor", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEF, VEXL0], avx=True)

add_group("pshift", cpu=["MMX"], modifiers=["Op1Add"], opcode=[0x0F, 0x00],
          operands=[Operand(type="SIMDReg",
size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("pshift", cpu=["MMX"], modifiers=["Gap", "Op1Add", "SpAdd"], opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="SIMDReg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pshift", cpu=["SSE2"], modifiers=["Op1Add"], prefix=0x66, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("pshift", cpu=["SSE2"], modifiers=["Gap", "Op1Add", "SpAdd"], prefix=0x66, opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("psllw", "pshift", modifiers=[0xF1, 0x71, 6]) add_insn("pslld", "pshift", modifiers=[0xF2, 0x72, 6]) add_insn("psllq", "pshift", modifiers=[0xF3, 0x73, 6]) add_insn("psraw", "pshift", modifiers=[0xE1, 0x71, 4]) add_insn("psrad", "pshift", modifiers=[0xE2, 0x72, 4]) add_insn("psrlw", "pshift", modifiers=[0xD1, 0x71, 2]) add_insn("psrld", "pshift", modifiers=[0xD2, 0x72, 2]) add_insn("psrlq", "pshift", modifiers=[0xD3, 0x73, 2]) # Ran out of modifiers, so AVX has to be separate for cpu, sz in zip(["AVX", "AVX2"], [128, 256]): add_group("vpshift", cpu=[cpu], modifiers=["Op1Add"], vex=sz, prefix=0x66, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vpshift", cpu=[cpu], modifiers=["Gap", "Op1Add", "SpAdd"], vex=sz, prefix=0x66, opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="SIMDReg", size=sz, dest="EAVEX"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("vpshift", cpu=[cpu], modifiers=["Op1Add"], vex=sz, prefix=0x66, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vpshift", cpu=[cpu], modifiers=["Gap", "Op1Add", "SpAdd"], vex=sz, prefix=0x66, opcode=[0x0F, 0x00], spare=0, operands=[Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDReg", size=sz, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vpsllw", "vpshift", modifiers=[0xF1, 0x71, 6]) add_insn("vpslld", "vpshift", modifiers=[0xF2, 0x72, 6]) add_insn("vpsllq", "vpshift", modifiers=[0xF3, 0x73, 6]) add_insn("vpsraw", "vpshift", modifiers=[0xE1, 0x71, 4]) add_insn("vpsrad", "vpshift", modifiers=[0xE2, 0x72, 4]) add_insn("vpsrlw", "vpshift", modifiers=[0xD1, 0x71, 2]) add_insn("vpsrld", "vpshift", modifiers=[0xD2, 0x72, 2]) add_insn("vpsrlq", "vpshift", modifiers=[0xD3, 0x73, 2]) # # PIII (Katmai) new instructions / SIMD instructions # add_insn("pavgb", "mmxsse2", modifiers=[0xE0], cpu=["P3", "MMX"]) add_insn("pavgw", "mmxsse2", modifiers=[0xE3], cpu=["P3", "MMX"]) add_insn("pmaxsw", "mmxsse2", modifiers=[0xEE], cpu=["P3", "MMX"]) add_insn("pmaxub", "mmxsse2", modifiers=[0xDE], cpu=["P3", "MMX"]) add_insn("pminsw", "mmxsse2", modifiers=[0xEA], cpu=["P3", "MMX"]) add_insn("pminub", "mmxsse2", modifiers=[0xDA], cpu=["P3", "MMX"]) add_insn("pmulhuw", "mmxsse2", modifiers=[0xE4], cpu=["P3", "MMX"]) add_insn("psadbw", "mmxsse2", modifiers=[0xF6], cpu=["P3", "MMX"]) # AVX versions don't support MMX register add_insn("vpavgb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE0, VEXL0], avx=True) add_insn("vpavgw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE3, VEXL0], avx=True) add_insn("vpmaxsw", 
"xmm_xmm128_256avx2", modifiers=[0x66, 0xEE, VEXL0], avx=True) add_insn("vpmaxub", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDE, VEXL0], avx=True) add_insn("vpminsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEA, VEXL0], avx=True) add_insn("vpminub", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDA, VEXL0], avx=True) add_insn("vpmulhuw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE4, VEXL0], avx=True) add_insn("vpsadbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF6, VEXL0], avx=True) add_insn("prefetchnta", "twobytemem", modifiers=[0, 0x0F, 0x18], cpu=["P3"]) add_insn("prefetcht0", "twobytemem", modifiers=[1, 0x0F, 0x18], cpu=["P3"]) add_insn("prefetcht1", "twobytemem", modifiers=[2, 0x0F, 0x18], cpu=["P3"]) add_insn("prefetcht2", "twobytemem", modifiers=[3, 0x0F, 0x18], cpu=["P3"]) add_insn("sfence", "threebyte", modifiers=[0x0F, 0xAE, 0xF8], cpu=["P3"]) add_group("xmm_xmm128_256", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) # Same as above, except 256-bit version only available in AVX2 add_group("xmm_xmm128_256avx2", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256avx2", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256avx2", cpu=["AVX2"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("xmm_xmm128_256avx2", cpu=["AVX2"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) # Version that does not allow YMM registers add_group("xmm_xmm128", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("xmm_xmm128", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, 
relaxed=True, dest="EA")]) add_insn("addps", "xmm_xmm128", modifiers=[0, 0x58]) add_insn("andnps", "xmm_xmm128", modifiers=[0, 0x55]) add_insn("andps", "xmm_xmm128", modifiers=[0, 0x54]) add_insn("divps", "xmm_xmm128", modifiers=[0, 0x5E]) add_insn("maxps", "xmm_xmm128", modifiers=[0, 0x5F]) add_insn("minps", "xmm_xmm128", modifiers=[0, 0x5D]) add_insn("mulps", "xmm_xmm128", modifiers=[0, 0x59]) add_insn("orps", "xmm_xmm128", modifiers=[0, 0x56]) add_insn("rcpps", "xmm_xmm128", modifiers=[0, 0x53]) add_insn("rsqrtps", "xmm_xmm128", modifiers=[0, 0x52]) add_insn("sqrtps", "xmm_xmm128", modifiers=[0, 0x51]) add_insn("subps", "xmm_xmm128", modifiers=[0, 0x5C]) add_insn("unpckhps", "xmm_xmm128", modifiers=[0, 0x15]) add_insn("unpcklps", "xmm_xmm128", modifiers=[0, 0x14]) add_insn("xorps", "xmm_xmm128", modifiers=[0, 0x57]) add_insn("vaddps", "xmm_xmm128_256", modifiers=[0, 0x58, VEXL0], avx=True) add_insn("vandnps", "xmm_xmm128_256", modifiers=[0, 0x55, VEXL0], avx=True) add_insn("vandps", "xmm_xmm128_256", modifiers=[0, 0x54, VEXL0], avx=True) add_insn("vdivps", "xmm_xmm128_256", modifiers=[0, 0x5E, VEXL0], avx=True) add_insn("vmaxps", "xmm_xmm128_256", modifiers=[0, 0x5F, VEXL0], avx=True) add_insn("vminps", "xmm_xmm128_256", modifiers=[0, 0x5D, VEXL0], avx=True) add_insn("vmulps", "xmm_xmm128_256", modifiers=[0, 0x59, VEXL0], avx=True) add_insn("vorps", "xmm_xmm128_256", modifiers=[0, 0x56, VEXL0], avx=True) # vrcpps, vrsqrtps, and vsqrtps don't add third operand add_insn("vsubps", "xmm_xmm128_256", modifiers=[0, 0x5C, VEXL0], avx=True) add_insn("vunpckhps", "xmm_xmm128_256", modifiers=[0, 0x15, VEXL0], avx=True) add_insn("vunpcklps", "xmm_xmm128_256", modifiers=[0, 0x14, VEXL0], avx=True) add_insn("vxorps", "xmm_xmm128_256", modifiers=[0, 0x57, VEXL0], avx=True) add_group("cvt_rx_xmm32", suffix="l", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("cvt_rx_xmm32", suffix="l", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) # REX add_group("cvt_rx_xmm32", suffix="q", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("cvt_rx_xmm32", suffix="q", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("cvtss2si", "cvt_rx_xmm32", modifiers=[0xF3, 0x2D]) add_insn("cvttss2si", "cvt_rx_xmm32", modifiers=[0xF3, 0x2C]) add_insn("vcvtss2si", "cvt_rx_xmm32", modifiers=[0xF3, 0x2D, VEXL0], avx=True) add_insn("vcvttss2si", "cvt_rx_xmm32", modifiers=[0xF3, 0x2C, VEXL0], avx=True) add_group("cvt_mm_xmm64", cpu=["SSE"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("cvt_mm_xmm64", cpu=["SSE"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("cvtps2pi", "cvt_mm_xmm64", modifiers=[0x2D]) add_insn("cvttps2pi", "cvt_mm_xmm64", modifiers=[0x2C]) 
add_group("cvt_xmm_mm_ps", cpu=["SSE"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_insn("cvtpi2ps", "cvt_xmm_mm_ps", modifiers=[0x2A]) # Memory size can be relaxed only in BITS=32 case, where there's no # ambiguity. add_group("cvt_xmm_rmx", suffix="l", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="RM", size=32, dest="EA")]) add_group("cvt_xmm_rmx", suffix="l", cpu=["SSE"], not64=True, modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) # REX add_group("cvt_xmm_rmx", suffix="q", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="RM", size=64, dest="EA")]) add_group("cvt_xmm_rmx", suffix="l", cpu=["AVX"], not64=True, modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_group("cvt_xmm_rmx", suffix="l", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="RM", size=32, dest="EA")]) add_group("cvt_xmm_rmx", suffix="q", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="RM", size=64, dest="EA")]) add_insn("cvtsi2ss", "cvt_xmm_rmx", modifiers=[0xF3, 0x2A]) add_insn("vcvtsi2ss", "cvt_xmm_rmx", modifiers=[0xF3, 0x2A, VEXL0], avx=True) add_group("xmm_xmm32", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("xmm_xmm32", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("xmm_xmm32", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("xmm_xmm32", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("addss", "xmm_xmm32", modifiers=[0xF3, 0x58]) add_insn("comiss", "xmm_xmm32", modifiers=[0, 0x2F]) add_insn("divss", "xmm_xmm32", modifiers=[0xF3, 0x5E]) add_insn("maxss", "xmm_xmm32", modifiers=[0xF3, 0x5F]) add_insn("minss", "xmm_xmm32", modifiers=[0xF3, 0x5D]) add_insn("mulss", "xmm_xmm32", modifiers=[0xF3, 0x59]) add_insn("rcpss", "xmm_xmm32", modifiers=[0xF3, 0x53]) add_insn("rsqrtss", "xmm_xmm32", modifiers=[0xF3, 0x52]) add_insn("sqrtss", "xmm_xmm32", modifiers=[0xF3, 0x51]) 
add_insn("subss", "xmm_xmm32", modifiers=[0xF3, 0x5C]) add_insn("ucomiss", "xmm_xmm32", modifiers=[0, 0x2E]) add_insn("vaddss", "xmm_xmm32", modifiers=[0xF3, 0x58, VEXL0], avx=True) # vcomiss and vucomiss are only two operand add_insn("vdivss", "xmm_xmm32", modifiers=[0xF3, 0x5E, VEXL0], avx=True) add_insn("vmaxss", "xmm_xmm32", modifiers=[0xF3, 0x5F, VEXL0], avx=True) add_insn("vminss", "xmm_xmm32", modifiers=[0xF3, 0x5D, VEXL0], avx=True) add_insn("vmulss", "xmm_xmm32", modifiers=[0xF3, 0x59, VEXL0], avx=True) add_insn("vrcpss", "xmm_xmm32", modifiers=[0xF3, 0x53, VEXL0], avx=True) add_insn("vrsqrtss", "xmm_xmm32", modifiers=[0xF3, 0x52, VEXL0], avx=True) add_insn("vsqrtss", "xmm_xmm32", modifiers=[0xF3, 0x51, VEXL0], avx=True) add_insn("vsubss", "xmm_xmm32", modifiers=[0xF3, 0x5C, VEXL0], avx=True) add_group("ssecmp_128", cpu=["SSE"], modifiers=["Imm8", "PreAdd", "SetVEX"], opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("ssecmp_128", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=128, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("ssecmp_128", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=256, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("ssecmp_32", cpu=["SSE"], modifiers=["Imm8", "PreAdd", "SetVEX"], prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("ssecmp_32", cpu=["SSE"], modifiers=["Imm8", "PreAdd", "SetVEX"], prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("ssecmp_32", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=128, prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("ssecmp_32", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=128, prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) ssecoms = [(0x0, "eq"), (0x1, "lt"), (0x2, "le"), (0x3, "unord"), (0x4, "neq"), (0x5, "nlt"), (0x6, "nle"), (0x7, "ord")] for ib, cc in ssecoms: add_insn("cmp"+cc+"ps", "ssecmp_128", modifiers=[ib]) add_insn("cmp"+cc+"ss", "ssecmp_32", modifiers=[ib, 0xF3]) avxcoms = [(0x00, "eq"), (0x01, "lt"), (0x02, "le"), (0x03, "unord"), (0x04, "neq"), (0x05, "nlt"), (0x06, "nle"), (0x07, "ord"), (0x08, "eq_uq"), (0x09, "nge"), (0x0a, "ngt"), (0x0b, "false"), (0x0c, "neq_oq"), (0x0d, "ge"), (0x0e, "gt"), (0x0f, "true"), (0x10, "eq_os"), (0x11, "lt_oq"), (0x12, "le_oq"), (0x13, "unord_s"), (0x14, "neq_us"), (0x15, "nlt_uq"), (0x16, "nle_uq"), (0x17, "ord_s"), (0x18, "eq_us"), (0x19, "nge_uq"), (0x1a, "ngt_uq"), (0x1b, "false_os"), (0x1c, "neq_os"), (0x1d, "ge_oq"), (0x1e, "gt_oq"), (0x1f, "true_us")] for ib, cc in avxcoms: add_insn("vcmp"+cc+"ps", "ssecmp_128", modifiers=[ib, 0, VEXL0], avx=True) add_insn("vcmp"+cc+"ss", "ssecmp_32", modifiers=[ib, 0xF3, VEXL0], avx=True) add_group("xmm_xmm128_imm", cpu=["SSE"], 
modifiers=["PreAdd", "Op1Add", "SetVEX"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("cmpps", "xmm_xmm128_imm", modifiers=[0, 0xC2]) add_insn("shufps", "xmm_xmm128_imm", modifiers=[0, 0xC6]) # YMM register AVX2 version of above add_group("xmm_xmm128_imm_256avx2", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm128_imm_256avx2", cpu=["AVX2"], modifiers=["PreAdd", "Op1Add"], vex=256, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # YMM register and 4-operand version of above add_group("xmm_xmm128_imm_256", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm128_imm_256", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm128_imm_256", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vcmpps", "xmm_xmm128_imm_256", modifiers=[0, 0xC2, VEXL0], avx=True) add_insn("vshufps", "xmm_xmm128_imm_256", modifiers=[0, 0xC6, VEXL0], avx=True) add_group("xmm_xmm32_imm", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm32_imm", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm32_imm", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("xmm_xmm32_imm", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("cmpss", "xmm_xmm32_imm", modifiers=[0xF3, 0xC2]) add_insn("vcmpss", "xmm_xmm32_imm", modifiers=[0xF3, 0xC2, VEXL0], avx=True) add_group("ldstmxcsr", cpu=["SSE"], modifiers=["SpAdd", "SetVEX"], opcode=[0x0F, 0xAE], 
spare=0, operands=[Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("ldmxcsr", "ldstmxcsr", modifiers=[2]) add_insn("stmxcsr", "ldstmxcsr", modifiers=[3]) add_insn("vldmxcsr", "ldstmxcsr", modifiers=[2, VEXL0], avx=True) add_insn("vstmxcsr", "ldstmxcsr", modifiers=[3, VEXL0], avx=True) add_group("maskmovq", cpu=["MMX", "P3"], opcode=[0x0F, 0xF7], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA")]) add_insn("maskmovq", "maskmovq") # Too many modifiers, so can't reuse first two cases for AVX version # Just repeat and disable first two with noavx. add_group("movau", cpu=["SSE"], modifiers=["PreAdd", "Op1Add"], notavx=True, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("movau", cpu=["SSE"], notavx=True, modifiers=["PreAdd", "Op1Add", "Op1Add"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movau", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("movau", cpu=["AVX"], modifiers=["PreAdd", "Op1Add", "Op1Add"], vex=128, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movau", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("movau", cpu=["AVX"], modifiers=["PreAdd", "Op1Add", "Op1Add"], vex=256, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="Spare")]) add_insn("movaps", "movau", modifiers=[0, 0x28, 0x01]) add_insn("movups", "movau", modifiers=[0, 0x10, 0x01]) add_insn("vmovaps", "movau", modifiers=[0, 0x28, 0x01], avx=True) add_insn("vmovups", "movau", modifiers=[0, 0x10, 0x01], avx=True) add_group("movhllhps", cpu=["SSE"], modifiers=["Op1Add", "SetVEX"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movhllhps", cpu=["AVX"], modifiers=["Op1Add"], vex=128, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("movhlps", "movhllhps", modifiers=[0x12]) add_insn("movlhps", "movhllhps", modifiers=[0x16]) add_insn("vmovhlps", "movhllhps", modifiers=[0x12, VEXL0], avx=True) add_insn("vmovlhps", "movhllhps", modifiers=[0x16, VEXL0], avx=True) add_group("movhlp", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("movhlp", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x01], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movhlp", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), 
Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("movhps", "movhlp", modifiers=[0, 0x16]) add_insn("movlps", "movhlp", modifiers=[0, 0x12]) add_insn("vmovhps", "movhlp", modifiers=[0, 0x16, VEXL0], avx=True) add_insn("vmovlps", "movhlp", modifiers=[0, 0x12, VEXL0], avx=True) add_group("movmsk", suffix="l", cpu=["SSE"], modifiers=["PreAdd", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x50], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movmsk", suffix="q", cpu=["SSE"], modifiers=["PreAdd", "SetVEX"], prefix=0x00, opersize=64, opcode=[0x0F, 0x50], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movmsk", suffix="l", cpu=["AVX"], modifiers=["PreAdd"], vex=256, prefix=0x00, opcode=[0x0F, 0x50], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=256, dest="EA")]) add_group("movmsk", suffix="q", cpu=["SSE"], modifiers=["PreAdd"], vex=256, prefix=0x00, opersize=64, opcode=[0x0F, 0x50], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=256, dest="EA")]) add_insn("movmskps", "movmsk") add_insn("vmovmskps", "movmsk", modifiers=[0, VEXL0], avx=True) add_group("movnt", cpu=["SSE"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Mem", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movnt", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Mem", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="Spare")]) add_insn("movntps", "movnt", modifiers=[0, 0x2B]) add_insn("vmovntps", "movnt", modifiers=[0, 0x2B, VEXL0], avx=True) add_group("movntq", cpu=["SSE"], opcode=[0x0F, 0xE7], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=64, dest="Spare")]) add_insn("movntq", "movntq") add_group("movss", cpu=["SSE"], modifiers=["SetVEX"], prefix=0xF3, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movss", cpu=["SSE"], modifiers=["SetVEX"], prefix=0xF3, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("movss", cpu=["SSE"], modifiers=["SetVEX"], prefix=0xF3, opcode=[0x0F, 0x11], operands=[Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movss", cpu=["AVX"], vex=128, prefix=0xF3, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("movss", "movss") add_insn("vmovss", "movss", modifiers=[VEXL0], avx=True) add_group("pextrw", suffix="l", cpu=["MMX", "P3"], notavx=True, opcode=[0x0F, 0xC5], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrw", suffix="l", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0xC5], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrw", suffix="q", cpu=["MMX", "P3"], notavx=True, opersize=64, opcode=[0x0F, 0xC5], 
operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrw", suffix="q", cpu=["SSE2"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0xC5], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # SSE41 instructions add_group("pextrw", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x15], operands=[Operand(type="Mem", size=16, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrw", cpu=["SSE41"], modifiers=["SetVEX"], opersize=32, prefix=0x66, opcode=[0x0F, 0x3A, 0x15], operands=[Operand(type="Reg", size=32, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrw", cpu=["SSE41"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x15], operands=[Operand(type="Reg", size=64, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pextrw", "pextrw") add_insn("vpextrw", "pextrw", modifiers=[VEXL0], avx=True) add_group("pinsrw", suffix="l", cpu=["MMX", "P3"], notavx=True, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="Reg", size=32, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="q", cpu=["MMX", "P3"], notavx=True, def_opersize_64=64, opersize=64, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="Reg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="l", cpu=["MMX", "P3"], notavx=True, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="Mem", size=16, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="l", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Reg", size=32, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="q", cpu=["SSE2"], modifiers=["SetVEX"], def_opersize_64=64, opersize=64, prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Reg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="l", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=16, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="l", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Reg", size=32, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrw", suffix="q", cpu=["AVX"], vex=128, def_opersize_64=64, opersize=64, prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Reg", size=64, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) 
add_group("pinsrw", suffix="l", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0xC4], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=16, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pinsrw", "pinsrw") add_insn("vpinsrw", "pinsrw", modifiers=[VEXL0], avx=True) add_group("pmovmskb", suffix="l", cpu=["MMX", "P3"], notavx=True, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA")]) add_group("pmovmskb", suffix="l", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("pmovmskb", suffix="l", cpu=["AVX2"], vex=256, prefix=0x66, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=256, dest="EA")]) add_group("pmovmskb", suffix="q", cpu=["MMX", "P3"], notavx=True, opersize=64, def_opersize_64=64, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA")]) add_group("pmovmskb", suffix="q", cpu=["SSE2"], modifiers=["SetVEX"], opersize=64, def_opersize_64=64, prefix=0x66, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("pmovmskb", suffix="q", cpu=["SSE2"], vex=256, opersize=64, def_opersize_64=64, prefix=0x66, opcode=[0x0F, 0xD7], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=256, dest="EA")]) add_insn("pmovmskb", "pmovmskb") add_insn("vpmovmskb", "pmovmskb", modifiers=[VEXL0], avx=True) add_group("pshufw", cpu=["MMX", "P3"], opcode=[0x0F, 0x70], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pshufw", "pshufw") ##################################################################### # SSE2 instructions ##################################################################### add_group("xmm_xmm64", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("xmm_xmm64", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("xmm_xmm64", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("xmm_xmm64", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("addsd", "xmm_xmm64", modifiers=[0xF2, 0x58]) add_insn("comisd", "xmm_xmm64", modifiers=[0x66, 0x2F]) add_insn("cvtdq2pd", "xmm_xmm64", modifiers=[0xF3, 0xE6]) add_insn("cvtps2pd", "xmm_xmm64", modifiers=[0, 0x5A]) add_insn("cvtsd2ss", "xmm_xmm64", modifiers=[0xF2, 0x5A]) add_insn("divsd", "xmm_xmm64", modifiers=[0xF2, 0x5E]) add_insn("maxsd", "xmm_xmm64", modifiers=[0xF2, 
0x5F]) add_insn("minsd", "xmm_xmm64", modifiers=[0xF2, 0x5D]) add_insn("mulsd", "xmm_xmm64", modifiers=[0xF2, 0x59]) add_insn("subsd", "xmm_xmm64", modifiers=[0xF2, 0x5C]) add_insn("sqrtsd", "xmm_xmm64", modifiers=[0xF2, 0x51]) add_insn("ucomisd", "xmm_xmm64", modifiers=[0x66, 0x2E]) add_insn("vaddsd", "xmm_xmm64", modifiers=[0xF2, 0x58, VEXL0], avx=True) # vcomisd and vucomisd are only two operand # vcvtdq2pd and vcvtps2pd can take ymm, xmm version add_insn("vcvtsd2ss", "xmm_xmm64", modifiers=[0xF2, 0x5A, VEXL0], avx=True) add_insn("vdivsd", "xmm_xmm64", modifiers=[0xF2, 0x5E, VEXL0], avx=True) add_insn("vmaxsd", "xmm_xmm64", modifiers=[0xF2, 0x5F, VEXL0], avx=True) add_insn("vminsd", "xmm_xmm64", modifiers=[0xF2, 0x5D, VEXL0], avx=True) add_insn("vmulsd", "xmm_xmm64", modifiers=[0xF2, 0x59, VEXL0], avx=True) add_insn("vsubsd", "xmm_xmm64", modifiers=[0xF2, 0x5C, VEXL0], avx=True) add_insn("vsqrtsd", "xmm_xmm64", modifiers=[0xF2, 0x51, VEXL0], avx=True) add_insn("addpd", "xmm_xmm128", modifiers=[0x66, 0x58], cpu=["SSE2"]) add_insn("andnpd", "xmm_xmm128", modifiers=[0x66, 0x55], cpu=["SSE2"]) add_insn("andpd", "xmm_xmm128", modifiers=[0x66, 0x54], cpu=["SSE2"]) add_insn("cvtdq2ps", "xmm_xmm128", modifiers=[0, 0x5B], cpu=["SSE2"]) add_insn("cvtpd2dq", "xmm_xmm128", modifiers=[0xF2, 0xE6], cpu=["SSE2"]) add_insn("cvtpd2ps", "xmm_xmm128", modifiers=[0x66, 0x5A], cpu=["SSE2"]) add_insn("cvtps2dq", "xmm_xmm128", modifiers=[0x66, 0x5B], cpu=["SSE2"]) add_insn("divpd", "xmm_xmm128", modifiers=[0x66, 0x5E], cpu=["SSE2"]) add_insn("maxpd", "xmm_xmm128", modifiers=[0x66, 0x5F], cpu=["SSE2"]) add_insn("minpd", "xmm_xmm128", modifiers=[0x66, 0x5D], cpu=["SSE2"]) add_insn("mulpd", "xmm_xmm128", modifiers=[0x66, 0x59], cpu=["SSE2"]) add_insn("orpd", "xmm_xmm128", modifiers=[0x66, 0x56], cpu=["SSE2"]) add_insn("sqrtpd", "xmm_xmm128", modifiers=[0x66, 0x51], cpu=["SSE2"]) add_insn("subpd", "xmm_xmm128", modifiers=[0x66, 0x5C], cpu=["SSE2"]) add_insn("unpckhpd", "xmm_xmm128", modifiers=[0x66, 0x15], cpu=["SSE2"]) add_insn("unpcklpd", "xmm_xmm128", modifiers=[0x66, 0x14], cpu=["SSE2"]) add_insn("xorpd", "xmm_xmm128", modifiers=[0x66, 0x57], cpu=["SSE2"]) add_insn("vaddpd", "xmm_xmm128_256", modifiers=[0x66, 0x58, VEXL0], avx=True) add_insn("vandnpd", "xmm_xmm128_256", modifiers=[0x66, 0x55, VEXL0], avx=True) add_insn("vandpd", "xmm_xmm128_256", modifiers=[0x66, 0x54, VEXL0], avx=True) # vcvtdq2ps and vcvtps2dq are 2-operand, YMM capable # vcvtpd2dq and vcvtpd2ps take xmm, ymm combination add_insn("vdivpd", "xmm_xmm128_256", modifiers=[0x66, 0x5E, VEXL0], avx=True) add_insn("vmaxpd", "xmm_xmm128_256", modifiers=[0x66, 0x5F, VEXL0], avx=True) add_insn("vminpd", "xmm_xmm128_256", modifiers=[0x66, 0x5D, VEXL0], avx=True) add_insn("vmulpd", "xmm_xmm128_256", modifiers=[0x66, 0x59, VEXL0], avx=True) add_insn("vorpd", "xmm_xmm128_256", modifiers=[0x66, 0x56, VEXL0], avx=True) # vsqrtpd doesn't add third operand add_insn("vsubpd", "xmm_xmm128_256", modifiers=[0x66, 0x5C, VEXL0], avx=True) add_insn("vunpckhpd", "xmm_xmm128_256", modifiers=[0x66, 0x15, VEXL0], avx=True) add_insn("vunpcklpd", "xmm_xmm128_256", modifiers=[0x66, 0x14, VEXL0], avx=True) add_insn("vxorpd", "xmm_xmm128_256", modifiers=[0x66, 0x57, VEXL0], avx=True) add_group("ssecmp_64", cpu=["SSE2"], modifiers=["Imm8", "PreAdd", "SetVEX"], prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("ssecmp_64", cpu=["SSE2"], modifiers=["Imm8", "PreAdd", "SetVEX"], 
prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("ssecmp_64", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=128, prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("ssecmp_64", cpu=["AVX"], modifiers=["Imm8", "PreAdd"], vex=128, prefix=0x00, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) for ib, cc in ssecoms: add_insn("cmp"+cc+"sd", "ssecmp_64", modifiers=[ib, 0xF2]) add_insn("cmp"+cc+"pd", "ssecmp_128", modifiers=[ib, 0x66]) for ib, cc in avxcoms: add_insn("vcmp"+cc+"sd", "ssecmp_64", modifiers=[ib, 0xF2, VEXL0], avx=True) add_insn("vcmp"+cc+"pd", "ssecmp_128", modifiers=[ib, 0x66, VEXL0], avx=True) add_insn("cmppd", "xmm_xmm128_imm", modifiers=[0x66, 0xC2], cpu=["SSE2"]) add_insn("shufpd", "xmm_xmm128_imm", modifiers=[0x66, 0xC6], cpu=["SSE2"]) add_insn("vcmppd", "xmm_xmm128_imm_256", modifiers=[0x66, 0xC2, VEXL0], avx=True) add_insn("vshufpd", "xmm_xmm128_imm_256", modifiers=[0x66, 0xC6, VEXL0], avx=True) add_insn("cvtsi2sd", "cvt_xmm_rmx", modifiers=[0xF2, 0x2A], cpu=["SSE2"]) add_insn("vcvtsi2sd", "cvt_xmm_rmx", modifiers=[0xF2, 0x2A, VEXL0], avx=True) add_group("cvt_rx_xmm64", suffix="l", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("cvt_rx_xmm64", suffix="l", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) # REX add_group("cvt_rx_xmm64", suffix="q", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("cvt_rx_xmm64", suffix="q", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add", "SetVEX"], opersize=64, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("cvtsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2D]) add_insn("vcvtsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2D, VEXL0], avx=True) add_group("cvt_mm_xmm", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("cvtpd2pi", "cvt_mm_xmm", modifiers=[0x66, 0x2D], cpu=["SSE2"]) add_group("cvt_xmm_mm_ss", cpu=["SSE"], modifiers=["PreAdd", "Op1Add"], prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_insn("cvtpi2pd", "cvt_xmm_mm_ss", modifiers=[0x66, 0x2A], cpu=["SSE2"]) # cmpsd SSE2 form add_group("cmpsd", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("cmpsd", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0xC2], 
operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("cmpsd", cpu=["AVX"], vex=128, prefix=0xF2, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("cmpsd", cpu=["AVX"], vex=128, prefix=0xF2, opcode=[0x0F, 0xC2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # cmpsd is added in string instructions above, so don't re-add_insn() add_insn("vcmpsd", "cmpsd", modifiers=[VEXL0], avx=True) add_insn("movapd", "movau", modifiers=[0x66, 0x28, 0x01], cpu=["SSE2"]) add_insn("movupd", "movau", modifiers=[0x66, 0x10, 0x01], cpu=["SSE2"]) add_insn("vmovapd", "movau", modifiers=[0x66, 0x28, 0x01], avx=True) add_insn("vmovupd", "movau", modifiers=[0x66, 0x10, 0x01], avx=True) add_insn("movhpd", "movhlp", modifiers=[0x66, 0x16], cpu=["SSE2"]) add_insn("movlpd", "movhlp", modifiers=[0x66, 0x12], cpu=["SSE2"]) add_insn("vmovhpd", "movhlp", modifiers=[0x66, 0x16, VEXL0], avx=True) add_insn("vmovlpd", "movhlp", modifiers=[0x66, 0x12, VEXL0], avx=True) add_insn("movmskpd", "movmsk", modifiers=[0x66], cpu=["SSE2"]) add_insn("vmovmskpd", "movmsk", modifiers=[0x66, VEXL0], avx=True) add_insn("movntpd", "movnt", modifiers=[0x66, 0x2B], cpu=["SSE2"]) add_insn("movntdq", "movnt", modifiers=[0x66, 0xE7], cpu=["SSE2"]) add_insn("vmovntpd", "movnt", modifiers=[0x66, 0x2B, VEXL0], avx=True) add_insn("vmovntdq", "movnt", modifiers=[0x66, 0xE7, VEXL0], avx=True) # movsd SSE2 forms add_group("movsd", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("movsd", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("movsd", cpu=["SSE2"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0x11], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("movsd", cpu=["AVX"], vex=128, prefix=0xF2, opcode=[0x0F, 0x10], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) # movsd is added in string instructions above, so don't re-add_insn() add_insn("vmovsd", "movsd", modifiers=[VEXL0], avx=True) ##################################################################### # P4 VMX Instructions ##################################################################### add_group("eptvpid", modifiers=["Op2Add"], suffix="l", not64=True, cpu=["EPTVPID"], opersize=32, prefix=0x66, opcode=[0x0F, 0x38, 0x80], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_group("eptvpid", modifiers=["Op2Add"], suffix="q", cpu=["EPTVPID"], opersize=64, prefix=0x66, opcode=[0x0F, 0x38, 0x80], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_insn("invept", "eptvpid", modifiers=[0]) add_insn("invvpid", "eptvpid", 
modifiers=[1]) add_insn("vmcall", "threebyte", modifiers=[0x0F, 0x01, 0xC1], cpu=["P4"]) add_insn("vmlaunch", "threebyte", modifiers=[0x0F, 0x01, 0xC2], cpu=["P4"]) add_insn("vmresume", "threebyte", modifiers=[0x0F, 0x01, 0xC3], cpu=["P4"]) add_insn("vmxoff", "threebyte", modifiers=[0x0F, 0x01, 0xC4], cpu=["P4"]) add_group("vmxmemrd", suffix="l", not64=True, cpu=["P4"], opersize=32, opcode=[0x0F, 0x78], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Reg", size=32, dest="Spare")]) add_group("vmxmemrd", suffix="q", cpu=["P4"], opersize=64, def_opersize_64=64, opcode=[0x0F, 0x78], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="Reg", size=64, dest="Spare")]) add_insn("vmread", "vmxmemrd") add_group("vmxmemwr", suffix="l", not64=True, cpu=["P4"], opersize=32, opcode=[0x0F, 0x79], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_group("vmxmemwr", suffix="q", cpu=["P4"], opersize=64, def_opersize_64=64, opcode=[0x0F, 0x79], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_insn("vmwrite", "vmxmemwr") add_group("vmxtwobytemem", modifiers=["SpAdd"], cpu=["P4"], opcode=[0x0F, 0xC7], spare=0, operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("vmptrld", "vmxtwobytemem", modifiers=[6]) add_insn("vmptrst", "vmxtwobytemem", modifiers=[7]) add_group("vmxthreebytemem", modifiers=["PreAdd"], cpu=["P4"], prefix=0x00, opcode=[0x0F, 0xC7], spare=6, operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("vmclear", "vmxthreebytemem", modifiers=[0x66]) add_insn("vmxon", "vmxthreebytemem", modifiers=[0xF3]) ##################################################################### # Intel SMX Instructions ##################################################################### add_insn("getsec", "twobyte", modifiers=[0x0F, 0x37], cpu=["SMX"]) add_insn("cvttpd2pi", "cvt_mm_xmm", modifiers=[0x66, 0x2C], cpu=["SSE2"]) add_insn("cvttsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2C], cpu=["SSE2"]) add_insn("cvttpd2dq", "xmm_xmm128", modifiers=[0x66, 0xE6], cpu=["SSE2"]) add_insn("cvttps2dq", "xmm_xmm128", modifiers=[0xF3, 0x5B], cpu=["SSE2"]) add_insn("pmuludq", "mmxsse2", modifiers=[0xF4], cpu=["SSE2"]) add_insn("pshufd", "xmm_xmm128_imm", modifiers=[0x66, 0x70], cpu=["SSE2"]) add_insn("pshufhw", "xmm_xmm128_imm", modifiers=[0xF3, 0x70], cpu=["SSE2"]) add_insn("pshuflw", "xmm_xmm128_imm", modifiers=[0xF2, 0x70], cpu=["SSE2"]) add_insn("punpckhqdq", "xmm_xmm128", modifiers=[0x66, 0x6D], cpu=["SSE2"]) add_insn("punpcklqdq", "xmm_xmm128", modifiers=[0x66, 0x6C], cpu=["SSE2"]) add_insn("vcvttsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2C, VEXL0], avx=True) # vcvttpd2dq takes xmm, ymm combination # vcvttps2dq is two-operand add_insn("vpmuludq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF4, VEXL0], avx=True) add_insn("vpshufd", "xmm_xmm128_imm_256avx2", modifiers=[0x66, 0x70, VEXL0], avx=True) add_insn("vpshufhw", "xmm_xmm128_imm_256avx2", modifiers=[0xF3, 0x70, VEXL0], avx=True) add_insn("vpshuflw", "xmm_xmm128_imm_256avx2", modifiers=[0xF2, 0x70, VEXL0], avx=True) add_insn("vpunpckhqdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6D, VEXL0], avx=True) add_insn("vpunpcklqdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6C, VEXL0], avx=True) add_insn("cvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A], cpu=["SSE2"]) add_insn("vcvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A, VEXL0], avx=True) add_group("maskmovdqu", 
cpu=["SSE2"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0xF7], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("maskmovdqu", "maskmovdqu") add_insn("vmaskmovdqu", "maskmovdqu", modifiers=[VEXL0], avx=True) add_insn("movdqa", "movau", modifiers=[0x66, 0x6F, 0x10], cpu=["SSE2"]) add_insn("movdqu", "movau", modifiers=[0xF3, 0x6F, 0x10], cpu=["SSE2"]) add_insn("vmovdqa", "movau", modifiers=[0x66, 0x6F, 0x10], avx=True) add_insn("vmovdqu", "movau", modifiers=[0xF3, 0x6F, 0x10], avx=True) add_group("movdq2q", cpu=["SSE2"], prefix=0xF2, opcode=[0x0F, 0xD6], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("movdq2q", "movdq2q") add_group("movq2dq", cpu=["SSE2"], prefix=0xF3, opcode=[0x0F, 0xD6], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=64, dest="EA")]) add_insn("movq2dq", "movq2dq") add_group("pslrldq", cpu=["SSE2"], modifiers=["SpAdd", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x73], spare=0, operands=[Operand(type="SIMDReg", size=128, dest="EAVEX"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pslrldq", cpu=["SSE2"], modifiers=["SpAdd", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x73], spare=0, operands=[Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pslrldq", cpu=["AVX2"], modifiers=["SpAdd"], vex=256, prefix=0x66, opcode=[0x0F, 0x73], spare=0, operands=[Operand(type="SIMDReg", size=256, dest="EAVEX"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pslrldq", cpu=["AVX2"], modifiers=["SpAdd"], vex=256, prefix=0x66, opcode=[0x0F, 0x73], spare=0, operands=[Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDReg", size=256, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pslldq", "pslrldq", modifiers=[7]) add_insn("psrldq", "pslrldq", modifiers=[3]) add_insn("vpslldq", "pslrldq", modifiers=[7, VEXL0], avx=True) add_insn("vpsrldq", "pslrldq", modifiers=[3, VEXL0], avx=True) ##################################################################### # SSE3 / PNI Prescott New Instructions instructions ##################################################################### add_insn("addsubpd", "xmm_xmm128", modifiers=[0x66, 0xD0], cpu=["SSE3"]) add_insn("addsubps", "xmm_xmm128", modifiers=[0xF2, 0xD0], cpu=["SSE3"]) add_insn("haddpd", "xmm_xmm128", modifiers=[0x66, 0x7C], cpu=["SSE3"]) add_insn("haddps", "xmm_xmm128", modifiers=[0xF2, 0x7C], cpu=["SSE3"]) add_insn("hsubpd", "xmm_xmm128", modifiers=[0x66, 0x7D], cpu=["SSE3"]) add_insn("hsubps", "xmm_xmm128", modifiers=[0xF2, 0x7D], cpu=["SSE3"]) add_insn("vaddsubpd", "xmm_xmm128_256", modifiers=[0x66, 0xD0, VEXL0], avx=True) add_insn("vaddsubps", "xmm_xmm128_256", modifiers=[0xF2, 0xD0, VEXL0], avx=True) add_insn("vhaddpd", "xmm_xmm128_256", modifiers=[0x66, 0x7C, VEXL0], avx=True) add_insn("vhaddps", "xmm_xmm128_256", modifiers=[0xF2, 0x7C, VEXL0], avx=True) add_insn("vhsubpd", "xmm_xmm128_256", modifiers=[0x66, 0x7D, VEXL0], avx=True) add_insn("vhsubps", "xmm_xmm128_256", modifiers=[0xF2, 0x7D, VEXL0], avx=True) add_insn("movshdup", "xmm_xmm128", modifiers=[0xF3, 0x16], cpu=["SSE3"]) add_insn("movsldup", "xmm_xmm128", modifiers=[0xF3, 0x12], cpu=["SSE3"]) add_insn("fisttp", "fildstp", modifiers=[1, 0, 1], cpu=["SSE3"]) add_insn("fisttpll", "fildstp", suffix="q", modifiers=[7], cpu=["SSE3"]) 
add_insn("movddup", "xmm_xmm64", modifiers=[0xF2, 0x12], cpu=["SSE3"]) add_insn("monitor", "threebyte", modifiers=[0x0F, 0x01, 0xC8], cpu=["SSE3"]) add_insn("mwait", "threebyte", modifiers=[0x0F, 0x01, 0xC9], cpu=["SSE3"]) add_group("lddqu", cpu=["SSE3"], modifiers=["SetVEX"], prefix=0xF2, opcode=[0x0F, 0xF0], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_group("lddqu", cpu=["AVX"], vex=256, prefix=0xF2, opcode=[0x0F, 0xF0], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", size=256, relaxed=True, dest="EA")]) add_insn("lddqu", "lddqu") add_insn("vlddqu", "lddqu", modifiers=[VEXL0], avx=True) ##################################################################### # SSSE3 / TNI Tejas New Intructions instructions ##################################################################### add_group("ssse3", cpu=["SSSE3"], notavx=True, modifiers=["Op2Add"], opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_group("ssse3", cpu=["SSSE3"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("ssse3", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("ssse3", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("ssse3", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("pshufb", "ssse3", modifiers=[0x00]) add_insn("phaddw", "ssse3", modifiers=[0x01]) add_insn("phaddd", "ssse3", modifiers=[0x02]) add_insn("phaddsw", "ssse3", modifiers=[0x03]) add_insn("pmaddubsw", "ssse3", modifiers=[0x04]) add_insn("phsubw", "ssse3", modifiers=[0x05]) add_insn("phsubd", "ssse3", modifiers=[0x06]) add_insn("phsubsw", "ssse3", modifiers=[0x07]) add_insn("psignb", "ssse3", modifiers=[0x08]) add_insn("psignw", "ssse3", modifiers=[0x09]) add_insn("psignd", "ssse3", modifiers=[0x0A]) add_insn("pmulhrsw", "ssse3", modifiers=[0x0B]) add_insn("pabsb", "ssse3", modifiers=[0x1C]) add_insn("pabsw", "ssse3", modifiers=[0x1D]) add_insn("pabsd", "ssse3", modifiers=[0x1E]) add_insn("vpshufb", "ssse3", modifiers=[0x00, VEXL0], avx=True) add_insn("vphaddw", "ssse3", modifiers=[0x01, VEXL0], avx=True) add_insn("vphaddd", "ssse3", modifiers=[0x02, VEXL0], avx=True) add_insn("vphaddsw", "ssse3", modifiers=[0x03, VEXL0], avx=True) add_insn("vpmaddubsw", "ssse3", modifiers=[0x04, VEXL0], avx=True) add_insn("vphsubw", "ssse3", modifiers=[0x05, VEXL0], avx=True) add_insn("vphsubd", "ssse3", modifiers=[0x06, VEXL0], avx=True) add_insn("vphsubsw", "ssse3", modifiers=[0x07, VEXL0], avx=True) add_insn("vpsignb", "ssse3", modifiers=[0x08, VEXL0], avx=True) add_insn("vpsignw", "ssse3", modifiers=[0x09, VEXL0], avx=True) add_insn("vpsignd", "ssse3", modifiers=[0x0A, VEXL0], avx=True) add_insn("vpmulhrsw", "ssse3", modifiers=[0x0B, 
VEXL0], avx=True) # vpabsb/vpabsw/vpabsd are 2 operand only add_group("ssse3imm", cpu=["SSSE3"], modifiers=["Op2Add"], opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("ssse3imm", cpu=["SSSE3"], modifiers=["Op2Add"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("palignr", "ssse3imm", modifiers=[0x0F]) add_insn("vpalignr", "sse4imm_256avx2", modifiers=[0x0F, VEXL0], avx=True) ##################################################################### # SSE4.1 / SSE4.2 instructions ##################################################################### add_group("sse4", cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("sse4", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("packusdw", "sse4", modifiers=[0x2B]) add_insn("pcmpeqq", "sse4", modifiers=[0x29]) add_insn("pcmpgtq", "sse4", modifiers=[0x37]) add_insn("phminposuw", "sse4", modifiers=[0x41]) add_insn("pmaxsb", "sse4", modifiers=[0x3C]) add_insn("pmaxsd", "sse4", modifiers=[0x3D]) add_insn("pmaxud", "sse4", modifiers=[0x3F]) add_insn("pmaxuw", "sse4", modifiers=[0x3E]) add_insn("pminsb", "sse4", modifiers=[0x38]) add_insn("pminsd", "sse4", modifiers=[0x39]) add_insn("pminud", "sse4", modifiers=[0x3B]) add_insn("pminuw", "sse4", modifiers=[0x3A]) add_insn("pmuldq", "sse4", modifiers=[0x28]) add_insn("pmulld", "sse4", modifiers=[0x40]) add_insn("ptest", "sse4", modifiers=[0x17]) # AVX versions use ssse3, and disable MMX version, as they're 3-operand add_insn("vpackusdw", "ssse3", modifiers=[0x2B, VEXL0], avx=True) add_insn("vpcmpeqq", "ssse3", modifiers=[0x29, VEXL0], avx=True) add_insn("vpcmpgtq", "ssse3", modifiers=[0x37, VEXL0], avx=True) # vphminposuw is 2 operand only add_insn("vpmaxsb", "ssse3", modifiers=[0x3C, VEXL0], avx=True) add_insn("vpmaxsd", "ssse3", modifiers=[0x3D, VEXL0], avx=True) add_insn("vpmaxud", "ssse3", modifiers=[0x3F, VEXL0], avx=True) add_insn("vpmaxuw", "ssse3", modifiers=[0x3E, VEXL0], avx=True) add_insn("vpminsb", "ssse3", modifiers=[0x38, VEXL0], avx=True) add_insn("vpminsd", "ssse3", modifiers=[0x39, VEXL0], avx=True) add_insn("vpminud", "ssse3", modifiers=[0x3B, VEXL0], avx=True) add_insn("vpminuw", "ssse3", modifiers=[0x3A, VEXL0], avx=True) add_insn("vpmuldq", "ssse3", modifiers=[0x28, VEXL0], avx=True) add_insn("vpmulld", "ssse3", modifiers=[0x40, VEXL0], avx=True) # vptest uses SSE4 style (2 operand only), and takes 256-bit operands add_insn("vptest", "sse4", modifiers=[0x17, VEXL0], avx=True) add_group("sse4imm_256", cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), 
Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # Same as above except AVX2 required for 256-bit. add_group("sse4imm_256avx2", cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256avx2", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm_256avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) # Version that does not allow YMM registers add_group("sse4imm", cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4imm", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for sz in [32, 64]: add_group("sse4m%dimm" % sz, cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4m%dimm" % sz, cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4m%dimm" % sz, cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), 
Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("sse4m%dimm" % sz, cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("blendpd", "sse4imm", modifiers=[0x0D]) add_insn("blendps", "sse4imm", modifiers=[0x0C]) add_insn("dppd", "sse4imm", modifiers=[0x41]) add_insn("dpps", "sse4imm", modifiers=[0x40]) add_insn("mpsadbw", "sse4imm", modifiers=[0x42]) add_insn("pblendw", "sse4imm", modifiers=[0x0E]) add_insn("roundpd", "sse4imm", modifiers=[0x09]) add_insn("roundps", "sse4imm", modifiers=[0x08]) add_insn("roundsd", "sse4m64imm", modifiers=[0x0B]) add_insn("roundss", "sse4m32imm", modifiers=[0x0A]) # vdppd does not allow YMM registers # vmpsadbw and vpblendw do not allow YMM registers unless AVX2 add_insn("vblendpd", "sse4imm_256", modifiers=[0x0D, VEXL0], avx=True) add_insn("vblendps", "sse4imm_256", modifiers=[0x0C, VEXL0], avx=True) add_insn("vdppd", "sse4imm", modifiers=[0x41, VEXL0], avx=True) add_insn("vdpps", "sse4imm_256", modifiers=[0x40, VEXL0], avx=True) add_insn("vmpsadbw", "sse4imm_256avx2", modifiers=[0x42, VEXL0], avx=True) add_insn("vpblendw", "sse4imm_256avx2", modifiers=[0x0E, VEXL0], avx=True) # vroundpd and vroundps don't add another register operand add_insn("vroundsd", "sse4m64imm", modifiers=[0x0B, VEXL0], avx=True) add_insn("vroundss", "sse4m32imm", modifiers=[0x0A, VEXL0], avx=True) add_group("sse4xmm0", cpu=["SSE41"], modifiers=["Op2Add"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("sse4xmm0", cpu=["SSE41"], modifiers=["Op2Add"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="XMM0", size=128, dest=None)]) add_insn("blendvpd", "sse4xmm0", modifiers=[0x15]) add_insn("blendvps", "sse4xmm0", modifiers=[0x14]) add_insn("pblendvb", "sse4xmm0", modifiers=[0x10]) # implicit XMM0 can't be VEX-encoded add_group("avx_sse4xmm0", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("avx_sse4xmm0", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) add_insn("vblendvpd", "avx_sse4xmm0", modifiers=[0x4B]) add_insn("vblendvps", "avx_sse4xmm0", modifiers=[0x4A]) # vpblendvb didn't have a 256-bit form until AVX2 add_group("avx2_sse4xmm0", cpu=["AVX2"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("avx2_sse4xmm0", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, 
opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) add_insn("vpblendvb", "avx2_sse4xmm0", modifiers=[0x4C]) for sfx, sz in zip("bwl", [8, 16, 32]): add_group("crc32", suffix=sfx, cpu=["SSE42"], opersize=sz, prefix=0xF2, opcode=[0x0F, 0x38, 0xF0+(sz!=8)], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=sz, relaxed=(sz==32), dest="EA")]) for sfx, sz in zip("bq", [8, 64]): add_group("crc32", suffix=sfx, cpu=["SSE42"], opersize=64, prefix=0xF2, opcode=[0x0F, 0x38, 0xF0+(sz!=8)], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="RM", size=sz, relaxed=(sz==64), dest="EA")]) add_insn("crc32", "crc32") add_group("extractps", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x17], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("extractps", cpu=["SSE41"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x17], operands=[Operand(type="Reg", size=64, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("extractps", "extractps") add_insn("vextractps", "extractps", modifiers=[VEXL0], avx=True) add_group("insertps", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x21], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("insertps", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x21], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("insertps", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x21], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("insertps", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x21], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("insertps", "insertps") add_insn("vinsertps", "insertps", modifiers=[VEXL0], avx=True) add_group("movntdqa", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x38, 0x2A], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_group("movntdqa", cpu=["AVX2"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x2A], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", size=256, relaxed=True, dest="EA")]) add_insn("movntdqa", "movntdqa") add_insn("vmovntdqa", "movntdqa", modifiers=[VEXL0], avx=True) add_group("sse4pcmpstr", cpu=["SSE42"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pcmpestri", "sse4pcmpstr", 
modifiers=[0x61]) add_insn("pcmpestrm", "sse4pcmpstr", modifiers=[0x60]) add_insn("pcmpistri", "sse4pcmpstr", modifiers=[0x63]) add_insn("pcmpistrm", "sse4pcmpstr", modifiers=[0x62]) add_insn("vpcmpestri", "sse4pcmpstr", modifiers=[0x61, VEXL0], avx=True) add_insn("vpcmpestrm", "sse4pcmpstr", modifiers=[0x60, VEXL0], avx=True) add_insn("vpcmpistri", "sse4pcmpstr", modifiers=[0x63, VEXL0], avx=True) add_insn("vpcmpistrm", "sse4pcmpstr", modifiers=[0x62, VEXL0], avx=True) add_group("pextrb", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x14], operands=[Operand(type="Mem", size=8, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrb", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x14], operands=[Operand(type="Reg", size=32, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pextrb", cpu=["SSE41"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x14], operands=[Operand(type="Reg", size=64, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pextrb", "pextrb") add_insn("vpextrb", "pextrb", modifiers=[VEXL0], avx=True) add_group("pextrd", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x16], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pextrd", "pextrd") add_insn("vpextrd", "pextrd", modifiers=[VEXL0], avx=True) add_group("pextrq", cpu=["SSE41"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x16], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pextrq", "pextrq") add_insn("vpextrq", "pextrq", modifiers=[VEXL0], avx=True) add_group("pinsrb", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x20], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Mem", size=8, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrb", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x20], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="Reg", size=32, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrb", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x20], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=8, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrb", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x20], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Reg", size=32, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pinsrb", "pinsrb") add_insn("vpinsrb", "pinsrb", modifiers=[VEXL0], avx=True) add_group("pinsrd", cpu=["SSE41"], modifiers=["SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x22], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) 
add_group("pinsrd", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x22], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pinsrd", "pinsrd") add_insn("vpinsrd", "pinsrd", modifiers=[VEXL0], avx=True) add_group("pinsrq", cpu=["SSE41"], modifiers=["SetVEX"], opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x22], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pinsrq", cpu=["AVX"], vex=128, opersize=64, prefix=0x66, opcode=[0x0F, 0x3A, 0x22], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pinsrq", "pinsrq") add_insn("vpinsrq", "pinsrq", modifiers=[VEXL0], avx=True) for sz in [16, 32, 64]: add_group("sse4m%d" % sz, cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) add_group("sse4m%d" % sz, cpu=["SSE41"], modifiers=["Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("sse4m%d" % sz, cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", size=sz*2, relaxed=True, dest="EA")]) add_group("sse4m%d" % sz, cpu=["AVX2"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("pmovsxbw", "sse4m64", modifiers=[0x20]) add_insn("pmovsxwd", "sse4m64", modifiers=[0x23]) add_insn("pmovsxdq", "sse4m64", modifiers=[0x25]) add_insn("pmovzxbw", "sse4m64", modifiers=[0x30]) add_insn("pmovzxwd", "sse4m64", modifiers=[0x33]) add_insn("pmovzxdq", "sse4m64", modifiers=[0x35]) add_insn("vpmovsxbw", "sse4m64", modifiers=[0x20, VEXL0], avx=True) add_insn("vpmovsxwd", "sse4m64", modifiers=[0x23, VEXL0], avx=True) add_insn("vpmovsxdq", "sse4m64", modifiers=[0x25, VEXL0], avx=True) add_insn("vpmovzxbw", "sse4m64", modifiers=[0x30, VEXL0], avx=True) add_insn("vpmovzxwd", "sse4m64", modifiers=[0x33, VEXL0], avx=True) add_insn("vpmovzxdq", "sse4m64", modifiers=[0x35, VEXL0], avx=True) add_insn("pmovsxbd", "sse4m32", modifiers=[0x21]) add_insn("pmovsxwq", "sse4m32", modifiers=[0x24]) add_insn("pmovzxbd", "sse4m32", modifiers=[0x31]) add_insn("pmovzxwq", "sse4m32", modifiers=[0x34]) add_insn("vpmovsxbd", "sse4m32", modifiers=[0x21, VEXL0], avx=True) add_insn("vpmovsxwq", "sse4m32", modifiers=[0x24, VEXL0], avx=True) add_insn("vpmovzxbd", "sse4m32", modifiers=[0x31, VEXL0], avx=True) add_insn("vpmovzxwq", "sse4m32", modifiers=[0x34, VEXL0], avx=True) add_insn("pmovsxbq", "sse4m16", modifiers=[0x22]) add_insn("pmovzxbq", "sse4m16", modifiers=[0x32]) add_insn("vpmovsxbq", "sse4m16", modifiers=[0x22, VEXL0], avx=True) add_insn("vpmovzxbq", "sse4m16", modifiers=[0x32, VEXL0], avx=True) for sfx, sz in zip("wlq", [16, 32, 64]): add_group("cnt", suffix=sfx, modifiers=["Op1Add"], opersize=sz, prefix=0xF3, opcode=[0x0F, 0x00], 
operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) add_insn("popcnt", "cnt", modifiers=[0xB8], cpu=["SSE42"]) ##################################################################### # Intel AVX instructions ##################################################################### # Most AVX instructions are mixed in with above SSEx groups. # Some make more sense to have separate groups due to naming conflicts # that the v-named versions don't have to deal with. add_group("vmovd", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_group("vmovd", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("vmovd", "vmovd") add_group("vmovq", cpu=["AVX"], vex=128, prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vmovq", cpu=["AVX"], vex=128, prefix=0xF3, opcode=[0x0F, 0x7E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("vmovq", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0xD6], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("vmovq", cpu=["AVX"], vex=128, opersize=64, prefix=0x66, opcode=[0x0F, 0x6E], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_group("vmovq", cpu=["AVX"], vex=128, opersize=64, prefix=0x66, opcode=[0x0F, 0x7E], operands=[Operand(type="RM", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("vmovq", "vmovq") # Some AVX variants don't add third operand add_group("avx_xmm_xmm128", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("avx_xmm_xmm128", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vmovshdup", "avx_xmm_xmm128", modifiers=[0xF3, 0x16]) add_insn("vmovsldup", "avx_xmm_xmm128", modifiers=[0xF3, 0x12]) add_insn("vrcpps", "avx_xmm_xmm128", modifiers=[0, 0x53]) add_insn("vrsqrtps", "avx_xmm_xmm128", modifiers=[0, 0x52]) add_insn("vsqrtps", "avx_xmm_xmm128", modifiers=[0, 0x51]) add_insn("vsqrtpd", "avx_xmm_xmm128", modifiers=[0x66, 0x51]) add_insn("vcvtdq2ps", "avx_xmm_xmm128", modifiers=[0, 0x5B]) add_insn("vcvtps2dq", "avx_xmm_xmm128", modifiers=[0x66, 0x5B]) add_insn("vcvttps2dq", "avx_xmm_xmm128", modifiers=[0xF3, 0x5B]) add_group("avx_sse4imm", cpu=["SSE41"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("avx_sse4imm", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) 
add_group("avx_sse4imm", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vroundpd", "avx_sse4imm", modifiers=[0x09]) add_insn("vroundps", "avx_sse4imm", modifiers=[0x08]) add_group("vmovddup", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vmovddup", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("vmovddup", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vmovddup", "vmovddup", modifiers=[0xF2, 0x12]) # Some xmm_xmm64 combinations only take two operands in AVX # (VEX.vvvv must be 1111b) add_group("avx_xmm_xmm64", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("avx_xmm_xmm64", cpu=["SSE2"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("vcomisd", "avx_xmm_xmm64", modifiers=[0x66, 0x2F], avx=True) add_insn("vucomisd", "avx_xmm_xmm64", modifiers=[0x66, 0x2E], avx=True) # Some xmm_xmm64 combinations only take two operands in AVX # (VEX.vvvv must be 1111b) add_group("avx_xmm_xmm32", cpu=["SSE"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("avx_xmm_xmm32", cpu=["SSE"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("vcomiss", "avx_xmm_xmm32", modifiers=[0, 0x2F], avx=True) add_insn("vucomiss", "avx_xmm_xmm32", modifiers=[0, 0x2E], avx=True) # Some conversion functions take ymm, xmm combination add_group("avx_cvt_xmm64", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("avx_cvt_xmm64", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_group("avx_cvt_xmm64", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("vcvtdq2pd", "avx_cvt_xmm64", modifiers=[0xF3, 0xE6]) add_insn("vcvtps2pd", "avx_cvt_xmm64", modifiers=[0, 0x5A]) # Some SSE3 opcodes are only two operand in AVX # (VEX.vvvv must be 1111b) add_group("avx_ssse3_2op", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x00], 
operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("vphminposuw", "avx_ssse3_2op", modifiers=[0x41], avx=True) # VPABS* are extended to 256-bit in AVX2 for cpu, sz in zip(["AVX", "AVX2"], [128, 256]): add_group("avx2_ssse3_2op", cpu=[cpu], modifiers=["Op2Add"], vex=sz, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) add_insn("vpabsb", "avx2_ssse3_2op", modifiers=[0x1C], avx=True) add_insn("vpabsw", "avx2_ssse3_2op", modifiers=[0x1D], avx=True) add_insn("vpabsd", "avx2_ssse3_2op", modifiers=[0x1E], avx=True) # Some conversion functions take xmm, ymm combination # Need separate x and y versions for gas mode add_group("avx_cvt_xmm128_x", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("avx_cvt_xmm128_y", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("avx_cvt_xmm128", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=128, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, dest="EA")]) add_group("avx_cvt_xmm128", cpu=["AVX"], modifiers=["PreAdd", "Op1Add"], vex=256, prefix=0x00, opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=256, dest="EA")]) add_insn("vcvtpd2dqx", "avx_cvt_xmm128_x", modifiers=[0xF2, 0xE6], parser="gas") add_insn("vcvtpd2dqy", "avx_cvt_xmm128_y", modifiers=[0xF2, 0xE6], parser="gas") add_insn("vcvtpd2dq", "avx_cvt_xmm128", modifiers=[0xF2, 0xE6]) add_insn("vcvtpd2psx", "avx_cvt_xmm128_x", modifiers=[0x66, 0x5A], parser="gas") add_insn("vcvtpd2psy", "avx_cvt_xmm128_y", modifiers=[0x66, 0x5A], parser="gas") add_insn("vcvtpd2ps", "avx_cvt_xmm128", modifiers=[0x66, 0x5A]) add_insn("vcvttpd2dqx", "avx_cvt_xmm128_x", modifiers=[0x66, 0xE6], parser="gas") add_insn("vcvttpd2dqy", "avx_cvt_xmm128_y", modifiers=[0x66, 0xE6], parser="gas") add_insn("vcvttpd2dq", "avx_cvt_xmm128", modifiers=[0x66, 0xE6]) # Instructions new to AVX add_insn("vtestps", "sse4", modifiers=[0x0E, VEXL0], avx=True) add_insn("vtestpd", "sse4", modifiers=[0x0F, VEXL0], avx=True) add_group("vbroadcastss", cpu=["AVX"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x18], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("vbroadcastss", cpu=["AVX"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x18], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("vbroadcastss", cpu=["AVX2"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x18], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vbroadcastss", cpu=["AVX2"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x18], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("vbroadcastss", "vbroadcastss") add_group("vbroadcastsd", cpu=["AVX"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x19], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", 
size=64, relaxed=True, dest="EA")]) add_group("vbroadcastsd", cpu=["AVX2"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x19], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("vbroadcastsd", "vbroadcastsd") add_group("vbroadcastif128", modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_insn("vbroadcastf128", "vbroadcastif128", modifiers=[0x1A], cpu=["AVX"]) add_insn("vbroadcasti128", "vbroadcastif128", modifiers=[0x5A], cpu=["AVX2"]) add_group("vextractif128", modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vextractf128", "vextractif128", modifiers=[0x19], cpu=["AVX"]) add_insn("vextracti128", "vextractif128", modifiers=[0x39], cpu=["AVX2"]) add_group("vinsertif128", modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vinsertf128", "vinsertif128", modifiers=[0x18], cpu=["AVX"]) add_insn("vinserti128", "vinsertif128", modifiers=[0x38], cpu=["AVX2"]) add_group("vzero", cpu=["AVX"], modifiers=["SetVEX"], opcode=[0x0F, 0x77], operands=[]) add_insn("vzeroall", "vzero", modifiers=[VEXL1]) add_insn("vzeroupper", "vzero", modifiers=[VEXL0]) add_group("vmaskmov", modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vmaskmov", modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("vmaskmov", modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x02], operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_group("vmaskmov", modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x02], operands=[Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDReg", size=256, dest="Spare")]) add_insn("vmaskmovps", "vmaskmov", modifiers=[0x2C], cpu=["AVX"]) add_insn("vmaskmovpd", "vmaskmov", modifiers=[0x2D], cpu=["AVX"]) add_group("vpermil", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x38, 0x08], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vpermil", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x38, 0x08], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("vpermil", cpu=["AVX"], modifiers=["Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], 
operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("vpermil", cpu=["AVX"], modifiers=["Op2Add"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vpermilpd", "vpermil", modifiers=[0x05]) add_insn("vpermilps", "vpermil", modifiers=[0x04]) add_group("vperm2f128", cpu=["AVX"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x06], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vperm2f128", "vperm2f128") ##################################################################### # Intel AVX2 instructions ##################################################################### # Most AVX2 instructions are mixed in with above SSEx/AVX groups. # Some make more sense to have separate groups. # vex.vvvv=1111b add_group("vperm_var_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vpermd", "vperm_var_avx2", modifiers=[0x36]) add_insn("vpermps", "vperm_var_avx2", modifiers=[0x16]) # vex.vvvv=1111b add_group("vperm_imm_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=1, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vpermq", "vperm_imm_avx2", modifiers=[0x00]) add_insn("vpermpd", "vperm_imm_avx2", modifiers=[0x01]) add_group("vperm2i128_avx2", cpu=["AVX2"], vex=256, prefix=0x66, opcode=[0x0F, 0x3A, 0x46], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vperm2i128", "vperm2i128_avx2") # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastb_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x78], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastb_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x78], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="RM", size=8, relaxed=True, dest="EA")]) add_insn("vpbroadcastb", "vpbroadcastb_avx2") # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastw_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x79], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastw_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x79], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="RM", size=16, relaxed=True, dest="EA")]) add_insn("vpbroadcastw", "vpbroadcastw_avx2") # vex.vvvv=1111b for sz in [128, 256]: 
add_group("vpbroadcastd_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x58], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastd_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x58], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_insn("vpbroadcastd", "vpbroadcastd_avx2") # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastq_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x59], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) # vex.vvvv=1111b for sz in [128, 256]: add_group("vpbroadcastq_avx2", cpu=["AVX2"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x59], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="RM", size=64, relaxed=True, dest="EA")]) add_insn("vpbroadcastq", "vpbroadcastq_avx2") for sz in [128, 256]: add_group("vpshiftv_vexw0_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) for sz in [128, 256]: add_group("vpshiftv_vexw1_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=sz, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) add_insn("vpsrlvd", "vpshiftv_vexw0_avx2", modifiers=[0x45]) add_insn("vpsrlvq", "vpshiftv_vexw1_avx2", modifiers=[0x45]) add_insn("vpsravd", "vpshiftv_vexw0_avx2", modifiers=[0x46]) add_insn("vpsllvd", "vpshiftv_vexw0_avx2", modifiers=[0x47]) add_insn("vpsllvq", "vpshiftv_vexw1_avx2", modifiers=[0x47]) add_insn("vpmaskmovd", "vmaskmov", modifiers=[0x8C], cpu=["AVX2"]) # vex.vvvv=1111b for sz in [128, 256]: add_group("vmaskmov_vexw1_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=sz, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) for sz in [128, 256]: add_group("vmaskmov_vexw1_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=sz, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x02], operands=[Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDReg", size=sz, dest="Spare")]) add_insn("vpmaskmovq", "vmaskmov_vexw1_avx2", modifiers=[0x8C]) for sz in [128, 256]: add_group("vex_66_0F3A_imm8_avx2", cpu=["AVX2"], modifiers=["Op2Add"], vex=sz, vexw=0, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), Operand(type="SIMDReg", size=sz, dest="VEX"), Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("vpblendd", "vex_66_0F3A_imm8_avx2", modifiers=[0x02]) # Vector register in EA. 
add_group("gather_64x_64x", cpu=["AVX2"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("gather_64x_64x", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEX")]) add_insn("vgatherdpd", "gather_64x_64x", modifiers=[0x92]) add_insn("vpgatherdq", "gather_64x_64x", modifiers=[0x90]) add_group("gather_64x_64y", cpu=["AVX2"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("gather_64x_64y", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="MemYMMIndex", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEX")]) add_insn("vgatherqpd", "gather_64x_64y", modifiers=[0x93]) add_insn("vpgatherqq", "gather_64x_64y", modifiers=[0x91]) add_group("gather_32x_32y", cpu=["AVX2"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="MemXMMIndex", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("gather_32x_32y", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="MemYMMIndex", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEX")]) add_insn("vgatherdps", "gather_32x_32y", modifiers=[0x92]) add_insn("vpgatherdd", "gather_32x_32y", modifiers=[0x90]) add_group("gather_32x_32y_128", cpu=["AVX2"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="MemXMMIndex", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("gather_32x_32y_128", cpu=["AVX2"], modifiers=["Op2Add"], vex=256, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="MemYMMIndex", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_insn("vgatherqps", "gather_32x_32y_128", modifiers=[0x93]) add_insn("vpgatherqd", "gather_32x_32y_128", modifiers=[0x91]) ##################################################################### # Intel FMA instructions ##################################################################### ### 128/256b FMA PS add_group("vfma_ps", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=0, # single precision prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vfma_ps", cpu=["FMA"], modifiers=["Op2Add"], vex=256, vexw=0, # single precision prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), 
Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) ### 128/256b FMA PD(W=1) add_group("vfma_pd", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=1, # double precision prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vfma_pd", cpu=["FMA"], modifiers=["Op2Add"], vex=256, vexw=1, # double precision prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_group("vfma_ss", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vfma_ss", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_group("vfma_sd", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vfma_sd", cpu=["FMA"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x38, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) for orderval, order in enumerate(["132", "213", "231"]): ov = orderval << 4 for combval, comb in enumerate(["ps", "pd", "ss", "sd"]): cv = combval >> 1 add_insn("vfmadd"+order+comb, "vfma_"+comb, modifiers=[0x98+ov+cv]) add_insn("vfmsub"+order+comb, "vfma_"+comb, modifiers=[0x9A+ov+cv]) add_insn("vfnmsub"+order+comb, "vfma_"+comb, modifiers=[0x9E+ov+cv]) add_insn("vfnmadd"+order+comb, "vfma_"+comb, modifiers=[0x9C+ov+cv]) # no ss/sd for these for comb in ["ps", "pd"]: add_insn("vfmaddsub"+order+comb, "vfma_"+comb, modifiers=[0x96+ov]) add_insn("vfmsubadd"+order+comb, "vfma_"+comb, modifiers=[0x97+ov]) ##################################################################### # Intel AES instructions ##################################################################### add_group("aes", cpu=["AES"], modifiers=["Op1Add", "Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("aes", cpu=["AES", "AVX"], modifiers=["Op1Add", "Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("aesenc", "aes", modifiers=[0x38, 0xDC]) add_insn("aesenclast", "aes", modifiers=[0x38, 0xDD]) add_insn("aesdec", "aes", modifiers=[0x38, 0xDE]) add_insn("aesdeclast", "aes", modifiers=[0x38, 0xDF]) add_insn("vaesenc", "aes", modifiers=[0x38, 0xDC, VEXL0], avx=True) add_insn("vaesenclast", "aes", modifiers=[0x38, 0xDD, VEXL0], avx=True) add_insn("vaesdec", "aes", modifiers=[0x38, 0xDE, VEXL0], avx=True) add_insn("vaesdeclast", "aes", modifiers=[0x38, 0xDF, VEXL0], avx=True) 
add_group("aesimc", cpu=["AES"], modifiers=["Op1Add", "Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("aesimc", "aesimc", modifiers=[0x38, 0xDB]) add_insn("vaesimc", "aesimc", modifiers=[0x38, 0xDB, VEXL0], avx=True) add_group("aes_imm", cpu=["AES"], modifiers=["Op1Add", "Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("aeskeygenassist", "aes_imm", modifiers=[0x3A, 0xDF]) add_insn("vaeskeygenassist", "aes_imm", modifiers=[0x3A, 0xDF, VEXL0], avx=True) ##################################################################### # Intel PCLMULQDQ instruction ##################################################################### add_group("pclmulqdq", cpu=["CLMUL"], modifiers=["Op1Add", "Op2Add", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("pclmulqdq", cpu=["CLMUL", "AVX"], modifiers=["Op1Add", "Op2Add"], vex=128, prefix=0x66, opcode=[0x0F, 0x00, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("pclmulqdq", "pclmulqdq", modifiers=[0x3A, 0x44]) add_insn("vpclmulqdq", "pclmulqdq", modifiers=[0x3A, 0x44, VEXL0], avx=True) add_group("pclmulqdq_fixed", cpu=["CLMUL"], modifiers=["Imm8", "SetVEX"], prefix=0x66, opcode=[0x0F, 0x3A, 0x44], operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("pclmulqdq_fixed", cpu=["CLMUL", "AVX"], modifiers=["Imm8"], vex=128, prefix=0x66, opcode=[0x0F, 0x3A, 0x44], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) for comb, combval in zip(["lql","hql","lqh","hqh"], [0x00,0x01,0x10,0x11]): add_insn("pclmul"+comb+"qdq", "pclmulqdq_fixed", modifiers=[combval]) add_insn("vpclmul"+comb+"qdq", "pclmulqdq_fixed", modifiers=[combval, VEXL0], avx=True) ##################################################################### # AVX Post-32nm instructions ##################################################################### # RDRAND add_group("rdrand", cpu=["RDRAND"], opersize=16, opcode=[0x0F, 0xC7], spare=6, operands=[Operand(type="Reg", size=16, dest="EA")]) add_group("rdrand", #suffix="l", cpu=["RDRAND"], opersize=32, opcode=[0x0F, 0xC7], spare=6, operands=[Operand(type="Reg", size=32, dest="EA")]) add_group("rdrand", cpu=["RDRAND"], opersize=64, opcode=[0x0F, 0xC7], spare=6, operands=[Operand(type="Reg", size=64, dest="EA")]) add_insn("rdrand", "rdrand") # FSGSBASE instructions add_group("fs_gs_base", only64=True, cpu=["FSGSBASE"], modifiers=['SpAdd'], opersize=32, prefix=0xF3, opcode=[0x0F, 0xAE], operands=[Operand(type="Reg", size=32, dest="EA")]) add_group("fs_gs_base", only64=True, cpu=["FSGSBASE"], opersize=64, modifiers=['SpAdd'], prefix=0xF3, opcode=[0x0F, 0xAE], operands=[Operand(type="Reg", size=64, dest="EA")]) add_insn("rdfsbase", "fs_gs_base", modifiers=[0], only64=True) 
add_insn("rdgsbase", "fs_gs_base", modifiers=[1], only64=True) add_insn("wrfsbase", "fs_gs_base", modifiers=[2], only64=True) add_insn("wrgsbase", "fs_gs_base", modifiers=[3], only64=True) # Float-16 conversion instructions for g in ['ps2ph', 'ph2ps']: operands1=[] operands1.append(Operand(type="SIMDReg", size=128, dest="EA")) operands1.append(Operand(type="SIMDReg", size=128, dest="Spare")) operands2=[] operands2.append(Operand(type="Mem", size=64, dest="EA")) operands2.append(Operand(type="SIMDReg", size=128, dest="Spare")) operands3=[] operands3.append(Operand(type="SIMDReg", size=128, dest="EA")) operands3.append(Operand(type="SIMDReg", size=256, dest="Spare")) operands4=[] operands4.append(Operand(type="Mem", size=128, dest="EA")) operands4.append(Operand(type="SIMDReg", size=256, dest="Spare")) if g == 'ph2ps': operands1.reverse() operands2.reverse() operands3.reverse() operands4.reverse() map = 0x38 elif g == 'ps2ph': immop = Operand(type="Imm", size=8, relaxed=True, dest="Imm") operands1.append(immop) operands2.append(immop) operands3.append(immop) operands4.append(immop) map = 0x3A add_group("avx_cvt" + g, cpu=["F16C", "AVX"], modifiers=["PreAdd", "Op2Add"], vex=128, prefix=0x00, opcode=[0x0F, map, 0x00], operands=operands1) add_group("avx_cvt" + g, cpu=["F16C", "AVX"], modifiers=["PreAdd", "Op2Add"], vex=128, prefix=0x00, opcode=[0x0F, map, 0x00], operands=operands2) add_group("avx_cvt" + g, cpu=["F16C", "AVX"], modifiers=["PreAdd", "Op2Add"], vex=256, prefix=0x00, opcode=[0x0F, map, 0x00], operands=operands3) add_group("avx_cvt" + g, cpu=["F16C", "AVX"], modifiers=["PreAdd", "Op2Add"], vex=256, prefix=0x00, opcode=[0x0F, map, 0x00], operands=operands4) add_insn("vcvtps2ph", "avx_cvtps2ph", modifiers=[0x66, 0x1D], avx=True) add_insn("vcvtph2ps", "avx_cvtph2ps", modifiers=[0x66, 0x13], avx=True) ##################################################################### # AMD SSE4a instructions ##################################################################### add_group("extrq", cpu=["SSE4a"], prefix=0x66, opcode=[0x0F, 0x78], operands=[Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("extrq", cpu=["SSE4a"], prefix=0x66, opcode=[0x0F, 0x79], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("extrq", "extrq") add_group("insertq", cpu=["SSE4a"], prefix=0xF2, opcode=[0x0F, 0x78], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_group("insertq", cpu=["SSE4a"], prefix=0xF2, opcode=[0x0F, 0x79], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_insn("insertq", "insertq") add_group("movntsd", cpu=["SSE4a"], prefix=0xF2, opcode=[0x0F, 0x2B], operands=[Operand(type="Mem", size=64, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("movntsd", "movntsd") add_group("movntss", cpu=["SSE4a"], prefix=0xF3, opcode=[0x0F, 0x2B], operands=[Operand(type="Mem", size=32, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="Spare")]) add_insn("movntss", "movntss") ##################################################################### # AMD XOP instructions ##################################################################### add_group("vfrc_pdps", 
cpu=["XOP"], modifiers=["Op1Add"], xop=128, opcode=[0x09, 0x80], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vfrc_pdps", cpu=["XOP"], modifiers=["Op1Add"], xop=256, opcode=[0x09, 0x80], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vfrczpd", "vfrc_pdps", modifiers=[0x01]) add_insn("vfrczps", "vfrc_pdps", modifiers=[0x00]) add_group("vfrczsd", cpu=["XOP"], xop=128, opcode=[0x09, 0x83], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vfrczsd", cpu=["XOP"], xop=128, opcode=[0x09, 0x83], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("vfrczsd", "vfrczsd") add_group("vfrczss", cpu=["XOP"], xop=128, opcode=[0x09, 0x82], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="EA")]) add_group("vfrczss", cpu=["XOP"], xop=128, opcode=[0x09, 0x82], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("vfrczss", "vfrczss") add_group("vpcmov", cpu=["XOP"], xop=128, xopw=0, opcode=[0x08, 0xA2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("vpcmov", cpu=["XOP"], xop=128, xopw=1, opcode=[0x08, 0xA2], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vpcmov", cpu=["XOP"], xop=256, xopw=0, opcode=[0x08, 0xA2], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) add_group("vpcmov", cpu=["XOP"], xop=256, xopw=1, opcode=[0x08, 0xA2], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vpcmov", "vpcmov") add_group("vpcom", cpu=["XOP"], modifiers=["Op1Add", "Imm8"], xop=128, opcode=[0x08, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vpcom_imm", cpu=["XOP"], modifiers=["Op1Add"], xop=128, opcode=[0x08, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for opc, sfx in [(0xCC, "b"), (0xCE, "d"), (0xCD, "w"), (0xCF, "q"), (0xEC, "ub"), (0xEE, "ud"), (0xEF, "uq"), (0xED, "uw")]: add_insn("vpcom"+sfx, "vpcom_imm", modifiers=[opc]) for ib, cc in enumerate(["lt", "le", "gt", "ge", "eq", "neq", "false", "true"]): add_insn("vpcom"+cc+sfx, "vpcom", modifiers=[opc, ib]) # ne alias for neq add_insn("vpcomne"+sfx, "vpcom", modifiers=[opc, 5]) add_group("vphaddsub", cpu=["XOP"], modifiers=["Op1Add"], xop=128, opcode=[0x09, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), 
Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("vphaddbw", "vphaddsub", modifiers=[0xC1]) add_insn("vphaddbd", "vphaddsub", modifiers=[0xC2]) add_insn("vphaddbq", "vphaddsub", modifiers=[0xC3]) add_insn("vphaddwd", "vphaddsub", modifiers=[0xC6]) add_insn("vphaddwq", "vphaddsub", modifiers=[0xC7]) add_insn("vphadddq", "vphaddsub", modifiers=[0xCB]) add_insn("vphaddubw", "vphaddsub", modifiers=[0xD1]) add_insn("vphaddubd", "vphaddsub", modifiers=[0xD2]) add_insn("vphaddubq", "vphaddsub", modifiers=[0xD3]) add_insn("vphadduwd", "vphaddsub", modifiers=[0xD6]) add_insn("vphadduwq", "vphaddsub", modifiers=[0xD7]) add_insn("vphaddudq", "vphaddsub", modifiers=[0xD8]) add_insn("vphsubbw", "vphaddsub", modifiers=[0xE1]) add_insn("vphsubwd", "vphaddsub", modifiers=[0xE2]) add_insn("vphsubdq", "vphaddsub", modifiers=[0xE3]) add_group("vpma", cpu=["XOP"], modifiers=["Op1Add"], xop=128, opcode=[0x08, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_insn("vpmacsdd", "vpma", modifiers=[0x9E]) add_insn("vpmacsdqh", "vpma", modifiers=[0x9F]) add_insn("vpmacsdql", "vpma", modifiers=[0x97]) add_insn("vpmacssdd", "vpma", modifiers=[0x8E]) add_insn("vpmacssdqh", "vpma", modifiers=[0x8F]) add_insn("vpmacssdql", "vpma", modifiers=[0x87]) add_insn("vpmacsswd", "vpma", modifiers=[0x86]) add_insn("vpmacssww", "vpma", modifiers=[0x85]) add_insn("vpmacswd", "vpma", modifiers=[0x96]) add_insn("vpmacsww", "vpma", modifiers=[0x95]) add_insn("vpmadcsswd", "vpma", modifiers=[0xA6]) add_insn("vpmadcswd", "vpma", modifiers=[0xB6]) add_group("vpperm", cpu=["XOP"], xop=128, xopw=0, opcode=[0x08, 0xA3], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("vpperm", cpu=["XOP"], xop=128, xopw=1, opcode=[0x08, 0xA3], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_insn("vpperm", "vpperm") add_group("vprot", cpu=["XOP"], modifiers=["Op1Add"], xop=128, xopw=0, opcode=[0x09, 0x90], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("vprot", cpu=["XOP"], modifiers=["Op1Add"], xop=128, xopw=1, opcode=[0x09, 0x90], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("vprot", cpu=["XOP"], modifiers=["Op1Add"], xop=128, opcode=[0x08, 0xC0], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) for opc, sfx in enumerate(["b", "w", "d", "q"]): add_insn("vprot"+sfx, "vprot", modifiers=[opc]) add_group("amd_vpshift", cpu=["XOP"], modifiers=["Op1Add"], xop=128, xopw=0, opcode=[0x09, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEX")]) add_group("amd_vpshift", cpu=["XOP"], modifiers=["Op1Add"], xop=128, xopw=1, 
opcode=[0x09, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) for opc, sfx in enumerate(["b", "w", "d", "q"]): add_insn("vpsha"+sfx, "amd_vpshift", modifiers=[0x98+opc]) add_insn("vpshl"+sfx, "amd_vpshift", modifiers=[0x94+opc]) ##################################################################### # AMD FMA4 instructions (same as original Intel FMA instructions) ##################################################################### add_group("fma_128_256", cpu=["FMA4"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("fma_128_256", cpu=["FMA4"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc"), Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) add_group("fma_128_256", cpu=["FMA4"], modifiers=["Op2Add"], vex=256, vexw=0, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) add_group("fma_128_256", cpu=["FMA4"], modifiers=["Op2Add"], vex=256, vexw=1, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=256, dest="Spare"), Operand(type="SIMDReg", size=256, dest="VEX"), Operand(type="SIMDReg", size=256, dest="VEXImmSrc"), Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) add_insn("vfmaddpd", "fma_128_256", modifiers=[0x69]) add_insn("vfmaddps", "fma_128_256", modifiers=[0x68]) add_insn("vfmaddsubpd", "fma_128_256", modifiers=[0x5D]) add_insn("vfmaddsubps", "fma_128_256", modifiers=[0x5C]) add_insn("vfmsubaddpd", "fma_128_256", modifiers=[0x5F]) add_insn("vfmsubaddps", "fma_128_256", modifiers=[0x5E]) add_insn("vfmsubpd", "fma_128_256", modifiers=[0x6D]) add_insn("vfmsubps", "fma_128_256", modifiers=[0x6C]) add_insn("vfnmaddpd", "fma_128_256", modifiers=[0x79]) add_insn("vfnmaddps", "fma_128_256", modifiers=[0x78]) add_insn("vfnmsubpd", "fma_128_256", modifiers=[0x7D]) add_insn("vfnmsubps", "fma_128_256", modifiers=[0x7C]) for sz in [32, 64]: add_group("fma_128_m%d" % sz, cpu=["FMA4"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("fma_128_m%d" % sz, cpu=["FMA4"], modifiers=["Op2Add"], vex=128, vexw=0, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="Mem", size=sz, relaxed=True, dest="EA"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) add_group("fma_128_m%d" % sz, cpu=["FMA4"], modifiers=["Op2Add"], vex=128, vexw=1, prefix=0x66, opcode=[0x0F, 0x3A, 0x00], operands=[Operand(type="SIMDReg", size=128, dest="Spare"), Operand(type="SIMDReg", size=128, dest="VEX"), Operand(type="SIMDReg", size=128, dest="VEXImmSrc"), 
Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) add_insn("vfmaddsd", "fma_128_m64", modifiers=[0x6B]) add_insn("vfmaddss", "fma_128_m32", modifiers=[0x6A]) add_insn("vfmsubsd", "fma_128_m64", modifiers=[0x6F]) add_insn("vfmsubss", "fma_128_m32", modifiers=[0x6E]) add_insn("vfnmaddsd", "fma_128_m64", modifiers=[0x7B]) add_insn("vfnmaddss", "fma_128_m32", modifiers=[0x7A]) add_insn("vfnmsubsd", "fma_128_m64", modifiers=[0x7F]) add_insn("vfnmsubss", "fma_128_m32", modifiers=[0x7E]) ##################################################################### # Intel XSAVE and XSAVEOPT instructions ##################################################################### add_insn("xgetbv", "threebyte", modifiers=[0x0F, 0x01, 0xD0], cpu=["XSAVE", "386"]) add_insn("xsetbv", "threebyte", modifiers=[0x0F, 0x01, 0xD1], cpu=["XSAVE", "386", "Priv"]) add_insn("xsave", "twobytemem", modifiers=[4, 0x0F, 0xAE], cpu=["XSAVE", "386"]) add_insn("xrstor", "twobytemem", modifiers=[5, 0x0F, 0xAE], cpu=["XSAVE", "386"]) add_insn("xsaveopt", "twobytemem", modifiers=[6, 0x0F, 0xAE], cpu=["XSAVEOPT"]) add_group("xsaveopt64", modifiers=["SpAdd", "Op0Add", "Op1Add"], opcode=[0x00, 0x00], spare=0, opersize=64, operands=[Operand(type="Mem", relaxed=True, dest="EA")]) add_insn("xsaveopt64", "xsaveopt64", modifiers=[6, 0x0F, 0xAE], cpu=["XSAVEOPT"], only64=True) ##################################################################### # Intel MOVBE instruction ##################################################################### for sz in (16, 32, 64): add_group("movbe", cpu=["MOVBE"], opersize=sz, opcode=[0x0F, 0x38, 0xF0], operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) add_group("movbe", cpu=["MOVBE"], opersize=sz, opcode=[0x0F, 0x38, 0xF1], operands=[Operand(type="Mem", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="Spare")]) add_insn("movbe", "movbe") ##################################################################### # Intel advanced bit manipulations (BMI1/2) ##################################################################### add_insn("tzcnt", "cnt", modifiers=[0xBC], cpu=["BMI1"]) # LZCNT is present as AMD ext for sfx, sz in zip("wlq", [32, 64]): add_group("vex_gpr_ndd_rm_0F38_regext", suffix=sfx, modifiers=["PreAdd", "Op2Add", "SpAdd" ], opersize=sz, prefix=0x00, opcode=[0x0F, 0x38, 0x00], vex=0, ## VEX.L=0 operands=[Operand(type="Reg", size=sz, dest="VEX"), Operand(type="RM", size=sz, relaxed=True, dest="EA")]) add_insn("blsr", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 1], cpu=["BMI1"]) add_insn("blsmsk", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 2], cpu=["BMI1"]) add_insn("blsi", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 3], cpu=["BMI1"]) for sfx, sz in zip("wlq", [32, 64]): add_group("vex_gpr_reg_rm_0F_imm8", suffix=sfx, modifiers=["PreAdd", "Op1Add", "Op2Add"], opersize=sz, prefix=0x00, opcode=[0x0F, 0x00, 0x00], vex=0, ## VEX.L=0 operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) add_insn("rorx", "vex_gpr_reg_rm_0F_imm8", modifiers=[0xF2, 0x3A, 0xF0], cpu=["BMI2"]) for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms add_group("vex_gpr_reg_nds_rm_0F", suffix=sfx, modifiers=["PreAdd", "Op1Add", "Op2Add"], opersize=sz, prefix=0x00, opcode=[0x0F, 0x00, 0x00], vex=0, operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="Reg", size=sz, dest="VEX"), Operand(type="RM", 
size=sz, relaxed=True, dest="EA")]) add_insn("andn", "vex_gpr_reg_nds_rm_0F", modifiers=[0x00, 0x38, 0xF2], cpu=["BMI1"]) add_insn("pdep", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF2, 0x38, 0xF5], cpu=["BMI2"]) add_insn("pext", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF3, 0x38, 0xF5], cpu=["BMI2"]) for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms add_group("vex_gpr_reg_rm_nds_0F", suffix=sfx, modifiers=["PreAdd", "Op1Add", "Op2Add"], opersize=sz, prefix=0x00, opcode=[0x0F, 0x00, 0x00], vex=0, operands=[Operand(type="Reg", size=sz, dest="Spare"), Operand(type="RM", size=sz, relaxed=True, dest="EA"), Operand(type="Reg", size=sz, dest="VEX")]) add_insn("bzhi", "vex_gpr_reg_rm_nds_0F", modifiers=[0x00, 0x38, 0xF5], cpu=["BMI2"]) add_insn("bextr","vex_gpr_reg_rm_nds_0F", modifiers=[0x00, 0x38, 0xF7], cpu=["BMI1"]) add_insn("shlx", "vex_gpr_reg_rm_nds_0F", modifiers=[0x66, 0x38, 0xF7], cpu=["BMI2"]) add_insn("shrx", "vex_gpr_reg_rm_nds_0F", modifiers=[0xF2, 0x38, 0xF7], cpu=["BMI2"]) add_insn("sarx", "vex_gpr_reg_rm_nds_0F", modifiers=[0xF3, 0x38, 0xF7], cpu=["BMI2"]) add_insn("mulx", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF2, 0x38, 0xF6], cpu=["BMI2"]) ##################################################################### # Intel INVPCID instruction ##################################################################### add_group("invpcid", cpu=["INVPCID", "Priv"], not64=True, prefix=0x66, opcode=[0x0F, 0x38, 0x82], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_group("invpcid", cpu=["INVPCID", "Priv"], only64=True, def_opersize_64=64, prefix=0x66, opcode=[0x0F, 0x38, 0x82], operands=[Operand(type="Reg", size=64, dest="Spare"), Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_insn("invpcid", "invpcid") ##################################################################### # AMD 3DNow! 
instructions ##################################################################### add_insn("prefetch", "twobytemem", modifiers=[0x00, 0x0F, 0x0D], cpu=["3DNow"]) add_insn("prefetchw", "twobytemem", modifiers=[0x01, 0x0F, 0x0D], cpu=["3DNow"]) add_insn("femms", "twobyte", modifiers=[0x0F, 0x0E], cpu=["3DNow"]) add_group("now3d", cpu=["3DNow"], modifiers=["Imm8"], opcode=[0x0F, 0x0F], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_insn("pavgusb", "now3d", modifiers=[0xBF]) add_insn("pf2id", "now3d", modifiers=[0x1D]) add_insn("pf2iw", "now3d", modifiers=[0x1C], cpu=["Athlon", "3DNow"]) add_insn("pfacc", "now3d", modifiers=[0xAE]) add_insn("pfadd", "now3d", modifiers=[0x9E]) add_insn("pfcmpeq", "now3d", modifiers=[0xB0]) add_insn("pfcmpge", "now3d", modifiers=[0x90]) add_insn("pfcmpgt", "now3d", modifiers=[0xA0]) add_insn("pfmax", "now3d", modifiers=[0xA4]) add_insn("pfmin", "now3d", modifiers=[0x94]) add_insn("pfmul", "now3d", modifiers=[0xB4]) add_insn("pfnacc", "now3d", modifiers=[0x8A], cpu=["Athlon", "3DNow"]) add_insn("pfpnacc", "now3d", modifiers=[0x8E], cpu=["Athlon", "3DNow"]) add_insn("pfrcp", "now3d", modifiers=[0x96]) add_insn("pfrcpit1", "now3d", modifiers=[0xA6]) add_insn("pfrcpit2", "now3d", modifiers=[0xB6]) add_insn("pfrsqit1", "now3d", modifiers=[0xA7]) add_insn("pfrsqrt", "now3d", modifiers=[0x97]) add_insn("pfsub", "now3d", modifiers=[0x9A]) add_insn("pfsubr", "now3d", modifiers=[0xAA]) add_insn("pi2fd", "now3d", modifiers=[0x0D]) add_insn("pi2fw", "now3d", modifiers=[0x0C], cpu=["Athlon", "3DNow"]) add_insn("pmulhrwa", "now3d", modifiers=[0xB7]) add_insn("pswapd", "now3d", modifiers=[0xBB], cpu=["Athlon", "3DNow"]) ##################################################################### # AMD extensions ##################################################################### add_insn("syscall", "twobyte", modifiers=[0x0F, 0x05], cpu=["686", "AMD"]) for sfx in [None, "l", "q"]: add_insn("sysret"+(sfx or ""), "twobyte", suffix=sfx, modifiers=[0x0F, 0x07], cpu=["686", "AMD", "Priv"]) add_insn("lzcnt", "cnt", modifiers=[0xBD], cpu=["LZCNT"]) ##################################################################### # AMD x86-64 extensions ##################################################################### add_insn("swapgs", "threebyte", modifiers=[0x0F, 0x01, 0xF8], only64=True) add_insn("rdtscp", "threebyte", modifiers=[0x0F, 0x01, 0xF9], cpu=["686", "AMD", "Priv"]) add_group("cmpxchg16b", only64=True, opersize=64, opcode=[0x0F, 0xC7], spare=1, operands=[Operand(type="Mem", size=128, relaxed=True, dest="EA")]) add_insn("cmpxchg16b", "cmpxchg16b") ##################################################################### # AMD Pacifica SVM instructions ##################################################################### add_insn("clgi", "threebyte", modifiers=[0x0F, 0x01, 0xDD], cpu=["SVM"]) add_insn("stgi", "threebyte", modifiers=[0x0F, 0x01, 0xDC], cpu=["SVM"]) add_insn("vmmcall", "threebyte", modifiers=[0x0F, 0x01, 0xD9], cpu=["SVM"]) add_group("invlpga", cpu=["SVM"], opcode=[0x0F, 0x01, 0xDF], operands=[]) add_group("invlpga", cpu=["SVM"], opcode=[0x0F, 0x01, 0xDF], operands=[Operand(type="MemrAX", dest="AdSizeEA"), Operand(type="Creg", size=32, dest=None)]) add_insn("invlpga", "invlpga") add_group("skinit", cpu=["SVM"], opcode=[0x0F, 0x01, 0xDE], operands=[]) add_group("skinit", cpu=["SVM"], opcode=[0x0F, 0x01, 0xDE], operands=[Operand(type="MemEAX", dest=None)]) add_insn("skinit", "skinit") 
add_group("svm_rax", cpu=["SVM"], modifiers=["Op2Add"], opcode=[0x0F, 0x01, 0x00], operands=[]) add_group("svm_rax", cpu=["SVM"], modifiers=["Op2Add"], opcode=[0x0F, 0x01, 0x00], operands=[Operand(type="MemrAX", dest="AdSizeEA")]) add_insn("vmload", "svm_rax", modifiers=[0xDA]) add_insn("vmrun", "svm_rax", modifiers=[0xD8]) add_insn("vmsave", "svm_rax", modifiers=[0xDB]) ##################################################################### # VIA PadLock instructions ##################################################################### add_group("padlock", cpu=["PadLock"], modifiers=["Imm8", "PreAdd", "Op1Add"], prefix=0x00, opcode=[0x0F, 0x00], operands=[]) add_insn("xstore", "padlock", modifiers=[0xC0, 0x00, 0xA7]) add_insn("xstorerng", "padlock", modifiers=[0xC0, 0x00, 0xA7]) add_insn("xcryptecb", "padlock", modifiers=[0xC8, 0xF3, 0xA7]) add_insn("xcryptcbc", "padlock", modifiers=[0xD0, 0xF3, 0xA7]) add_insn("xcryptctr", "padlock", modifiers=[0xD8, 0xF3, 0xA7]) add_insn("xcryptcfb", "padlock", modifiers=[0xE0, 0xF3, 0xA7]) add_insn("xcryptofb", "padlock", modifiers=[0xE8, 0xF3, 0xA7]) add_insn("montmul", "padlock", modifiers=[0xC0, 0xF3, 0xA6]) add_insn("xsha1", "padlock", modifiers=[0xC8, 0xF3, 0xA6]) add_insn("xsha256", "padlock", modifiers=[0xD0, 0xF3, 0xA6]) ##################################################################### # Cyrix MMX instructions ##################################################################### add_group("cyrixmmx", cpu=["MMX", "Cyrix"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) add_insn("paddsiw", "cyrixmmx", modifiers=[0x51]) add_insn("paveb", "cyrixmmx", modifiers=[0x50]) add_insn("pdistib", "cyrixmmx", modifiers=[0x54]) add_insn("pmagw", "cyrixmmx", modifiers=[0x52]) add_insn("pmulhriw", "cyrixmmx", modifiers=[0x5D]) add_insn("pmulhrwc", "cyrixmmx", modifiers=[0x59]) add_insn("pmvgezb", "cyrixmmx", modifiers=[0x5C]) add_insn("pmvlzb", "cyrixmmx", modifiers=[0x5B]) add_insn("pmvnzb", "cyrixmmx", modifiers=[0x5A]) add_insn("pmvzb", "cyrixmmx", modifiers=[0x58]) add_insn("psubsiw", "cyrixmmx", modifiers=[0x55]) add_group("pmachriw", cpu=["MMX", "Cyrix"], opcode=[0x0F, 0x5E], operands=[Operand(type="SIMDReg", size=64, dest="Spare"), Operand(type="Mem", size=64, relaxed=True, dest="EA")]) add_insn("pmachriw", "pmachriw") ##################################################################### # Cyrix extensions ##################################################################### add_insn("smint", "twobyte", modifiers=[0x0F, 0x38], cpu=["686", "Cyrix"]) add_insn("smintold", "twobyte", modifiers=[0x0F, 0x7E], cpu=["486", "Cyrix", "Obs"]) add_group("rdwrshr", cpu=["Cyrix", "SMM", "686"], modifiers=["Op1Add"], opcode=[0x0F, 0x36], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_insn("rdshr", "rdwrshr", modifiers=[0x00]) add_insn("wrshr", "rdwrshr", modifiers=[0x01]) add_group("rsdc", cpu=["Cyrix", "SMM", "486"], opcode=[0x0F, 0x79], operands=[Operand(type="SegReg", size=16, relaxed=True, dest="Spare"), Operand(type="Mem", size=80, relaxed=True, dest="EA")]) add_insn("rsdc", "rsdc") add_group("cyrixsmm", cpu=["Cyrix", "SMM", "486"], modifiers=["Op1Add"], opcode=[0x0F, 0x00], operands=[Operand(type="Mem", size=80, relaxed=True, dest="EA")]) add_insn("rsldt", "cyrixsmm", modifiers=[0x7B]) add_insn("rsts", "cyrixsmm", modifiers=[0x7D]) add_insn("svldt", "cyrixsmm", modifiers=[0x7A]) add_insn("svts", "cyrixsmm", modifiers=[0x7C]) 
add_group("svdc", cpu=["Cyrix", "SMM", "486"], opcode=[0x0F, 0x78], operands=[Operand(type="Mem", size=80, relaxed=True, dest="EA"), Operand(type="SegReg", size=16, relaxed=True, dest="Spare")]) add_insn("svdc", "svdc") ##################################################################### # Obsolete/undocumented instructions ##################################################################### add_insn("fsetpm", "twobyte", modifiers=[0xDB, 0xE4], cpu=["286", "FPU", "Obs"]) add_insn("loadall", "twobyte", modifiers=[0x0F, 0x07], cpu=["386", "Undoc"]) add_insn("loadall286", "twobyte", modifiers=[0x0F, 0x05], cpu=["286", "Undoc"]) add_insn("salc", "onebyte", modifiers=[0xD6], cpu=["Undoc"], not64=True) add_insn("smi", "onebyte", modifiers=[0xF1], cpu=["386", "Undoc"]) add_group("ibts", cpu=["Undoc", "Obs", "386"], opersize=16, opcode=[0x0F, 0xA7], operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), Operand(type="Reg", size=16, dest="Spare")]) add_group("ibts", cpu=["Undoc", "Obs", "386"], opersize=32, opcode=[0x0F, 0xA7], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Reg", size=32, dest="Spare")]) add_insn("ibts", "ibts") add_group("umov", cpu=["Undoc", "386"], opcode=[0x0F, 0x10], operands=[Operand(type="RM", size=8, relaxed=True, dest="EA"), Operand(type="Reg", size=8, dest="Spare")]) add_group("umov", cpu=["Undoc", "386"], opersize=16, opcode=[0x0F, 0x11], operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), Operand(type="Reg", size=16, dest="Spare")]) add_group("umov", cpu=["Undoc", "386"], opersize=32, opcode=[0x0F, 0x11], operands=[Operand(type="RM", size=32, relaxed=True, dest="EA"), Operand(type="Reg", size=32, dest="Spare")]) add_group("umov", cpu=["Undoc", "386"], opcode=[0x0F, 0x12], operands=[Operand(type="Reg", size=8, dest="Spare"), Operand(type="RM", size=8, relaxed=True, dest="EA")]) add_group("umov", cpu=["Undoc", "386"], opersize=16, opcode=[0x0F, 0x13], operands=[Operand(type="Reg", size=16, dest="Spare"), Operand(type="RM", size=16, relaxed=True, dest="EA")]) add_group("umov", cpu=["Undoc", "386"], opersize=32, opcode=[0x0F, 0x13], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="RM", size=32, relaxed=True, dest="EA")]) add_insn("umov", "umov") add_group("xbts", cpu=["Undoc", "Obs", "386"], opersize=16, opcode=[0x0F, 0xA6], operands=[Operand(type="Reg", size=16, dest="Spare"), Operand(type="Mem", size=16, relaxed=True, dest="EA")]) add_group("xbts", cpu=["Undoc", "Obs", "386"], opersize=32, opcode=[0x0F, 0xA6], operands=[Operand(type="Reg", size=32, dest="Spare"), Operand(type="Mem", size=32, relaxed=True, dest="EA")]) add_insn("xbts", "xbts") finalize_insns() ##################################################################### # Prefixes ##################################################################### # operand size overrides for sz in [16, 32, 64]: add_prefix("o%d" % sz, "OPERSIZE", sz, parser="nasm", only64=(sz==64)) add_prefix("data%d" % sz, "OPERSIZE", sz, parser="gas", only64=(sz==64)) add_prefix("word", "OPERSIZE", 16, parser="gas") add_prefix("dword", "OPERSIZE", 32, parser="gas") add_prefix("qword", "OPERSIZE", 64, parser="gas", only64=True) # address size overrides for sz in [16, 32, 64]: add_prefix("a%d" % sz, "ADDRSIZE", sz, parser="nasm", only64=(sz==64)) add_prefix("addr%d" % sz, "ADDRSIZE", sz, parser="gas", only64=(sz==64)) add_prefix("aword", "ADDRSIZE", 16, parser="gas") add_prefix("adword", "ADDRSIZE", 32, parser="gas") add_prefix("aqword", "ADDRSIZE", 64, parser="gas", 
only64=True) # instruction prefixes add_prefix("lock", "LOCKREP", 0xF0) add_prefix("repne", "LOCKREP", 0xF2) add_prefix("repnz", "LOCKREP", 0xF2) add_prefix("rep", "LOCKREP", 0xF3) add_prefix("repe", "LOCKREP", 0xF3) add_prefix("repz", "LOCKREP", 0xF3) # other prefixes, limited to GAS-only at the moment # Hint taken/not taken for jumps add_prefix("ht", "SEGREG", 0x3E, parser="gas") add_prefix("hnt", "SEGREG", 0x2E, parser="gas") # REX byte explicit prefixes for val, suf in enumerate(["", "z", "y", "yz", "x", "xz", "xy", "xyz"]): add_prefix("rex" + suf, "REX", 0x40+val, parser="gas", only64=True) add_prefix("rex64" + suf, "REX", 0x48+val, parser="gas", only64=True) ##################################################################### # Output generation ##################################################################### out_dir = "" if len(sys.argv) > 1: out_dir = sys.argv[1] output_groups(file(os.path.join(out_dir, "x86insns.c"), "wt")) output_gas_insns(file(os.path.join(out_dir, "x86insn_gas.gperf"), "wt")) output_nasm_insns(file(os.path.join(out_dir, "x86insn_nasm.gperf"), "wt"))
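# ---------------------------------------------------------------------
# Illustrative sketch only (not part of the generator above): this file
# is table-driven -- add_group() registers encoding templates under a
# group name, and add_insn() binds a mnemonic to a group plus opcode
# "modifiers". A minimal, hypothetical model of that registry pattern:

_demo_groups = {}   # group name -> list of template dicts
_demo_insns = {}    # mnemonic   -> (group name, modifiers)

def demo_add_group(name, **template):
    """Record one encoding template under a group name."""
    _demo_groups.setdefault(name, []).append(template)

def demo_add_insn(name, group, modifiers=()):
    """Bind a mnemonic to an already-registered group."""
    assert group in _demo_groups, "unknown group: %s" % group
    _demo_insns[name] = (group, list(modifiers))

demo_add_group("now3d_demo", opcode=[0x0F, 0x0F])
demo_add_insn("pfadd_demo", "now3d_demo", modifiers=[0x9E])
# ---------------------------------------------------------------------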
bsd-3-clause
miguelfervi/SSBW-Restaurantes
restaurantes/lib/python2.7/site-packages/django/utils/_os.py
502
3581
from __future__ import unicode_literals import os import sys import tempfile from os.path import abspath, dirname, isabs, join, normcase, normpath, sep from django.core.exceptions import SuspiciousFileOperation from django.utils import six from django.utils.encoding import force_text if six.PY2: fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() # Under Python 2, define our own abspath function that can handle joining # unicode paths to a current working directory that has non-ASCII characters # in it. This isn't necessary on Windows since the Windows version of abspath # handles this correctly. It also handles drive letters differently than the # pure Python implementation, so it's best not to replace it. if six.PY3 or os.name == 'nt': abspathu = abspath else: def abspathu(path): """ Version of os.path.abspath that uses the unicode representation of the current working directory, thus avoiding a UnicodeDecodeError in join when the cwd has non-ASCII characters. """ if not isabs(path): path = join(os.getcwdu(), path) return normpath(path) def upath(path): """ Always return a unicode path. """ if six.PY2 and not isinstance(path, six.text_type): return path.decode(fs_encoding) return path def npath(path): """ Always return a native path, that is unicode on Python 3 and bytestring on Python 2. """ if six.PY2 and not isinstance(path, bytes): return path.encode(fs_encoding) return path def safe_join(base, *paths): """ Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised). """ base = force_text(base) paths = [force_text(p) for p in paths] final_path = abspathu(join(base, *paths)) base_path = abspathu(base) # Ensure final_path starts with base_path (using normcase to ensure we # don't false-negative on case insensitive operating systems like Windows), # further, one of the following conditions must be true: # a) The next character is the path separator (to prevent conditions like # safe_join("/dir", "/../d")) # b) The final path must be the same as the base path. # c) The base path must be the most root path (meaning either "/" or "C:\\") if (not normcase(final_path).startswith(normcase(base_path + sep)) and normcase(final_path) != normcase(base_path) and dirname(normcase(base_path)) != normcase(base_path)): raise SuspiciousFileOperation( 'The joined path ({}) is located outside of the base path ' 'component ({})'.format(final_path, base_path)) return final_path def symlinks_supported(): """ A function to check if creating symlinks are supported in the host platform and/or if they are allowed to be created (e.g. on Windows it requires admin permissions). """ tmpdir = tempfile.mkdtemp() original_path = os.path.join(tmpdir, 'original') symlink_path = os.path.join(tmpdir, 'symlink') os.makedirs(original_path) try: os.symlink(original_path, symlink_path) supported = True except (OSError, NotImplementedError, AttributeError): supported = False else: os.remove(symlink_path) finally: os.rmdir(original_path) os.rmdir(tmpdir) return supported
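# Illustrative usage of safe_join() above -- a hedged smoke test, not part
# of django.utils._os; it assumes Django is importable and a POSIX path
# layout. Joining inside the base succeeds, while a traversal that escapes
# the base raises SuspiciousFileOperation:
if __name__ == '__main__':
    print(safe_join('/var/media', 'avatars', 'me.png'))  # /var/media/avatars/me.png
    try:
        safe_join('/var/media', '..', 'etc', 'passwd')
    except SuspiciousFileOperation as exc:
        print('blocked: %s' % exc)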
gpl-3.0
ufkapano/py-quats
src/qtools.py
1
1769
#!/usr/bin/python

import math
import random
from quats import Quat

# Rotates a vector with respect to a quat.
# The vector is from R^3, the result is from R^3, unit_quat is a unit quat.
def rotate1(vector, unit_quat):
    """Return the rotated vector (a unit quat is used)."""
    # From a vector to a quaternion.
    vec_quat = Quat(0, vector[0], vector[1], vector[2])
    # Rotate a vector.
    vec_quat = unit_quat * vec_quat * (~unit_quat)
    # Return a vector from R^3.
    return vec_quat.q[1:]

# Rotates a vector by 'angle' radians about 'axis';
# both are vectors from R^3, but quaternions are used internally.
def rotate2(vector, axis, angle):
    """Return the rotated vector (axis and angle used)."""
    # Make a unit quat.
    unit_quat = Quat.rot_quat(axis, angle)
    return rotate1(vector, unit_quat)

def rotate3(vector, phi, theta, psi):
    """Return the rotated vector (Euler angles used)."""
    unit_quat = Quat.rot_quat([0, 0, 1], phi)
    unit_quat *= Quat.rot_quat([0, 1, 0], theta)
    unit_quat *= Quat.rot_quat([0, 0, 1], psi)
    return rotate1(vector, unit_quat)

def random_quat_uniax():
    """Return a random rotation quat for uniaxial molecules."""
    phi = random.uniform(0, 2*math.pi)
    ct = random.uniform(-1, 1)
    theta = math.acos(ct)   # -ct would work just as well
    quat = Quat.rot_quat([0, 0, 1], phi)
    quat *= Quat.rot_quat([0, 1, 0], theta)
    return quat

def random_quat_biax():
    """Return a random rotation quat for biaxial molecules."""
    phi = random.uniform(0, 2*math.pi)
    ct = random.uniform(-1, 1)
    theta = math.acos(ct)   # -ct would work just as well
    psi = random.uniform(0, 2*math.pi)
    quat = Quat.rot_quat([0, 0, 1], phi)
    quat *= Quat.rot_quat([0, 1, 0], theta)
    quat *= Quat.rot_quat([0, 0, 1], psi)
    return quat

# EOF
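# Illustrative usage (a hedged check; it assumes Quat.rot_quat follows the
# usual half-angle convention): rotating the x axis a quarter turn about
# the z axis should give the y axis, up to floating-point noise.
if __name__ == '__main__':
    print(rotate2([1, 0, 0], [0, 0, 1], math.pi / 2))   # roughly [0, 1, 0]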
bsd-3-clause
Learningtribes/edx-platform
common/djangoapps/terrain/stubs/http.py
28
8643
""" Stub implementation of an HTTP service. """ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler import urllib import urlparse import threading import json from functools import wraps from lazy import lazy from logging import getLogger LOGGER = getLogger(__name__) def require_params(method, *required_keys): """ Decorator to ensure that the method has all the required parameters. Example: @require_params('GET', 'id', 'state') def handle_request(self): # .... would send a 400 response if no GET parameters were specified for 'id' or 'state' (or if those parameters had empty values). The wrapped function should be a method of a `StubHttpRequestHandler` subclass. Currently, "GET" and "POST" are the only supported methods. """ def decorator(func): @wraps(func) def wrapper(self, *args, **kwargs): # Read either GET querystring params or POST dict params if method == "GET": params = self.get_params elif method == "POST": params = self.post_dict else: raise ValueError("Unsupported method '{method}'".format(method=method)) # Check for required values missing = [] for key in required_keys: if params.get(key) is None: missing.append(key) if len(missing) > 0: msg = "Missing required key(s) {keys}".format(keys=",".join(missing)) self.send_response(400, content=msg, headers={'Content-type': 'text/plain'}) # If nothing is missing, execute the function as usual else: return func(self, *args, **kwargs) return wrapper return decorator class StubHttpRequestHandler(BaseHTTPRequestHandler, object): """ Handler for the stub HTTP service. """ protocol = "HTTP/1.0" def log_message(self, format_str, *args): """ Redirect messages to keep the test console clean. """ LOGGER.debug(self._format_msg(format_str, *args)) def log_error(self, format_str, *args): """ Helper to log a server error. """ LOGGER.error(self._format_msg(format_str, *args)) @lazy def request_content(self): """ Retrieve the content of the request. """ try: length = int(self.headers.getheader('content-length')) except (TypeError, ValueError): return "" else: return self.rfile.read(length) @lazy def post_dict(self): """ Retrieve the request POST parameters from the client as a dictionary. If no POST parameters can be interpreted, return an empty dict. """ contents = self.request_content # The POST dict will contain a list of values for each key. # None of our parameters are lists, however, so we map [val] --> val # If the list contains multiple entries, we pick the first one try: post_dict = urlparse.parse_qs(contents, keep_blank_values=True) return { key: list_val[0] for key, list_val in post_dict.items() } except: return dict() @lazy def get_params(self): """ Return the GET parameters (querystring in the URL). """ query = urlparse.urlparse(self.path).query # By default, `parse_qs` returns a list of values for each param # For convenience, we replace lists of 1 element with just the element return { key: value[0] if len(value) == 1 else value for key, value in urlparse.parse_qs(query).items() } @lazy def path_only(self): """ Return the URL path without GET parameters. Removes the trailing slash if there is one. """ path = urlparse.urlparse(self.path).path if path.endswith('/'): return path[:-1] else: return path def do_PUT(self): """ Allow callers to configure the stub server using the /set_config URL. The request should have POST data, such that: Each POST parameter is the configuration key. Each POST value is a JSON-encoded string value for the configuration. 
""" if self.path == "/set_config" or self.path == "/set_config/": if len(self.post_dict) > 0: for key, value in self.post_dict.iteritems(): # Decode the params as UTF-8 try: key = unicode(key, 'utf-8') value = unicode(value, 'utf-8') except UnicodeDecodeError: self.log_message("Could not decode request params as UTF-8") self.log_message(u"Set config '{0}' to '{1}'".format(key, value)) try: value = json.loads(value) except ValueError: self.log_message(u"Could not parse JSON: {0}".format(value)) self.send_response(400) else: self.server.config[key] = value self.send_response(200) # No parameters sent to configure, so return success by default else: self.send_response(200) else: self.send_response(404) def send_response(self, status_code, content=None, headers=None): """ Send a response back to the client with the HTTP `status_code` (int), `content` (str) and `headers` (dict). """ self.log_message( "Sent HTTP response: {0} with content '{1}' and headers {2}".format(status_code, content, headers) ) if headers is None: headers = { 'Access-Control-Allow-Origin': "*", } BaseHTTPRequestHandler.send_response(self, status_code) for (key, value) in headers.items(): self.send_header(key, value) if len(headers) > 0: self.end_headers() if content is not None: self.wfile.write(content) def send_json_response(self, content): """ Send a response with status code 200, the given content serialized as JSON, and the Content-Type header set appropriately """ self.send_response(200, json.dumps(content), {"Content-Type": "application/json"}) def _format_msg(self, format_str, *args): """ Format message for logging. `format_str` is a string with old-style Python format escaping; `args` is an array of values to fill into the string. """ if not args: format_str = urllib.unquote(format_str) return u"{0} - - [{1}] {2}\n".format( self.client_address[0], self.log_date_time_string(), format_str % args ) def do_HEAD(self): """ Respond to an HTTP HEAD request """ self.send_response(200) class StubHttpService(HTTPServer, object): """ Stub HTTP service implementation. """ # Subclasses override this to provide the handler class to use. # Should be a subclass of `StubHttpRequestHandler` HANDLER_CLASS = StubHttpRequestHandler def __init__(self, port_num=0): """ Configure the server to listen on localhost. Default is to choose an arbitrary open port. """ address = ('0.0.0.0', port_num) HTTPServer.__init__(self, address, self.HANDLER_CLASS) # Create a dict to store configuration values set by the client self.config = dict() # Start the server in a separate thread server_thread = threading.Thread(target=self.serve_forever) server_thread.daemon = True server_thread.start() # Log the port we're using to help identify port conflict errors LOGGER.debug('Starting service on port {0}'.format(self.port)) def shutdown(self): """ Stop the server and free up the port """ # First call superclass shutdown() HTTPServer.shutdown(self) # We also need to manually close the socket self.socket.close() @property def port(self): """ Return the port that the service is listening on. """ _, port = self.server_address return port
agpl-3.0
ToBaer94/PygameTowerDefense
level.py
1
4685
import pygame as pg
import tilerenderer
from tower import Tower, CannonTower, ExplosiveTower, FireTower, SlowTower, MultiTower, LaserTower, CrescentTower
from trap import Mine
from creep import Creep, Worm, Behemoth, SwiftWalker
from os import path, pardir
import random

Vector = pg.math.Vector2

CREEP = 0
WORM = 1
BEHEMOTH = 2
SWIFTWALKER = 3


class Level(object):
    def __init__(self, level_dir, waves, money):
        self.tmx_file = path.join(level_dir)
        self.tile_renderer = tilerenderer.Renderer(self.tmx_file)
        self.map_surface = self.tile_renderer.make_map()
        self.map_rect = self.map_surface.get_rect()

        self.tower_group = pg.sprite.Group()
        self.trap_group = pg.sprite.Group()
        self.creep_group = pg.sprite.Group()
        self.bullet_group = pg.sprite.Group()
        self.beam_group = pg.sprite.Group()

        self.all_towers = [Tower, CannonTower, ExplosiveTower, FireTower,
                           SlowTower, MultiTower, LaserTower, CrescentTower]
        self.trap_list = [Mine]

        """
        self.start_pos = None
        self.end_pos = None
        for x in range(self.tile_renderer.tmx_data.width):
            for y in range(self.tile_renderer.tmx_data.height):
                proper = self.tile_renderer.tmx_data.get_tile_properties(x, y, 0)
                if "start" in proper:
                    self.start_pos = (x, y)
                if "end" in proper:
                    self.end_pos = (x, y)
        """

        self.spawner = Spawner(self)
        self.waves = waves
        self.wave_number = 0
        self.current_wave = self.waves[self.wave_number]
        self.current_spawn = 0
        self.last_spawn = pg.time.get_ticks()

        self.money = money
        self.earned_money = 0
        self.killed_creeps = 0

        self.creep_path = []

        self.game_over = False
        self.beaten = False

    def update(self, dt):
        # print self.current_spawn, self.current_wave
        # A wave can still spawn if any lane has creeps left at this index.
        can_spawn = False
        for wave in self.current_wave:
            if len(wave) > self.current_spawn:
                can_spawn = True
                break
        if can_spawn:
            now = pg.time.get_ticks()
            if now - self.last_spawn > 1500:
                self.last_spawn = now
                for lane, wave in enumerate(self.current_wave):
                    try:
                        enemy = wave[self.current_spawn]
                        self.spawner.spawn_enemy(enemy, lane)
                    except IndexError:
                        # Lanes may have different lengths; a shorter lane
                        # simply stops spawning before the others.
                        print "end of wave, technically."
                        print "wave length differs between lanes"
                self.current_spawn += 1

        self.tower_group.update(dt)
        self.trap_group.update(dt)
        self.creep_group.update(dt)
        self.bullet_group.update(dt)
        self.beam_group.update(dt)

        # Advance to the next wave once everything spawned has been cleared.
        if not can_spawn and len(self.creep_group) == 0:
            print "end of wave"
            if self.wave_number < len(self.waves) - 1:
                self.wave_number += 1
                self.current_wave = self.waves[self.wave_number]
                self.current_spawn = 0
            else:
                self.beaten = True

    def draw(self, screen):
        screen.blit(self.map_surface, (0, 0))
        self.tower_group.draw(screen)
        self.trap_group.draw(screen)
        self.creep_group.draw(screen)
        self.bullet_group.draw(screen)
        #for bullet in self.bullet_group:
        #    bullet.draw_debug(screen)
        self.beam_group.draw(screen)
        for creep in self.creep_group:
            creep.draw_ui(screen)
            creep.draw_debug(screen)
        #self.debug_beam(screen)

    def debug_beam(self, screen):
        for cir in self.beam_group:
            for cle in cir.circle_list:
                pos_x = int(cle[0].x)
                pos_y = int(cle[0].y)
                pg.draw.circle(screen, pg.Color("black"), (pos_x, pos_y), cle[1], 1)


class Spawner(object):
    def __init__(self, level):
        self.level = level

    def spawn_enemy(self, enemy, lane):
        print lane
        if enemy == CREEP:
            creep = Creep(self.level, self.level.creep_path[lane])
        elif enemy == WORM:
            creep = Worm(self.level, self.level.creep_path[lane])
        elif enemy == BEHEMOTH:
            creep = Behemoth(self.level, self.level.creep_path[lane])
        elif enemy == SWIFTWALKER:
            creep = SwiftWalker(self.level, self.level.creep_path[lane])
        else:
            # Bail out instead of falling through with 'creep' unbound.
            print enemy, "is not a defined creep"
            return
        self.level.creep_group.add(creep)
lgpl-3.0
eul-721/The-Perfect-Pokemon-Team-Balancer
libs/env/Lib/distutils/__init__.py
1211
3983
import os
import sys
import warnings
import imp
import opcode  # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory

dirname = os.path.dirname

distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
    warnings.warn(
        "The virtualenv distutils package at %s appears to be in the same "
        "location as the system distutils?" % distutils_path)
else:
    __path__.insert(0, distutils_path)
    real_distutils = imp.load_module("_virtualenv_distutils", None,
                                     distutils_path, ('', '', imp.PKG_DIRECTORY))
    # Copy the relevant attributes
    try:
        __revision__ = real_distutils.__revision__
    except AttributeError:
        pass
    __version__ = real_distutils.__version__

from distutils import dist, sysconfig

try:
    basestring
except NameError:
    basestring = str

## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)

if sys.platform == 'win32':
    from distutils.command.build_ext import build_ext as old_build_ext
    class build_ext(old_build_ext):
        def finalize_options(self):
            if self.library_dirs is None:
                self.library_dirs = []
            elif isinstance(self.library_dirs, basestring):
                self.library_dirs = self.library_dirs.split(os.pathsep)
            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
            old_build_ext.finalize_options(self)
    from distutils.command import build_ext as build_ext_module
    build_ext_module.build_ext = build_ext

## distutils.dist patches:

old_find_config_files = dist.Distribution.find_config_files

def find_config_files(self):
    found = old_find_config_files(self)
    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
    #if os.path.exists(system_distutils):
    #    found.insert(0, system_distutils)
    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"
    user_filename = os.path.join(sys.prefix, user_filename)
    if os.path.isfile(user_filename):
        for item in list(found):
            if item.endswith('pydistutils.cfg'):
                found.remove(item)
        found.append(user_filename)
    return found

dist.Distribution.find_config_files = find_config_files

## distutils.sysconfig patches:

old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
    if prefix is None:
        prefix = sys.real_prefix
    return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc

old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    if standard_lib and prefix is None:
        prefix = sys.real_prefix
    return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib

old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
    real_vars = old_get_config_vars(*args)
    if sys.platform == 'win32':
        lib_dir = os.path.join(sys.real_prefix, "libs")
        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
            real_vars['LIBDIR'] = lib_dir  # asked for all
        elif isinstance(real_vars, list) and 'LIBDIR' in args:
            real_vars = real_vars + [lib_dir]  # asked for list
    return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
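# Net effect of the sysconfig patches above, sketched with hypothetical
# paths (illustration only, not virtualenv code): inside a virtualenv,
# header and stdlib lookups are redirected to the base interpreter via
# sys.real_prefix, e.g.
#
#   >>> from distutils import sysconfig
#   >>> sysconfig.get_python_inc()
#   '/usr/include/python2.7'        # not '<venv>/include/python2.7'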
gpl-2.0
AlanCoding/tower-cli
tower_cli/utils/__init__.py
2
1424
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

import click

from tower_cli.conf import settings
from tower_cli import exceptions  # NOQA


@functools.wraps(click.secho)
def secho(message, **kwargs):
    """A wrapper around click.secho that strips any styling keywords
    when color output has been disabled in the settings.
    """
    # If colors are disabled, remove any color or other style data
    # from keyword arguments.
    if not settings.color:
        for key in ('fg', 'bg', 'bold', 'blink'):
            kwargs.pop(key, None)

    # Okay, now call click.secho normally.
    return click.secho(message, **kwargs)


def supports_oauth():
    # Import here to avoid a circular import
    from tower_cli.api import client
    try:
        resp = client.head('/o/')
    except exceptions.NotFound:
        return False
    return resp.ok
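def _demo_secho():
    """Hedged demo (not called by tower-cli itself): with settings.color
    falsy, the style kwargs below are stripped before reaching click.secho,
    so both calls print identical plain text."""
    secho('Resource saved.', fg='green', bold=True)
    secho('Resource saved.')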
apache-2.0
sssemil/cjdns
node_build/dependencies/libuv/build/gyp/test/no-cpp/gyptest-no-cpp.py
47
1500
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Checks that C-only targets aren't linked against libstdc++. """ import TestGyp import re import subprocess import sys # set |match| to ignore build stderr output. test = TestGyp.TestGyp(match = lambda a, b: True) if sys.platform != 'win32' and test.format not in ('make', 'android'): # TODO: This doesn't pass with make. # TODO: Does a test like this make sense with Windows? Android? CHDIR = 'src' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', 'no_cpp', chdir=CHDIR) def LinksLibStdCpp(path): path = test.built_file_path(path, chdir=CHDIR) if sys.platform == 'darwin': proc = subprocess.Popen(['otool', '-L', path], stdout=subprocess.PIPE) else: proc = subprocess.Popen(['ldd', path], stdout=subprocess.PIPE) output = proc.communicate()[0] assert not proc.returncode return 'libstdc++' in output or 'libc++' in output if LinksLibStdCpp('no_cpp'): test.fail_test() build_error_code = { 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`) 'make': 2, 'ninja': 1, 'cmake': 0, # CMake picks the compiler driver based on transitive checks. 'xcode-ninja': [1, 65], }[test.format] test.build('test.gyp', 'no_cpp_dep_on_cc_lib', chdir=CHDIR, status=build_error_code) test.pass_test()
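# Standalone sketch of the same probe used above (illustration only, not
# part of the test; assumes 'ldd' on Linux or 'otool' on macOS, and that
# binary_path points at a real executable):
def links_cpp_runtime(binary_path):
    tool = (['otool', '-L', binary_path] if sys.platform == 'darwin'
            else ['ldd', binary_path])
    out = subprocess.Popen(tool, stdout=subprocess.PIPE).communicate()[0]
    return 'libstdc++' in out or 'libc++' in out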
gpl-3.0
cntnboys/410Lab6
build/django/build/lib.linux-x86_64-2.7/django/db/migrations/questioner.py
119
7690
from __future__ import print_function, unicode_literals import importlib import os import sys from django.apps import apps from django.db.models.fields import NOT_PROVIDED from django.utils import datetime_safe, six, timezone from django.utils.six.moves import input from .loader import MIGRATIONS_MODULE_NAME class MigrationQuestioner(object): """ Gives the autodetector responses to questions it might have. This base class has a built-in noninteractive mode, but the interactive subclass is what the command-line arguments will use. """ def __init__(self, defaults=None, specified_apps=None, dry_run=None): self.defaults = defaults or {} self.specified_apps = specified_apps or set() self.dry_run = dry_run def ask_initial(self, app_label): "Should we create an initial migration for the app?" # If it was specified on the command line, definitely true if app_label in self.specified_apps: return True # Otherwise, we look to see if it has a migrations module # without any Python files in it, apart from __init__.py. # Apps from the new app template will have these; the python # file check will ensure we skip South ones. try: app_config = apps.get_app_config(app_label) except LookupError: # It's a fake app. return self.defaults.get("ask_initial", False) migrations_import_path = "%s.%s" % (app_config.name, MIGRATIONS_MODULE_NAME) try: migrations_module = importlib.import_module(migrations_import_path) except ImportError: return self.defaults.get("ask_initial", False) else: if hasattr(migrations_module, "__file__"): filenames = os.listdir(os.path.dirname(migrations_module.__file__)) elif hasattr(migrations_module, "__path__"): if len(migrations_module.__path__) > 1: return False filenames = os.listdir(list(migrations_module.__path__)[0]) return not any(x.endswith(".py") for x in filenames if x != "__init__.py") def ask_not_null_addition(self, field_name, model_name): "Adding a NOT NULL field to a model" # None means quit return None def ask_not_null_alteration(self, field_name, model_name): "Changing a NULL field to NOT NULL" # None means quit return None def ask_rename(self, model_name, old_name, new_name, field_instance): "Was this field really renamed?" return self.defaults.get("ask_rename", False) def ask_rename_model(self, old_model_state, new_model_state): "Was this model really renamed?" return self.defaults.get("ask_rename_model", False) def ask_merge(self, app_label): "Do you really want to merge these migrations?" return self.defaults.get("ask_merge", False) class InteractiveMigrationQuestioner(MigrationQuestioner): def _boolean_input(self, question, default=None): result = input("%s " % question) if not result and default is not None: return default while len(result) < 1 or result[0].lower() not in "yn": result = input("Please answer yes or no: ") return result[0].lower() == "y" def _choice_input(self, question, choices): print(question) for i, choice in enumerate(choices): print(" %s) %s" % (i + 1, choice)) result = input("Select an option: ") while True: try: value = int(result) if 0 < value <= len(choices): return value except ValueError: pass result = input("Please select a valid option: ") def _ask_default(self): print("Please enter the default value now, as valid Python") print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now()") while True: if six.PY3: # Six does not correctly abstract over the fact that # py3 input returns a unicode string, while py2 raw_input # returns a bytestring. 
code = input(">>> ") else: code = input(">>> ").decode(sys.stdin.encoding) if not code: print("Please enter some code, or 'exit' (with no quotes) to exit.") elif code == "exit": sys.exit(1) else: try: return eval(code, {}, {"datetime": datetime_safe, "timezone": timezone}) except (SyntaxError, NameError) as e: print("Invalid input: %s" % e) def ask_not_null_addition(self, field_name, model_name): "Adding a NOT NULL field to a model" if not self.dry_run: choice = self._choice_input( "You are trying to add a non-nullable field '%s' to %s without a default; " "we can't do that (the database needs something to populate existing rows).\n" "Please select a fix:" % (field_name, model_name), [ "Provide a one-off default now (will be set on all existing rows)", "Quit, and let me add a default in models.py", ] ) if choice == 2: sys.exit(3) else: return self._ask_default() return None def ask_not_null_alteration(self, field_name, model_name): "Changing a NULL field to NOT NULL" if not self.dry_run: choice = self._choice_input( "You are trying to change the nullable field '%s' on %s to non-nullable " "without a default; we can't do that (the database needs something to " "populate existing rows).\n" "Please select a fix:" % (field_name, model_name), [ "Provide a one-off default now (will be set on all existing rows)", ("Ignore for now, and let me handle existing rows with NULL myself " "(e.g. adding a RunPython or RunSQL operation in the new migration " "file before the AlterField operation)"), "Quit, and let me add a default in models.py", ] ) if choice == 2: return NOT_PROVIDED elif choice == 3: sys.exit(3) else: return self._ask_default() return None def ask_rename(self, model_name, old_name, new_name, field_instance): "Was this field really renamed?" msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]" return self._boolean_input(msg % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False) def ask_rename_model(self, old_model_state, new_model_state): "Was this model really renamed?" msg = "Did you rename the %s.%s model to %s? [y/N]" return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name, new_model_state.name), False) def ask_merge(self, app_label): return self._boolean_input( "\nMerging will only work if the operations printed above do not conflict\n" + "with each other (working on different fields or models)\n" + "Do you want to merge these migration branches? [y/N]", False, )
apache-2.0
Sidney84/pa-chromium
remoting/tools/zip2msi.py
76
8258
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generates .msi from a .zip archive or an unpacked directory. The structure of the input archive or directory should look like this: +- archive.zip +- archive +- parameters.json The name of the archive and the top level directory in the archive must match. When an unpacked directory is used as the input "archive.zip/archive" should be passed via the command line. 'parameters.json' specifies the parameters to be passed to candle/light and must have the following structure: { "defines": { "name": "value" }, "extensions": [ "WixFirewallExtension.dll" ], "switches": [ '-nologo' ], "source": "chromoting.wxs", "bind_path": "files", "sign": [ ... ], "candle": { ... }, "light": { ... } } "source" specifies the name of the input .wxs relative to "archive.zip/archive". "bind_path" specifies the path where to look for binary files referenced by .wxs relative to "archive.zip/archive". This script is used for both building Chromoting Host installation during Chromuim build and for signing Chromoting Host installation later. There are two copies of this script because of that: - one in Chromium tree at src/remoting/tools/zip2msi.py. - another one next to the signing scripts. The copies of the script can be out of sync so make sure that a newer version is compatible with the older ones when updating the script. """ import copy import json from optparse import OptionParser import os import re import subprocess import sys import zipfile def UnpackZip(target, source): """Unpacks |source| archive to |target| directory.""" target = os.path.normpath(target) archive = zipfile.ZipFile(source, 'r') for f in archive.namelist(): target_file = os.path.normpath(os.path.join(target, f)) # Sanity check to make sure .zip uses relative paths. if os.path.commonprefix([target_file, target]) != target: print "Failed to unpack '%s': '%s' is not under '%s'" % ( source, target_file, target) return 1 # Create intermediate directories. target_dir = os.path.dirname(target_file) if not os.path.exists(target_dir): os.makedirs(target_dir) archive.extract(f, target) return 0 def Merge(left, right): """Merges two values. Raises: TypeError: |left| and |right| cannot be merged. Returns: - if both |left| and |right| are dictionaries, they are merged recursively. - if both |left| and |right| are lists, the result is a list containing elements from both lists. - if both |left| and |right| are simple value, |right| is returned. - |TypeError| exception is raised if a dictionary or a list are merged with a non-dictionary or non-list correspondingly. """ if isinstance(left, dict): if isinstance(right, dict): retval = copy.copy(left) for key, value in right.iteritems(): if key in retval: retval[key] = Merge(retval[key], value) else: retval[key] = value return retval else: raise TypeError('Error: merging a dictionary and non-dictionary value') elif isinstance(left, list): if isinstance(right, list): return left + right else: raise TypeError('Error: merging a list and non-list value') else: if isinstance(right, dict): raise TypeError('Error: merging a dictionary and non-dictionary value') elif isinstance(right, list): raise TypeError('Error: merging a dictionary and non-dictionary value') else: return right quote_matcher_regex = re.compile(r'\s|"') quote_replacer_regex = re.compile(r'(\\*)"') def QuoteArgument(arg): """Escapes a Windows command-line argument. 
So that the Win32 CommandLineToArgv function will turn the escaped result back into the original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx ("Parsing C++ Command-Line Arguments") to understand why we have to do this. Args: arg: the string to be escaped. Returns: the escaped string. """ def _Replace(match): # For a literal quote, CommandLineToArgv requires an odd number of # backslashes preceding it, and it produces half as many literal backslashes # (rounded down). So we need to produce 2n+1 backslashes. return 2 * match.group(1) + '\\"' if re.search(quote_matcher_regex, arg): # Escape all quotes so that they are interpreted literally. arg = quote_replacer_regex.sub(_Replace, arg) # Now add unescaped quotes so that any whitespace is interpreted literally. return '"' + arg + '"' else: return arg def GenerateCommandLine(tool, source, dest, parameters): """Generates the command line for |tool|.""" # Merge/apply tool-specific parameters params = copy.copy(parameters) if tool in parameters: params = Merge(params, params[tool]) wix_path = os.path.normpath(params.get('wix_path', '')) switches = [os.path.join(wix_path, tool), '-nologo'] # Append the list of defines and extensions to the command line switches. for name, value in params.get('defines', {}).iteritems(): switches.append('-d%s=%s' % (name, value)) for ext in params.get('extensions', []): switches += ('-ext', os.path.join(wix_path, ext)) # Append raw switches switches += params.get('switches', []) # Append the input and output files switches += ('-out', dest, source) # Generate the actual command line #return ' '.join(map(QuoteArgument, switches)) return switches def Run(args): """Runs a command interpreting the passed |args| as a command line.""" command = ' '.join(map(QuoteArgument, args)) popen = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() if popen.returncode: print command for line in out.splitlines(): print line print '%s returned %d' % (args[0], popen.returncode) return popen.returncode def GenerateMsi(target, source, parameters): """Generates .msi from the installation files prepared by Chromium build.""" parameters['basename'] = os.path.splitext(os.path.basename(source))[0] # The script can handle both forms of input a directory with unpacked files or # a ZIP archive with the same files. In the latter case the archive should be # unpacked to the intermediate directory. source_dir = None if os.path.isdir(source): # Just use unpacked files from the supplied directory. source_dir = source else: # Unpack .zip rc = UnpackZip(parameters['intermediate_dir'], source) if rc != 0: return rc source_dir = '%(intermediate_dir)s\\%(basename)s' % parameters # Read parameters from 'parameters.json'. f = open(os.path.join(source_dir, 'parameters.json')) parameters = Merge(json.load(f), parameters) f.close() if 'source' not in parameters: print 'The source .wxs is not specified' return 1 if 'bind_path' not in parameters: print 'The binding path is not specified' return 1 wxs = os.path.join(source_dir, parameters['source']) # Add the binding path to the light-specific parameters. bind_path = os.path.join(source_dir, parameters['bind_path']) parameters = Merge(parameters, {'light': {'switches': ['-b', bind_path]}}) # Run candle and light to generate the installation. 
wixobj = '%(intermediate_dir)s\\%(basename)s.wixobj' % parameters args = GenerateCommandLine('candle', wxs, wixobj, parameters) rc = Run(args) if rc: return rc args = GenerateCommandLine('light', wixobj, target, parameters) rc = Run(args) if rc: return rc return 0 def main(): usage = 'Usage: zip2msi [options] <input.zip> <output.msi>' parser = OptionParser(usage=usage) parser.add_option('--intermediate_dir', dest='intermediate_dir', default='.') parser.add_option('--wix_path', dest='wix_path', default='.') options, args = parser.parse_args() if len(args) != 2: parser.error('two positional arguments expected') return GenerateMsi(args[1], args[0], dict(options.__dict__)) if __name__ == '__main__': sys.exit(main())
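# Hedged self-checks for the helpers above (illustrative only; never invoked
# by the tool). They spell out the CommandLineToArgv quoting rule and the
# recursive Merge() semantics documented in the docstrings:
def _demo_helpers():
    assert QuoteArgument('plain') == 'plain'
    assert QuoteArgument('with space') == '"with space"'
    # A literal quote needs 2n+1 preceding backslashes; here n=0, so one:
    assert QuoteArgument('say "hi"') == '"say \\"hi\\""'
    # Dicts merge recursively, lists concatenate, scalars take the right side:
    assert Merge({'a': [1], 'b': 1}, {'a': [2], 'b': 2}) == {'a': [1, 2], 'b': 2}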
bsd-3-clause
seales/PHINet
PHINetCloud/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
292
114315
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from compiler.ast import Const from compiler.ast import Dict from compiler.ast import Discard from compiler.ast import List from compiler.ast import Module from compiler.ast import Node from compiler.ast import Stmt import compiler import gyp.common import gyp.simple_copy import multiprocessing import optparse import os.path import re import shlex import signal import subprocess import sys import threading import time import traceback from gyp.common import GypError from gyp.common import OrderedSet # A list of types that are treated as linkable. linkable_types = ['executable', 'shared_library', 'loadable_module'] # A list of sections that contain links to other targets. dependency_sections = ['dependencies', 'export_dependent_settings'] # base_path_sections is a list of sections defined by GYP that contain # pathnames. The generators can provide more keys, the two lists are merged # into path_sections, but you should call IsPathSection instead of using either # list directly. base_path_sections = [ 'destination', 'files', 'include_dirs', 'inputs', 'libraries', 'outputs', 'sources', ] path_sections = set() # These per-process dictionaries are used to cache build file data when loading # in parallel mode. per_process_data = {} per_process_aux_data = {} def IsPathSection(section): # If section ends in one of the '=+?!' characters, it's applied to a section # without the trailing characters. '/' is notably absent from this list, # because there's no way for a regular expression to be treated as a path. while section[-1:] in '=+?!': section = section[:-1] if section in path_sections: return True # Sections mathing the regexp '_(dir|file|path)s?$' are also # considered PathSections. Using manual string matching since that # is much faster than the regexp and this can be called hundreds of # thousands of times so micro performance matters. if "_" in section: tail = section[-6:] if tail[-1] == 's': tail = tail[:-1] if tail[-5:] in ('_file', '_path'): return True return tail[-4:] == '_dir' return False # base_non_configuration_keys is a list of key names that belong in the target # itself and should not be propagated into its configurations. It is merged # with a list that can come from the generator to # create non_configuration_keys. base_non_configuration_keys = [ # Sections that must exist inside targets and not configurations. 'actions', 'configurations', 'copies', 'default_configuration', 'dependencies', 'dependencies_original', 'libraries', 'postbuilds', 'product_dir', 'product_extension', 'product_name', 'product_prefix', 'rules', 'run_as', 'sources', 'standalone_static_library', 'suppress_wildcard', 'target_name', 'toolset', 'toolsets', 'type', # Sections that can be found inside targets or configurations, but that # should not be propagated from targets into their configurations. 'variables', ] non_configuration_keys = [] # Keys that do not belong inside a configuration dictionary. invalid_configuration_keys = [ 'actions', 'all_dependent_settings', 'configurations', 'dependencies', 'direct_dependent_settings', 'libraries', 'link_settings', 'sources', 'standalone_static_library', 'target_name', 'type', ] # Controls whether or not the generator supports multiple toolsets. 
multiple_toolsets = False # Paths for converting filelist paths to output paths: { # toplevel, # qualified_output_dir, # } generator_filelist_paths = None def GetIncludedBuildFiles(build_file_path, aux_data, included=None): """Return a list of all build files included into build_file_path. The returned list will contain build_file_path as well as all other files that it included, either directly or indirectly. Note that the list may contain files that were included into a conditional section that evaluated to false and was not merged into build_file_path's dict. aux_data is a dict containing a key for each build file or included build file. Those keys provide access to dicts whose "included" keys contain lists of all other files included by the build file. included should be left at its default None value by external callers. It is used for recursion. The returned list will not contain any duplicate entries. Each build file in the list will be relative to the current directory. """ if included == None: included = [] if build_file_path in included: return included included.append(build_file_path) for included_build_file in aux_data[build_file_path].get('included', []): GetIncludedBuildFiles(included_build_file, aux_data, included) return included def CheckedEval(file_contents): """Return the eval of a gyp file. The gyp file is restricted to dictionaries and lists only, and repeated keys are not allowed. Note that this is slower than eval() is. """ ast = compiler.parse(file_contents) assert isinstance(ast, Module) c1 = ast.getChildren() assert c1[0] is None assert isinstance(c1[1], Stmt) c2 = c1[1].getChildren() assert isinstance(c2[0], Discard) c3 = c2[0].getChildren() assert len(c3) == 1 return CheckNode(c3[0], []) def CheckNode(node, keypath): if isinstance(node, Dict): c = node.getChildren() dict = {} for n in range(0, len(c), 2): assert isinstance(c[n], Const) key = c[n].getChildren()[0] if key in dict: raise GypError("Key '" + key + "' repeated at level " + repr(len(keypath) + 1) + " with key path '" + '.'.join(keypath) + "'") kp = list(keypath) # Make a copy of the list for descending this node. kp.append(key) dict[key] = CheckNode(c[n + 1], kp) return dict elif isinstance(node, List): c = node.getChildren() children = [] for index, child in enumerate(c): kp = list(keypath) # Copy list. kp.append(repr(index)) children.append(CheckNode(child, kp)) return children elif isinstance(node, Const): return node.getChildren()[0] else: raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) + "': " + repr(node)) def LoadOneBuildFile(build_file_path, data, aux_data, includes, is_target, check): if build_file_path in data: return data[build_file_path] if os.path.exists(build_file_path): build_file_contents = open(build_file_path).read() else: raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd())) build_file_data = None try: if check: build_file_data = CheckedEval(build_file_contents) else: build_file_data = eval(build_file_contents, {'__builtins__': None}, None) except SyntaxError, e: e.filename = build_file_path raise except Exception, e: gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path) raise if type(build_file_data) is not dict: raise GypError("%s does not evaluate to a dictionary." % build_file_path) data[build_file_path] = build_file_data aux_data[build_file_path] = {} # Scan for includes and merge them in. 
if ('skip_includes' not in build_file_data or not build_file_data['skip_includes']): try: if is_target: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, includes, check) else: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, None, check) except Exception, e: gyp.common.ExceptionAppend(e, 'while reading includes of ' + build_file_path) raise return build_file_data def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data, includes, check): includes_list = [] if includes != None: includes_list.extend(includes) if 'includes' in subdict: for include in subdict['includes']: # "include" is specified relative to subdict_path, so compute the real # path to include by appending the provided "include" to the directory # in which subdict_path resides. relative_include = \ os.path.normpath(os.path.join(os.path.dirname(subdict_path), include)) includes_list.append(relative_include) # Unhook the includes list, it's no longer needed. del subdict['includes'] # Merge in the included files. for include in includes_list: if not 'included' in aux_data[subdict_path]: aux_data[subdict_path]['included'] = [] aux_data[subdict_path]['included'].append(include) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include) MergeDicts(subdict, LoadOneBuildFile(include, data, aux_data, None, False, check), subdict_path, include) # Recurse into subdictionaries. for k, v in subdict.iteritems(): if type(v) is dict: LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, None, check) elif type(v) is list: LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, check) # This recurses into lists so that it can look for dicts. def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check): for item in sublist: if type(item) is dict: LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data, None, check) elif type(item) is list: LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check) # Processes toolsets in all the targets. This recurses into condition entries # since they can contain toolsets as well. def ProcessToolsetsInDict(data): if 'targets' in data: target_list = data['targets'] new_target_list = [] for target in target_list: # If this target already has an explicit 'toolset', and no 'toolsets' # list, don't modify it further. if 'toolset' in target and 'toolsets' not in target: new_target_list.append(target) continue if multiple_toolsets: toolsets = target.get('toolsets', ['target']) else: toolsets = ['target'] # Make sure this 'toolsets' definition is only processed once. if 'toolsets' in target: del target['toolsets'] if len(toolsets) > 0: # Optimization: only do copies if more than one toolset is specified. for build in toolsets[1:]: new_target = gyp.simple_copy.deepcopy(target) new_target['toolset'] = build new_target_list.append(new_target) target['toolset'] = toolsets[0] new_target_list.append(target) data['targets'] = new_target_list if 'conditions' in data: for condition in data['conditions']: if type(condition) is list: for condition_dict in condition[1:]: if type(condition_dict) is dict: ProcessToolsetsInDict(condition_dict) # TODO(mark): I don't love this name. It just means that it's going to load # a build file that contains targets and is expected to provide a targets dict # that contains the targets... 
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes, depth, check, load_dependencies): # If depth is set, predefine the DEPTH variable to be a relative path from # this build file's directory to the directory identified by depth. if depth: # TODO(dglazkov) The backslash/forward-slash replacement at the end is a # temporary measure. This should really be addressed by keeping all paths # in POSIX until actual project generation. d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path)) if d == '': variables['DEPTH'] = '.' else: variables['DEPTH'] = d.replace('\\', '/') # The 'target_build_files' key is only set when loading target build files in # the non-parallel code path, where LoadTargetBuildFile is called # recursively. In the parallel code path, we don't need to check whether the # |build_file_path| has already been loaded, because the 'scheduled' set in # ParallelState guarantees that we never load the same |build_file_path| # twice. if 'target_build_files' in data: if build_file_path in data['target_build_files']: # Already loaded. return False data['target_build_files'].add(build_file_path) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Target Build File '%s'", build_file_path) build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, includes, True, check) # Store DEPTH for later use in generators. build_file_data['_DEPTH'] = depth # Set up the included_files key indicating which .gyp files contributed to # this target dict. if 'included_files' in build_file_data: raise GypError(build_file_path + ' must not contain included_files key') included = GetIncludedBuildFiles(build_file_path, aux_data) build_file_data['included_files'] = [] for included_file in included: # included_file is relative to the current directory, but it needs to # be made relative to build_file_path's directory. included_relative = \ gyp.common.RelativePath(included_file, os.path.dirname(build_file_path)) build_file_data['included_files'].append(included_relative) # Do a first round of toolsets expansion so that conditions can be defined # per toolset. ProcessToolsetsInDict(build_file_data) # Apply "pre"/"early" variable expansions and condition evaluations. ProcessVariablesAndConditionsInDict( build_file_data, PHASE_EARLY, variables, build_file_path) # Since some toolsets might have been defined conditionally, perform # a second round of toolsets expansion now. ProcessToolsetsInDict(build_file_data) # Look at each project's target_defaults dict, and merge settings into # targets. if 'target_defaults' in build_file_data: if 'targets' not in build_file_data: raise GypError("Unable to find targets in build file %s" % build_file_path) index = 0 while index < len(build_file_data['targets']): # This procedure needs to give the impression that target_defaults is # used as defaults, and the individual targets inherit from that. # The individual targets need to be merged into the defaults. Make # a deep copy of the defaults for each target, merge the target dict # as found in the input file into that copy, and then hook up the # copy with the target-specific data merged into it as the replacement # target dict. old_target_dict = build_file_data['targets'][index] new_target_dict = gyp.simple_copy.deepcopy( build_file_data['target_defaults']) MergeDicts(new_target_dict, old_target_dict, build_file_path, build_file_path) build_file_data['targets'][index] = new_target_dict index += 1 # No longer needed. del build_file_data['target_defaults'] # Look for dependencies. 
This means that dependency resolution occurs # after "pre" conditionals and variable expansion, but before "post" - # in other words, you can't put a "dependencies" section inside a "post" # conditional within a target. dependencies = [] if 'targets' in build_file_data: for target_dict in build_file_data['targets']: if 'dependencies' not in target_dict: continue for dependency in target_dict['dependencies']: dependencies.append( gyp.common.ResolveTarget(build_file_path, dependency, None)[0]) if load_dependencies: for dependency in dependencies: try: LoadTargetBuildFile(dependency, data, aux_data, variables, includes, depth, check, load_dependencies) except Exception, e: gyp.common.ExceptionAppend( e, 'while loading dependencies of %s' % build_file_path) raise else: return (build_file_path, dependencies) def CallLoadTargetBuildFile(global_flags, build_file_path, variables, includes, depth, check, generator_input_info): """Wrapper around LoadTargetBuildFile for parallel processing. This wrapper is used when LoadTargetBuildFile is executed in a worker process. """ try: signal.signal(signal.SIGINT, signal.SIG_IGN) # Apply globals so that the worker process behaves the same. for key, value in global_flags.iteritems(): globals()[key] = value SetGeneratorGlobals(generator_input_info) result = LoadTargetBuildFile(build_file_path, per_process_data, per_process_aux_data, variables, includes, depth, check, False) if not result: return result (build_file_path, dependencies) = result # We can safely pop the build_file_data from per_process_data because it # will never be referenced by this process again, so we don't need to keep # it in the cache. build_file_data = per_process_data.pop(build_file_path) # This gets serialized and sent back to the main process via a pipe. # It's handled in LoadTargetBuildFileCallback. return (build_file_path, build_file_data, dependencies) except GypError, e: sys.stderr.write("gyp: %s\n" % e) return None except Exception, e: print >>sys.stderr, 'Exception:', e print >>sys.stderr, traceback.format_exc() return None class ParallelProcessingError(Exception): pass class ParallelState(object): """Class to keep track of state when processing input files in parallel. If build files are loaded in parallel, use this to keep track of state during farming out and processing parallel jobs. It's stored in a global so that the callback function can have access to it. """ def __init__(self): # The multiprocessing pool. self.pool = None # The condition variable used to protect this object and notify # the main loop when there might be more data to process. self.condition = None # The "data" dict that was passed to LoadTargetBuildFileParallel self.data = None # The number of parallel calls outstanding; decremented when a response # was received. self.pending = 0 # The set of all build files that have been scheduled, so we don't # schedule the same one twice. self.scheduled = set() # A list of dependency build file paths that haven't been scheduled yet. self.dependencies = [] # Flag to indicate if there was an error in a child process. self.error = False def LoadTargetBuildFileCallback(self, result): """Handle the results of running LoadTargetBuildFile in another process. 
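
    A false result indicates a failure in the worker process; otherwise the
    result is the (build_file_path, build_file_data, dependencies) tuple
    produced by CallLoadTargetBuildFile.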
""" self.condition.acquire() if not result: self.error = True self.condition.notify() self.condition.release() return (build_file_path0, build_file_data0, dependencies0) = result self.data[build_file_path0] = build_file_data0 self.data['target_build_files'].add(build_file_path0) for new_dependency in dependencies0: if new_dependency not in self.scheduled: self.scheduled.add(new_dependency) self.dependencies.append(new_dependency) self.pending -= 1 self.condition.notify() self.condition.release() def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, check, generator_input_info): parallel_state = ParallelState() parallel_state.condition = threading.Condition() # Make copies of the build_files argument that we can modify while working. parallel_state.dependencies = list(build_files) parallel_state.scheduled = set(build_files) parallel_state.pending = 0 parallel_state.data = data try: parallel_state.condition.acquire() while parallel_state.dependencies or parallel_state.pending: if parallel_state.error: break if not parallel_state.dependencies: parallel_state.condition.wait() continue dependency = parallel_state.dependencies.pop() parallel_state.pending += 1 global_flags = { 'path_sections': globals()['path_sections'], 'non_configuration_keys': globals()['non_configuration_keys'], 'multiple_toolsets': globals()['multiple_toolsets']} if not parallel_state.pool: parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count()) parallel_state.pool.apply_async( CallLoadTargetBuildFile, args = (global_flags, dependency, variables, includes, depth, check, generator_input_info), callback = parallel_state.LoadTargetBuildFileCallback) except KeyboardInterrupt, e: parallel_state.pool.terminate() raise e parallel_state.condition.release() parallel_state.pool.close() parallel_state.pool.join() parallel_state.pool = None if parallel_state.error: sys.exit(1) # Look for the bracket that matches the first bracket seen in a # string, and return the start and end as a tuple. For example, if # the input is something like "<(foo <(bar)) blah", then it would # return (1, 13), indicating the entire string except for the leading # "<" and trailing " blah". LBRACKETS= set('{[(') BRACKETS = {'}': '{', ']': '[', ')': '('} def FindEnclosingBracketGroup(input_str): stack = [] start = -1 for index, char in enumerate(input_str): if char in LBRACKETS: stack.append(char) if start == -1: start = index elif char in BRACKETS: if not stack: return (-1, -1) if stack.pop() != BRACKETS[char]: return (-1, -1) if not stack: return (start, index + 1) return (-1, -1) def IsStrCanonicalInt(string): """Returns True if |string| is in its canonical integer form. The canonical form is such that str(int(string)) == string. """ if type(string) is str: # This function is called a lot so for maximum performance, avoid # involving regexps which would otherwise make the code much # shorter. Regexps would need twice the time of this function. if string: if string == "0": return True if string[0] == "-": string = string[1:] if not string: return False if '1' <= string[0] <= '9': return string.isdigit() return False # This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)", # "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())". # In the last case, the inner "<()" is captured in match['content']. early_variable_re = re.compile( r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)' r'(?P<command_string>[-a-zA-Z0-9_.]+)?' 
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}


def FixupPlatformCommand(cmd):
  if sys.platform == 'win32':
    if type(cmd) is list:
      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
    else:
      cmd = re.sub('^cat ', 'type ', cmd)
  return cmd


PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2


def ExpandVariables(input, phase, variables, build_file):
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
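    # For example, given an input like "<!(echo hi)", the contents computed
    # below are "echo hi" and [replace_start, replace_end) brackets the whole
    # "<!(echo hi)" token.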
contents_start = replace_start + c_start + 1 contents_end = replace_end - 1 contents = input_str[contents_start:contents_end] # Do filter substitution now for <|(). # Admittedly, this is different than the evaluation order in other # contexts. However, since filtration has no chance to run on <|(), # this seems like the only obvious way to give them access to filters. if file_list: processed_variables = gyp.simple_copy.deepcopy(variables) ProcessListFiltersInDict(contents, processed_variables) # Recurse to expand variables in the contents contents = ExpandVariables(contents, phase, processed_variables, build_file) else: # Recurse to expand variables in the contents contents = ExpandVariables(contents, phase, variables, build_file) # Strip off leading/trailing whitespace so that variable matches are # simpler below (and because they are rarely needed). contents = contents.strip() # expand_to_list is true if an @ variant is used. In that case, # the expansion should result in a list. Note that the caller # is to be expecting a list in return, and not all callers do # because not all are working in list context. Also, for list # expansions, there can be no other text besides the variable # expansion in the input string. expand_to_list = '@' in match['type'] and input_str == replacement if run_command or file_list: # Find the build file's directory, so commands can be run or file lists # generated relative to it. build_file_dir = os.path.dirname(build_file) if build_file_dir == '' and not file_list: # If build_file is just a leaf filename indicating a file in the # current directory, build_file_dir might be an empty string. Set # it to None to signal to subprocess.Popen that it should run the # command in the current directory. build_file_dir = None # Support <|(listfile.txt ...) which generates a file # containing items from a gyp list, generated at gyp time. # This works around actions/rules which have more inputs than will # fit on the command line. if file_list: if type(contents) is list: contents_list = contents else: contents_list = contents.split(' ') replacement = contents_list[0] if os.path.isabs(replacement): raise GypError('| cannot handle absolute paths, got "%s"' % replacement) if not generator_filelist_paths: path = os.path.join(build_file_dir, replacement) else: if os.path.isabs(build_file_dir): toplevel = generator_filelist_paths['toplevel'] rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel) else: rel_build_file_dir = build_file_dir qualified_out_dir = generator_filelist_paths['qualified_out_dir'] path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement) gyp.common.EnsureDirExists(path) replacement = gyp.common.RelativePath(path, build_file_dir) f = gyp.common.WriteOnDiff(path) for i in contents_list[1:]: f.write('%s\n' % i) f.close() elif run_command: use_shell = True if match['is_array']: contents = eval(contents) use_shell = False # Check for a cached value to avoid executing commands, or generating # file lists more than once. The cache key contains the command to be # run as well as the directory to run it from, to account for commands # that depend on their current directory. # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory, # someone could author a set of GYP files where each time the command # is invoked it produces different output by design. When the need # arises, the syntax should be extended to support no caching off a # command's output so it is run every time. 
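        # E.g. two occurrences of "<!(hostname)" in build files that share a
        # directory resolve to a single cached invocation per gyp run.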
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:
            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main "
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)
          assert replacement is not None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          p = subprocess.Popen(contents, shell=use_shell,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               cwd=build_file_dir)

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d." %
                           (contents, p.returncode))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
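      # A list replacement such as ['-lfoo', '-lbar'] is flattened to the
      # single string "-lfoo -lbar" (POSIX shell encoding) before being
      # spliced back into the surrounding text below.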
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.",
                    output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output


# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}


def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used."""
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError. That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result


def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
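  # For example, a condition written as 'OS=="linux" and use_foo==1' is
  # expanded below and then evaluated as a restricted Python expression
  # against |variables|.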
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
        'Variable expansion in this context permits str and int ' +
        'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)


def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  # Process a 'conditions' or 'target_conditions' section in the_dict,
  # depending on phase.
  # early -> conditions
  # late -> target_conditions
  # latelate -> no conditions
  #
  # Each item in a conditions list consists of cond_expr, a string expression
  # evaluated as the condition, and true_dict, a dict that will be merged
  # into the_dict if cond_expr evaluates to true. Optionally, a third item,
  # false_dict, may be present. false_dict is merged into the_dict if
  # cond_expr evaluates to false.
  #
  # Any dict merged into the_dict will be recursively processed for nested
  # conditionals and other expansions, also according to phase, immediately
  # prior to being merged.

  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if not conditions_key in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)


def LoadAutomaticVariablesFromDict(variables, the_dict):
  # Any keys with plain string values in the_dict become automatic variables.
  # The variable name is the key name with a "_" character prepended.
  for key, value in the_dict.iteritems():
    if type(value) in (str, int, list):
      variables['_' + key] = value


def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any key in the_dict's "variables" dict, if it has one, becomes a
  # variable. The variable name is the key name in the "variables" dict.
  # Variables that end with the % character are set only if they are unset in
  # the variables dict. the_dict_key is the name of the key that accesses
  # the_dict in the_dict's parent dict. If the_dict's parent is not a dict
  # (it could be a list or it could be parentless because it is a root dict),
  # the_dict_key will be None.
  for key, value in the_dict.get('variables', {}).iteritems():
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
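        # (A trailing % marks a default: 'foo%': 1 only takes effect when
        # 'foo' was not already provided elsewhere.)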
        continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value


def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations. The variables_in dictionary will not be modified
  by this function.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another. They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' +
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics. Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict. This is done after variable expansion
  # so that conditions may take advantage of expanded variables. For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing. However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict. The workaround is to put a "conditions" section within
  # a "variables" section.
  # For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines"
  # list within "my_subdict". By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals. This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict. Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it. No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ +
                      ' for ' + key)


def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine. Continue right now
        # without falling into the index increment below.
        continue
      else:
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ +
            ' at ' + str(index))
    elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1


def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target
  dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory. Values in |data| are build file contents.
For each |data| value with a "targets" key, the value of the "targets" key is taken as a list containing target dicts. Each target's fully-qualified name is constructed from the pathname of the build file (|data| key) and its "target_name" property. These fully-qualified names are used as the keys in the returned dict. These keys provide access to the target dicts, the dicts in the "targets" lists. """ targets = {} for build_file in data['target_build_files']: for target in data[build_file].get('targets', []): target_name = gyp.common.QualifiedTarget(build_file, target['target_name'], target['toolset']) if target_name in targets: raise GypError('Duplicate target definitions for ' + target_name) targets[target_name] = target return targets def QualifyDependencies(targets): """Make dependency links fully-qualified relative to the current directory. |targets| is a dict mapping fully-qualified target names to their target dicts. For each target in this dict, keys known to contain dependency links are examined, and any dependencies referenced will be rewritten so that they are fully-qualified and relative to the current directory. All rewritten dependencies are suitable for use as keys to |targets| or a similar dict. """ all_dependency_sections = [dep + op for dep in dependency_sections for op in ('', '!', '/')] for target, target_dict in targets.iteritems(): target_build_file = gyp.common.BuildFile(target) toolset = target_dict['toolset'] for dependency_key in all_dependency_sections: dependencies = target_dict.get(dependency_key, []) for index in xrange(0, len(dependencies)): dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget( target_build_file, dependencies[index], toolset) if not multiple_toolsets: # Ignore toolset specification in the dependency if it is specified. dep_toolset = toolset dependency = gyp.common.QualifiedTarget(dep_file, dep_target, dep_toolset) dependencies[index] = dependency # Make sure anything appearing in a list other than "dependencies" also # appears in the "dependencies" list. if dependency_key != 'dependencies' and \ dependency not in target_dict['dependencies']: raise GypError('Found ' + dependency + ' in ' + dependency_key + ' of ' + target + ', but not in dependencies') def ExpandWildcardDependencies(targets, data): """Expands dependencies specified as build_file:*. For each target in |targets|, examines sections containing links to other targets. If any such section contains a link of the form build_file:*, it is taken as a wildcard link, and is expanded to list each target in build_file. The |data| dict provides access to build file dicts. Any target that does not wish to be included by wildcard can provide an optional "suppress_wildcard" key in its target dict. When present and true, a wildcard dependency link will not include such targets. All dependency names, including the keys to |targets| and the values in each dependency list, must be qualified when this function is called. """ for target, target_dict in targets.iteritems(): toolset = target_dict['toolset'] target_build_file = gyp.common.BuildFile(target) for dependency_key in dependency_sections: dependencies = target_dict.get(dependency_key, []) # Loop this way instead of "for dependency in" or "for index in xrange" # because the dependencies list will be modified within the loop body. 
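      # For example, a dependency written as "../lib/lib.gyp:*" is replaced
      # below with one entry per target defined in ../lib/lib.gyp, minus any
      # target that sets suppress_wildcard.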
index = 0 while index < len(dependencies): (dependency_build_file, dependency_target, dependency_toolset) = \ gyp.common.ParseQualifiedTarget(dependencies[index]) if dependency_target != '*' and dependency_toolset != '*': # Not a wildcard. Keep it moving. index = index + 1 continue if dependency_build_file == target_build_file: # It's an error for a target to depend on all other targets in # the same file, because a target cannot depend on itself. raise GypError('Found wildcard in ' + dependency_key + ' of ' + target + ' referring to same build file') # Take the wildcard out and adjust the index so that the next # dependency in the list will be processed the next time through the # loop. del dependencies[index] index = index - 1 # Loop through the targets in the other build file, adding them to # this target's list of dependencies in place of the removed # wildcard. dependency_target_dicts = data[dependency_build_file]['targets'] for dependency_target_dict in dependency_target_dicts: if int(dependency_target_dict.get('suppress_wildcard', False)): continue dependency_target_name = dependency_target_dict['target_name'] if (dependency_target != '*' and dependency_target != dependency_target_name): continue dependency_target_toolset = dependency_target_dict['toolset'] if (dependency_toolset != '*' and dependency_toolset != dependency_target_toolset): continue dependency = gyp.common.QualifiedTarget(dependency_build_file, dependency_target_name, dependency_target_toolset) index = index + 1 dependencies.insert(index, dependency) index = index + 1 def Unify(l): """Removes duplicate elements from l, keeping the first element.""" seen = {} return [seen.setdefault(e, e) for e in l if e not in seen] def RemoveDuplicateDependencies(targets): """Makes sure every dependency appears only once in all targets's dependency lists.""" for target_name, target_dict in targets.iteritems(): for dependency_key in dependency_sections: dependencies = target_dict.get(dependency_key, []) if dependencies: target_dict[dependency_key] = Unify(dependencies) def Filter(l, item): """Removes item from l.""" res = {} return [res.setdefault(e, e) for e in l if e != item] def RemoveSelfDependencies(targets): """Remove self dependencies from targets that have the prune_self_dependency variable set.""" for target_name, target_dict in targets.iteritems(): for dependency_key in dependency_sections: dependencies = target_dict.get(dependency_key, []) if dependencies: for t in dependencies: if t == target_name: if targets[t].get('variables', {}).get('prune_self_dependency', 0): target_dict[dependency_key] = Filter(dependencies, target_name) def RemoveLinkDependenciesFromNoneTargets(targets): """Remove dependencies having the 'link_dependency' attribute from the 'none' targets.""" for target_name, target_dict in targets.iteritems(): for dependency_key in dependency_sections: dependencies = target_dict.get(dependency_key, []) if dependencies: for t in dependencies: if target_dict.get('type', None) == 'none': if targets[t].get('variables', {}).get('link_dependency', 0): target_dict[dependency_key] = \ Filter(target_dict[dependency_key], t) class DependencyGraphNode(object): """ Attributes: ref: A reference to an object that this DependencyGraphNode represents. dependencies: List of DependencyGraphNodes on which this one depends. dependents: List of DependencyGraphNodes that depend on this one. 
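
  The graph is rooted at a synthetic node whose ref is None; nodes with no
  dependencies are attached directly to that root when the graph is built.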
""" class CircularException(GypError): pass def __init__(self, ref): self.ref = ref self.dependencies = [] self.dependents = [] def __repr__(self): return '<DependencyGraphNode: %r>' % self.ref def FlattenToList(self): # flat_list is the sorted list of dependencies - actually, the list items # are the "ref" attributes of DependencyGraphNodes. Every target will # appear in flat_list after all of its dependencies, and before all of its # dependents. flat_list = OrderedSet() # in_degree_zeros is the list of DependencyGraphNodes that have no # dependencies not in flat_list. Initially, it is a copy of the children # of this node, because when the graph was built, nodes with no # dependencies were made implicit dependents of the root node. in_degree_zeros = set(self.dependents[:]) while in_degree_zeros: # Nodes in in_degree_zeros have no dependencies not in flat_list, so they # can be appended to flat_list. Take these nodes out of in_degree_zeros # as work progresses, so that the next node to process from the list can # always be accessed at a consistent position. node = in_degree_zeros.pop() flat_list.add(node.ref) # Look at dependents of the node just added to flat_list. Some of them # may now belong in in_degree_zeros. for node_dependent in node.dependents: is_in_degree_zero = True # TODO: We want to check through the # node_dependent.dependencies list but if it's long and we # always start at the beginning, then we get O(n^2) behaviour. for node_dependent_dependency in node_dependent.dependencies: if not node_dependent_dependency.ref in flat_list: # The dependent one or more dependencies not in flat_list. There # will be more chances to add it to flat_list when examining # it again as a dependent of those other dependencies, provided # that there are no cycles. is_in_degree_zero = False break if is_in_degree_zero: # All of the dependent's dependencies are already in flat_list. Add # it to in_degree_zeros where it will be processed in a future # iteration of the outer loop. in_degree_zeros.add(node_dependent) return list(flat_list) def FindCycles(self): """ Returns a list of cycles in the graph, where each cycle is its own list. """ results = [] visited = set() def Visit(node, path): for child in node.dependents: if child in path: results.append([child] + path[:path.index(child) + 1]) elif not child in visited: visited.add(child) Visit(child, [child] + path) visited.add(self) Visit(self, [self]) return results def DirectDependencies(self, dependencies=None): """Returns a list of just direct dependencies.""" if dependencies == None: dependencies = [] for dependency in self.dependencies: # Check for None, corresponding to the root node. if dependency.ref != None and dependency.ref not in dependencies: dependencies.append(dependency.ref) return dependencies def _AddImportedDependencies(self, targets, dependencies=None): """Given a list of direct dependencies, adds indirect dependencies that other dependencies have declared to export their settings. This method does not operate on self. Rather, it operates on the list of dependencies in the |dependencies| argument. For each dependency in that list, if any declares that it exports the settings of one of its own dependencies, those dependencies whose settings are "passed through" are added to the list. As new items are added to the list, they too will be processed, so it is possible to import settings through multiple levels of dependencies. 
This method is not terribly useful on its own, it depends on being "primed" with a list of direct dependencies such as one provided by DirectDependencies. DirectAndImportedDependencies is intended to be the public entry point. """ if dependencies == None: dependencies = [] index = 0 while index < len(dependencies): dependency = dependencies[index] dependency_dict = targets[dependency] # Add any dependencies whose settings should be imported to the list # if not already present. Newly-added items will be checked for # their own imports when the list iteration reaches them. # Rather than simply appending new items, insert them after the # dependency that exported them. This is done to more closely match # the depth-first method used by DeepDependencies. add_index = 1 for imported_dependency in \ dependency_dict.get('export_dependent_settings', []): if imported_dependency not in dependencies: dependencies.insert(index + add_index, imported_dependency) add_index = add_index + 1 index = index + 1 return dependencies def DirectAndImportedDependencies(self, targets, dependencies=None): """Returns a list of a target's direct dependencies and all indirect dependencies that a dependency has advertised settings should be exported through the dependency for. """ dependencies = self.DirectDependencies(dependencies) return self._AddImportedDependencies(targets, dependencies) def DeepDependencies(self, dependencies=None): """Returns an OrderedSet of all of a target's dependencies, recursively.""" if dependencies is None: # Using a list to get ordered output and a set to do fast "is it # already added" checks. dependencies = OrderedSet() for dependency in self.dependencies: # Check for None, corresponding to the root node. if dependency.ref is None: continue if dependency.ref not in dependencies: dependencies.add(dependency.ref) dependency.DeepDependencies(dependencies) return dependencies def _LinkDependenciesInternal(self, targets, include_shared_libraries, dependencies=None, initial=True): """Returns an OrderedSet of dependency targets that are linked into this target. This function has a split personality, depending on the setting of |initial|. Outside callers should always leave |initial| at its default setting. When adding a target to the list of dependencies, this function will recurse into itself with |initial| set to False, to collect dependencies that are linked into the linkable target for which the list is being built. If |include_shared_libraries| is False, the resulting dependencies will not include shared_library targets that are linked into this target. """ if dependencies is None: # Using a list to get ordered output and a set to do fast "is it # already added" checks. dependencies = OrderedSet() # Check for None, corresponding to the root node. if self.ref is None: return dependencies # It's kind of sucky that |targets| has to be passed into this function, # but that's presently the easiest way to access the target dicts so that # this function can find target types. 
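    # |targets| maps fully-qualified target names to target dicts, as built
    # by BuildTargetsDict; it is needed here to look up each target's 'type'.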
if 'target_name' not in targets[self.ref]: raise GypError("Missing 'target_name' field in target.") if 'type' not in targets[self.ref]: raise GypError("Missing 'type' field in target %s" % targets[self.ref]['target_name']) target_type = targets[self.ref]['type'] is_linkable = target_type in linkable_types if initial and not is_linkable: # If this is the first target being examined and it's not linkable, # return an empty list of link dependencies, because the link # dependencies are intended to apply to the target itself (initial is # True) and this target won't be linked. return dependencies # Don't traverse 'none' targets if explicitly excluded. if (target_type == 'none' and not targets[self.ref].get('dependencies_traverse', True)): dependencies.add(self.ref) return dependencies # Executables and loadable modules are already fully and finally linked. # Nothing else can be a link dependency of them, there can only be # dependencies in the sense that a dependent target might run an # executable or load the loadable_module. if not initial and target_type in ('executable', 'loadable_module'): return dependencies # Shared libraries are already fully linked. They should only be included # in |dependencies| when adjusting static library dependencies (in order to # link against the shared_library's import lib), but should not be included # in |dependencies| when propagating link_settings. # The |include_shared_libraries| flag controls which of these two cases we # are handling. if (not initial and target_type == 'shared_library' and not include_shared_libraries): return dependencies # The target is linkable, add it to the list of link dependencies. if self.ref not in dependencies: dependencies.add(self.ref) if initial or not is_linkable: # If this is a subsequent target and it's linkable, don't look any # further for linkable dependencies, as they'll already be linked into # this target linkable. Always look at dependencies of the initial # target, and always look at dependencies of non-linkables. for dependency in self.dependencies: dependency._LinkDependenciesInternal(targets, include_shared_libraries, dependencies, False) return dependencies def DependenciesForLinkSettings(self, targets): """ Returns a list of dependency targets whose link_settings should be merged into this target. """ # TODO(sbaig) Currently, chrome depends on the bug that shared libraries' # link_settings are propagated. So for now, we will allow it, unless the # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to # False. Once chrome is fixed, we can remove this flag. include_shared_libraries = \ targets[self.ref].get('allow_sharedlib_linksettings_propagation', True) return self._LinkDependenciesInternal(targets, include_shared_libraries) def DependenciesToLinkAgainst(self, targets): """ Returns a list of dependency targets that are linked into this target. """ return self._LinkDependenciesInternal(targets, True) def BuildDependencyList(targets): # Create a DependencyGraphNode for each target. Put it into a dict for easy # access. dependency_nodes = {} for target, spec in targets.iteritems(): if target not in dependency_nodes: dependency_nodes[target] = DependencyGraphNode(target) # Set up the dependency links. Targets that have no dependencies are treated # as dependent on root_node. 
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]


def VerifyNoGYPFileCircularDependencies(targets):
  # Create a DependencyGraphNode for each gyp file containing a target. Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError, e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' +
        '\n'.join(cycles))


def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  # key should be one of all_dependent_settings, direct_dependent_settings,
  # or link_settings.

  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if not key in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)


def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static
  # libraries, unless the dependency has the "hard_dependency" attribute
  # set. For each linkable target, add a "dependencies" entry referring to
  # all of the target's computed list of link dependencies (including static
  # libraries) if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done
      # when a dependent relies on some side effect other than just the
      # build product, like a rule or action output. Further, if a target
      # has a non-hard dependency, but that dependency exports a hard
      # dependency, the non-hard dependency can safely be removed, but the
      # exported hard dependency must be added to the target to keep the
      # same dependency ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's
      # not needed, so unhook it.
if len(dependencies) > 0: target_dict['dependencies'] = dependencies else: del target_dict['dependencies'] elif target_type in linkable_types: # Get a list of dependency targets that should be linked into this # target. Add them to the dependencies list if they're not already # present. link_dependencies = \ dependency_nodes[target].DependenciesToLinkAgainst(targets) for dependency in link_dependencies: if dependency == target: continue if not 'dependencies' in target_dict: target_dict['dependencies'] = [] if not dependency in target_dict['dependencies']: target_dict['dependencies'].append(dependency) # Sort the dependencies list in the order from dependents to dependencies. # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D. # Note: flat_list is already sorted in the order from dependencies to # dependents. if sort_dependencies and 'dependencies' in target_dict: target_dict['dependencies'] = [dep for dep in reversed(flat_list) if dep in target_dict['dependencies']] # Initialize this here to speed up MakePathRelative. exception_re = re.compile(r'''["']?[-/$<>^]''') def MakePathRelative(to_file, fro_file, item): # If item is a relative path, it's relative to the build file dict that it's # coming from. Fix it up to make it relative to the build file dict that # it's going into. # Exception: any |item| that begins with these special characters is # returned without modification. # / Used when a path is already absolute (shortcut optimization; # such paths would be returned as absolute anyway) # $ Used for build environment variables # - Used for some build environment flags (such as -lapr-1 in a # "libraries" section) # < Used for our own variable and command expansions (see ExpandVariables) # > Used for our own variable and command expansions (see ExpandVariables) # ^ Used for our own variable and command expansions (see ExpandVariables) # # "/' Used when a value is quoted. If these are present, then we # check the second character instead. # if to_file == fro_file or exception_re.match(item): return item else: # TODO(dglazkov) The backslash/forward-slash replacement at the end is a # temporary measure. This should really be addressed by keeping all paths # in POSIX until actual project generation. ret = os.path.normpath(os.path.join( gyp.common.RelativePath(os.path.dirname(fro_file), os.path.dirname(to_file)), item)).replace('\\', '/') if item[-1] == '/': ret += '/' return ret def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True): # Python documentation recommends objects which do not support hash # set this value to None. Python library objects follow this rule. is_hashable = lambda val: val.__hash__ # If x is hashable, returns whether x is in s. Else returns whether x is in l. def is_in_set_or_list(x, s, l): if is_hashable(x): return x in s return x in l prepend_index = 0 # Make membership testing of hashables in |to| (in particular, strings) # faster. hashable_to_set = set(x for x in to if is_hashable(x)) for item in fro: singleton = False if type(item) in (str, int): # The cheap and easy case. if is_paths: to_item = MakePathRelative(to_file, fro_file, item) else: to_item = item if not (type(item) is str and item.startswith('-')): # Any string that doesn't begin with a "-" is a singleton - it can # only appear once in a list, to be enforced by the list merge append # or prepend. singleton = True elif type(item) is dict: # Make a copy of the dictionary, continuing to look for paths to fix. 
# The other intelligent aspects of merge processing won't apply because # item is being merged into an empty dict. to_item = {} MergeDicts(to_item, item, to_file, fro_file) elif type(item) is list: # Recurse, making a copy of the list. If the list contains any # descendant dicts, path fixing will occur. Note that here, custom # values for is_paths and append are dropped; those are only to be # applied to |to| and |fro|, not sublists of |fro|. append shouldn't # matter anyway because the new |to_item| list is empty. to_item = [] MergeLists(to_item, item, to_file, fro_file) else: raise TypeError( 'Attempt to merge list item of unsupported type ' + \ item.__class__.__name__) if append: # If appending a singleton that's already in the list, don't append. # This ensures that the earliest occurrence of the item will stay put. if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to): to.append(to_item) if is_hashable(to_item): hashable_to_set.add(to_item) else: # If prepending a singleton that's already in the list, remove the # existing instance and proceed with the prepend. This ensures that the # item appears at the earliest possible position in the list. while singleton and to_item in to: to.remove(to_item) # Don't just insert everything at index 0. That would prepend the new # items to the list in reverse order, which would be an unwelcome # surprise. to.insert(prepend_index, to_item) if is_hashable(to_item): hashable_to_set.add(to_item) prepend_index = prepend_index + 1 def MergeDicts(to, fro, to_file, fro_file): # I wanted to name the parameter "from" but it's a Python keyword... for k, v in fro.iteritems(): # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give # copy semantics. Something else may want to merge from the |fro| dict # later, and having the same dict ref pointed to twice in the tree isn't # what anyone wants considering that the dicts may subsequently be # modified. if k in to: bad_merge = False if type(v) in (str, int): if type(to[k]) not in (str, int): bad_merge = True elif type(v) is not type(to[k]): bad_merge = True if bad_merge: raise TypeError( 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ ' into incompatible type ' + to[k].__class__.__name__ + \ ' for key ' + k) if type(v) in (str, int): # Overwrite the existing value, if any. Cheap and easy. is_path = IsPathSection(k) if is_path: to[k] = MakePathRelative(to_file, fro_file, v) else: to[k] = v elif type(v) is dict: # Recurse, guaranteeing copies will be made of objects that require it. if not k in to: to[k] = {} MergeDicts(to[k], v, to_file, fro_file) elif type(v) is list: # Lists in dicts can be merged with different policies, depending on # how the key in the "from" dict (k, the from-key) is written. # # If the from-key has ...the to-list will have this action # this character appended:... applied when receiving the from-list: # = replace # + prepend # ? set, only if to-list does not yet exist # (none) append # # This logic is list-specific, but since it relies on the associated # dict key, it's checked in this dict-oriented function. 
ext = k[-1] append = True if ext == '=': list_base = k[:-1] lists_incompatible = [list_base, list_base + '?'] to[list_base] = [] elif ext == '+': list_base = k[:-1] lists_incompatible = [list_base + '=', list_base + '?'] append = False elif ext == '?': list_base = k[:-1] lists_incompatible = [list_base, list_base + '=', list_base + '+'] else: list_base = k lists_incompatible = [list_base + '=', list_base + '?'] # Some combinations of merge policies appearing together are meaningless. # It's stupid to replace and append simultaneously, for example. Append # and prepend are the only policies that can coexist. for list_incompatible in lists_incompatible: if list_incompatible in fro: raise GypError('Incompatible list policies ' + k + ' and ' + list_incompatible) if list_base in to: if ext == '?': # If the key ends in "?", the list will only be merged if it doesn't # already exist. continue elif type(to[list_base]) is not list: # This may not have been checked above if merging in a list with an # extension character. raise TypeError( 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ ' into incompatible type ' + to[list_base].__class__.__name__ + \ ' for key ' + list_base + '(' + k + ')') else: to[list_base] = [] # Call MergeLists, which will make copies of objects that require it. # MergeLists can recurse back into MergeDicts, although this will be # to make copies of dicts (with paths fixed), there will be no # subsequent dict "merging" once entering a list because lists are # always replaced, appended to, or prepended to. is_paths = IsPathSection(list_base) MergeLists(to[list_base], v, to_file, fro_file, is_paths, append) else: raise TypeError( 'Attempt to merge dict value of unsupported type ' + \ v.__class__.__name__ + ' for key ' + k) def MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, configuration, visited): # Skip if previously visited. if configuration in visited: return # Look at this configuration. configuration_dict = target_dict['configurations'][configuration] # Merge in parents. for parent in configuration_dict.get('inherit_from', []): MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, parent, visited + [configuration]) # Merge it into the new config. MergeDicts(new_configuration_dict, configuration_dict, build_file, build_file) # Drop abstract. if 'abstract' in new_configuration_dict: del new_configuration_dict['abstract'] def SetUpConfigurations(target, target_dict): # key_suffixes is a list of key suffixes that might appear on key names. # These suffixes are handled in conditional evaluations (for =, +, and ?) # and rules/exclude processing (for ! and /). Keys with these suffixes # should be treated the same as keys without. key_suffixes = ['=', '+', '?', '!', '/'] build_file = gyp.common.BuildFile(target) # Provide a single configuration by default if none exists. # TODO(mark): Signal an error if default_configurations exists but # configurations does not. if not 'configurations' in target_dict: target_dict['configurations'] = {'Default': {}} if not 'default_configuration' in target_dict: concrete = [i for (i, config) in target_dict['configurations'].iteritems() if not config.get('abstract')] target_dict['default_configuration'] = sorted(concrete)[0] merged_configurations = {} configs = target_dict['configurations'] for (configuration, old_configuration_dict) in configs.iteritems(): # Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'): continue # Configurations inherit (most) settings from the enclosing target scope. # Get the inheritance relationship right by making a copy of the target # dict. new_configuration_dict = {} for (key, target_val) in target_dict.iteritems(): key_ext = key[-1:] if key_ext in key_suffixes: key_base = key[:-1] else: key_base = key if not key_base in non_configuration_keys: new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val) # Merge in configuration (with all its parents first). MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, configuration, []) merged_configurations[configuration] = new_configuration_dict # Put the new configurations back into the target dict as a configuration. for configuration in merged_configurations.keys(): target_dict['configurations'][configuration] = ( merged_configurations[configuration]) # Now drop all the abstract ones. for configuration in target_dict['configurations'].keys(): old_configuration_dict = target_dict['configurations'][configuration] if old_configuration_dict.get('abstract'): del target_dict['configurations'][configuration] # Now that all of the target's configurations have been built, go through # the target dict's keys and remove everything that's been moved into a # "configurations" section. delete_keys = [] for key in target_dict: key_ext = key[-1:] if key_ext in key_suffixes: key_base = key[:-1] else: key_base = key if not key_base in non_configuration_keys: delete_keys.append(key) for key in delete_keys: del target_dict[key] # Check the configurations to see if they contain invalid keys. for configuration in target_dict['configurations'].keys(): configuration_dict = target_dict['configurations'][configuration] for key in configuration_dict.keys(): if key in invalid_configuration_keys: raise GypError('%s not allowed in the %s configuration, found in ' 'target %s' % (key, configuration, target)) def ProcessListFiltersInDict(name, the_dict): """Process regular expression and exclusion-based filters on lists. An exclusion list is in a dict key named with a trailing "!", like "sources!". Every item in such a list is removed from the associated main list, which in this example, would be "sources". Removed items are placed into a "sources_excluded" list in the dict. Regular expression (regex) filters are contained in dict keys named with a trailing "/", such as "sources/" to operate on the "sources" list. Regex filters in a dict take the form: 'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'], ['include', '_mac\\.cc$'] ], The first filter says to exclude all files ending in _linux.cc, _mac.cc, and _win.cc. The second filter then includes all files ending in _mac.cc that are now or were once in the "sources" list. Items matching an "exclude" filter are subject to the same processing as would occur if they were listed by name in an exclusion list (ending in "!"). Items matching an "include" filter are brought back into the main list if previously excluded by an exclusion list or exclusion regex filter. Subsequent matching "exclude" patterns can still cause items to be excluded after matching an "include". """ # Look through the dictionary for any lists whose keys end in "!" or "/". # These are lists that will be treated as exclude lists and regular # expression-based exclude/include lists. Collect the lists that are # needed first, looking for the lists that they operate on, and assemble # then into |lists|. 
# This is done in a separate loop up front, because # the _included and _excluded keys need to be added to the_dict, and that # can't be done while iterating through it. lists = [] del_lists = [] for key, value in the_dict.iteritems(): operation = key[-1] if operation != '!' and operation != '/': continue if type(value) is not list: raise ValueError(name + ' key ' + key + ' must be list, not ' + \ value.__class__.__name__) list_key = key[:-1] if list_key not in the_dict: # This happens when there's a list like "sources!" but no corresponding # "sources" list. Since there's nothing for it to operate on, queue up # the "sources!" list for deletion now. del_lists.append(key) continue if type(the_dict[list_key]) is not list: value = the_dict[list_key] raise ValueError(name + ' key ' + list_key + \ ' must be list, not ' + \ value.__class__.__name__ + ' when applying ' + \ {'!': 'exclusion', '/': 'regex'}[operation]) if not list_key in lists: lists.append(list_key) # Delete the lists that are known to be unneeded at this point. for del_list in del_lists: del the_dict[del_list] for list_key in lists: the_list = the_dict[list_key] # Initialize the list_actions list, which is parallel to the_list. Each # item in list_actions identifies whether the corresponding item in # the_list should be excluded, unconditionally preserved (included), or # whether no exclusion or inclusion has been applied. Items for which # no exclusion or inclusion has been applied (yet) have value -1, items # excluded have value 0, and items included have value 1. Includes and # excludes override previous actions. All items in list_actions are # initialized to -1 because no excludes or includes have been processed # yet. list_actions = list((-1,) * len(the_list)) exclude_key = list_key + '!' if exclude_key in the_dict: for exclude_item in the_dict[exclude_key]: for index in xrange(0, len(the_list)): if exclude_item == the_list[index]: # This item matches the exclude_item, so set its action to 0 # (exclude). list_actions[index] = 0 # The "whatever!" list is no longer needed, dump it. del the_dict[exclude_key] regex_key = list_key + '/' if regex_key in the_dict: for regex_item in the_dict[regex_key]: [action, pattern] = regex_item pattern_re = re.compile(pattern) if action == 'exclude': # This item matches an exclude regex, so set its value to 0 (exclude). action_value = 0 elif action == 'include': # This item matches an include regex, so set its value to 1 (include). action_value = 1 else: # This is an action that doesn't make any sense. raise ValueError('Unrecognized action ' + action + ' in ' + name + \ ' key ' + regex_key) for index in xrange(0, len(the_list)): list_item = the_list[index] if list_actions[index] == action_value: # Even if the regex matches, nothing will change so continue (regex # searches are expensive). continue if pattern_re.search(list_item): # Regular expression match. list_actions[index] = action_value # The "whatever/" list is no longer needed, dump it. del the_dict[regex_key] # Add excluded items to the excluded list. # # Note that exclude_key ("sources!") is different from excluded_key # ("sources_excluded"). The exclude_key list is input and it was already # processed and deleted; the excluded_key list is output and it's about # to be created.
excluded_key = list_key + '_excluded' if excluded_key in the_dict: raise GypError(name + ' key ' + excluded_key + ' must not be present prior ' ' to applying exclusion/regex filters for ' + list_key) excluded_list = [] # Go backwards through the list_actions list so that as items are deleted, # the indices of items that haven't been seen yet don't shift. That means # that things need to be prepended to excluded_list to maintain them in the # same order that they existed in the_list. for index in xrange(len(list_actions) - 1, -1, -1): if list_actions[index] == 0: # Dump anything with action 0 (exclude). Keep anything with action 1 # (include) or -1 (no include or exclude seen for the item). excluded_list.insert(0, the_list[index]) del the_list[index] # If anything was excluded, put the excluded list into the_dict at # excluded_key. if len(excluded_list) > 0: the_dict[excluded_key] = excluded_list # Now recurse into subdicts and lists that may contain dicts. for key, value in the_dict.iteritems(): if type(value) is dict: ProcessListFiltersInDict(key, value) elif type(value) is list: ProcessListFiltersInList(key, value) def ProcessListFiltersInList(name, the_list): for item in the_list: if type(item) is dict: ProcessListFiltersInDict(name, item) elif type(item) is list: ProcessListFiltersInList(name, item) def ValidateTargetType(target, target_dict): """Ensures the 'type' field on the target is one of the known types. Arguments: target: string, name of target. target_dict: dict, target spec. Raises an exception on error. """ VALID_TARGET_TYPES = ('executable', 'loadable_module', 'static_library', 'shared_library', 'none') target_type = target_dict.get('type', None) if target_type not in VALID_TARGET_TYPES: raise GypError("Target %s has an invalid target type '%s'. " "Must be one of %s." % (target, target_type, '/'.join(VALID_TARGET_TYPES))) if (target_dict.get('standalone_static_library', 0) and not target_type == 'static_library'): raise GypError('Target %s has type %s but standalone_static_library flag is' ' only valid for static_library type.' % (target, target_type)) def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules): """Ensures that the rules sections in target_dict are valid and consistent, and determines which sources they apply to. Arguments: target: string, name of target. target_dict: dict, target spec containing "rules" and "sources" lists. extra_sources_for_rules: a list of keys to scan for rule matches in addition to 'sources'. """ # Dicts to map between values found in rules' 'rule_name' and 'extension' # keys and the rule dicts themselves. rule_names = {} rule_extensions = {} rules = target_dict.get('rules', []) for rule in rules: # Make sure that there's no conflict among rule names and extensions. rule_name = rule['rule_name'] if rule_name in rule_names: raise GypError('rule %s exists in duplicate, target %s' % (rule_name, target)) rule_names[rule_name] = rule rule_extension = rule['extension'] if rule_extension.startswith('.'): rule_extension = rule_extension[1:] if rule_extension in rule_extensions: raise GypError(('extension %s associated with multiple rules, ' + 'target %s rules %s and %s') % (rule_extension, target, rule_extensions[rule_extension]['rule_name'], rule_name)) rule_extensions[rule_extension] = rule # Make sure rule_sources isn't already there. It's going to be # created below if needed. 
if 'rule_sources' in rule: raise GypError( 'rule_sources must not exist in input, target %s rule %s' % (target, rule_name)) rule_sources = [] source_keys = ['sources'] source_keys.extend(extra_sources_for_rules) for source_key in source_keys: for source in target_dict.get(source_key, []): (source_root, source_extension) = os.path.splitext(source) if source_extension.startswith('.'): source_extension = source_extension[1:] if source_extension == rule_extension: rule_sources.append(source) if len(rule_sources) > 0: rule['rule_sources'] = rule_sources def ValidateRunAsInTarget(target, target_dict, build_file): target_name = target_dict.get('target_name') run_as = target_dict.get('run_as') if not run_as: return if type(run_as) is not dict: raise GypError("The 'run_as' in target %s from file %s should be a " "dictionary." % (target_name, build_file)) action = run_as.get('action') if not action: raise GypError("The 'run_as' in target %s from file %s must have an " "'action' section." % (target_name, build_file)) if type(action) is not list: raise GypError("The 'action' for 'run_as' in target %s from file %s " "must be a list." % (target_name, build_file)) working_directory = run_as.get('working_directory') if working_directory and type(working_directory) is not str: raise GypError("The 'working_directory' for 'run_as' in target %s " "in file %s should be a string." % (target_name, build_file)) environment = run_as.get('environment') if environment and type(environment) is not dict: raise GypError("The 'environment' for 'run_as' in target %s " "in file %s should be a dictionary." % (target_name, build_file)) def ValidateActionsInTarget(target, target_dict, build_file): '''Validates the inputs to the actions in a target.''' target_name = target_dict.get('target_name') actions = target_dict.get('actions', []) for action in actions: action_name = action.get('action_name') if not action_name: raise GypError("Anonymous action in target %s. " "An action must have an 'action_name' field." % target_name) inputs = action.get('inputs', None) if inputs is None: raise GypError('Action in target %s has no inputs.' % target_name) action_command = action.get('action') if action_command and not action_command[0]: raise GypError("Empty action as command in target %s." % target_name) def TurnIntIntoStrInDict(the_dict): """Given dict the_dict, recursively converts all integers into strings. """ # Use items instead of iteritems because there's no need to try to look at # reinserted keys and their associated values. for k, v in the_dict.items(): if type(v) is int: v = str(v) the_dict[k] = v elif type(v) is dict: TurnIntIntoStrInDict(v) elif type(v) is list: TurnIntIntoStrInList(v) if type(k) is int: del the_dict[k] the_dict[str(k)] = v def TurnIntIntoStrInList(the_list): """Given list the_list, recursively converts all integers into strings. 
""" for index in xrange(0, len(the_list)): item = the_list[index] if type(item) is int: the_list[index] = str(item) elif type(item) is dict: TurnIntIntoStrInDict(item) elif type(item) is list: TurnIntIntoStrInList(item) def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets, data): """Return only the targets that are deep dependencies of |root_targets|.""" qualified_root_targets = [] for target in root_targets: target = target.strip() qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list) if not qualified_targets: raise GypError("Could not find target %s" % target) qualified_root_targets.extend(qualified_targets) wanted_targets = {} for target in qualified_root_targets: wanted_targets[target] = targets[target] for dependency in dependency_nodes[target].DeepDependencies(): wanted_targets[dependency] = targets[dependency] wanted_flat_list = [t for t in flat_list if t in wanted_targets] # Prune unwanted targets from each build_file's data dict. for build_file in data['target_build_files']: if not 'targets' in data[build_file]: continue new_targets = [] for target in data[build_file]['targets']: qualified_name = gyp.common.QualifiedTarget(build_file, target['target_name'], target['toolset']) if qualified_name in wanted_targets: new_targets.append(target) data[build_file]['targets'] = new_targets return wanted_targets, wanted_flat_list def VerifyNoCollidingTargets(targets): """Verify that no two targets in the same directory share the same name. Arguments: targets: A list of targets in the form 'path/to/file.gyp:target_name'. """ # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'. used = {} for target in targets: # Separate out 'path/to/file.gyp, 'target_name' from # 'path/to/file.gyp:target_name'. path, name = target.rsplit(':', 1) # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'. subdir, gyp = os.path.split(path) # Use '.' for the current directory '', so that the error messages make # more sense. if not subdir: subdir = '.' # Prepare a key like 'path/to:target_name'. key = subdir + ':' + name if key in used: # Complain if this target is already used. raise GypError('Duplicate target name "%s" in directory "%s" used both ' 'in "%s" and "%s".' % (name, subdir, gyp, used[key])) used[key] = gyp def SetGeneratorGlobals(generator_input_info): # Set up path_sections and non_configuration_keys with the default data plus # the generator-specific data. global path_sections path_sections = set(base_path_sections) path_sections.update(generator_input_info['path_sections']) global non_configuration_keys non_configuration_keys = base_non_configuration_keys[:] non_configuration_keys.extend(generator_input_info['non_configuration_keys']) global multiple_toolsets multiple_toolsets = generator_input_info[ 'generator_supports_multiple_toolsets'] global generator_filelist_paths generator_filelist_paths = generator_input_info['generator_filelist_paths'] def Load(build_files, variables, includes, depth, generator_input_info, check, circular_check, parallel, root_targets): SetGeneratorGlobals(generator_input_info) # A generator can have other lists (in addition to sources) be processed # for rules. extra_sources_for_rules = generator_input_info['extra_sources_for_rules'] # Load build files. This loads every target-containing build file into # the |data| dictionary such that the keys to |data| are build file names, # and the values are the entire build file contents after "early" or "pre" # processing has been done and includes have been resolved. 
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps # track of the keys corresponding to "target" files. data = {'target_build_files': set()} # Normalize paths everywhere. This is important because paths will be # used as keys to the data dict and for references between input files. build_files = set(map(os.path.normpath, build_files)) if parallel: LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, check, generator_input_info) else: aux_data = {} for build_file in build_files: try: LoadTargetBuildFile(build_file, data, aux_data, variables, includes, depth, check, True) except Exception, e: gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file) raise # Build a dict to access each target's subdict by qualified name. targets = BuildTargetsDict(data) # Fully qualify all dependency links. QualifyDependencies(targets) # Remove self-dependencies from targets that have 'prune_self_dependencies' # set to 1. RemoveSelfDependencies(targets) # Expand dependencies specified as build_file:*. ExpandWildcardDependencies(targets, data) # Remove all dependencies marked as 'link_dependency' from the targets of # type 'none'. RemoveLinkDependenciesFromNoneTargets(targets) # Apply exclude (!) and regex (/) list filters only for dependency_sections. for target_name, target_dict in targets.iteritems(): tmp_dict = {} for key_base in dependency_sections: for op in ('', '!', '/'): key = key_base + op if key in target_dict: tmp_dict[key] = target_dict[key] del target_dict[key] ProcessListFiltersInDict(target_name, tmp_dict) # Write the results back to |target_dict|. for key in tmp_dict: target_dict[key] = tmp_dict[key] # Make sure every dependency appears at most once. RemoveDuplicateDependencies(targets) if circular_check: # Make sure that any targets in a.gyp don't contain dependencies in other # .gyp files that further depend on a.gyp. VerifyNoGYPFileCircularDependencies(targets) [dependency_nodes, flat_list] = BuildDependencyList(targets) if root_targets: # Remove, from |targets| and |flat_list|, the targets that are not deep # dependencies of the targets specified in |root_targets|. targets, flat_list = PruneUnwantedTargets( targets, flat_list, dependency_nodes, root_targets, data) # Check that no two targets in the same directory have the same name. VerifyNoCollidingTargets(flat_list) # Handle dependent settings of various types. for settings_type in ['all_dependent_settings', 'direct_dependent_settings', 'link_settings']: DoDependentSettings(settings_type, flat_list, targets, dependency_nodes) # Take out the dependent settings now that they've been published to all # of the targets that require them. for target in flat_list: if settings_type in targets[target]: del targets[target][settings_type] # Make sure static libraries don't declare dependencies on other static # libraries, but that linkables depend on all unlinked static libraries # that they need so that their link steps will be correct. gii = generator_input_info if gii['generator_wants_static_library_dependencies_adjusted']: AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes, gii['generator_wants_sorted_dependencies']) # Apply "post"/"late"/"target" variable expansions and condition evaluations. 
for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ProcessVariablesAndConditionsInDict( target_dict, PHASE_LATE, variables, build_file) # Move everything that can go into a "configurations" section into one. for target in flat_list: target_dict = targets[target] SetUpConfigurations(target, target_dict) # Apply exclude (!) and regex (/) list filters. for target in flat_list: target_dict = targets[target] ProcessListFiltersInDict(target, target_dict) # Apply "latelate" variable expansions and condition evaluations. for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ProcessVariablesAndConditionsInDict( target_dict, PHASE_LATELATE, variables, build_file) # Make sure that the rules make sense, and build up rule_sources lists as # needed. Not all generators will need to use the rule_sources lists, but # some may, and it seems best to build the list in a common spot. # Also validate actions and run_as elements in targets. for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ValidateTargetType(target, target_dict) ValidateRulesInTarget(target, target_dict, extra_sources_for_rules) ValidateRunAsInTarget(target, target_dict, build_file) ValidateActionsInTarget(target, target_dict, build_file) # Generators might not expect ints. Turn them into strs. TurnIntIntoStrInDict(data) # TODO(mark): Return |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. return [flat_list, targets, data]
mit
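The MergeDicts comments in the file above define four list-merge policies keyed by a trailing character on the dict key: '=' replaces, '+' prepends, '?' sets only when absent, and no suffix appends. The following is a minimal standalone sketch of just those policy semantics, not gyp's actual implementation; demo_merge is a hypothetical helper written only for illustration.

def demo_merge(to, fro):
    # Apply the '=', '+', '?', and plain-append list policies selected by
    # the trailing character of each key in |fro|.
    for k, v in fro.items():
        if not isinstance(v, list):
            to[k] = v  # scalars simply overwrite, as in MergeDicts
            continue
        ext = k[-1]
        base = k[:-1] if ext in '=+?' else k
        if ext == '=':
            to[base] = list(v)                     # replace
        elif ext == '+':
            to[base] = list(v) + to.get(base, [])  # prepend
        elif ext == '?':
            to.setdefault(base, list(v))           # set only if absent
        else:
            to.setdefault(base, []).extend(v)      # default: append

target = {'defines': ['A'], 'cflags': ['-O2'], 'sources': ['a.cc']}
demo_merge(target, {'defines+': ['EARLY'], 'cflags=': ['-O0'],
                    'sources': ['b.cc'], 'libraries?': ['-lm']})
assert target == {'defines': ['EARLY', 'A'], 'cflags': ['-O0'],
                  'sources': ['a.cc', 'b.cc'], 'libraries': ['-lm']}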
ATIX-AG/foreman-ansible-modules
plugins/modules/setting.py
1
3129
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018 Matthias M Dellweg (ATIX AG) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: setting version_added: 1.0.0 short_description: Manage Settings description: - Manage Settings author: - "Matthias M Dellweg (@mdellweg) ATIX AG" options: name: description: - Name of the Setting required: true type: str value: description: - value to set the Setting to - if missing, reset to default required: false type: raw extends_documentation_fragment: - theforeman.foreman.foreman ''' EXAMPLES = ''' - name: "Set a Setting" theforeman.foreman.setting: username: "admin" password: "changeme" server_url: "https://foreman.example.com" name: "http_proxy" value: "http://localhost:8088" - name: "Reset a Setting" theforeman.foreman.setting: username: "admin" password: "changeme" server_url: "https://foreman.example.com" name: "http_proxy" ''' RETURN = ''' foreman_setting: description: Created / Updated state of the setting returned: success type: dict ''' from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanStatelessEntityAnsibleModule, parameter_value_to_str class ForemanSettingModule(ForemanStatelessEntityAnsibleModule): pass def main(): module = ForemanSettingModule( foreman_spec=dict( name=dict(required=True), value=dict(type='raw'), ), ) with module.api_connection(): entity = module.lookup_entity('entity') if 'value' not in module.foreman_params: module.foreman_params['value'] = entity['default'] or '' settings_type = entity['settings_type'] new_value = module.foreman_params['value'] # Allow to pass integers as string if settings_type == 'integer': new_value = int(new_value) module.foreman_params['value'] = parameter_value_to_str(new_value, settings_type) old_value = entity['value'] entity['value'] = parameter_value_to_str(old_value, settings_type) entity = module.ensure_entity('settings', module.foreman_params, entity, state='present') if entity: # Fake the not serialized input value as output entity['value'] = new_value module.exit_json(foreman_setting=entity) if __name__ == '__main__': main()
gpl-3.0
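Two behaviours of the module above are easy to miss: omitting `value` resets the setting to its server-side default (empty string when the default is None), and integer-typed settings may arrive as strings and are coerced before the stringified comparison. Below is a minimal standalone sketch of just that branch logic; effective_value and normalize are hypothetical names, and str() stands in for parameter_value_to_str, whose exact formatting is not reproduced here.

def effective_value(params, entity):
    # Fall back to the setting's default when the caller omitted 'value'.
    if 'value' not in params:
        return entity['default'] or ''
    return params['value']

def normalize(value, settings_type):
    # Integer settings may arrive as strings ("8088") and are coerced
    # before being stringified for comparison.
    if settings_type == 'integer':
        value = int(value)
    return str(value)

assert effective_value({}, {'default': None}) == ''
assert normalize('8088', 'integer') == '8088'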
2014c2g23/2015cda
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_skipping.py
744
5173
import unittest from .support import LoggingResult class Test_TestSkipping(unittest.TestCase): def test_skipping(self): class Foo(unittest.TestCase): def test_skip_me(self): self.skipTest("skip") events = [] result = LoggingResult(events) test = Foo("test_skip_me") test.run(result) self.assertEqual(events, ['startTest', 'addSkip', 'stopTest']) self.assertEqual(result.skipped, [(test, "skip")]) # Try letting setUp skip the test now. class Foo(unittest.TestCase): def setUp(self): self.skipTest("testing") def test_nothing(self): pass events = [] result = LoggingResult(events) test = Foo("test_nothing") test.run(result) self.assertEqual(events, ['startTest', 'addSkip', 'stopTest']) self.assertEqual(result.skipped, [(test, "testing")]) self.assertEqual(result.testsRun, 1) def test_skipping_decorators(self): op_table = ((unittest.skipUnless, False, True), (unittest.skipIf, True, False)) for deco, do_skip, dont_skip in op_table: class Foo(unittest.TestCase): @deco(do_skip, "testing") def test_skip(self): pass @deco(dont_skip, "testing") def test_dont_skip(self): pass test_do_skip = Foo("test_skip") test_dont_skip = Foo("test_dont_skip") suite = unittest.TestSuite([test_do_skip, test_dont_skip]) events = [] result = LoggingResult(events) suite.run(result) self.assertEqual(len(result.skipped), 1) expected = ['startTest', 'addSkip', 'stopTest', 'startTest', 'addSuccess', 'stopTest'] self.assertEqual(events, expected) self.assertEqual(result.testsRun, 2) self.assertEqual(result.skipped, [(test_do_skip, "testing")]) self.assertTrue(result.wasSuccessful()) def test_skip_class(self): @unittest.skip("testing") class Foo(unittest.TestCase): def test_1(self): record.append(1) record = [] result = unittest.TestResult() test = Foo("test_1") suite = unittest.TestSuite([test]) suite.run(result) self.assertEqual(result.skipped, [(test, "testing")]) self.assertEqual(record, []) def test_skip_non_unittest_class(self): @unittest.skip("testing") class Mixin: def test_1(self): record.append(1) class Foo(Mixin, unittest.TestCase): pass record = [] result = unittest.TestResult() test = Foo("test_1") suite = unittest.TestSuite([test]) suite.run(result) self.assertEqual(result.skipped, [(test, "testing")]) self.assertEqual(record, []) def test_expected_failure(self): class Foo(unittest.TestCase): @unittest.expectedFailure def test_die(self): self.fail("help me!") events = [] result = LoggingResult(events) test = Foo("test_die") test.run(result) self.assertEqual(events, ['startTest', 'addExpectedFailure', 'stopTest']) self.assertEqual(result.expectedFailures[0][0], test) self.assertTrue(result.wasSuccessful()) def test_unexpected_success(self): class Foo(unittest.TestCase): @unittest.expectedFailure def test_die(self): pass events = [] result = LoggingResult(events) test = Foo("test_die") test.run(result) self.assertEqual(events, ['startTest', 'addUnexpectedSuccess', 'stopTest']) self.assertFalse(result.failures) self.assertEqual(result.unexpectedSuccesses, [test]) self.assertTrue(result.wasSuccessful()) def test_skip_doesnt_run_setup(self): class Foo(unittest.TestCase): wasSetUp = False wasTornDown = False def setUp(self): Foo.wasSetUp = True def tornDown(self): Foo.wasTornDown = True @unittest.skip('testing') def test_1(self): pass result = unittest.TestResult() test = Foo("test_1") suite = unittest.TestSuite([test]) suite.run(result) self.assertEqual(result.skipped, [(test, "testing")]) self.assertFalse(Foo.wasSetUp) self.assertFalse(Foo.wasTornDown) def test_decorated_skip(self): def decorator(func): def inner(*a): 
return func(*a) return inner class Foo(unittest.TestCase): @decorator @unittest.skip('testing') def test_1(self): pass result = unittest.TestResult() test = Foo("test_1") suite = unittest.TestSuite([test]) suite.run(result) self.assertEqual(result.skipped, [(test, "testing")])
gpl-3.0
Bitl/RBXLegacy-src
Cut/RBXLegacyDiscordBot/lib/requests/cookies.py
109
18208
# -*- coding: utf-8 -*- """ requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ import copy import time import calendar import collections from ._internal_utils import to_native_string from .compat import cookielib, urlparse, urlunparse, Morsel try: import threading except ImportError: import dummy_threading as threading class MockRequest(object): """Wraps a `requests.Request` to mimic a `urllib2.Request`. The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. """ def __init__(self, request): self._r = request self._new_headers = {} self.type = urlparse(self._r.url).scheme def get_type(self): return self.type def get_host(self): return urlparse(self._r.url).netloc def get_origin_req_host(self): return self.get_host() def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header if not self._r.headers.get('Host'): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain host = to_native_string(self._r.headers['Host'], encoding='utf-8') parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it return urlunparse([ parsed.scheme, host, parsed.path, parsed.params, parsed.query, parsed.fragment ]) def is_unverifiable(self): return True def has_header(self, name): return name in self._r.headers or name in self._new_headers def get_header(self, name, default=None): return self._r.headers.get(name, self._new_headers.get(name, default)) def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") def add_unredirected_header(self, name, value): self._new_headers[name] = value def get_new_headers(self): return self._new_headers @property def unverifiable(self): return self.is_unverifiable() @property def origin_req_host(self): return self.get_origin_req_host() @property def host(self): return self.get_host() class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response the way `cookielib` expects to see them. """ def __init__(self, headers): """Make a MockResponse for `cookielib` to read. :param headers: a httplib.HTTPMessage or analogous carrying the headers """ self._headers = headers def info(self): return self._headers def getheaders(self, name): self._headers.getheaders(name) def extract_cookies_to_jar(jar, request, response): """Extract the cookies from the response into a CookieJar. 
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ if not (hasattr(response, '_original_response') and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req) def get_cookie_header(jar, request): """ Produce an appropriate Cookie header string to be sent with `request`, or None. :rtype: str """ r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get('Cookie') def remove_cookie_by_name(cookiejar, name, domain=None, path=None): """Unsets a cookie by name, by default over all domains and paths. Wraps CookieJar.clear(), is O(n). """ clearables = [] for cookie in cookiejar: if cookie.name != name: continue if domain is not None and domain != cookie.domain: continue if path is not None and path != cookie.path: continue clearables.append((cookie.domain, cookie.path, cookie.name)) for domain, path, name in clearables: cookiejar.clear(domain, path, name) class CookieConflictError(RuntimeError): """There are two cookies that meet the criteria specified in the cookie jar. Use .get and .set and include domain and path args in order to be more specific. """ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping): """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface. This is the CookieJar we create by default for requests and sessions that don't specify one, since some clients may expect response.cookies and session.cookies to support dict operations. Requests does not use the dict interface internally; it's just for compatibility with external client code. All requests code should work out of the box with externally provided instances of ``CookieJar``, e.g. ``LWPCookieJar`` and ``FileCookieJar``. Unlike a regular CookieJar, this class is pickleable. .. warning:: dictionary operations that are normally O(1) may be O(n). """ def get(self, name, default=None, domain=None, path=None): """Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1). """ try: return self._find_no_duplicates(name, domain, path) except KeyError: return default def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c def iterkeys(self): """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. .. seealso:: itervalues() and iteritems(). """ for cookie in iter(self): yield cookie.name def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. .. seealso:: values() and items(). """ return list(self.iterkeys()) def itervalues(self): """Dict-like itervalues() that returns an iterator of values of cookies from the jar. .. seealso:: iterkeys() and iteritems(). 
""" for cookie in iter(self): yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. .. seealso:: keys() and items(). """ return list(self.itervalues()) def iteritems(self): """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. .. seealso:: iterkeys() and itervalues(). """ for cookie in iter(self): yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs. .. seealso:: keys() and values(). """ return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" domains = [] for cookie in iter(self): if cookie.domain not in domains: domains.append(cookie.domain) return domains def list_paths(self): """Utility method to list all the paths in the jar.""" paths = [] for cookie in iter(self): if cookie.path not in paths: paths.append(cookie.path) return paths def multiple_domains(self): """Returns True if there are multiple domains in the jar. Returns False otherwise. :rtype: bool """ domains = [] for cookie in iter(self): if cookie.domain is not None and cookie.domain in domains: return True domains.append(cookie.domain) return False # there is only one domain in jar def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements. :rtype: dict """ dictionary = {} for cookie in iter(self): if ( (domain is None or cookie.domain == domain) and (path is None or cookie.path == path) ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: return super(RequestsCookieJar, self).__contains__(name) except CookieConflictError: return True def __getitem__(self, name): """Dict-like __getitem__() for compatibility with client code. Throws exception if there are more than one cookie with name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1). """ return self._find_no_duplicates(name) def __setitem__(self, name, value): """Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead. """ self.set(name, value) def __delitem__(self, name): """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s ``remove_cookie_by_name()``. """ remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): cookie.value = cookie.value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: self.set_cookie(copy.copy(cookie)) else: super(RequestsCookieJar, self).update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies. 
:param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :return: cookie.value """ for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: return cookie.value raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never used elsewhere in Requests. :param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :raises KeyError: if cookie is not found :raises CookieConflictError: if there are multiple cookies that match name and optionally domain and path :return: cookie.value """ toReturn = None for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: if toReturn is not None: # if there are multiple cookies that meet passed in criteria raise CookieConflictError('There are multiple cookies with name, %r' % (name)) toReturn = cookie.value # we will eventually return this as long as no cookie conflict if toReturn: return toReturn raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop('_cookies_lock') return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) if '_cookies_lock' not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): """Return a copy of this RequestsCookieJar.""" new_cj = RequestsCookieJar() new_cj.update(self) return new_cj def _copy_cookie_jar(jar): if jar is None: return None if hasattr(jar, 'copy'): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance new_jar = copy.copy(jar) new_jar.clear() for cookie in jar: new_jar.set_cookie(copy.copy(cookie)) return new_jar def create_cookie(name, value, **kwargs): """Make a cookie from underspecified parameters. By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie"). 
""" result = dict( version=0, name=name, value=value, port=None, domain='', path='/', secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,) badargs = set(kwargs) - set(result) if badargs: err = 'create_cookie() got unexpected keyword arguments: %s' raise TypeError(err % list(badargs)) result.update(kwargs) result['port_specified'] = bool(result['port']) result['domain_specified'] = bool(result['domain']) result['domain_initial_dot'] = result['domain'].startswith('.') result['path_specified'] = bool(result['path']) return cookielib.Cookie(**result) def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel['max-age']: try: expires = int(time.time() + int(morsel['max-age'])) except ValueError: raise TypeError('max-age: %s must be integer' % morsel['max-age']) elif morsel['expires']: time_template = '%a, %d-%b-%Y %H:%M:%S GMT' expires = calendar.timegm( time.strptime(morsel['expires'], time_template) ) return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. """ if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar def merge_cookies(cookiejar, cookies): """Add cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError('You can only merge into CookieJar') if isinstance(cookies, dict): cookiejar = cookiejar_from_dict( cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) except AttributeError: for cookie_in_jar in cookies: cookiejar.set_cookie(cookie_in_jar) return cookiejar
gpl-3.0
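A short usage sketch of the dict-like jar defined above. RequestsCookieJar, the domain/path keywords on set() and get(), and CookieConflictError all come from this file; only the example cookie values are invented, and it assumes the requests package is importable.

from requests.cookies import RequestsCookieJar, CookieConflictError

jar = RequestsCookieJar()
jar.set('token', 'abc', domain='api.example.com', path='/')
jar.set('token', 'xyz', domain='www.example.com', path='/')

# Disambiguate by domain: get() is O(n) but resolves the collision.
assert jar.get('token', domain='api.example.com') == 'abc'

# __getitem__ refuses to guess when two cookies share a name.
try:
    jar['token']
except CookieConflictError:
    pass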
Carreau/pytest
testing/test_recwarn.py
3
2465
import py, pytest from _pytest.recwarn import WarningsRecorder def test_WarningRecorder(recwarn): showwarning = py.std.warnings.showwarning rec = WarningsRecorder() assert py.std.warnings.showwarning != showwarning assert not rec.list py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 py.std.warnings.warn(DeprecationWarning("hello")) assert len(rec.list) == 2 warn = rec.pop() assert str(warn.message) == "hello" l = rec.list rec.clear() assert len(rec.list) == 0 assert l is rec.list pytest.raises(AssertionError, "rec.pop()") rec.finalize() assert showwarning == py.std.warnings.showwarning def test_recwarn_functional(testdir): reprec = testdir.inline_runsource(""" import warnings oldwarn = warnings.showwarning def test_method(recwarn): assert warnings.showwarning != oldwarn warnings.warn("hello") warn = recwarn.pop() assert isinstance(warn.message, UserWarning) def test_finalized(): assert warnings.showwarning == oldwarn """) res = reprec.countoutcomes() assert tuple(res) == (2, 0, 0), res # # ============ test pytest.deprecated_call() ============== # def dep(i): if i == 0: py.std.warnings.warn("is deprecated", DeprecationWarning) return 42 reg = {} def dep_explicit(i): if i == 0: py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning, filename="hello", lineno=3) def test_deprecated_call_raises(): excinfo = pytest.raises(AssertionError, "pytest.deprecated_call(dep, 3)") assert str(excinfo).find("did not produce") != -1 def test_deprecated_call(): pytest.deprecated_call(dep, 0) def test_deprecated_call_ret(): ret = pytest.deprecated_call(dep, 0) assert ret == 42 def test_deprecated_call_preserves(): r = py.std.warnings.onceregistry.copy() f = py.std.warnings.filters[:] test_deprecated_call_raises() test_deprecated_call() assert r == py.std.warnings.onceregistry assert f == py.std.warnings.filters def test_deprecated_explicit_call_raises(): pytest.raises(AssertionError, "pytest.deprecated_call(dep_explicit, 3)") def test_deprecated_explicit_call(): pytest.deprecated_call(dep_explicit, 0) pytest.deprecated_call(dep_explicit, 0)
mit
0111001101111010/open-health-inspection-api
venv/lib/python2.7/site-packages/flask/logging.py
838
1398
# -*- coding: utf-8 -*- """ flask.logging ~~~~~~~~~~~~~ Implements the logging support for Flask. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG def create_logger(app): """Creates a logger for the given application. This logger works similar to a regular Python logger but changes the effective logging level based on the application's debug flag. Furthermore this function also removes all attached handlers in case there was a logger with the log name before. """ Logger = getLoggerClass() class DebugLogger(Logger): def getEffectiveLevel(x): if x.level == 0 and app.debug: return DEBUG return Logger.getEffectiveLevel(x) class DebugHandler(StreamHandler): def emit(x, record): StreamHandler.emit(x, record) if app.debug else None handler = DebugHandler() handler.setLevel(DEBUG) handler.setFormatter(Formatter(app.debug_log_format)) logger = getLogger(app.logger_name) # just in case that was not a new logger, get rid of all the handlers # already attached to it. del logger.handlers[:] logger.__class__ = DebugLogger logger.addHandler(handler) return logger
gpl-2.0
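The net effect of DebugLogger is that the effective level tracks the app's debug flag. A hypothetical stand-alone illustration of that idea, using only the stdlib (the App class is a stand-in, not part of Flask):

# Stand-alone sketch of the DebugLogger behaviour above: with level
# NOTSET (0), the effective level becomes DEBUG only while app.debug is set.
import logging


class App(object):          # hypothetical stand-in for a Flask app
    debug = False


app = App()


class DebugLogger(logging.Logger):
    def getEffectiveLevel(self):
        if self.level == 0 and app.debug:
            return logging.DEBUG
        return logging.Logger.getEffectiveLevel(self)


logger = DebugLogger('demo')
print(logger.getEffectiveLevel() == logging.DEBUG)  # False: debug is off
app.debug = True
print(logger.getEffectiveLevel() == logging.DEBUG)  # True: flag flipped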
xhchrn/gegan
model/utils.py
1
3045
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import

import os
import glob
import math
import imageio
import scipy.misc as misc
import numpy as np
from PIL import Image


def pad_seq(seq, batch_size):
    # pad the sequence to be the multiples of batch_size
    seq_len = len(seq)
    if seq_len % batch_size == 0:
        return seq
    padded = batch_size - (seq_len % batch_size)
    seq.extend(seq[:padded])
    return seq


def normalize_image(img):
    """
    Make image zero centered and in between (-1, 1)
    """
    normalized = (img / 127.5) - 1.
    return normalized


def denormalize_image(img):
    deimg = (img + 1) * 127.5
    return np.clip(deimg, 0.0, 255.0)


def read_split_image(img):
    mat = misc.imread(img).astype(np.float)
    side = int(mat.shape[1] / 2)
    assert side * 2 == mat.shape[1]
    img_A = mat[:, :side]  # target
    img_B = mat[:, side:]  # source
    return img_A, img_B


def shift_and_resize_image(img, shift_x, shift_y, nw, nh):
    w, h, _ = img.shape
    enlarged = misc.imresize(img, [nw, nh])
    return enlarged[shift_x:shift_x + w, shift_y:shift_y + h]


def scale_back(images):
    return (images + 1.) / 2.


def merge(images, size):
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1], 3))
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]
        img[j * h:j * h + h, i * w:i * w + w, :] = image
    return img


def save_concat_images(imgs, img_path):
    concated = np.concatenate(imgs, axis=1)
    misc.imsave(img_path, concated)


def compile_frames_to_gif(frame_dir, gif_file):
    frames = sorted(glob.glob(os.path.join(frame_dir, "*.png")))
    print(frames)
    images = [misc.imresize(imageio.imread(f), interp='nearest', size=0.33)
              for f in frames]
    imageio.mimsave(gif_file, images, duration=0.1)
    return gif_file


def make_grid(tensor, nrow=8, padding=2, normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2,
                     width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding
            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid


def save_image(tensor, filename, nrow=8, padding=2, normalize=False, scale_each=False):
    ndarr = make_grid(tensor, nrow=nrow, padding=padding,
                      normalize=normalize, scale_each=scale_each)
    im = Image.fromarray(ndarr)
    im.save(filename)
apache-2.0
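The (-1, 1) normalization above round-trips exactly with denormalize_image, and pad_seq pads by recycling the head of the sequence. A self-contained check (numpy only) that re-states those helpers rather than importing the repository:

# Self-contained check of the normalization round-trip and padding math.
import numpy as np


def normalize_image(img):
    return (img / 127.5) - 1.


def denormalize_image(img):
    return np.clip((img + 1) * 127.5, 0.0, 255.0)


img = np.array([0.0, 127.5, 255.0])
assert np.allclose(normalize_image(img), [-1.0, 0.0, 1.0])
assert np.allclose(denormalize_image(normalize_image(img)), img)

# pad_seq: a 5-element list padded up to a multiple of batch_size=4 by
# recycling elements from the front.
seq = [1, 2, 3, 4, 5]
padded = seq + seq[:4 - len(seq) % 4]
assert len(padded) % 4 == 0 and padded == [1, 2, 3, 4, 5, 1, 2, 3]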
Ninjakow/TrueSkill
lib/numpy/core/tests/test_dtype.py
30
25540
from __future__ import division, absolute_import, print_function import sys import numpy as np from numpy.core.test_rational import rational from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, dec ) def assert_dtype_equal(a, b): assert_equal(a, b) assert_equal(hash(a), hash(b), "two equivalent types do not hash to the same value !") def assert_dtype_not_equal(a, b): assert_(a != b) assert_(hash(a) != hash(b), "two different types hash to the same value !") class TestBuiltin(TestCase): def test_run(self): """Only test hash runs at all.""" for t in [np.int, np.float, np.complex, np.int32, np.str, np.object, np.unicode]: dt = np.dtype(t) hash(dt) def test_dtype(self): # Make sure equivalent byte order char hash the same (e.g. < and = on # little endian) for t in [np.int, np.float]: dt = np.dtype(t) dt2 = dt.newbyteorder("<") dt3 = dt.newbyteorder(">") if dt == dt2: self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test") assert_dtype_equal(dt, dt2) else: self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test") assert_dtype_equal(dt, dt3) def test_equivalent_dtype_hashing(self): # Make sure equivalent dtypes with different type num hash equal uintp = np.dtype(np.uintp) if uintp.itemsize == 4: left = uintp right = np.dtype(np.uint32) else: left = uintp right = np.dtype(np.ulonglong) self.assertTrue(left == right) self.assertTrue(hash(left) == hash(right)) def test_invalid_types(self): # Make sure invalid type strings raise an error assert_raises(TypeError, np.dtype, 'O3') assert_raises(TypeError, np.dtype, 'O5') assert_raises(TypeError, np.dtype, 'O7') assert_raises(TypeError, np.dtype, 'b3') assert_raises(TypeError, np.dtype, 'h4') assert_raises(TypeError, np.dtype, 'I5') assert_raises(TypeError, np.dtype, 'e3') assert_raises(TypeError, np.dtype, 'f5') if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: assert_raises(TypeError, np.dtype, 'g12') elif np.dtype('g').itemsize == 12: assert_raises(TypeError, np.dtype, 'g16') if np.dtype('l').itemsize == 8: assert_raises(TypeError, np.dtype, 'l4') assert_raises(TypeError, np.dtype, 'L4') else: assert_raises(TypeError, np.dtype, 'l8') assert_raises(TypeError, np.dtype, 'L8') if np.dtype('q').itemsize == 8: assert_raises(TypeError, np.dtype, 'q4') assert_raises(TypeError, np.dtype, 'Q4') else: assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') def test_bad_param(self): # Can't give a size that's too small assert_raises(ValueError, np.dtype, {'names':['f0', 'f1'], 'formats':['i4', 'i1'], 'offsets':[0, 4], 'itemsize':4}) # If alignment is enabled, the alignment (4) must divide the itemsize assert_raises(ValueError, np.dtype, {'names':['f0', 'f1'], 'formats':['i4', 'i1'], 'offsets':[0, 4], 'itemsize':9}, align=True) # If alignment is enabled, the individual fields must be aligned assert_raises(ValueError, np.dtype, {'names':['f0', 'f1'], 'formats':['i1', 'f4'], 'offsets':[0, 2]}, align=True) class TestRecord(TestCase): def test_equivalent_record(self): """Test whether equivalent record dtypes hash the same.""" a = np.dtype([('yo', np.int)]) b = np.dtype([('yo', np.int)]) assert_dtype_equal(a, b) def test_different_names(self): # In theory, they may hash the same (collision) ? a = np.dtype([('yo', np.int)]) b = np.dtype([('ye', np.int)]) assert_dtype_not_equal(a, b) def test_different_titles(self): # In theory, they may hash the same (collision) ? 
a = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['Red pixel', 'Blue pixel']}) b = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['RRed pixel', 'Blue pixel']}) assert_dtype_not_equal(a, b) def test_mutate(self): # Mutating a dtype should reset the cached hash value a = np.dtype([('yo', np.int)]) b = np.dtype([('yo', np.int)]) c = np.dtype([('ye', np.int)]) assert_dtype_equal(a, b) assert_dtype_not_equal(a, c) a.names = ['ye'] assert_dtype_equal(a, c) assert_dtype_not_equal(a, b) state = b.__reduce__()[2] a.__setstate__(state) assert_dtype_equal(a, b) assert_dtype_not_equal(a, c) def test_not_lists(self): """Test if an appropriate exception is raised when passing bad values to the dtype constructor. """ self.assertRaises(TypeError, np.dtype, dict(names=set(['A', 'B']), formats=['f8', 'i4'])) self.assertRaises(TypeError, np.dtype, dict(names=['A', 'B'], formats=set(['f8', 'i4']))) def test_aligned_size(self): # Check that structured dtypes get padded to an aligned size dt = np.dtype('i4, i1', align=True) assert_equal(dt.itemsize, 8) dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) assert_equal(dt.itemsize, 8) dt = np.dtype({'names':['f0', 'f1'], 'formats':['i4', 'u1'], 'offsets':[0, 4]}, align=True) assert_equal(dt.itemsize, 8) dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) assert_equal(dt.itemsize, 8) # Nesting should preserve that alignment dt1 = np.dtype([('f0', 'i4'), ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=True) assert_equal(dt1.itemsize, 20) dt2 = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], 'offsets':[0, 4, 16]}, align=True) assert_equal(dt2.itemsize, 20) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), 'f2': ('i1', 16)}, align=True) assert_equal(dt3.itemsize, 20) assert_equal(dt1, dt2) assert_equal(dt2, dt3) # Nesting should preserve packing dt1 = np.dtype([('f0', 'i4'), ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=False) assert_equal(dt1.itemsize, 11) dt2 = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], 'offsets':[0, 4, 10]}, align=False) assert_equal(dt2.itemsize, 11) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), 'f2': ('i1', 10)}, align=False) assert_equal(dt3.itemsize, 11) assert_equal(dt1, dt2) assert_equal(dt2, dt3) def test_union_struct(self): # Should be able to create union dtypes dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], 'offsets':[0, 0, 2]}, align=True) assert_equal(dt.itemsize, 4) a = np.array([3], dtype='<u4').view(dt) a['f1'] = 10 a['f2'] = 36 assert_equal(a['f0'], 10 + 36*256*256) # Should be able to specify fields out of order dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], 'offsets':[4, 0, 2]}, align=True) assert_equal(dt.itemsize, 8) dt2 = np.dtype({'names':['f2', 'f0', 'f1'], 'formats':['<u2', '<u4', '<u2'], 'offsets':[2, 4, 0]}, align=True) vals = [(0, 1, 2), (3, -1, 4)] vals2 = [(2, 0, 1), (4, 3, -1)] a = np.array(vals, dt) b = np.array(vals2, dt2) assert_equal(a.astype(dt2), b) assert_equal(b.astype(dt), a) assert_equal(a.view(dt2), b) assert_equal(b.view(dt), a) # Should not be able to overlap objects with other types assert_raises(TypeError, np.dtype, {'names':['f0', 'f1'], 'formats':['O', 'i1'], 'offsets':[0, 2]}) assert_raises(TypeError, np.dtype, {'names':['f0', 'f1'], 
'formats':['i4', 'O'], 'offsets':[0, 3]}) assert_raises(TypeError, np.dtype, {'names':['f0', 'f1'], 'formats':[[('a', 'O')], 'i1'], 'offsets':[0, 2]}) assert_raises(TypeError, np.dtype, {'names':['f0', 'f1'], 'formats':['i4', [('a', 'O')]], 'offsets':[0, 3]}) # Out of order should still be ok, however dt = np.dtype({'names':['f0', 'f1'], 'formats':['i1', 'O'], 'offsets':[np.dtype('intp').itemsize, 0]}) def test_comma_datetime(self): dt = np.dtype('M8[D],datetime64[Y],i8') assert_equal(dt, np.dtype([('f0', 'M8[D]'), ('f1', 'datetime64[Y]'), ('f2', 'i8')])) def test_from_dictproxy(self): # Tests for PR #5920 dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']}) assert_dtype_equal(dt, np.dtype(dt.fields)) dt2 = np.dtype((np.void, dt.fields)) assert_equal(dt2.fields, dt.fields) def test_from_dict_with_zero_width_field(self): # Regression test for #6430 / #2196 dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)]) dt2 = np.dtype({'names': ['val1', 'val2'], 'formats': [(np.float32, (0,)), int]}) assert_dtype_equal(dt, dt2) assert_equal(dt.fields['val1'][0].itemsize, 0) assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize) def test_bool_commastring(self): d = np.dtype('?,?,?') # raises? assert_equal(len(d.names), 3) for n in d.names: assert_equal(d.fields[n][0], np.dtype('?')) def test_nonint_offsets(self): # gh-8059 def make_dtype(off): return np.dtype({'names': ['A'], 'formats': ['i4'], 'offsets': [off]}) assert_raises(TypeError, make_dtype, 'ASD') assert_raises(OverflowError, make_dtype, 2**70) assert_raises(TypeError, make_dtype, 2.3) assert_raises(ValueError, make_dtype, -10) # no errors here: dt = make_dtype(np.uint32(0)) np.zeros(1, dtype=dt)[0].item() class TestSubarray(TestCase): def test_single_subarray(self): a = np.dtype((np.int, (2))) b = np.dtype((np.int, (2,))) assert_dtype_equal(a, b) assert_equal(type(a.subdtype[1]), tuple) assert_equal(type(b.subdtype[1]), tuple) def test_equivalent_record(self): """Test whether equivalent subarray dtypes hash the same.""" a = np.dtype((np.int, (2, 3))) b = np.dtype((np.int, (2, 3))) assert_dtype_equal(a, b) def test_nonequivalent_record(self): """Test whether different subarray dtypes hash differently.""" a = np.dtype((np.int, (2, 3))) b = np.dtype((np.int, (3, 2))) assert_dtype_not_equal(a, b) a = np.dtype((np.int, (2, 3))) b = np.dtype((np.int, (2, 2))) assert_dtype_not_equal(a, b) a = np.dtype((np.int, (1, 2, 3))) b = np.dtype((np.int, (1, 2))) assert_dtype_not_equal(a, b) def test_shape_equal(self): """Test some data types that are equal""" assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple()))) assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1))) assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,)))) assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2)))) d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2)) assert_dtype_equal(np.dtype(d), np.dtype(d)) def test_shape_simple(self): """Test some simple cases that shouldn't be equal""" assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,)))) assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1)))) assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3)))) def test_shape_monster(self): """Test some more complicated cases that shouldn't be equal""" assert_dtype_not_equal( np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2)))) assert_dtype_not_equal( np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), np.dtype(([('a', 
'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2)))) assert_dtype_not_equal( np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2)))) assert_dtype_not_equal( np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2)))) def test_shape_sequence(self): # Any sequence of integers should work as shape, but the result # should be a tuple (immutable) of base type integers. a = np.array([1, 2, 3], dtype=np.int16) l = [1, 2, 3] # Array gets converted dt = np.dtype([('a', 'f4', a)]) assert_(isinstance(dt['a'].shape, tuple)) assert_(isinstance(dt['a'].shape[0], int)) # List gets converted dt = np.dtype([('a', 'f4', l)]) assert_(isinstance(dt['a'].shape, tuple)) # class IntLike(object): def __index__(self): return 3 def __int__(self): # (a PyNumber_Check fails without __int__) return 3 dt = np.dtype([('a', 'f4', IntLike())]) assert_(isinstance(dt['a'].shape, tuple)) assert_(isinstance(dt['a'].shape[0], int)) dt = np.dtype([('a', 'f4', (IntLike(),))]) assert_(isinstance(dt['a'].shape, tuple)) assert_(isinstance(dt['a'].shape[0], int)) def test_shape_invalid(self): # Check that the shape is valid. max_int = np.iinfo(np.intc).max max_intp = np.iinfo(np.intp).max # Too large values (the datatype is part of this) assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)]) assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)]) assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))]) # Takes a different code path (fails earlier: assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)]) # Negative values assert_raises(ValueError, np.dtype, [('a', 'f4', -1)]) assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))]) def test_alignment(self): #Check that subarrays are aligned t1 = np.dtype('1i4', align=True) t2 = np.dtype('2i4', align=True) assert_equal(t1.alignment, t2.alignment) class TestMonsterType(TestCase): """Test deeply nested subtypes.""" def test1(self): simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['Red pixel', 'Blue pixel']}) a = np.dtype([('yo', np.int), ('ye', simple1), ('yi', np.dtype((np.int, (3, 2))))]) b = np.dtype([('yo', np.int), ('ye', simple1), ('yi', np.dtype((np.int, (3, 2))))]) assert_dtype_equal(a, b) c = np.dtype([('yo', np.int), ('ye', simple1), ('yi', np.dtype((a, (3, 2))))]) d = np.dtype([('yo', np.int), ('ye', simple1), ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) class TestMetadata(TestCase): def test_no_metadata(self): d = np.dtype(int) self.assertEqual(d.metadata, None) def test_metadata_takes_dict(self): d = np.dtype(int, metadata={'datum': 1}) self.assertEqual(d.metadata, {'datum': 1}) def test_metadata_rejects_nondict(self): self.assertRaises(TypeError, np.dtype, int, metadata='datum') self.assertRaises(TypeError, np.dtype, int, metadata=1) self.assertRaises(TypeError, np.dtype, int, metadata=None) def test_nested_metadata(self): d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))]) self.assertEqual(d['a'].metadata, {'datum': 1}) def base_metadata_copied(self): d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1}))) assert_equal(d.metadata, {'datum': 1}) class TestString(TestCase): def test_complex_dtype_str(self): dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), ('rtile', '>f4', (64, 36))], (3,)), ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), ('bright', '>f4', (8, 36))])]) assert_equal(str(dt), "[('top', [('tiles', 
('>f4', (64, 64)), (1,)), " "('rtile', '>f4', (64, 36))], (3,)), " "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " "('bright', '>f4', (8, 36))])]") # If the sticky aligned flag is set to True, it makes the # str() function use a dict representation with an 'aligned' flag dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), ('rtile', '>f4', (64, 36))], (3,)), ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), ('bright', '>f4', (8, 36))])], align=True) assert_equal(str(dt), "{'names':['top','bottom'], " "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " "('rtile', '>f4', (64, 36))], (3,))," "[('bleft', ('>f4', (8, 64)), (1,)), " "('bright', '>f4', (8, 36))]], " "'offsets':[0,76800], " "'itemsize':80000, " "'aligned':True}") assert_equal(np.dtype(eval(str(dt))), dt) dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], 'offsets': [0, 1, 2], 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) assert_equal(str(dt), "[(('Red pixel', 'r'), 'u1'), " "(('Green pixel', 'g'), 'u1'), " "(('Blue pixel', 'b'), 'u1')]") dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], 'formats': ['<u4', 'u1', 'u1', 'u1'], 'offsets': [0, 0, 1, 2], 'titles': ['Color', 'Red pixel', 'Green pixel', 'Blue pixel']}) assert_equal(str(dt), "{'names':['rgba','r','g','b']," " 'formats':['<u4','u1','u1','u1']," " 'offsets':[0,0,1,2]," " 'titles':['Color','Red pixel'," "'Green pixel','Blue pixel']," " 'itemsize':4}") dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel']}) assert_equal(str(dt), "{'names':['r','b']," " 'formats':['u1','u1']," " 'offsets':[0,2]," " 'titles':['Red pixel','Blue pixel']," " 'itemsize':3}") dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')]) assert_equal(str(dt), "[('a', '<m8[D]'), ('b', '<M8[us]')]") def test_complex_dtype_repr(self): dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), ('rtile', '>f4', (64, 36))], (3,)), ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), ('bright', '>f4', (8, 36))])]) assert_equal(repr(dt), "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " "('rtile', '>f4', (64, 36))], (3,)), " "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " "('bright', '>f4', (8, 36))])])") dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], 'offsets': [0, 1, 2], 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, align=True) assert_equal(repr(dt), "dtype([(('Red pixel', 'r'), 'u1'), " "(('Green pixel', 'g'), 'u1'), " "(('Blue pixel', 'b'), 'u1')], align=True)") dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], 'formats': ['<u4', 'u1', 'u1', 'u1'], 'offsets': [0, 0, 1, 2], 'titles': ['Color', 'Red pixel', 'Green pixel', 'Blue pixel']}, align=True) assert_equal(repr(dt), "dtype({'names':['rgba','r','g','b']," " 'formats':['<u4','u1','u1','u1']," " 'offsets':[0,0,1,2]," " 'titles':['Color','Red pixel'," "'Green pixel','Blue pixel']," " 'itemsize':4}, align=True)") dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel'], 'itemsize': 4}) assert_equal(repr(dt), "dtype({'names':['r','b'], " "'formats':['u1','u1'], " "'offsets':[0,2], " "'titles':['Red pixel','Blue pixel'], " "'itemsize':4})") dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')]) assert_equal(repr(dt), "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])") @dec.skipif(sys.version_info[0] >= 3) def test_dtype_str_with_long_in_shape(self): # Pull request #376, should not error np.dtype('(1L,)i4') def test_base_dtype_with_object_type(self): # Issue gh-2798, should not error. 
np.array(['a'], dtype="O").astype(("O", [("name", "O")])) def test_empty_string_to_object(self): # Pull request #4722 np.array(["", ""]).astype(object) class TestDtypeAttributeDeletion(TestCase): def test_dtype_non_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = ["subdtype", "descr", "str", "name", "base", "shape", "isbuiltin", "isnative", "isalignedstruct", "fields", "metadata", "hasobject"] for s in attr: assert_raises(AttributeError, delattr, dt, s) def test_dtype_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = ["names"] for s in attr: assert_raises(AttributeError, delattr, dt, s) class TestDtypeAttributes(TestCase): def test_descr_has_trailing_void(self): # see gh-6359 dtype = np.dtype({ 'names': ['A', 'B'], 'formats': ['f4', 'f4'], 'offsets': [0, 8], 'itemsize': 16}) new_dtype = np.dtype(dtype.descr) assert_equal(new_dtype.itemsize, 16) class TestDtypeAttributes(TestCase): def test_name_builtin(self): for t in np.typeDict.values(): name = t.__name__ if name.endswith('_'): name = name[:-1] assert_equal(np.dtype(t).name, name) def test_name_dtype_subclass(self): # Ticket #4357 class user_def_subcls(np.void): pass assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls') def test_rational_dtype(): # test for bug gh-5719 a = np.array([1111], dtype=rational).astype assert_raises(OverflowError, a, 'int8') # test that dtype detection finds user-defined types x = rational(1) assert_equal(np.array([x,x]).dtype, np.dtype(rational)) if __name__ == "__main__": run_module_suite()
gpl-3.0
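The invariant these dtype tests enforce, in miniature: equivalent dtypes must compare equal and hash equal, so either spelling can serve as the same dict key. A short sketch:

# Equivalent structured dtypes compare equal *and* hash equal.
import numpy as np

a = np.dtype([('yo', np.int32)])
b = np.dtype([('yo', np.int32)])
c = np.dtype([('ye', np.int32)])

assert a == b and hash(a) == hash(b)
assert a != c

# Because equal dtypes hash equal, both spellings hit the same dict entry.
registry = {a: 'record-A'}
assert registry[b] == 'record-A'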
naparuba/opsbro
opsbro/misc/internalcherrypy/cherrypy/lib/cpstats.py
49
22770
"""CPStats, a package for collecting and reporting on program statistics. Overview ======== Statistics about program operation are an invaluable monitoring and debugging tool. Unfortunately, the gathering and reporting of these critical values is usually ad-hoc. This package aims to add a centralized place for gathering statistical performance data, a structure for recording that data which provides for extrapolation of that data into more useful information, and a method of serving that data to both human investigators and monitoring software. Let's examine each of those in more detail. Data Gathering -------------- Just as Python's `logging` module provides a common importable for gathering and sending messages, performance statistics would benefit from a similar common mechanism, and one that does *not* require each package which wishes to collect stats to import a third-party module. Therefore, we choose to re-use the `logging` module by adding a `statistics` object to it. That `logging.statistics` object is a nested dict. It is not a custom class, because that would: 1. require libraries and applications to import a third-party module in order to participate 2. inhibit innovation in extrapolation approaches and in reporting tools, and 3. be slow. There are, however, some specifications regarding the structure of the dict.:: { +----"SQLAlchemy": { | "Inserts": 4389745, | "Inserts per Second": | lambda s: s["Inserts"] / (time() - s["Start"]), | C +---"Table Statistics": { | o | "widgets": {-----------+ N | l | "Rows": 1.3M, | Record a | l | "Inserts": 400, | m | e | },---------------------+ e | c | "froobles": { s | t | "Rows": 7845, p | i | "Inserts": 0, a | o | }, c | n +---}, e | "Slow Queries": | [{"Query": "SELECT * FROM widgets;", | "Processing Time": 47.840923343, | }, | ], +----}, } The `logging.statistics` dict has four levels. The topmost level is nothing more than a set of names to introduce modularity, usually along the lines of package names. If the SQLAlchemy project wanted to participate, for example, it might populate the item `logging.statistics['SQLAlchemy']`, whose value would be a second-layer dict we call a "namespace". Namespaces help multiple packages to avoid collisions over key names, and make reports easier to read, to boot. The maintainers of SQLAlchemy should feel free to use more than one namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case or other syntax constraints on the namespace names; they should be chosen to be maximally readable by humans (neither too short nor too long). Each namespace, then, is a dict of named statistical values, such as 'Requests/sec' or 'Uptime'. You should choose names which will look good on a report: spaces and capitalization are just fine. In addition to scalars, values in a namespace MAY be a (third-layer) dict, or a list, called a "collection". For example, the CherryPy :class:`StatsTool` keeps track of what each request is doing (or has most recently done) in a 'Requests' collection, where each key is a thread ID; each value in the subdict MUST be a fourth dict (whew!) of statistical data about each thread. We call each subdict in the collection a "record". Similarly, the :class:`StatsTool` also keeps a list of slow queries, where each record contains data about each slow query, in order. Values in a namespace or record may also be functions, which brings us to: Extrapolation ------------- The collection of statistical data needs to be fast, as close to unnoticeable as possible to the host program. 
That requires us to minimize I/O, for example, but in Python it also means we need to minimize function calls. So when you are designing your namespace and record values, try to insert the most basic scalar values you already have on hand. When it comes time to report on the gathered data, however, we usually have much more freedom in what we can calculate. Therefore, whenever reporting tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents of `logging.statistics` for reporting, they first call `extrapolate_statistics` (passing the whole `statistics` dict as the only argument). This makes a deep copy of the statistics dict so that the reporting tool can both iterate over it and even change it without harming the original. But it also expands any functions in the dict by calling them. For example, you might have a 'Current Time' entry in the namespace with the value "lambda scope: time.time()". The "scope" parameter is the current namespace dict (or record, if we're currently expanding one of those instead), allowing you access to existing static entries. If you're truly evil, you can even modify more than one entry at a time. However, don't try to calculate an entry and then use its value in further extrapolations; the order in which the functions are called is not guaranteed. This can lead to a certain amount of duplicated work (or a redesign of your schema), but that's better than complicating the spec. After the whole thing has been extrapolated, it's time for: Reporting --------- The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates it all, and then transforms it to HTML for easy viewing. Each namespace gets its own header and attribute table, plus an extra table for each collection. This is NOT part of the statistics specification; other tools can format how they like. You can control which columns are output and how they are formatted by updating StatsPage.formatting, which is a dict that mirrors the keys and nesting of `logging.statistics`. The difference is that, instead of data values, it has formatting values. Use None for a given key to indicate to the StatsPage that a given column should not be output. Use a string with formatting (such as '%.3f') to interpolate the value(s), or use a callable (such as lambda v: v.isoformat()) for more advanced formatting. Any entry which is not mentioned in the formatting dict is output unchanged. Monitoring ---------- Although the HTML output takes pains to assign unique id's to each <td> with statistical data, you're probably better off fetching /cpstats/data, which outputs the whole (extrapolated) `logging.statistics` dict in JSON format. That is probably easier to parse, and doesn't have any formatting controls, so you get the "original" data in a consistently-serialized format. Note: there's no treatment yet for datetime objects. Try time.time() instead for now if you can. Nagios will probably thank you. Turning Collection Off ---------------------- It is recommended each namespace have an "Enabled" item which, if False, stops collection (but not reporting) of statistical data. Applications SHOULD provide controls to pause and resume collection by setting these entries to False or True, if present. 
Usage ===== To collect statistics on CherryPy applications:: from cherrypy.lib import cpstats appconfig['/']['tools.cpstats.on'] = True To collect statistics on your own code:: import logging # Initialize the repository if not hasattr(logging, 'statistics'): logging.statistics = {} # Initialize my namespace mystats = logging.statistics.setdefault('My Stuff', {}) # Initialize my namespace's scalars and collections mystats.update({ 'Enabled': True, 'Start Time': time.time(), 'Important Events': 0, 'Events/Second': lambda s: ( (s['Important Events'] / (time.time() - s['Start Time']))), }) ... for event in events: ... # Collect stats if mystats.get('Enabled', False): mystats['Important Events'] += 1 To report statistics:: root.cpstats = cpstats.StatsPage() To format statistics reports:: See 'Reporting', above. """ # ------------------------------- Statistics -------------------------------- # import logging if not hasattr(logging, 'statistics'): logging.statistics = {} def extrapolate_statistics(scope): """Return an extrapolated copy of the given scope.""" c = {} for k, v in list(scope.items()): if isinstance(v, dict): v = extrapolate_statistics(v) elif isinstance(v, (list, tuple)): v = [extrapolate_statistics(record) for record in v] elif hasattr(v, '__call__'): v = v(scope) c[k] = v return c # -------------------- CherryPy Applications Statistics --------------------- # import threading import time import cherrypy appstats = logging.statistics.setdefault('CherryPy Applications', {}) appstats.update({ 'Enabled': True, 'Bytes Read/Request': lambda s: ( s['Total Requests'] and (s['Total Bytes Read'] / float(s['Total Requests'])) or 0.0 ), 'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s), 'Bytes Written/Request': lambda s: ( s['Total Requests'] and (s['Total Bytes Written'] / float(s['Total Requests'])) or 0.0 ), 'Bytes Written/Second': lambda s: ( s['Total Bytes Written'] / s['Uptime'](s) ), 'Current Time': lambda s: time.time(), 'Current Requests': 0, 'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s), 'Server Version': cherrypy.__version__, 'Start Time': time.time(), 'Total Bytes Read': 0, 'Total Bytes Written': 0, 'Total Requests': 0, 'Total Time': 0, 'Uptime': lambda s: time.time() - s['Start Time'], 'Requests': {}, }) proc_time = lambda s: time.time() - s['Start Time'] class ByteCountWrapper(object): """Wraps a file-like object, counting the number of bytes read.""" def __init__(self, rfile): self.rfile = rfile self.bytes_read = 0 def read(self, size=-1): data = self.rfile.read(size) self.bytes_read += len(data) return data def readline(self, size=-1): data = self.rfile.readline(size) self.bytes_read += len(data) return data def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def close(self): self.rfile.close() def __iter__(self): return self def next(self): data = self.rfile.next() self.bytes_read += len(data) return data average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0 class StatsTool(cherrypy.Tool): """Record various information about the current request.""" def __init__(self): cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop) def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. 
""" if appstats.get('Enabled', False): cherrypy.Tool._setup(self) self.record_start() def record_start(self): """Record the beginning of a request.""" request = cherrypy.serving.request if not hasattr(request.rfile, 'bytes_read'): request.rfile = ByteCountWrapper(request.rfile) request.body.fp = request.rfile r = request.remote appstats['Current Requests'] += 1 appstats['Total Requests'] += 1 appstats['Requests'][threading._get_ident()] = { 'Bytes Read': None, 'Bytes Written': None, # Use a lambda so the ip gets updated by tools.proxy later 'Client': lambda s: '%s:%s' % (r.ip, r.port), 'End Time': None, 'Processing Time': proc_time, 'Request-Line': request.request_line, 'Response Status': None, 'Start Time': time.time(), } def record_stop( self, uriset=None, slow_queries=1.0, slow_queries_count=100, debug=False, **kwargs): """Record the end of a request.""" resp = cherrypy.serving.response w = appstats['Requests'][threading._get_ident()] r = cherrypy.request.rfile.bytes_read w['Bytes Read'] = r appstats['Total Bytes Read'] += r if resp.stream: w['Bytes Written'] = 'chunked' else: cl = int(resp.headers.get('Content-Length', 0)) w['Bytes Written'] = cl appstats['Total Bytes Written'] += cl w['Response Status'] = getattr( resp, 'output_status', None) or resp.status w['End Time'] = time.time() p = w['End Time'] - w['Start Time'] w['Processing Time'] = p appstats['Total Time'] += p appstats['Current Requests'] -= 1 if debug: cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS') if uriset: rs = appstats.setdefault('URI Set Tracking', {}) r = rs.setdefault(uriset, { 'Min': None, 'Max': None, 'Count': 0, 'Sum': 0, 'Avg': average_uriset_time}) if r['Min'] is None or p < r['Min']: r['Min'] = p if r['Max'] is None or p > r['Max']: r['Max'] = p r['Count'] += 1 r['Sum'] += p if slow_queries and p > slow_queries: sq = appstats.setdefault('Slow Queries', []) sq.append(w.copy()) if len(sq) > slow_queries_count: sq.pop(0) import cherrypy cherrypy.tools.cpstats = StatsTool() # ---------------------- CherryPy Statistics Reporting ---------------------- # import os thisdir = os.path.abspath(os.path.dirname(__file__)) try: import json except ImportError: try: import simplejson as json except ImportError: json = None missing = object() locale_date = lambda v: time.strftime('%c', time.gmtime(v)) iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v)) def pause_resume(ns): def _pause_resume(enabled): pause_disabled = '' resume_disabled = '' if enabled: resume_disabled = 'disabled="disabled" ' else: pause_disabled = 'disabled="disabled" ' return """ <form action="pause" method="POST" style="display:inline"> <input type="hidden" name="namespace" value="%s" /> <input type="submit" value="Pause" %s/> </form> <form action="resume" method="POST" style="display:inline"> <input type="hidden" name="namespace" value="%s" /> <input type="submit" value="Resume" %s/> </form> """ % (ns, pause_disabled, ns, resume_disabled) return _pause_resume class StatsPage(object): formatting = { 'CherryPy Applications': { 'Enabled': pause_resume('CherryPy Applications'), 'Bytes Read/Request': '%.3f', 'Bytes Read/Second': '%.3f', 'Bytes Written/Request': '%.3f', 'Bytes Written/Second': '%.3f', 'Current Time': iso_format, 'Requests/Second': '%.3f', 'Start Time': iso_format, 'Total Time': '%.3f', 'Uptime': '%.3f', 'Slow Queries': { 'End Time': None, 'Processing Time': '%.3f', 'Start Time': iso_format, }, 'URI Set Tracking': { 'Avg': '%.3f', 'Max': '%.3f', 'Min': '%.3f', 'Sum': '%.3f', }, 'Requests': { 'Bytes Read': 
'%s', 'Bytes Written': '%s', 'End Time': None, 'Processing Time': '%.3f', 'Start Time': None, }, }, 'CherryPy WSGIServer': { 'Enabled': pause_resume('CherryPy WSGIServer'), 'Connections/second': '%.3f', 'Start time': iso_format, }, } def index(self): # Transform the raw data into pretty output for HTML yield """ <html> <head> <title>Statistics</title> <style> th, td { padding: 0.25em 0.5em; border: 1px solid #666699; } table { border-collapse: collapse; } table.stats1 { width: 100%; } table.stats1 th { font-weight: bold; text-align: right; background-color: #CCD5DD; } table.stats2, h2 { margin-left: 50px; } table.stats2 th { font-weight: bold; text-align: center; background-color: #CCD5DD; } </style> </head> <body> """ for title, scalars, collections in self.get_namespaces(): yield """ <h1>%s</h1> <table class='stats1'> <tbody> """ % title for i, (key, value) in enumerate(scalars): colnum = i % 3 if colnum == 0: yield """ <tr>""" yield ( """ <th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" % vars() ) if colnum == 2: yield """ </tr>""" if colnum == 0: yield """ <th></th><td></td> <th></th><td></td> </tr>""" elif colnum == 1: yield """ <th></th><td></td> </tr>""" yield """ </tbody> </table>""" for subtitle, headers, subrows in collections: yield """ <h2>%s</h2> <table class='stats2'> <thead> <tr>""" % subtitle for key in headers: yield """ <th>%s</th>""" % key yield """ </tr> </thead> <tbody>""" for subrow in subrows: yield """ <tr>""" for value in subrow: yield """ <td>%s</td>""" % value yield """ </tr>""" yield """ </tbody> </table>""" yield """ </body> </html> """ index.exposed = True def get_namespaces(self): """Yield (title, scalars, collections) for each namespace.""" s = extrapolate_statistics(logging.statistics) for title, ns in sorted(s.items()): scalars = [] collections = [] ns_fmt = self.formatting.get(title, {}) for k, v in sorted(ns.items()): fmt = ns_fmt.get(k, {}) if isinstance(v, dict): headers, subrows = self.get_dict_collection(v, fmt) collections.append((k, ['ID'] + headers, subrows)) elif isinstance(v, (list, tuple)): headers, subrows = self.get_list_collection(v, fmt) collections.append((k, headers, subrows)) else: format = ns_fmt.get(k, missing) if format is None: # Don't output this column. continue if hasattr(format, '__call__'): v = format(v) elif format is not missing: v = format % v scalars.append((k, v)) yield title, scalars, collections def get_dict_collection(self, v, formatting): """Return ([headers], [rows]) for the given collection.""" # E.g., the 'Requests' dict. headers = [] for record in v.itervalues(): for k3 in record: format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if k3 not in headers: headers.append(k3) headers.sort() subrows = [] for k2, record in sorted(v.items()): subrow = [k2] for k3 in headers: v3 = record.get(k3, '') format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if hasattr(format, '__call__'): v3 = format(v3) elif format is not missing: v3 = format % v3 subrow.append(v3) subrows.append(subrow) return headers, subrows def get_list_collection(self, v, formatting): """Return ([headers], [subrows]) for the given collection.""" # E.g., the 'Slow Queries' list. headers = [] for record in v: for k3 in record: format = formatting.get(k3, missing) if format is None: # Don't output this column. 
continue if k3 not in headers: headers.append(k3) headers.sort() subrows = [] for record in v: subrow = [] for k3 in headers: v3 = record.get(k3, '') format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if hasattr(format, '__call__'): v3 = format(v3) elif format is not missing: v3 = format % v3 subrow.append(v3) subrows.append(subrow) return headers, subrows if json is not None: def data(self): s = extrapolate_statistics(logging.statistics) cherrypy.response.headers['Content-Type'] = 'application/json' return json.dumps(s, sort_keys=True, indent=4) data.exposed = True def pause(self, namespace): logging.statistics.get(namespace, {})['Enabled'] = False raise cherrypy.HTTPRedirect('./') pause.exposed = True pause.cp_config = {'tools.allow.on': True, 'tools.allow.methods': ['POST']} def resume(self, namespace): logging.statistics.get(namespace, {})['Enabled'] = True raise cherrypy.HTTPRedirect('./') resume.exposed = True resume.cp_config = {'tools.allow.on': True, 'tools.allow.methods': ['POST']}
mit
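The module docstring's "Usage" section condenses to the following runnable sketch; extrapolate_statistics is re-stated inline (and the rate guarded against a zero interval) so the snippet does not require cherrypy on the path:

# Register a namespace in logging.statistics, record an event, extrapolate.
import logging
import time

if not hasattr(logging, 'statistics'):
    logging.statistics = {}

mystats = logging.statistics.setdefault('My Stuff', {})
mystats.update({
    'Enabled': True,
    'Start Time': time.time(),
    'Important Events': 0,
    'Events/Second': lambda s: (
        s['Important Events'] / max(time.time() - s['Start Time'], 1e-9)),
})

if mystats.get('Enabled', False):
    mystats['Important Events'] += 1


def extrapolate_statistics(scope):
    """Return a copy of scope with callables expanded (as in cpstats)."""
    c = {}
    for k, v in list(scope.items()):
        if isinstance(v, dict):
            v = extrapolate_statistics(v)
        elif isinstance(v, (list, tuple)):
            v = [extrapolate_statistics(record) for record in v]
        elif hasattr(v, '__call__'):
            v = v(scope)
        c[k] = v
    return c


report = extrapolate_statistics(logging.statistics)
print(report['My Stuff']['Important Events'])  # 1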
darkryder/django
django/template/context_processors.py
55
2497
""" A set of request processors that return dictionaries to be merged into a template context. Each function takes the request object as its only parameter and returns a dictionary to add to the context. These are referenced from the 'context_processors' option of the configuration of a DjangoTemplates backend and used by RequestContext. """ from __future__ import unicode_literals import itertools from django.conf import settings from django.middleware.csrf import get_token from django.utils.encoding import smart_text from django.utils.functional import SimpleLazyObject, lazy def csrf(request): """ Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if it has not been provided by either a view decorator or the middleware """ def _get_val(): token = get_token(request) if token is None: # In order to be able to provide debugging info in the # case of misconfiguration, we use a sentinel value # instead of returning an empty dict. return 'NOTPROVIDED' else: return smart_text(token) return {'csrf_token': SimpleLazyObject(_get_val)} def debug(request): """ Returns context variables helpful for debugging. """ context_extras = {} if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS: context_extras['debug'] = True from django.db import connections # Return a lazy reference that computes connection.queries on access, # to ensure it contains queries triggered after this function runs. context_extras['sql_queries'] = lazy( lambda: list(itertools.chain(*[connections[x].queries for x in connections])), list ) return context_extras def i18n(request): from django.utils import translation return { 'LANGUAGES': settings.LANGUAGES, 'LANGUAGE_CODE': translation.get_language(), 'LANGUAGE_BIDI': translation.get_language_bidi(), } def tz(request): from django.utils import timezone return {'TIME_ZONE': timezone.get_current_timezone_name()} def static(request): """ Adds static-related context variables to the context. """ return {'STATIC_URL': settings.STATIC_URL} def media(request): """ Adds media-related context variables to the context. """ return {'MEDIA_URL': settings.MEDIA_URL} def request(request): return {'request': request}
bsd-3-clause
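For context, these processors are activated by listing their dotted paths under the 'context_processors' option of the DjangoTemplates backend. A minimal settings fragment (a sketch, not taken from this repository):

# settings.py fragment wiring up the processors defined above.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.i18n',
                'django.template.context_processors.tz',
                'django.template.context_processors.static',
                'django.template.context_processors.media',
            ],
        },
    },
]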
ewindisch/nova
nova/api/ec2/ec2utils.py
13
13554
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import re from nova import availability_zones from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova.objects import instance as instance_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova.openstack.common import timeutils from nova.openstack.common import uuidutils LOG = logging.getLogger(__name__) # NOTE(vish): cache mapping for one week _CACHE_TIME = 7 * 24 * 60 * 60 _CACHE = None def memoize(func): @functools.wraps(func) def memoizer(context, reqid): global _CACHE if not _CACHE: _CACHE = memorycache.get_client() key = "%s:%s" % (func.__name__, reqid) key = str(key) value = _CACHE.get(key) if value is None: value = func(context, reqid) _CACHE.set(key, value, time=_CACHE_TIME) return value return memoizer def reset_cache(): global _CACHE _CACHE = None def image_type(image_type): """Converts to a three letter image type. aki, kernel => aki ari, ramdisk => ari anything else => ami """ if image_type == 'kernel': return 'aki' if image_type == 'ramdisk': return 'ari' if image_type not in ['aki', 'ari']: return 'ami' return image_type def resource_type_from_id(context, resource_id): """Get resource type by ID Returns a string representation of the Amazon resource type, if known. Returns None on failure. 
:param context: context under which the method is called :param resource_id: resource_id to evaluate """ known_types = { 'i': 'instance', 'r': 'reservation', 'vol': 'volume', 'snap': 'snapshot', 'ami': 'image', 'aki': 'image', 'ari': 'image' } type_marker = resource_id.split('-')[0] return known_types.get(type_marker) @memoize def id_to_glance_id(context, image_id): """Convert an internal (db) id to a glance id.""" return db.s3_image_get(context, image_id)['uuid'] @memoize def glance_id_to_id(context, glance_id): """Convert a glance id to an internal (db) id.""" if glance_id is None: return try: return db.s3_image_get_by_uuid(context, glance_id)['id'] except exception.NotFound: return db.s3_image_create(context, glance_id)['id'] def ec2_id_to_glance_id(context, ec2_id): image_id = ec2_id_to_id(ec2_id) return id_to_glance_id(context, image_id) def glance_id_to_ec2_id(context, glance_id, image_type='ami'): image_id = glance_id_to_id(context, glance_id) return image_ec2_id(image_id, image_type=image_type) def ec2_id_to_id(ec2_id): """Convert an ec2 ID (i-[base 16 number]) to an instance id (int).""" try: return int(ec2_id.split('-')[-1], 16) except ValueError: raise exception.InvalidEc2Id(ec2_id=ec2_id) def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' return id_to_ec2_id(image_id, template=template) def get_ip_info_for_instance_from_nw_info(nw_info): if not isinstance(nw_info, network_model.NetworkInfo): nw_info = network_model.NetworkInfo.hydrate(nw_info) ip_info = {} fixed_ips = nw_info.fixed_ips() ip_info['fixed_ips'] = [ip['address'] for ip in fixed_ips if ip['version'] == 4] ip_info['fixed_ip6s'] = [ip['address'] for ip in fixed_ips if ip['version'] == 6] ip_info['floating_ips'] = [ip['address'] for ip in nw_info.floating_ips()] return ip_info def get_ip_info_for_instance(context, instance): """Return a dictionary of IP information for an instance.""" if isinstance(instance, instance_obj.Instance): nw_info = instance.info_cache.network_info else: # FIXME(comstud): Temporary as we transition to objects. 
info_cache = instance['info_cache'] or {} nw_info = info_cache.get('network_info') # Make sure empty response is turned into the model if not nw_info: nw_info = [] return get_ip_info_for_instance_from_nw_info(nw_info) def get_availability_zone_by_host(host, conductor_api=None): return availability_zones.get_host_availability_zone( context.get_admin_context(), host, conductor_api) def id_to_ec2_id(instance_id, template='i-%08x'): """Convert an instance ID (int) to an ec2 ID (i-[base 16 number]).""" return template % int(instance_id) def id_to_ec2_inst_id(instance_id): """Get or create an ec2 instance ID (i-[base 16 number]) from uuid.""" if instance_id is None: return None elif uuidutils.is_uuid_like(instance_id): ctxt = context.get_admin_context() int_id = get_int_id_from_instance_uuid(ctxt, instance_id) return id_to_ec2_id(int_id) else: return id_to_ec2_id(instance_id) def ec2_inst_id_to_uuid(context, ec2_id): """"Convert an instance id to uuid.""" int_id = ec2_id_to_id(ec2_id) return get_instance_uuid_from_int_id(context, int_id) @memoize def get_instance_uuid_from_int_id(context, int_id): return db.get_instance_uuid_by_ec2_id(context, int_id) def id_to_ec2_snap_id(snapshot_id): """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid.""" if uuidutils.is_uuid_like(snapshot_id): ctxt = context.get_admin_context() int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id) return id_to_ec2_id(int_id, 'snap-%08x') else: return id_to_ec2_id(snapshot_id, 'snap-%08x') def id_to_ec2_vol_id(volume_id): """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid.""" if uuidutils.is_uuid_like(volume_id): ctxt = context.get_admin_context() int_id = get_int_id_from_volume_uuid(ctxt, volume_id) return id_to_ec2_id(int_id, 'vol-%08x') else: return id_to_ec2_id(volume_id, 'vol-%08x') def ec2_vol_id_to_uuid(ec2_id): """Get the corresponding UUID for the given ec2-id.""" ctxt = context.get_admin_context() # NOTE(jgriffith) first strip prefix to get just the numeric int_id = ec2_id_to_id(ec2_id) return get_volume_uuid_from_int_id(ctxt, int_id) _ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$') def is_ec2_timestamp_expired(request, expires=None): """Checks the timestamp or expiry time included in an EC2 request and returns true if the request is expired """ query_time = None timestamp = request.get('Timestamp') expiry_time = request.get('Expires') def parse_strtime(strtime): if _ms_time_regex.match(strtime): # NOTE(MotoKen): time format for aws-sdk-java contains millisecond time_format = "%Y-%m-%dT%H:%M:%S.%fZ" else: time_format = "%Y-%m-%dT%H:%M:%SZ" return timeutils.parse_strtime(strtime, time_format) try: if timestamp and expiry_time: msg = _("Request must include either Timestamp or Expires," " but cannot contain both") LOG.error(msg) raise exception.InvalidRequest(msg) elif expiry_time: query_time = parse_strtime(expiry_time) return timeutils.is_older_than(query_time, -1) elif timestamp: query_time = parse_strtime(timestamp) # Check if the difference between the timestamp in the request # and the time on our servers is larger than 5 minutes, the # request is too old (or too new). 
if query_time and expires: return timeutils.is_older_than(query_time, expires) or \ timeutils.is_newer_than(query_time, expires) return False except ValueError: LOG.audit(_("Timestamp is invalid.")) return True @memoize def get_int_id_from_instance_uuid(context, instance_uuid): if instance_uuid is None: return try: return db.get_ec2_instance_id_by_uuid(context, instance_uuid) except exception.NotFound: return db.ec2_instance_create(context, instance_uuid)['id'] @memoize def get_int_id_from_volume_uuid(context, volume_uuid): if volume_uuid is None: return try: return db.get_ec2_volume_id_by_uuid(context, volume_uuid) except exception.NotFound: return db.ec2_volume_create(context, volume_uuid)['id'] @memoize def get_volume_uuid_from_int_id(context, int_id): return db.get_volume_uuid_by_ec2_id(context, int_id) def ec2_snap_id_to_uuid(ec2_id): """Get the corresponding UUID for the given ec2-id.""" ctxt = context.get_admin_context() # NOTE(jgriffith) first strip prefix to get just the numeric int_id = ec2_id_to_id(ec2_id) return get_snapshot_uuid_from_int_id(ctxt, int_id) @memoize def get_int_id_from_snapshot_uuid(context, snapshot_uuid): if snapshot_uuid is None: return try: return db.get_ec2_snapshot_id_by_uuid(context, snapshot_uuid) except exception.NotFound: return db.ec2_snapshot_create(context, snapshot_uuid)['id'] @memoize def get_snapshot_uuid_from_int_id(context, int_id): return db.get_snapshot_uuid_by_ec2_id(context, int_id) _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') def camelcase_to_underscore(str): return _c2u.sub(r'_\1', str).lower().strip('_') def _try_convert(value): """Return a non-string from a string or unicode, if possible. ============= ===================================================== When value is returns ============= ===================================================== zero-length '' 'None' None 'True' True case insensitive 'False' False case insensitive '0', '-0' 0 0xN, -0xN int from hex (positive) (N is any number) 0bN, -0bN int from binary (positive) (N is any number) * try conversion to int, float, complex, fallback value """ def _negative_zero(value): epsilon = 1e-7 return 0 if abs(value) < epsilon else value if len(value) == 0: return '' if value == 'None': return None lowered_value = value.lower() if lowered_value == 'true': return True if lowered_value == 'false': return False for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]: try: if lowered_value.startswith((prefix, "-" + prefix)): return int(lowered_value, base) except ValueError: pass try: return _negative_zero(float(value)) except ValueError: return value def dict_from_dotted_str(items): """parse multi dot-separated argument into dict. 
EBS boot uses multi dot-separated arguments like BlockDeviceMapping.1.DeviceName=snap-id Convert the above into {'block_device_mapping': {'1': {'device_name': snap-id}}} """ args = {} for key, value in items: parts = key.split(".") key = str(camelcase_to_underscore(parts[0])) if isinstance(value, str) or isinstance(value, unicode): # NOTE(vish): Automatically convert strings back # into their respective values value = _try_convert(value) if len(parts) > 1: d = args.get(key, {}) args[key] = d for k in parts[1:-1]: k = camelcase_to_underscore(k) v = d.get(k, {}) d[k] = v d = v d[camelcase_to_underscore(parts[-1])] = value else: args[key] = value return args def search_opts_from_filters(filters): return dict((f['name'].replace('-', '_'), f['value']['1']) for f in filters if f['value']['1']) if filters else {} def regex_from_ec2_regex(ec2_re): """Converts an EC2-style regex to a python regex. Approach is based on python fnmatch. """ iter_ec2_re = iter(ec2_re) py_re = '' for char in iter_ec2_re: if char == '*': py_re += '.*' elif char == '?': py_re += '.' elif char == '\\': try: next_char = iter_ec2_re.next() except StopIteration: next_char = '' if next_char == '*' or next_char == '?': py_re += '[%s]' % next_char else: py_re += '\\\\' + next_char else: py_re += re.escape(char) return '\A%s\Z(?s)' % py_re
apache-2.0
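The EC2-to-internal id mapping above is plain base-16 formatting plus a prefix template. A self-contained sketch of the two pure helpers, re-stated so it runs without nova installed:

# Round trip between internal integer ids and EC2-style ids.
def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])."""
    return template % int(instance_id)


def ec2_id_to_id(ec2_id):
    """Convert an ec2 ID (i-[base 16 number]) back to an int."""
    return int(ec2_id.split('-')[-1], 16)


assert id_to_ec2_id(10) == 'i-0000000a'
assert ec2_id_to_id('i-0000000a') == 10
# The same template mechanism produces volume/snapshot ids.
assert id_to_ec2_id(255, template='vol-%08x') == 'vol-000000ff'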
Kraymer/keroaek
keroaek/vlc.py
1
292112
#! /usr/bin/python # Python ctypes bindings for VLC # # Copyright (C) 2009-2012 the VideoLAN team # $Id: $ # # Authors: Olivier Aubert <contact at olivieraubert.net> # Jean Brouwers <MrJean1 at gmail.com> # Geoff Salmon <geoff.salmon at gmail.com> # # This library is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of the # License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA """This module provides bindings for the LibVLC public API, see U{http://wiki.videolan.org/LibVLC}. You can find the documentation and a README file with some examples at U{http://www.advene.org/download/python-ctypes/}. Basically, the most important class is L{Instance}, which is used to create a libvlc instance. From this instance, you then create L{MediaPlayer} and L{MediaListPlayer} instances. Alternatively, you may create instances of the L{MediaPlayer} and L{MediaListPlayer} class directly and an instance of L{Instance} will be implicitly created. The latter can be obtained using the C{get_instance} method of L{MediaPlayer} and L{MediaListPlayer}. """ import ctypes from ctypes.util import find_library import os import sys import functools # Used by EventManager in override.py from inspect import getargspec __version__ = "N/A" build_date = "Wed Apr 1 21:28:00 2015" if sys.version_info[0] > 2: str = str unicode = str bytes = bytes basestring = (str, bytes) PYTHON3 = True def str_to_bytes(s): """Translate string or bytes to bytes. """ if isinstance(s, str): return bytes(s, sys.getfilesystemencoding()) else: return s def bytes_to_str(b): """Translate bytes to string. """ if isinstance(b, bytes): return b.decode(sys.getfilesystemencoding()) else: return b else: str = str unicode = unicode bytes = str basestring = basestring PYTHON3 = False def str_to_bytes(s): """Translate string or bytes to bytes. """ if isinstance(s, unicode): return s.encode(sys.getfilesystemencoding()) else: return s def bytes_to_str(b): """Translate bytes to unicode string. """ if isinstance(b, str): return unicode(b, sys.getfilesystemencoding()) else: return b # Internal guard to prevent internal classes to be directly # instanciated. _internal_guard = object() def find_lib(): dll = None plugin_path = None if sys.platform.startswith('linux'): p = find_library('vlc') try: dll = ctypes.CDLL(p) except OSError: # may fail dll = ctypes.CDLL('libvlc.so.5') elif sys.platform.startswith('win'): p = find_library('libvlc.dll') if p is None: try: # some registry settings # leaner than win32api, win32con if PYTHON3: import winreg as w else: import _winreg as w for r in w.HKEY_LOCAL_MACHINE, w.HKEY_CURRENT_USER: try: r = w.OpenKey(r, 'Software\\VideoLAN\\VLC') plugin_path, _ = w.QueryValueEx(r, 'InstallDir') w.CloseKey(r) break except w.error: pass except ImportError: # no PyWin32 pass if plugin_path is None: # try some standard locations. 
for p in ('Program Files\\VideoLan\\', 'VideoLan\\', 'Program Files\\', ''): p = 'C:\\' + p + 'VLC\\libvlc.dll' if os.path.exists(p): plugin_path = os.path.dirname(p) break if plugin_path is not None: # try loading p = os.getcwd() os.chdir(plugin_path) # if chdir failed, this will raise an exception dll = ctypes.CDLL('libvlc.dll') # restore cwd after dll has been loaded os.chdir(p) else: # may fail dll = ctypes.CDLL('libvlc.dll') else: plugin_path = os.path.dirname(p) dll = ctypes.CDLL(p) elif sys.platform.startswith('darwin'): # FIXME: should find a means to configure path d = '/Applications/VLC.app/Contents/MacOS/' p = d + 'lib/libvlc.dylib' if os.path.exists(p): dll = ctypes.CDLL(p) d += 'modules' if os.path.isdir(d): plugin_path = d else: # hope, some PATH is set... dll = ctypes.CDLL('libvlc.dylib') else: raise NotImplementedError('%s: %s not supported' % (sys.argv[0], sys.platform)) return (dll, plugin_path) # plugin_path used on win32 and MacOS in override.py dll, plugin_path = find_lib() class VLCException(Exception): """Exception raised by libvlc methods. """ pass try: _Ints = (int, long) except NameError: # no long in Python 3+ _Ints = int _Seqs = (list, tuple) # Used for handling *event_manager() methods. class memoize_parameterless(object): """Decorator. Caches a parameterless method's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary """ def __init__(self, func): self.func = func self._cache = {} def __call__(self, obj): try: return self._cache[obj] except KeyError: v = self._cache[obj] = self.func(obj) return v def __repr__(self): """Return the function's docstring. """ return self.func.__doc__ def __get__(self, obj, objtype): """Support instance methods. """ return functools.partial(self.__call__, obj) # Default instance. It is used to instanciate classes directly in the # OO-wrapper. _default_instance = None def get_default_instance(): """Return the default VLC.Instance. """ global _default_instance if _default_instance is None: _default_instance = Instance() return _default_instance _Cfunctions = {} # from LibVLC __version__ _Globals = globals() # sys.modules[__name__].__dict__ def _Cfunction(name, flags, errcheck, *types): """(INTERNAL) New ctypes function binding. """ if hasattr(dll, name) and name in _Globals: p = ctypes.CFUNCTYPE(*types) f = p((name, dll), flags) if errcheck is not None: f.errcheck = errcheck # replace the Python function # in this module, but only when # running as python -O or -OO if __debug__: _Cfunctions[name] = f else: _Globals[name] = f return f raise NameError('no function %r' % (name,)) def _Cobject(cls, ctype): """(INTERNAL) New instance from ctypes. """ o = object.__new__(cls) o._as_parameter_ = ctype return o def _Constructor(cls, ptr=_internal_guard): """(INTERNAL) New wrapper from ctypes. """ if ptr == _internal_guard: raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.") if ptr is None or ptr == 0: return None return _Cobject(cls, ctypes.c_void_p(ptr)) class _Cstruct(ctypes.Structure): """(INTERNAL) Base class for ctypes structures. 
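
    Subclasses either declare _fields_ inline or, for self-referential
    structures, assign it after the class statement. A sketch (Node is a
    made-up example):

        class Node(_Cstruct):
            pass
        Node._fields_ = [('next', ctypes.POINTER(Node))]

    This is the pattern used below by AudioOutput and TrackDescription.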
""" _fields_ = [] # list of 2-tuples ('name', ctyptes.<type>) def __str__(self): l = [' %s:\t%s' % (n, getattr(self, n)) for n, _ in self._fields_] return '\n'.join([self.__class__.__name__] + l) def __repr__(self): return '%s.%s' % (self.__class__.__module__, self) class _Ctype(object): """(INTERNAL) Base class for ctypes. """ @staticmethod def from_param(this): # not self """(INTERNAL) ctypes parameter conversion method. """ if this is None: return None return this._as_parameter_ class ListPOINTER(object): """Just like a POINTER but accept a list of ctype as an argument. """ def __init__(self, etype): self.etype = etype def from_param(self, param): if isinstance(param, _Seqs): return (self.etype * len(param))(*param) # errcheck functions for some native functions. def string_result(result, func, arguments): """Errcheck function. Returns a string and frees the original pointer. It assumes the result is a char *. """ if result: # make a python string copy s = bytes_to_str(ctypes.string_at(result)) # free original string ptr libvlc_free(result) return s return None def class_result(classname): """Errcheck function. Returns a function that creates the specified class. """ def wrap_errcheck(result, func, arguments): if result is None: return None return classname(result) return wrap_errcheck # Wrapper for the opaque struct libvlc_log_t class Log(ctypes.Structure): pass Log_ptr = ctypes.POINTER(Log) # FILE* ctypes wrapper, copied from # http://svn.python.org/projects/ctypes/trunk/ctypeslib/ctypeslib/contrib/pythonhdr.py class FILE(ctypes.Structure): pass FILE_ptr = ctypes.POINTER(FILE) if PYTHON3: PyFile_FromFd = ctypes.pythonapi.PyFile_FromFd PyFile_FromFd.restype = ctypes.py_object PyFile_FromFd.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int ] PyFile_AsFd = ctypes.pythonapi.PyObject_AsFileDescriptor PyFile_AsFd.restype = ctypes.c_int PyFile_AsFd.argtypes = [ctypes.py_object] else: PyFile_FromFile = ctypes.pythonapi.PyFile_FromFile PyFile_FromFile.restype = ctypes.py_object PyFile_FromFile.argtypes = [FILE_ptr, ctypes.c_char_p, ctypes.c_char_p, ctypes.CFUNCTYPE(ctypes.c_int, FILE_ptr)] PyFile_AsFile = ctypes.pythonapi.PyFile_AsFile PyFile_AsFile.restype = FILE_ptr PyFile_AsFile.argtypes = [ctypes.py_object] # Generated enum types # class _Enum(ctypes.c_uint): '''(INTERNAL) Base class ''' _enum_names_ = {} def __str__(self): n = self._enum_names_.get(self.value, '') or ('FIXME_(%r)' % (self.value,)) return '.'.join((self.__class__.__name__, n)) def __hash__(self): return self.value def __repr__(self): return '.'.join((self.__class__.__module__, self.__str__())) def __eq__(self, other): return ( (isinstance(other, _Enum) and self.value == other.value) or (isinstance(other, _Ints) and self.value == other) ) def __ne__(self, other): return not self.__eq__(other) class LogLevel(_Enum): '''Logging messages level. \note future libvlc versions may define new levels. ''' _enum_names_ = { 0: 'DEBUG', 2: 'NOTICE', 3: 'WARNING', 4: 'ERROR', } LogLevel.DEBUG = LogLevel(0) LogLevel.ERROR = LogLevel(4) LogLevel.NOTICE = LogLevel(2) LogLevel.WARNING = LogLevel(3) class EventType(_Enum): '''Event types. 
''' _enum_names_ = { 0: 'MediaMetaChanged', 1: 'MediaSubItemAdded', 2: 'MediaDurationChanged', 3: 'MediaParsedChanged', 4: 'MediaFreed', 5: 'MediaStateChanged', 6: 'MediaSubItemTreeAdded', 0x100: 'MediaPlayerMediaChanged', 257: 'MediaPlayerNothingSpecial', 258: 'MediaPlayerOpening', 259: 'MediaPlayerBuffering', 260: 'MediaPlayerPlaying', 261: 'MediaPlayerPaused', 262: 'MediaPlayerStopped', 263: 'MediaPlayerForward', 264: 'MediaPlayerBackward', 265: 'MediaPlayerEndReached', 266: 'MediaPlayerEncounteredError', 267: 'MediaPlayerTimeChanged', 268: 'MediaPlayerPositionChanged', 269: 'MediaPlayerSeekableChanged', 270: 'MediaPlayerPausableChanged', 271: 'MediaPlayerTitleChanged', 272: 'MediaPlayerSnapshotTaken', 273: 'MediaPlayerLengthChanged', 274: 'MediaPlayerVout', 275: 'MediaPlayerScrambledChanged', 276: 'MediaPlayerESAdded', 277: 'MediaPlayerESDeleted', 278: 'MediaPlayerESSelected', 0x200: 'MediaListItemAdded', 513: 'MediaListWillAddItem', 514: 'MediaListItemDeleted', 515: 'MediaListWillDeleteItem', 516: 'MediaListEndReached', 0x300: 'MediaListViewItemAdded', 769: 'MediaListViewWillAddItem', 770: 'MediaListViewItemDeleted', 771: 'MediaListViewWillDeleteItem', 0x400: 'MediaListPlayerPlayed', 1025: 'MediaListPlayerNextItemSet', 1026: 'MediaListPlayerStopped', 0x500: 'MediaDiscovererStarted', 1281: 'MediaDiscovererEnded', 0x600: 'VlmMediaAdded', 1537: 'VlmMediaRemoved', 1538: 'VlmMediaChanged', 1539: 'VlmMediaInstanceStarted', 1540: 'VlmMediaInstanceStopped', 1541: 'VlmMediaInstanceStatusInit', 1542: 'VlmMediaInstanceStatusOpening', 1543: 'VlmMediaInstanceStatusPlaying', 1544: 'VlmMediaInstanceStatusPause', 1545: 'VlmMediaInstanceStatusEnd', 1546: 'VlmMediaInstanceStatusError', } EventType.MediaDiscovererEnded = EventType(1281) EventType.MediaDiscovererStarted = EventType(0x500) EventType.MediaDurationChanged = EventType(2) EventType.MediaFreed = EventType(4) EventType.MediaListEndReached = EventType(516) EventType.MediaListItemAdded = EventType(0x200) EventType.MediaListItemDeleted = EventType(514) EventType.MediaListPlayerNextItemSet = EventType(1025) EventType.MediaListPlayerPlayed = EventType(0x400) EventType.MediaListPlayerStopped = EventType(1026) EventType.MediaListViewItemAdded = EventType(0x300) EventType.MediaListViewItemDeleted = EventType(770) EventType.MediaListViewWillAddItem = EventType(769) EventType.MediaListViewWillDeleteItem = EventType(771) EventType.MediaListWillAddItem = EventType(513) EventType.MediaListWillDeleteItem = EventType(515) EventType.MediaMetaChanged = EventType(0) EventType.MediaParsedChanged = EventType(3) EventType.MediaPlayerBackward = EventType(264) EventType.MediaPlayerBuffering = EventType(259) EventType.MediaPlayerESAdded = EventType(276) EventType.MediaPlayerESDeleted = EventType(277) EventType.MediaPlayerESSelected = EventType(278) EventType.MediaPlayerEncounteredError = EventType(266) EventType.MediaPlayerEndReached = EventType(265) EventType.MediaPlayerForward = EventType(263) EventType.MediaPlayerLengthChanged = EventType(273) EventType.MediaPlayerMediaChanged = EventType(0x100) EventType.MediaPlayerNothingSpecial = EventType(257) EventType.MediaPlayerOpening = EventType(258) EventType.MediaPlayerPausableChanged = EventType(270) EventType.MediaPlayerPaused = EventType(261) EventType.MediaPlayerPlaying = EventType(260) EventType.MediaPlayerPositionChanged = EventType(268) EventType.MediaPlayerScrambledChanged = EventType(275) EventType.MediaPlayerSeekableChanged = EventType(269) EventType.MediaPlayerSnapshotTaken = EventType(272) 
EventType.MediaPlayerStopped = EventType(262) EventType.MediaPlayerTimeChanged = EventType(267) EventType.MediaPlayerTitleChanged = EventType(271) EventType.MediaPlayerVout = EventType(274) EventType.MediaStateChanged = EventType(5) EventType.MediaSubItemAdded = EventType(1) EventType.MediaSubItemTreeAdded = EventType(6) EventType.VlmMediaAdded = EventType(0x600) EventType.VlmMediaChanged = EventType(1538) EventType.VlmMediaInstanceStarted = EventType(1539) EventType.VlmMediaInstanceStatusEnd = EventType(1545) EventType.VlmMediaInstanceStatusError = EventType(1546) EventType.VlmMediaInstanceStatusInit = EventType(1541) EventType.VlmMediaInstanceStatusOpening = EventType(1542) EventType.VlmMediaInstanceStatusPause = EventType(1544) EventType.VlmMediaInstanceStatusPlaying = EventType(1543) EventType.VlmMediaInstanceStopped = EventType(1540) EventType.VlmMediaRemoved = EventType(1537) class Meta(_Enum): '''Meta data types. ''' _enum_names_ = { 0: 'Title', 1: 'Artist', 2: 'Genre', 3: 'Copyright', 4: 'Album', 5: 'TrackNumber', 6: 'Description', 7: 'Rating', 8: 'Date', 9: 'Setting', 10: 'URL', 11: 'Language', 12: 'NowPlaying', 13: 'Publisher', 14: 'EncodedBy', 15: 'ArtworkURL', 16: 'TrackID', 17: 'TrackTotal', 18: 'Director', 19: 'Season', 20: 'Episode', 21: 'ShowName', 22: 'Actors', 23: 'AlbumArtist', 24: 'DiscNumber', } Meta.Actors = Meta(22) Meta.Album = Meta(4) Meta.AlbumArtist = Meta(23) Meta.Artist = Meta(1) Meta.ArtworkURL = Meta(15) Meta.Copyright = Meta(3) Meta.Date = Meta(8) Meta.Description = Meta(6) Meta.Director = Meta(18) Meta.DiscNumber = Meta(24) Meta.EncodedBy = Meta(14) Meta.Episode = Meta(20) Meta.Genre = Meta(2) Meta.Language = Meta(11) Meta.NowPlaying = Meta(12) Meta.Publisher = Meta(13) Meta.Rating = Meta(7) Meta.Season = Meta(19) Meta.Setting = Meta(9) Meta.ShowName = Meta(21) Meta.Title = Meta(0) Meta.TrackID = Meta(16) Meta.TrackNumber = Meta(5) Meta.TrackTotal = Meta(17) Meta.URL = Meta(10) class State(_Enum): '''Note the order of libvlc_state_t enum must match exactly the order of See mediacontrol_playerstatus, See input_state_e enums, and videolan.libvlc.state (at bindings/cil/src/media.cs). expected states by web plugins are: idle/close=0, opening=1, buffering=2, playing=3, paused=4, stopping=5, ended=6, error=7. ''' _enum_names_ = { 0: 'NothingSpecial', 1: 'Opening', 2: 'Buffering', 3: 'Playing', 4: 'Paused', 5: 'Stopped', 6: 'Ended', 7: 'Error', } State.Buffering = State(2) State.Ended = State(6) State.Error = State(7) State.NothingSpecial = State(0) State.Opening = State(1) State.Paused = State(4) State.Playing = State(3) State.Stopped = State(5) class TrackType(_Enum): '''N/A ''' _enum_names_ = { -1: 'unknown', 0: 'audio', 1: 'video', 2: 'text', } TrackType.audio = TrackType(0) TrackType.text = TrackType(2) TrackType.unknown = TrackType(-1) TrackType.video = TrackType(1) class MediaType(_Enum): '''Media type See libvlc_media_get_type. ''' _enum_names_ = { 0: 'unknown', 1: 'file', 2: 'directory', 3: 'disc', 4: 'stream', 5: 'playlist', } MediaType.directory = MediaType(2) MediaType.disc = MediaType(3) MediaType.file = MediaType(1) MediaType.playlist = MediaType(5) MediaType.stream = MediaType(4) MediaType.unknown = MediaType(0) class MediaParseFlag(_Enum): '''Parse flags used by libvlc_media_parse_with_options() See libvlc_media_parse_with_options. 
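
    The flags are plain bits and may be combined through their .value
    attribute, e.g. (illustrative):

        parse_flag = MediaParseFlag.local.value | MediaParseFlag.network.value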
    '''
    _enum_names_ = {
        0x00: 'local',
        0x01: 'network',
        # NOTE: the binding generator emitted duplicate 'local'/'network'
        # names for the two fetch flags (libvlc_media_fetch_local/_network);
        # they are renamed here so each flag keeps a distinct attribute.
        0x02: 'fetch_local',
        0x04: 'fetch_network',
    }
MediaParseFlag.local         = MediaParseFlag(0x00)
MediaParseFlag.network       = MediaParseFlag(0x01)
MediaParseFlag.fetch_local   = MediaParseFlag(0x02)
MediaParseFlag.fetch_network = MediaParseFlag(0x04)

class PlaybackMode(_Enum):
    '''Defines playback modes for playlist.
    '''
    _enum_names_ = {
        0: 'default',
        1: 'loop',
        2: 'repeat',
    }
PlaybackMode.default = PlaybackMode(0)
PlaybackMode.loop    = PlaybackMode(1)
PlaybackMode.repeat  = PlaybackMode(2)

class VideoMarqueeOption(_Enum):
    '''Marq options definition.
    '''
    _enum_names_ = {
        0: 'Enable',
        1: 'Text',
        2: 'Color',
        3: 'Opacity',
        4: 'Position',
        5: 'Refresh',
        6: 'Size',
        7: 'Timeout',
        8: 'marquee_X',
        9: 'marquee_Y',
    }
VideoMarqueeOption.Color     = VideoMarqueeOption(2)
VideoMarqueeOption.Enable    = VideoMarqueeOption(0)
VideoMarqueeOption.Opacity   = VideoMarqueeOption(3)
VideoMarqueeOption.Position  = VideoMarqueeOption(4)
VideoMarqueeOption.Refresh   = VideoMarqueeOption(5)
VideoMarqueeOption.Size      = VideoMarqueeOption(6)
VideoMarqueeOption.Text      = VideoMarqueeOption(1)
VideoMarqueeOption.Timeout   = VideoMarqueeOption(7)
VideoMarqueeOption.marquee_X = VideoMarqueeOption(8)
VideoMarqueeOption.marquee_Y = VideoMarqueeOption(9)

class NavigateMode(_Enum):
    '''Navigation mode.
    '''
    _enum_names_ = {
        0: 'activate',
        1: 'up',
        2: 'down',
        3: 'left',
        4: 'right',
    }
NavigateMode.activate = NavigateMode(0)
NavigateMode.down     = NavigateMode(2)
NavigateMode.left     = NavigateMode(3)
NavigateMode.right    = NavigateMode(4)
NavigateMode.up       = NavigateMode(1)

class Position(_Enum):
    '''Enumeration of values used to set position (e.g. of video title).
    '''
    _enum_names_ = {
        -1: 'disable',
        0: 'center',
        1: 'left',
        2: 'right',
        3: 'top',
        # NOTE: values 4/5/7/8 were emitted as bare 'left'/'right' by the
        # generator, silently clobbering Position.left/Position.right; they
        # are the corner positions and are named accordingly here.
        4: 'top_left',
        5: 'top_right',
        6: 'bottom',
        7: 'bottom_left',
        8: 'bottom_right',
    }
Position.bottom       = Position(6)
Position.bottom_left  = Position(7)
Position.bottom_right = Position(8)
Position.center       = Position(0)
Position.disable      = Position(-1)
Position.left         = Position(1)
Position.right        = Position(2)
Position.top          = Position(3)
Position.top_left     = Position(4)
Position.top_right    = Position(5)

class VideoLogoOption(_Enum):
    '''Option values for libvlc_video_{get,set}_logo_{int,string}.
    '''
    _enum_names_ = {
        0: 'enable',
        1: 'file',
        2: 'logo_x',
        3: 'logo_y',
        4: 'delay',
        5: 'repeat',
        6: 'opacity',
        7: 'position',
    }
VideoLogoOption.delay    = VideoLogoOption(4)
VideoLogoOption.enable   = VideoLogoOption(0)
VideoLogoOption.file     = VideoLogoOption(1)
VideoLogoOption.logo_x   = VideoLogoOption(2)
VideoLogoOption.logo_y   = VideoLogoOption(3)
VideoLogoOption.opacity  = VideoLogoOption(6)
VideoLogoOption.position = VideoLogoOption(7)
VideoLogoOption.repeat   = VideoLogoOption(5)

class VideoAdjustOption(_Enum):
    '''Option values for libvlc_video_{get,set}_adjust_{int,float,bool}.
    '''
    _enum_names_ = {
        0: 'Enable',
        1: 'Contrast',
        2: 'Brightness',
        3: 'Hue',
        4: 'Saturation',
        5: 'Gamma',
    }
VideoAdjustOption.Brightness = VideoAdjustOption(2)
VideoAdjustOption.Contrast   = VideoAdjustOption(1)
VideoAdjustOption.Enable     = VideoAdjustOption(0)
VideoAdjustOption.Gamma      = VideoAdjustOption(5)
VideoAdjustOption.Hue        = VideoAdjustOption(3)
VideoAdjustOption.Saturation = VideoAdjustOption(4)

class AudioOutputDeviceTypes(_Enum):
    '''Audio device types.
''' _enum_names_ = { -1: 'Error', 1: 'Mono', 2: 'Stereo', 4: '_2F2R', 5: '_3F2R', 6: '_5_1', 7: '_6_1', 8: '_7_1', 10: 'SPDIF', } AudioOutputDeviceTypes.Error = AudioOutputDeviceTypes(-1) AudioOutputDeviceTypes.Mono = AudioOutputDeviceTypes(1) AudioOutputDeviceTypes.SPDIF = AudioOutputDeviceTypes(10) AudioOutputDeviceTypes.Stereo = AudioOutputDeviceTypes(2) AudioOutputDeviceTypes._2F2R = AudioOutputDeviceTypes(4) AudioOutputDeviceTypes._3F2R = AudioOutputDeviceTypes(5) AudioOutputDeviceTypes._5_1 = AudioOutputDeviceTypes(6) AudioOutputDeviceTypes._6_1 = AudioOutputDeviceTypes(7) AudioOutputDeviceTypes._7_1 = AudioOutputDeviceTypes(8) class AudioOutputChannel(_Enum): '''Audio channels. ''' _enum_names_ = { -1: 'Error', 1: 'Stereo', 2: 'RStereo', 3: 'Left', 4: 'Right', 5: 'Dolbys', } AudioOutputChannel.Dolbys = AudioOutputChannel(5) AudioOutputChannel.Error = AudioOutputChannel(-1) AudioOutputChannel.Left = AudioOutputChannel(3) AudioOutputChannel.RStereo = AudioOutputChannel(2) AudioOutputChannel.Right = AudioOutputChannel(4) AudioOutputChannel.Stereo = AudioOutputChannel(1) class Callback(ctypes.c_void_p): """Callback function notification \param p_event the event triggering the callback """ pass class LogCb(ctypes.c_void_p): """Callback prototype for LibVLC log message handler. \param data data pointer as given to L{libvlc_log_set}() \param level message level (@ref enum libvlc_log_level) \param ctx message context (meta-information about the message) \param fmt printf() format string (as defined by ISO C11) \param args variable argument list for the format \note Log message handlers <b>must</b> be thread-safe. \warning The message context pointer, the format string parameters and the variable arguments are only valid until the callback returns. """ pass class MediaOpenCb(ctypes.c_void_p): """Callback prototype to open a custom bitstream input media. The same media item can be opened multiple times. Each time, this callback is invoked. It should allocate and initialize any instance-specific resources, then store them in *datap. The instance resources can be freed in the @ref libvlc_close_cb callback. \param opaque private pointer as passed to L{libvlc_media_new_callbacks}() \param datap storage space for a private data pointer [OUT] \param sizep byte length of the bitstream or 0 if unknown [OUT] \note For convenience, *datap is initially NULL and *sizep is initially 0. \return 0 on success, non-zero on error. In case of failure, the other callbacks will not be invoked and any value stored in *datap and *sizep is discarded. """ pass class MediaReadCb(ctypes.c_void_p): """Callback prototype to read data from a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback \param buf start address of the buffer to read data into \param len bytes length of the buffer \return strictly positive number of bytes read, 0 on end-of-stream, or -1 on non-recoverable error \note If no data is immediately available, then the callback should sleep. \warning The application is responsible for avoiding deadlock situations. In particular, the callback should return an error if playback is stopped; if it does not return, then L{libvlc_media_player_stop}() will never return. """ pass class MediaSeekCb(ctypes.c_void_p): """Callback prototype to seek a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback \param offset absolute byte offset to seek to \return 0 on success, -1 on error. 
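
    Example (a minimal sketch; 'stream' is a hypothetical seekable file
    object, and the CFUNCTYPE decorator lives in CallbackDecorators,
    aliased below as cb):

        @cb.MediaSeekCb
        def media_seek_cb(opaque, offset):
            stream.seek(offset)
            return 0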
""" pass class MediaCloseCb(ctypes.c_void_p): """Callback prototype to close a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback """ pass class VideoLockCb(ctypes.c_void_p): """Callback prototype to allocate and lock a picture buffer. Whenever a new video frame needs to be decoded, the lock callback is invoked. Depending on the video chroma, one or three pixel planes of adequate dimensions must be returned via the second parameter. Those planes must be aligned on 32-bytes boundaries. \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN] \param planes start address of the pixel planes (LibVLC allocates the array of void pointers, this callback must initialize the array) [OUT] \return a private pointer for the display and unlock callbacks to identify the picture buffers """ pass class VideoUnlockCb(ctypes.c_void_p): """Callback prototype to unlock a picture buffer. When the video frame decoding is complete, the unlock callback is invoked. This callback might not be needed at all. It is only an indication that the application can now read the pixel values if it needs to. \warning A picture buffer is unlocked after the picture is decoded, but before the picture is displayed. \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN] \param picture private pointer returned from the @ref libvlc_video_lock_cb callback [IN] \param planes pixel planes as defined by the @ref libvlc_video_lock_cb callback (this parameter is only for convenience) [IN] """ pass class VideoDisplayCb(ctypes.c_void_p): """Callback prototype to display a picture. When the video frame needs to be shown, as determined by the media playback clock, the display callback is invoked. \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN] \param picture private pointer returned from the @ref libvlc_video_lock_cb callback [IN] """ pass class VideoFormatCb(ctypes.c_void_p): """Callback prototype to configure picture buffers format. This callback gets the format of the video as output by the video decoder and the chain of video filters (if any). It can opt to change any parameter as it needs. In that case, LibVLC will attempt to convert the video format (rescaling and chroma conversion) but these operations can be CPU intensive. \param opaque pointer to the private pointer passed to L{libvlc_video_set_callbacks}() [IN/OUT] \param chroma pointer to the 4 bytes video format identifier [IN/OUT] \param width pointer to the pixel width [IN/OUT] \param height pointer to the pixel height [IN/OUT] \param pitches table of scanline pitches in bytes for each pixel plane (the table is allocated by LibVLC) [OUT] \param lines table of scanlines count for each plane [OUT] \return the number of picture buffers allocated, 0 indicates failure \note For each pixels plane, the scanline pitch must be bigger than or equal to the number of bytes per pixel multiplied by the pixel width. Similarly, the number of scanlines must be bigger than of equal to the pixel height. Furthermore, we recommend that pitches and lines be multiple of 32 to not break assumptions that might be held by optimized code in the video decoders, video filters and/or video converters. """ pass class VideoCleanupCb(ctypes.c_void_p): """Callback prototype to configure picture buffers format. 
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() (and possibly modified by @ref libvlc_video_format_cb) [IN] """ pass class AudioPlayCb(ctypes.c_void_p): """Callback prototype for audio playback. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param samples pointer to the first audio sample to play back [IN] \param count number of audio samples to play back \param pts expected play time stamp (see libvlc_delay()) """ pass class AudioPauseCb(ctypes.c_void_p): """Callback prototype for audio pause. \note The pause callback is never called if the audio is already paused. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param pts time stamp of the pause request (should be elapsed already) """ pass class AudioResumeCb(ctypes.c_void_p): """Callback prototype for audio resumption (i.e. restart from pause). \note The resume callback is never called if the audio is not paused. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param pts time stamp of the resumption request (should be elapsed already) """ pass class AudioFlushCb(ctypes.c_void_p): """Callback prototype for audio buffer flush (i.e. discard all pending buffers and stop playback as soon as possible). \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] """ pass class AudioDrainCb(ctypes.c_void_p): """Callback prototype for audio buffer drain (i.e. wait for pending buffers to be played). \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] """ pass class AudioSetVolumeCb(ctypes.c_void_p): """Callback prototype for audio volume change. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param volume software volume (1. = nominal, 0. = mute) \param mute muted flag """ pass class AudioSetupCb(ctypes.c_void_p): """Callback prototype to setup the audio playback. This is called when the media player needs to create a new audio output. \param opaque pointer to the data pointer passed to L{libvlc_audio_set_callbacks}() [IN/OUT] \param format 4 bytes sample format [IN/OUT] \param rate sample rate [IN/OUT] \param channels channels count [IN/OUT] \return 0 on success, anything else to skip audio playback """ pass class AudioCleanupCb(ctypes.c_void_p): """Callback prototype for audio playback cleanup. This is called when the media player no longer needs an audio output. \param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] """ pass class CallbackDecorators(object): "Class holding various method decorators for callback functions." Callback = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p) Callback.__doc__ = '''Callback function notification \param p_event the event triggering the callback ''' LogCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, Log_ptr, ctypes.c_char_p, ctypes.c_void_p) LogCb.__doc__ = '''Callback prototype for LibVLC log message handler. \param data data pointer as given to L{libvlc_log_set}() \param level message level (@ref enum libvlc_log_level) \param ctx message context (meta-information about the message) \param fmt printf() format string (as defined by ISO C11) \param args variable argument list for the format \note Log message handlers <b>must</b> be thread-safe. \warning The message context pointer, the format string parameters and the variable arguments are only valid until the callback returns. 
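
    Example (a minimal sketch; 'inst' is an assumed L{Instance}, and the
    handler body is left empty because expanding fmt/args requires a
    C-level vsnprintf):

        @cb.LogCb
        def log_handler(data, level, ctx, fmt, args):
            pass

        inst.log_set(log_handler, None)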
''' MediaOpenCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ListPOINTER(ctypes.c_void_p), ctypes.POINTER(ctypes.c_uint64)) MediaOpenCb.__doc__ = '''Callback prototype to open a custom bitstream input media. The same media item can be opened multiple times. Each time, this callback is invoked. It should allocate and initialize any instance-specific resources, then store them in *datap. The instance resources can be freed in the @ref libvlc_close_cb callback. \param opaque private pointer as passed to L{libvlc_media_new_callbacks}() \param datap storage space for a private data pointer [OUT] \param sizep byte length of the bitstream or 0 if unknown [OUT] \note For convenience, *datap is initially NULL and *sizep is initially 0. \return 0 on success, non-zero on error. In case of failure, the other callbacks will not be invoked and any value stored in *datap and *sizep is discarded. ''' MediaReadCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_ssize_t), ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t) MediaReadCb.__doc__ = '''Callback prototype to read data from a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback \param buf start address of the buffer to read data into \param len bytes length of the buffer \return strictly positive number of bytes read, 0 on end-of-stream, or -1 on non-recoverable error \note If no data is immediately available, then the callback should sleep. \warning The application is responsible for avoiding deadlock situations. In particular, the callback should return an error if playback is stopped; if it does not return, then L{libvlc_media_player_stop}() will never return. ''' MediaSeekCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ctypes.c_uint64) MediaSeekCb.__doc__ = '''Callback prototype to seek a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback \param offset absolute byte offset to seek to \return 0 on success, -1 on error. ''' MediaCloseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p) MediaCloseCb.__doc__ = '''Callback prototype to close a custom bitstream input media. \param opaque private pointer as set by the @ref libvlc_media_open_cb callback ''' VideoLockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p)) VideoLockCb.__doc__ = '''Callback prototype to allocate and lock a picture buffer. Whenever a new video frame needs to be decoded, the lock callback is invoked. Depending on the video chroma, one or three pixel planes of adequate dimensions must be returned via the second parameter. Those planes must be aligned on 32-bytes boundaries. \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN] \param planes start address of the pixel planes (LibVLC allocates the array of void pointers, this callback must initialize the array) [OUT] \return a private pointer for the display and unlock callbacks to identify the picture buffers ''' VideoUnlockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p)) VideoUnlockCb.__doc__ = '''Callback prototype to unlock a picture buffer. When the video frame decoding is complete, the unlock callback is invoked. This callback might not be needed at all. It is only an indication that the application can now read the pixel values if it needs to. \warning A picture buffer is unlocked after the picture is decoded, but before the picture is displayed. 
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb
    callback [IN]
    \param planes pixel planes as defined by the @ref libvlc_video_lock_cb
    callback (this parameter is only for convenience) [IN]
'''
    VideoDisplayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    VideoDisplayCb.__doc__ = '''Callback prototype to display a picture.
    When the video frame needs to be shown, as determined by the media playback
    clock, the display callback is invoked.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb
    callback [IN]
'''
    VideoFormatCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_uint), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    VideoFormatCb.__doc__ = '''Callback prototype to configure picture buffers format.
    This callback gets the format of the video as output by the video decoder
    and the chain of video filters (if any). It can opt to change any parameter
    as it needs. In that case, LibVLC will attempt to convert the video format
    (rescaling and chroma conversion) but these operations can be CPU intensive.
    \param opaque pointer to the private pointer passed to
    L{libvlc_video_set_callbacks}() [IN/OUT]
    \param chroma pointer to the 4 bytes video format identifier [IN/OUT]
    \param width pointer to the pixel width [IN/OUT]
    \param height pointer to the pixel height [IN/OUT]
    \param pitches table of scanline pitches in bytes for each pixel plane
    (the table is allocated by LibVLC) [OUT]
    \param lines table of scanlines count for each plane [OUT]
    \return the number of picture buffers allocated, 0 indicates failure
    \note For each pixels plane, the scanline pitch must be bigger than
    or equal to the number of bytes per pixel multiplied by the pixel width.
    Similarly, the number of scanlines must be bigger than or equal to
    the pixel height.
    Furthermore, we recommend that pitches and lines be multiple of 32
    to not break assumptions that might be held by optimized code
    in the video decoders, video filters and/or video converters.
'''
    VideoCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    VideoCleanupCb.__doc__ = '''Callback prototype to clean up resources allocated by the
    picture buffers format callback.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}()
    (and possibly modified by @ref libvlc_video_format_cb) [IN]
'''
    AudioPlayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int64)
    AudioPlayCb.__doc__ = '''Callback prototype for audio playback.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param samples pointer to the first audio sample to play back [IN]
    \param count number of audio samples to play back
    \param pts expected play time stamp (see libvlc_delay())
'''
    AudioPauseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioPauseCb.__doc__ = '''Callback prototype for audio pause.
    \note The pause callback is never called if the audio is already paused.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param pts time stamp of the pause request (should be elapsed already)
'''
    AudioResumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioResumeCb.__doc__ = '''Callback prototype for audio resumption (i.e. restart from pause).
\note The resume callback is never called if the audio is not paused. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param pts time stamp of the resumption request (should be elapsed already) ''' AudioFlushCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64) AudioFlushCb.__doc__ = '''Callback prototype for audio buffer flush (i.e. discard all pending buffers and stop playback as soon as possible). \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] ''' AudioDrainCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p) AudioDrainCb.__doc__ = '''Callback prototype for audio buffer drain (i.e. wait for pending buffers to be played). \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] ''' AudioSetVolumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_float, ctypes.c_bool) AudioSetVolumeCb.__doc__ = '''Callback prototype for audio volume change. \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] \param volume software volume (1. = nominal, 0. = mute) \param mute muted flag ''' AudioSetupCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)) AudioSetupCb.__doc__ = '''Callback prototype to setup the audio playback. This is called when the media player needs to create a new audio output. \param opaque pointer to the data pointer passed to L{libvlc_audio_set_callbacks}() [IN/OUT] \param format 4 bytes sample format [IN/OUT] \param rate sample rate [IN/OUT] \param channels channels count [IN/OUT] \return 0 on success, anything else to skip audio playback ''' AudioCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p) AudioCleanupCb.__doc__ = '''Callback prototype for audio playback cleanup. This is called when the media player no longer needs an audio output. 
\param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN] ''' cb = CallbackDecorators # End of generated enum types # # From libvlc_structures.h class AudioOutput(_Cstruct): def __str__(self): return '%s(%s:%s)' % (self.__class__.__name__, self.name, self.description) AudioOutput._fields_ = [ # recursive struct ('name', ctypes.c_char_p), ('description', ctypes.c_char_p), ('next', ctypes.POINTER(AudioOutput)), ] class LogMessage(_Cstruct): _fields_ = [ ('size', ctypes.c_uint ), ('severity', ctypes.c_int ), ('type', ctypes.c_char_p), ('name', ctypes.c_char_p), ('header', ctypes.c_char_p), ('message', ctypes.c_char_p), ] def __init__(self): super(LogMessage, self).__init__() self.size = ctypes.sizeof(self) def __str__(self): return '%s(%d:%s): %s' % (self.__class__.__name__, self.severity, self.type, self.message) class MediaEvent(_Cstruct): _fields_ = [ ('media_name', ctypes.c_char_p), ('instance_name', ctypes.c_char_p), ] class MediaStats(_Cstruct): _fields_ = [ ('read_bytes', ctypes.c_int ), ('input_bitrate', ctypes.c_float), ('demux_read_bytes', ctypes.c_int ), ('demux_bitrate', ctypes.c_float), ('demux_corrupted', ctypes.c_int ), ('demux_discontinuity', ctypes.c_int ), ('decoded_video', ctypes.c_int ), ('decoded_audio', ctypes.c_int ), ('displayed_pictures', ctypes.c_int ), ('lost_pictures', ctypes.c_int ), ('played_abuffers', ctypes.c_int ), ('lost_abuffers', ctypes.c_int ), ('sent_packets', ctypes.c_int ), ('sent_bytes', ctypes.c_int ), ('send_bitrate', ctypes.c_float), ] class MediaTrackInfo(_Cstruct): _fields_ = [ ('codec', ctypes.c_uint32), ('id', ctypes.c_int ), ('type', TrackType ), ('profile', ctypes.c_int ), ('level', ctypes.c_int ), ('channels_or_height', ctypes.c_uint ), ('rate_or_width', ctypes.c_uint ), ] class AudioTrack(_Cstruct): _fields_ = [ ('channels', ctypes.c_uint), ('rate', ctypes.c_uint), ] class VideoTrack(_Cstruct): _fields_ = [ ('height', ctypes.c_uint), ('width', ctypes.c_uint), ('sar_num', ctypes.c_uint), ('sar_den', ctypes.c_uint), ('frame_rate_num', ctypes.c_uint), ('frame_rate_den', ctypes.c_uint), ] class SubtitleTrack(_Cstruct): _fields_ = [ ('encoding', ctypes.c_char_p), ] class MediaTrackTracks(ctypes.Union): _fields_ = [ ('audio', ctypes.POINTER(AudioTrack)), ('video', ctypes.POINTER(VideoTrack)), ('subtitle', ctypes.POINTER(SubtitleTrack)), ] class MediaTrack(_Cstruct): _anonymous_ = ("u",) _fields_ = [ ('codec', ctypes.c_uint32), ('original_fourcc', ctypes.c_uint32), ('id', ctypes.c_int ), ('type', TrackType ), ('profile', ctypes.c_int ), ('level', ctypes.c_int ), ('u', MediaTrackTracks), ('bitrate', ctypes.c_uint), ('language', ctypes.c_char_p), ('description', ctypes.c_char_p), ] class PlaylistItem(_Cstruct): _fields_ = [ ('id', ctypes.c_int ), ('uri', ctypes.c_char_p), ('name', ctypes.c_char_p), ] def __str__(self): return '%s #%d %s (uri %s)' % (self.__class__.__name__, self.id, self.name, self.uri) class Position(object): """Enum-like, immutable window position constants. See e.g. VideoMarqueeOption.Position. 
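
    Note: this plain-constants class shadows the generated Position enum
    defined earlier in this module; the values follow the marq/logo bitwise
    position convention. Example (illustrative; assumes the
    MediaPlayer.video_set_marquee_int wrapper defined later in this file):

        player.video_set_marquee_int(VideoMarqueeOption.Position,
                                     Position.TopRight)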
""" Center = 0 Left = 1 CenterLeft = 1 Right = 2 CenterRight = 2 Top = 4 TopCenter = 4 TopLeft = 5 TopRight = 6 Bottom = 8 BottomCenter = 8 BottomLeft = 9 BottomRight = 10 def __init__(self, *unused): raise TypeError('constants only') def __setattr__(self, *unused): #PYCHOK expected raise TypeError('immutable constants') class Rectangle(_Cstruct): _fields_ = [ ('top', ctypes.c_int), ('left', ctypes.c_int), ('bottom', ctypes.c_int), ('right', ctypes.c_int), ] class TrackDescription(_Cstruct): def __str__(self): return '%s(%d:%s)' % (self.__class__.__name__, self.id, self.name) TrackDescription._fields_ = [ # recursive struct ('id', ctypes.c_int ), ('name', ctypes.c_char_p), ('next', ctypes.POINTER(TrackDescription)), ] def track_description_list(head): """Convert a TrackDescription linked list to a Python list (and release the former). """ r = [] if head: item = head while item: item = item.contents r.append((item.id, item.name)) item = item.next try: libvlc_track_description_release(head) except NameError: libvlc_track_description_list_release(head) return r class EventUnion(ctypes.Union): _fields_ = [ ('meta_type', ctypes.c_uint ), ('new_child', ctypes.c_uint ), ('new_duration', ctypes.c_longlong), ('new_status', ctypes.c_int ), ('media', ctypes.c_void_p ), ('new_state', ctypes.c_uint ), # Media instance ('new_position', ctypes.c_float ), ('new_time', ctypes.c_longlong), ('new_title', ctypes.c_int ), ('new_seekable', ctypes.c_longlong), ('new_pausable', ctypes.c_longlong), # FIXME: Skipped MediaList and MediaListView... ('filename', ctypes.c_char_p ), ('new_length', ctypes.c_longlong), ('media_event', MediaEvent ), ] class Event(_Cstruct): _fields_ = [ ('type', EventType ), ('object', ctypes.c_void_p), ('u', EventUnion ), ] class ModuleDescription(_Cstruct): def __str__(self): return '%s %s (%s)' % (self.__class__.__name__, self.shortname, self.name) ModuleDescription._fields_ = [ # recursive struct ('name', ctypes.c_char_p), ('shortname', ctypes.c_char_p), ('longname', ctypes.c_char_p), ('help', ctypes.c_char_p), ('next', ctypes.POINTER(ModuleDescription)), ] def module_description_list(head): """Convert a ModuleDescription linked list to a Python list (and release the former). """ r = [] if head: item = head while item: item = item.contents r.append((item.name, item.shortname, item.longname, item.help)) item = item.next libvlc_module_description_list_release(head) return r class AudioOutputDevice(_Cstruct): def __str__(self): return '%s(%d:%s)' % (self.__class__.__name__, self.id, self.name) AudioOutputDevice._fields_ = [ # recursive struct ('next', ctypes.POINTER(AudioOutputDevice)), ('device', ctypes.c_char_p ), ('description', ctypes.c_char_p), ] # End of header.py # class EventManager(_Ctype): '''Create an event manager with callback handler. This class interposes the registration and handling of event notifications in order to (a) remove the need for decorating each callback functions with the decorator '@callbackmethod', (b) allow any number of positional and/or keyword arguments to the callback (in addition to the Event instance) and (c) to preserve the Python objects such that the callback and argument objects remain alive (i.e. are not garbage collected) until B{after} the notification has been unregistered. @note: Only a single notification can be registered for each event type in an EventManager instance. 
''' _callback_handler = None _callbacks = {} def __new__(cls, ptr=_internal_guard): if ptr == _internal_guard: raise VLCException("(INTERNAL) ctypes class.\nYou should get a reference to EventManager through the MediaPlayer.event_manager() method.") return _Constructor(cls, ptr) def event_attach(self, eventtype, callback, *args, **kwds): """Register an event notification. @param eventtype: the desired event type to be notified about. @param callback: the function to call when the event occurs. @param args: optional positional arguments for the callback. @param kwds: optional keyword arguments for the callback. @return: 0 on success, ENOMEM on error. @note: The callback function must have at least one argument, an Event instance. Any other, optional positional and keyword arguments are in B{addition} to the first one. """ if not isinstance(eventtype, EventType): raise VLCException("%s required: %r" % ('EventType', eventtype)) if not hasattr(callback, '__call__'): # callable() raise VLCException("%s required: %r" % ('callable', callback)) # check that the callback expects arguments if not any(getargspec(callback)[:2]): # list(...) raise VLCException("%s required: %r" % ('argument', callback)) if self._callback_handler is None: _called_from_ctypes = ctypes.CFUNCTYPE(None, ctypes.POINTER(Event), ctypes.c_void_p) @_called_from_ctypes def _callback_handler(event, k): """(INTERNAL) handle callback call from ctypes. @note: We cannot simply make this an EventManager method since ctypes does not prepend self as the first parameter, hence this closure. """ try: # retrieve Python callback and arguments call, args, kwds = self._callbacks[k] # deref event.contents to simplify callback code call(event.contents, *args, **kwds) except KeyError: # detached? pass self._callback_handler = _callback_handler self._callbacks = {} k = eventtype.value r = libvlc_event_attach(self, k, self._callback_handler, k) if not r: self._callbacks[k] = (callback, args, kwds) return r def event_detach(self, eventtype): """Unregister an event notification. @param eventtype: the event type notification to be removed. """ if not isinstance(eventtype, EventType): raise VLCException("%s required: %r" % ('EventType', eventtype)) k = eventtype.value if k in self._callbacks: del self._callbacks[k] # remove, regardless of libvlc return value libvlc_event_detach(self, k, self._callback_handler, k) class Instance(_Ctype): '''Create a new Instance instance. It may take as parameter either: - a string - a list of strings as first parameters - the parameters given as the constructor parameters (must be strings) ''' def __new__(cls, *args): if len(args) == 1: # Only 1 arg. It is either a C pointer, or an arg string, # or a tuple. i = args[0] if isinstance(i, _Ints): return _Constructor(cls, i) elif isinstance(i, basestring): args = i.strip().split() elif isinstance(i, _Seqs): args = i else: raise VLCException('Instance %r' % (args,)) if not args and plugin_path is not None: # no parameters passed, for win32 and MacOS, # specify the plugin_path if detected earlier args = ['vlc', '--plugin-path=' + plugin_path] if PYTHON3: args = [ str_to_bytes(a) for a in args ] return libvlc_new(len(args), args) def media_player_new(self, uri=None): """Create a new MediaPlayer instance. @param uri: an optional URI to play in the player. """ p = libvlc_media_player_new(self) if uri: p.set_media(self.media_new(uri)) p._instance = self return p def media_list_player_new(self): """Create a new MediaListPlayer instance. 
""" p = libvlc_media_list_player_new(self) p._instance = self return p def media_new(self, mrl, *options): """Create a new Media instance. If mrl contains a colon (:) preceded by more than 1 letter, it will be treated as a URL. Else, it will be considered as a local path. If you need more control, directly use media_new_location/media_new_path methods. Options can be specified as supplementary string parameters, but note that many options cannot be set at the media level, and rather at the Instance level. For instance, the marquee filter must be specified when creating the vlc.Instance or vlc.MediaPlayer. Alternatively, options can be added to the media using the Media.add_options method (with the same limitation). @param options: optional media option=value strings """ if ':' in mrl and mrl.index(':') > 1: # Assume it is a URL m = libvlc_media_new_location(self, str_to_bytes(mrl)) else: # Else it should be a local path. m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl))) for o in options: libvlc_media_add_option(m, str_to_bytes(o)) m._instance = self return m def media_list_new(self, mrls=None): """Create a new MediaList instance. @param mrls: optional list of MRL strings """ l = libvlc_media_list_new(self) # We should take the lock, but since we did not leak the # reference, nobody else can access it. if mrls: for m in mrls: l.add_media(m) l._instance = self return l def audio_output_enumerate_devices(self): """Enumerate the defined audio output devices. @return: list of dicts {name:, description:, devices:} """ r = [] head = libvlc_audio_output_list_get(self) if head: i = head while i: i = i.contents d = [{'id': libvlc_audio_output_device_id (self, i.name, d), 'longname': libvlc_audio_output_device_longname(self, i.name, d)} for d in range(libvlc_audio_output_device_count (self, i.name))] r.append({'name': i.name, 'description': i.description, 'devices': d}) i = i.next libvlc_audio_output_list_release(head) return r def audio_filter_list_get(self): """Returns a list of available audio filters. """ return module_description_list(libvlc_audio_filter_list_get(self)) def video_filter_list_get(self): """Returns a list of available video filters. """ return module_description_list(libvlc_video_filter_list_get(self)) def release(self): '''Decrement the reference count of a libvlc instance, and destroy it if it reaches zero. ''' return libvlc_release(self) def retain(self): '''Increments the reference count of a libvlc instance. The initial reference count is 1 after L{new}() returns. ''' return libvlc_retain(self) def add_intf(self, name): '''Try to start a user interface for the libvlc instance. @param name: interface name, or NULL for default. @return: 0 on success, -1 on error. ''' return libvlc_add_intf(self, str_to_bytes(name)) def set_user_agent(self, name, http): '''Sets the application name. LibVLC passes this as the user agent string when a protocol requires it. @param name: human-readable application name, e.g. "FooBar player 1.2.3". @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0". @version: LibVLC 1.1.1 or later. ''' return libvlc_set_user_agent(self, str_to_bytes(name), str_to_bytes(http)) def set_app_id(self, id, version, icon): '''Sets some meta-information about the application. See also L{set_user_agent}(). @param id: Java-style application identifier, e.g. "com.acme.foobar". @param version: application version numbers, e.g. "1.2.3". @param icon: application icon name, e.g. "foobar". @version: LibVLC 2.1.0 or later. 
''' return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon)) def log_unset(self): '''Unsets the logging callback for a LibVLC instance. This is rarely needed: the callback is implicitly unset when the instance is destroyed. This function will wait for any pending callbacks invocation to complete (causing a deadlock if called from within the callback). @version: LibVLC 2.1.0 or later. ''' return libvlc_log_unset(self) def log_set(self, data, p_instance): '''Sets the logging callback for a LibVLC instance. This function is thread-safe: it will wait for any pending callbacks invocation to complete. @param data: opaque data pointer for the callback function @note Some log messages (especially debug) are emitted by LibVLC while is being initialized. These messages cannot be captured with this interface. @warning A deadlock may occur if this function is called from the callback. @param p_instance: libvlc instance. @version: LibVLC 2.1.0 or later. ''' return libvlc_log_set(self, data, p_instance) def log_set_file(self, stream): '''Sets up logging to a file. @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{log_unset}()). @version: LibVLC 2.1.0 or later. ''' return libvlc_log_set_file(self, stream) def media_new_location(self, psz_mrl): '''Create a media with a certain given media resource location, for instance a valid URL. @note: To refer to a local file with this function, the file://... URI syntax B{must} be used (see IETF RFC3986). We recommend using L{media_new_path}() instead when dealing with local files. See L{media_release}. @param psz_mrl: the media location. @return: the newly created media or NULL on error. ''' return libvlc_media_new_location(self, str_to_bytes(psz_mrl)) def media_new_path(self, path): '''Create a media for a certain file path. See L{media_release}. @param path: local filesystem path. @return: the newly created media or NULL on error. ''' return libvlc_media_new_path(self, str_to_bytes(path)) def media_new_fd(self, fd): '''Create a media for an already open file descriptor. The file descriptor shall be open for reading (or reading and writing). Regular file descriptors, pipe read descriptors and character device descriptors (including TTYs) are supported on all platforms. Block device descriptors are supported where available. Directory descriptors are supported on systems that provide fdopendir(). Sockets are supported on all platforms where they are file descriptors, i.e. all except Windows. @note: This library will B{not} automatically close the file descriptor under any circumstance. Nevertheless, a file descriptor can usually only be rendered once in a media player. To render it a second time, the file descriptor should probably be rewound to the beginning with lseek(). See L{media_release}. @param fd: open file descriptor. @return: the newly created media or NULL on error. @version: LibVLC 1.1.5 and later. ''' return libvlc_media_new_fd(self, fd) def media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque): '''Create a media with custom callbacks to read the data from. @param open_cb: callback to open the custom bitstream input media. @param read_cb: callback to read data (must not be NULL). @param seek_cb: callback to seek, or NULL if seeking is not supported. @param close_cb: callback to close the media, or NULL if unnecessary. @param opaque: data pointer for the open callback. 
        @return: the newly created media or NULL on error
        @note If open_cb is NULL, the opaque pointer will be passed to
        read_cb, seek_cb and close_cb, and the stream size will be treated as
        unknown.
        @note The callbacks may be called asynchronously (from another
        thread). A single stream instance need not be reentrant. However the
        open_cb needs to be reentrant if the media is used by multiple player
        instances.
        @warning The callbacks may be used until all or any player instances
        that were supplied the media item are stopped.
        See L{media_release}.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque)

    def media_new_as_node(self, psz_name):
        '''Create a media as an empty node with a given name.
        See L{media_release}.
        @param psz_name: the name of the node.
        @return: the new empty media or NULL on error.
        '''
        return libvlc_media_new_as_node(self, str_to_bytes(psz_name))

    def media_discoverer_new(self, psz_name):
        '''Create a media discoverer object by name.
        After this object is created, you should attach to events in order to be
        notified of the discoverer state.
        You should also attach to media_list events in order to be notified of
        new items discovered.
        You need to call L{media_discoverer_start}() in order to start the
        discovery.
        See L{media_discoverer_media_list}
        See L{media_discoverer_event_manager}
        See L{media_discoverer_start}.
        @param psz_name: service name.
        @return: media discover object or NULL in case of error.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_new(self, str_to_bytes(psz_name))

    def media_library_new(self):
        '''Create a new Media Library object.
        @return: a new object or NULL on error.
        '''
        return libvlc_media_library_new(self)

    def audio_output_list_get(self):
        '''Gets the list of available audio output modules.
        @return: list of available audio outputs. It must be freed with
        L{audio_output_list_release}(). In case of error, NULL is returned.
        '''
        return libvlc_audio_output_list_get(self)

    def audio_output_device_list_get(self, aout):
        '''Gets a list of audio output devices for a given audio output module.
        See L{audio_output_device_set}().
        @note: Not all audio outputs support this. In particular, an empty (NULL)
        list of devices does B{not} imply that the specified audio output does
        not work.
        @note: The list might not be exhaustive.
        @warning: Some audio output devices in the list might not actually work
        in some circumstances. By default, it is recommended to not specify any
        explicit audio device.
        @param aout: audio output name (as returned by L{audio_output_list_get}()).
        @return: A NULL-terminated linked list of potential audio output devices.
        It must be freed with L{audio_output_device_list_release}().
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_audio_output_device_list_get(self, str_to_bytes(aout))

    def vlm_release(self):
        '''Release the vlm instance related to the given L{Instance}.
        '''
        return libvlc_vlm_release(self)

    def vlm_add_broadcast(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
        '''Add a broadcast, with one input.
        @param psz_name: the name of the new broadcast.
        @param psz_input: the input MRL.
        @param psz_output: the output MRL (the parameter to the "sout" variable).
        @param i_options: number of additional options.
        @param ppsz_options: additional options.
        @param b_enabled: boolean for enabling the new broadcast.
        @param b_loop: Should this broadcast be played in loop ?
        @return: 0 on success, -1 on error.
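
        Example (illustrative; the MRL and sout chain below are hypothetical):

            inst.vlm_add_broadcast('ch1', 'file:///tmp/in.mp4',
                                   '#standard{access=http,mux=ts,dst=:8080}',
                                   0, None, True, False)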
''' return libvlc_vlm_add_broadcast(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop) def vlm_add_vod(self, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux): '''Add a vod, with one input. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error. ''' return libvlc_vlm_add_vod(self, str_to_bytes(psz_name), str_to_bytes(psz_input), i_options, ppsz_options, b_enabled, str_to_bytes(psz_mux)) def vlm_del_media(self, psz_name): '''Delete a media (VOD or broadcast). @param psz_name: the media to delete. @return: 0 on success, -1 on error. ''' return libvlc_vlm_del_media(self, str_to_bytes(psz_name)) def vlm_set_enabled(self, psz_name, b_enabled): '''Enable or disable a media (VOD or broadcast). @param psz_name: the media to work on. @param b_enabled: the new status. @return: 0 on success, -1 on error. ''' return libvlc_vlm_set_enabled(self, str_to_bytes(psz_name), b_enabled) def vlm_set_output(self, psz_name, psz_output): '''Set the output for a media. @param psz_name: the media to work on. @param psz_output: the output MRL (the parameter to the "sout" variable). @return: 0 on success, -1 on error. ''' return libvlc_vlm_set_output(self, str_to_bytes(psz_name), str_to_bytes(psz_output)) def vlm_set_input(self, psz_name, psz_input): '''Set a media's input MRL. This will delete all existing inputs and add the specified one. @param psz_name: the media to work on. @param psz_input: the input MRL. @return: 0 on success, -1 on error. ''' return libvlc_vlm_set_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input)) def vlm_add_input(self, psz_name, psz_input): '''Add a media's input MRL. This will add the specified one. @param psz_name: the media to work on. @param psz_input: the input MRL. @return: 0 on success, -1 on error. ''' return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input)) def vlm_set_loop(self, psz_name, b_loop): '''Set a media's loop status. @param psz_name: the media to work on. @param b_loop: the new status. @return: 0 on success, -1 on error. ''' return libvlc_vlm_set_loop(self, str_to_bytes(psz_name), b_loop) def vlm_set_mux(self, psz_name, psz_mux): '''Set a media's vod muxer. @param psz_name: the media to work on. @param psz_mux: the new muxer. @return: 0 on success, -1 on error. ''' return libvlc_vlm_set_mux(self, str_to_bytes(psz_name), str_to_bytes(psz_mux)) def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop): '''Edit the parameters of a media. This will delete all existing inputs and add the specified one. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error. ''' return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop) def vlm_play_media(self, psz_name): '''Play the named broadcast. @param psz_name: the name of the broadcast. 
@return: 0 on success, -1 on error. ''' return libvlc_vlm_play_media(self, str_to_bytes(psz_name)) def vlm_stop_media(self, psz_name): '''Stop the named broadcast. @param psz_name: the name of the broadcast. @return: 0 on success, -1 on error. ''' return libvlc_vlm_stop_media(self, str_to_bytes(psz_name)) def vlm_pause_media(self, psz_name): '''Pause the named broadcast. @param psz_name: the name of the broadcast. @return: 0 on success, -1 on error. ''' return libvlc_vlm_pause_media(self, str_to_bytes(psz_name)) def vlm_seek_media(self, psz_name, f_percentage): '''Seek in the named broadcast. @param psz_name: the name of the broadcast. @param f_percentage: the percentage to seek to. @return: 0 on success, -1 on error. ''' return libvlc_vlm_seek_media(self, str_to_bytes(psz_name), f_percentage) def vlm_show_media(self, psz_name): '''Return information about the named media as a JSON string representation. This function is mainly intended for debugging use, if you want programmatic access to the state of a vlm_media_instance_t, please use the corresponding libvlc_vlm_get_media_instance_xxx -functions. Currently there are no such functions available for vlm_media_t though. @param psz_name: the name of the media, if the name is an empty string, all media is described. @return: string with information about named media, or NULL on error. ''' return libvlc_vlm_show_media(self, str_to_bytes(psz_name)) def vlm_get_media_instance_position(self, psz_name, i_instance): '''Get vlm_media instance position by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: position as float or -1. on error. ''' return libvlc_vlm_get_media_instance_position(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_time(self, psz_name, i_instance): '''Get vlm_media instance time by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: time as integer or -1 on error. ''' return libvlc_vlm_get_media_instance_time(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_length(self, psz_name, i_instance): '''Get vlm_media instance length by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: length of media item or -1 on error. ''' return libvlc_vlm_get_media_instance_length(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_rate(self, psz_name, i_instance): '''Get vlm_media instance playback rate by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: playback rate or -1 on error. ''' return libvlc_vlm_get_media_instance_rate(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_title(self, psz_name, i_instance): '''Get vlm_media instance title number by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: title as number or -1 on error. @bug: will always return 0. ''' return libvlc_vlm_get_media_instance_title(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_chapter(self, psz_name, i_instance): '''Get vlm_media instance chapter number by name or instance id. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: chapter as number or -1 on error. @bug: will always return 0. 
''' return libvlc_vlm_get_media_instance_chapter(self, str_to_bytes(psz_name), i_instance) def vlm_get_media_instance_seekable(self, psz_name, i_instance): '''Is libvlc instance seekable ? @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: 1 if seekable, 0 if not, -1 if media does not exist. @bug: will always return 0. ''' return libvlc_vlm_get_media_instance_seekable(self, str_to_bytes(psz_name), i_instance) @memoize_parameterless def vlm_get_event_manager(self): '''Get libvlc_event_manager from a vlm media. The p_event_manager is immutable, so you don't have to hold the lock. @return: libvlc_event_manager. ''' return libvlc_vlm_get_event_manager(self) class Media(_Ctype): '''Create a new Media instance. Usage: Media(MRL, *options) See vlc.Instance.media_new documentation for details. ''' def __new__(cls, *args): if args: i = args[0] if isinstance(i, _Ints): return _Constructor(cls, i) if isinstance(i, Instance): return i.media_new(*args[1:]) o = get_default_instance().media_new(*args) return o def get_instance(self): return getattr(self, '_instance', None) def add_options(self, *options): """Add a list of options to the media. Options must be written without the double-dash. Warning: most audio and video options, such as text renderer, have no effects on an individual media. These options must be set at the vlc.Instance or vlc.MediaPlayer instanciation. @param options: optional media option=value strings """ for o in options: self.add_option(o) def tracks_get(self): """Get media descriptor's elementary streams description Note, you need to call L{parse}() or play the media at least once before calling this function. Not doing this will result in an empty array. The result must be freed with L{tracks_release}. @version: LibVLC 2.1.0 and later. """ mediaTrack_pp = ctypes.POINTER(MediaTrack)() n = libvlc_media_tracks_get(self, byref(mediaTrack_pp)) info = cast(ctypes.mediaTrack_pp, ctypes.POINTER(ctypes.POINTER(MediaTrack) * n)) return info def add_option(self, psz_options): '''Add an option to the media. This option will be used to determine how the media_player will read the media. This allows to use VLC's advanced reading/streaming options on a per-media basis. @note: The options are listed in 'vlc --long-help' from the command line, e.g. "-sout-all". Keep in mind that available options and their semantics vary across LibVLC versions and builds. @warning: Not all options affects L{Media} objects: Specifically, due to architectural issues most audio and video options, such as text renderer options, have no effects on an individual media. These options must be set through L{new}() instead. @param psz_options: the options (as a string). ''' return libvlc_media_add_option(self, str_to_bytes(psz_options)) def add_option_flag(self, psz_options, i_flags): '''Add an option to the media with configurable flags. This option will be used to determine how the media_player will read the media. This allows to use VLC's advanced reading/streaming options on a per-media basis. The options are detailed in vlc --long-help, for instance "--sout-all". Note that all options are not usable on medias: specifically, due to architectural issues, video-related options such as text renderer options cannot be set on a single media. They must be set on the whole libvlc instance instead. @param psz_options: the options (as a string). @param i_flags: the flags for this option. 
''' return libvlc_media_add_option_flag(self, str_to_bytes(psz_options), i_flags) def retain(self): '''Retain a reference to a media descriptor object (libvlc_media_t). Use L{release}() to decrement the reference count of a media descriptor object. ''' return libvlc_media_retain(self) def release(self): '''Decrement the reference count of a media descriptor object. If the reference count is 0, then L{release}() will release the media descriptor object. It will send out an libvlc_MediaFreed event to all listeners. If the media descriptor object has been released it should not be used again. ''' return libvlc_media_release(self) def get_mrl(self): '''Get the media resource locator (mrl) from a media descriptor object. @return: string with mrl of media descriptor object. ''' return libvlc_media_get_mrl(self) def duplicate(self): '''Duplicate a media descriptor object. ''' return libvlc_media_duplicate(self) def get_meta(self, e_meta): '''Read the meta of the media. If the media has not yet been parsed this will return NULL. This methods automatically calls L{parse_async}(), so after calling it you may receive a libvlc_MediaMetaChanged event. If you prefer a synchronous version ensure that you call L{parse}() before get_meta(). See L{parse} See L{parse_async} See libvlc_MediaMetaChanged. @param e_meta: the meta to read. @return: the media's meta. ''' return libvlc_media_get_meta(self, e_meta) def set_meta(self, e_meta, psz_value): '''Set the meta of the media (this function will not save the meta, call L{save_meta} in order to save the meta). @param e_meta: the meta to write. @param psz_value: the media's meta. ''' return libvlc_media_set_meta(self, e_meta, str_to_bytes(psz_value)) def save_meta(self): '''Save the meta previously set. @return: true if the write operation was successful. ''' return libvlc_media_save_meta(self) def get_state(self): '''Get current state of media descriptor object. Possible media states are defined in libvlc_structures.c ( libvlc_NothingSpecial=0, libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused, libvlc_Stopped, libvlc_Ended, libvlc_Error). See libvlc_state_t. @return: state of media descriptor object. ''' return libvlc_media_get_state(self) def get_stats(self, p_stats): '''Get the current statistics about the media. @param p_stats:: structure that contain the statistics about the media (this structure must be allocated by the caller). @return: true if the statistics are available, false otherwise \libvlc_return_bool. ''' return libvlc_media_get_stats(self, p_stats) def subitems(self): '''Get subitems of media descriptor object. This will increment the reference count of supplied media descriptor object. Use L{list_release}() to decrement the reference counting. @return: list of media descriptor subitems or NULL. ''' return libvlc_media_subitems(self) @memoize_parameterless def event_manager(self): '''Get event manager from media descriptor object. NOTE: this function doesn't increment reference counting. @return: event manager object. ''' return libvlc_media_event_manager(self) def get_duration(self): '''Get duration (in ms) of media descriptor object item. @return: duration of media item or -1 on error. ''' return libvlc_media_get_duration(self) def parse(self): '''Parse a media. This fetches (local) art, meta data and tracks information. The method is synchronous. See L{parse_async} See L{get_meta} See libvlc_media_get_tracks_info. ''' return libvlc_media_parse(self) def parse_async(self): '''Parse a media. 
This fetches (local) art, meta data and tracks information. The method is the asynchronous of L{parse}(). To track when this is over you can listen to libvlc_MediaParsedChanged event. However if the media was already parsed you will not receive this event. See L{parse} See libvlc_MediaParsedChanged See L{get_meta} See libvlc_media_get_tracks_info. ''' return libvlc_media_parse_async(self) def parse_with_options(self, parse_flag): '''Parse the media asynchronously with options. This fetches (local or network) art, meta data and/or tracks information. This method is the extended version of L{parse_async}(). To track when this is over you can listen to libvlc_MediaParsedChanged event. However if this functions returns an error, you will not receive this event. It uses a flag to specify parse options (see libvlc_media_parse_flag_t). All these flags can be combined. By default, media is parsed if it's a local file. See libvlc_MediaParsedChanged See L{get_meta} See L{tracks_get} See libvlc_media_parse_flag_t. @param parse_flag: parse options: @return: -1 in case of error, 0 otherwise. @version: LibVLC 3.0.0 or later. ''' return libvlc_media_parse_with_options(self, parse_flag) def is_parsed(self): '''Get Parsed status for media descriptor object. See libvlc_MediaParsedChanged. @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool. ''' return libvlc_media_is_parsed(self) def set_user_data(self, p_new_user_data): '''Sets media descriptor's user_data. user_data is specialized data accessed by the host application, VLC.framework uses it as a pointer to an native object that references a L{Media} pointer. @param p_new_user_data: pointer to user data. ''' return libvlc_media_set_user_data(self, p_new_user_data) def get_user_data(self): '''Get media descriptor's user_data. user_data is specialized data accessed by the host application, VLC.framework uses it as a pointer to an native object that references a L{Media} pointer. ''' return libvlc_media_get_user_data(self) def get_type(self): '''Get the media type of the media descriptor object. @return: media type. @version: LibVLC 3.0.0 and later. See libvlc_media_type_t. ''' return libvlc_media_get_type(self) def player_new_from_media(self): '''Create a Media Player object from a Media. @return: a new media player object, or NULL on error. ''' return libvlc_media_player_new_from_media(self) class MediaDiscoverer(_Ctype): '''N/A ''' def __new__(cls, ptr=_internal_guard): '''(INTERNAL) ctypes wrapper constructor. ''' return _Constructor(cls, ptr) def start(self): '''Start media discovery. To stop it, call L{stop}() or L{release}() directly. See L{stop}. @return: -1 in case of error, 0 otherwise. @version: LibVLC 3.0.0 or later. ''' return libvlc_media_discoverer_start(self) def stop(self): '''Stop media discovery. See L{start}. @version: LibVLC 3.0.0 or later. ''' return libvlc_media_discoverer_stop(self) def release(self): '''Release media discover object. If the reference count reaches 0, then the object will be released. ''' return libvlc_media_discoverer_release(self) def localized_name(self): '''Get media service discover object its localized name. @return: localized name. ''' return libvlc_media_discoverer_localized_name(self) def media_list(self): '''Get media service discover media list. @return: list of media items. ''' return libvlc_media_discoverer_media_list(self) @memoize_parameterless def event_manager(self): '''Get event manager from media service discover object. @return: event manager object. 
''' return libvlc_media_discoverer_event_manager(self) def is_running(self): '''Query if media service discover object is running. @return: true if running, false if not \libvlc_return_bool. ''' return libvlc_media_discoverer_is_running(self) class MediaLibrary(_Ctype): '''N/A ''' def __new__(cls, ptr=_internal_guard): '''(INTERNAL) ctypes wrapper constructor. ''' return _Constructor(cls, ptr) def release(self): '''Release media library object. This functions decrements the reference count of the media library object. If it reaches 0, then the object will be released. ''' return libvlc_media_library_release(self) def retain(self): '''Retain a reference to a media library object. This function will increment the reference counting for this object. Use L{release}() to decrement the reference count. ''' return libvlc_media_library_retain(self) def load(self): '''Load media library. @return: 0 on success, -1 on error. ''' return libvlc_media_library_load(self) def media_list(self): '''Get media library subitems. @return: media list subitems. ''' return libvlc_media_library_media_list(self) class MediaList(_Ctype): '''Create a new MediaList instance. Usage: MediaList(list_of_MRLs) See vlc.Instance.media_list_new documentation for details. ''' def __new__(cls, *args): if args: i = args[0] if isinstance(i, _Ints): return _Constructor(cls, i) if isinstance(i, Instance): return i.media_list_new(*args[1:]) o = get_default_instance().media_list_new(*args) return o def get_instance(self): return getattr(self, '_instance', None) def add_media(self, mrl): """Add media instance to media list. The L{lock} should be held upon entering this function. @param mrl: a media instance or a MRL. @return: 0 on success, -1 if the media list is read-only. """ if isinstance(mrl, basestring): mrl = (self.get_instance() or get_default_instance()).media_new(mrl) return libvlc_media_list_add_media(self, mrl) def release(self): '''Release media list created with L{new}(). ''' return libvlc_media_list_release(self) def retain(self): '''Retain reference to a media list. ''' return libvlc_media_list_retain(self) def set_media(self, p_md): '''Associate media instance with this media list instance. If another media instance was present it will be released. The L{lock} should NOT be held upon entering this function. @param p_md: media instance to add. ''' return libvlc_media_list_set_media(self, p_md) def media(self): '''Get media instance from this media list instance. This action will increase the refcount on the media instance. The L{lock} should NOT be held upon entering this function. @return: media instance. ''' return libvlc_media_list_media(self) def insert_media(self, p_md, i_pos): '''Insert media instance in media list on a position The L{lock} should be held upon entering this function. @param p_md: a media instance. @param i_pos: position in array where to insert. @return: 0 on success, -1 if the media list is read-only. ''' return libvlc_media_list_insert_media(self, p_md, i_pos) def remove_index(self, i_pos): '''Remove media instance from media list on a position The L{lock} should be held upon entering this function. @param i_pos: position in array where to insert. @return: 0 on success, -1 if the list is read-only or the item was not found. ''' return libvlc_media_list_remove_index(self, i_pos) def count(self): '''Get count on media list items The L{lock} should be held upon entering this function. @return: number of items in media list. 
''' return libvlc_media_list_count(self) def __len__(self): return libvlc_media_list_count(self) def item_at_index(self, i_pos): '''List media instance in media list at a position The L{lock} should be held upon entering this function. @param i_pos: position in array where to insert. @return: media instance at position i_pos, or NULL if not found. In case of success, L{media_retain}() is called to increase the refcount on the media. ''' return libvlc_media_list_item_at_index(self, i_pos) def __getitem__(self, i): return libvlc_media_list_item_at_index(self, i) def __iter__(self): for i in range(len(self)): yield self[i] def index_of_item(self, p_md): '''Find index position of List media instance in media list. Warning: the function will return the first matched position. The L{lock} should be held upon entering this function. @param p_md: media instance. @return: position of media instance or -1 if media not found. ''' return libvlc_media_list_index_of_item(self, p_md) def is_readonly(self): '''This indicates if this media list is read-only from a user point of view. @return: 1 on readonly, 0 on readwrite \libvlc_return_bool. ''' return libvlc_media_list_is_readonly(self) def lock(self): '''Get lock on media list items. ''' return libvlc_media_list_lock(self) def unlock(self): '''Release lock on media list items The L{lock} should be held upon entering this function. ''' return libvlc_media_list_unlock(self) @memoize_parameterless def event_manager(self): '''Get libvlc_event_manager from this media list instance. The p_event_manager is immutable, so you don't have to hold the lock. @return: libvlc_event_manager. ''' return libvlc_media_list_event_manager(self) class MediaListPlayer(_Ctype): '''Create a new MediaListPlayer instance. It may take as parameter either: - a vlc.Instance - nothing ''' def __new__(cls, arg=None): if arg is None: i = get_default_instance() elif isinstance(arg, Instance): i = arg elif isinstance(arg, _Ints): return _Constructor(cls, arg) else: raise TypeError('MediaListPlayer %r' % (arg,)) return i.media_list_player_new() def get_instance(self): """Return the associated Instance. """ return self._instance #PYCHOK expected def release(self): '''Release a media_list_player after use Decrement the reference count of a media player object. If the reference count is 0, then L{release}() will release the media player object. If the media player object has been released, then it should not be used again. ''' return libvlc_media_list_player_release(self) def retain(self): '''Retain a reference to a media player list object. Use L{release}() to decrement reference count. ''' return libvlc_media_list_player_retain(self) @memoize_parameterless def event_manager(self): '''Return the event manager of this media_list_player. @return: the event manager. ''' return libvlc_media_list_player_event_manager(self) def set_media_player(self, p_mi): '''Replace media player in media_list_player with this instance. @param p_mi: media player instance. ''' return libvlc_media_list_player_set_media_player(self, p_mi) def set_media_list(self, p_mlist): '''Set the media list associated with the player. @param p_mlist: list of media. ''' return libvlc_media_list_player_set_media_list(self, p_mlist) def play(self): '''Play media list. ''' return libvlc_media_list_player_play(self) def pause(self): '''Toggle pause (or resume) media list. ''' return libvlc_media_list_player_pause(self) def is_playing(self): '''Is media list playing? 
@return: true for playing and false for not playing \libvlc_return_bool. ''' return libvlc_media_list_player_is_playing(self) def get_state(self): '''Get current libvlc_state of media list player. @return: libvlc_state_t for media list player. ''' return libvlc_media_list_player_get_state(self) def play_item_at_index(self, i_index): '''Play media list item at position index. @param i_index: index in media list to play. @return: 0 upon success -1 if the item wasn't found. ''' return libvlc_media_list_player_play_item_at_index(self, i_index) def __getitem__(self, i): return libvlc_media_list_player_play_item_at_index(self, i) def __iter__(self): for i in range(len(self)): yield self[i] def play_item(self, p_md): '''Play the given media item. @param p_md: the media instance. @return: 0 upon success, -1 if the media is not part of the media list. ''' return libvlc_media_list_player_play_item(self, p_md) def stop(self): '''Stop playing media list. ''' return libvlc_media_list_player_stop(self) def next(self): '''Play next item from media list. @return: 0 upon success -1 if there is no next item. ''' return libvlc_media_list_player_next(self) def previous(self): '''Play previous item from media list. @return: 0 upon success -1 if there is no previous item. ''' return libvlc_media_list_player_previous(self) def set_playback_mode(self, e_mode): '''Sets the playback mode for the playlist. @param e_mode: playback mode specification. ''' return libvlc_media_list_player_set_playback_mode(self, e_mode) class MediaPlayer(_Ctype): '''Create a new MediaPlayer instance. It may take as parameter either: - a string (media URI), options... In this case, a vlc.Instance will be created. - a vlc.Instance, a string (media URI), options... ''' def __new__(cls, *args): if len(args) == 1 and isinstance(args[0], _Ints): return _Constructor(cls, args[0]) if args and isinstance(args[0], Instance): instance = args[0] args = args[1:] else: instance = get_default_instance() o = instance.media_player_new() if args: o.set_media(instance.media_new(*args)) return o def get_instance(self): """Return the associated Instance. """ return self._instance #PYCHOK expected def set_mrl(self, mrl, *options): """Set the MRL to play. Warning: most audio and video options, such as text renderer, have no effects on an individual media. These options must be set at the vlc.Instance or vlc.MediaPlayer instanciation. @param mrl: The MRL @param options: optional media option=value strings @return: the Media object """ m = self.get_instance().media_new(mrl, *options) self.set_media(m) return m def video_get_spu_description(self): """Get the description of available video subtitles. """ return track_description_list(libvlc_video_get_spu_description(self)) def video_get_title_description(self): """Get the description of available titles. """ return track_description_list(libvlc_video_get_title_description(self)) def video_get_chapter_description(self, title): """Get the description of available chapters for specific title. @param title: selected title (int) """ return track_description_list(libvlc_video_get_chapter_description(self, title)) def video_get_track_description(self): """Get the description of available video tracks. """ return track_description_list(libvlc_video_get_track_description(self)) def audio_get_track_description(self): """Get the description of available audio tracks. 
""" return track_description_list(libvlc_audio_get_track_description(self)) def video_get_size(self, num=0): """Get the video size in pixels as 2-tuple (width, height). @param num: video number (default 0). """ r = libvlc_video_get_size(self, num) if isinstance(r, tuple) and len(r) == 2: return r else: raise VLCException('invalid video number (%s)' % (num,)) def set_hwnd(self, drawable): """Set a Win32/Win64 API window handle (HWND). Specify where the media player should render its video output. If LibVLC was built without Win32/Win64 API output support, then this has no effects. @param drawable: windows handle of the drawable. """ if not isinstance(drawable, ctypes.c_void_p): drawable = ctypes.c_void_p(int(drawable)) libvlc_media_player_set_hwnd(self, drawable) def video_get_width(self, num=0): """Get the width of a video in pixels. @param num: video number (default 0). """ return self.video_get_size(num)[0] def video_get_height(self, num=0): """Get the height of a video in pixels. @param num: video number (default 0). """ return self.video_get_size(num)[1] def video_get_cursor(self, num=0): """Get the mouse pointer coordinates over a video as 2-tuple (x, y). Coordinates are expressed in terms of the decoded video resolution, B{not} in terms of pixels on the screen/viewport. To get the latter, you must query your windowing system directly. Either coordinate may be negative or larger than the corresponding size of the video, if the cursor is outside the rendering area. @warning: The coordinates may be out-of-date if the pointer is not located on the video rendering area. LibVLC does not track the mouse pointer if the latter is outside the video widget. @note: LibVLC does not support multiple mouse pointers (but does support multiple input devices sharing the same pointer). @param num: video number (default 0). """ r = libvlc_video_get_cursor(self, num) if isinstance(r, tuple) and len(r) == 2: return r raise VLCException('invalid video number (%s)' % (num,)) def release(self): '''Release a media_player after use Decrement the reference count of a media player object. If the reference count is 0, then L{release}() will release the media player object. If the media player object has been released, then it should not be used again. ''' return libvlc_media_player_release(self) def retain(self): '''Retain a reference to a media player object. Use L{release}() to decrement reference count. ''' return libvlc_media_player_retain(self) def set_media(self, p_md): '''Set the media that will be used by the media_player. If any, previous md will be released. @param p_md: the Media. Afterwards the p_md can be safely destroyed. ''' return libvlc_media_player_set_media(self, p_md) def get_media(self): '''Get the media used by the media_player. @return: the media associated with p_mi, or NULL if no media is associated. ''' return libvlc_media_player_get_media(self) @memoize_parameterless def event_manager(self): '''Get the Event Manager from which the media player send event. @return: the event manager associated with p_mi. ''' return libvlc_media_player_event_manager(self) def is_playing(self): '''is_playing. @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool. ''' return libvlc_media_player_is_playing(self) def play(self): '''Play. @return: 0 if playback started (and was already started), or -1 on error. ''' return libvlc_media_player_play(self) def set_pause(self, do_pause): '''Pause or resume (no effect if there is no media). 
@param do_pause: play/resume if zero, pause if non-zero. @version: LibVLC 1.1.1 or later. ''' return libvlc_media_player_set_pause(self, do_pause) def pause(self): '''Toggle pause (no effect if there is no media). ''' return libvlc_media_player_pause(self) def stop(self): '''Stop (no effect if there is no media). ''' return libvlc_media_player_stop(self) def video_set_callbacks(self, lock, unlock, display, opaque): '''Set callbacks and private data to render decoded video to a custom area in memory. Use L{video_set_format}() or L{video_set_format_callbacks}() to configure the decoded format. @param lock: callback to lock video memory (must not be NULL). @param unlock: callback to unlock video memory (or NULL if not needed). @param display: callback to display video (or NULL if not needed). @param opaque: private pointer for the three callbacks (as first parameter). @version: LibVLC 1.1.1 or later. ''' return libvlc_video_set_callbacks(self, lock, unlock, display, opaque) def video_set_format(self, chroma, width, height, pitch): '''Set decoded video chroma and dimensions. This only works in combination with L{video_set_callbacks}(), and is mutually exclusive with L{video_set_format_callbacks}(). @param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV"). @param width: pixel width. @param height: pixel height. @param pitch: line pitch (in bytes). @version: LibVLC 1.1.1 or later. @bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{video_set_format_callbacks}() instead. ''' return libvlc_video_set_format(self, str_to_bytes(chroma), width, height, pitch) def video_set_format_callbacks(self, setup, cleanup): '''Set decoded video chroma and dimensions. This only works in combination with L{video_set_callbacks}(). @param setup: callback to select the video format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. ''' return libvlc_video_set_format_callbacks(self, setup, cleanup) def set_nsobject(self, drawable): '''Set the NSView handler where the media player should render its video output. Use the vout called "macosx". The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding protocol: @begincode \@protocol VLCOpenGLVideoViewEmbedding <NSObject> - (void)addVoutSubview:(NSView *)view; - (void)removeVoutSubview:(NSView *)view; \@end @endcode Or it can be an NSView object. If you want to use it along with Qt4 see the QMacCocoaViewContainer. Then the following code should work: @begincode NSView *video = [[NSView alloc] init]; QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent); L{set_nsobject}(mp, video); [video release]; @endcode You can find a live example in VLCVideoView in VLCKit.framework. @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol. ''' return libvlc_media_player_set_nsobject(self, drawable) def get_nsobject(self): '''Get the NSView handler previously set with L{set_nsobject}(). @return: the NSView handler or 0 if none where set. ''' return libvlc_media_player_get_nsobject(self) def set_agl(self, drawable): '''Set the agl handler where the media player should render its video output. @param drawable: the agl handler. ''' return libvlc_media_player_set_agl(self, drawable) def get_agl(self): '''Get the agl handler previously set with L{set_agl}(). @return: the agl handler or 0 if none where set. 
''' return libvlc_media_player_get_agl(self) def set_xwindow(self, drawable): '''Set an X Window System drawable where the media player should render its video output. If LibVLC was built without X11 output support, then this has no effects. The specified identifier must correspond to an existing Input/Output class X11 window. Pixmaps are B{not} supported. The caller shall ensure that the X11 server is the same as the one the VLC instance has been configured with. This function must be called before video playback is started; otherwise it will only take effect after playback stop and restart. @param drawable: the ID of the X window. ''' return libvlc_media_player_set_xwindow(self, drawable) def get_xwindow(self): '''Get the X Window System window identifier previously set with L{set_xwindow}(). Note that this will return the identifier even if VLC is not currently using it (for instance if it is playing an audio-only input). @return: an X window ID, or 0 if none where set. ''' return libvlc_media_player_get_xwindow(self) def get_hwnd(self): '''Get the Windows API window handle (HWND) previously set with L{set_hwnd}(). The handle will be returned even if LibVLC is not currently outputting any video to it. @return: a window handle or NULL if there are none. ''' return libvlc_media_player_get_hwnd(self) def audio_set_callbacks(self, play, pause, resume, flush, drain, opaque): '''Set callbacks and private data for decoded audio. Use L{audio_set_format}() or L{audio_set_format_callbacks}() to configure the decoded audio format. @param play: callback to play audio samples (must not be NULL). @param pause: callback to pause playback (or NULL to ignore). @param resume: callback to resume playback (or NULL to ignore). @param flush: callback to flush audio buffers (or NULL to ignore). @param drain: callback to drain audio buffers (or NULL to ignore). @param opaque: private pointer for the audio callbacks (as first parameter). @version: LibVLC 2.0.0 or later. ''' return libvlc_audio_set_callbacks(self, play, pause, resume, flush, drain, opaque) def audio_set_volume_callback(self, set_volume): '''Set callbacks and private data for decoded audio. This only works in combination with L{audio_set_callbacks}(). Use L{audio_set_format}() or L{audio_set_format_callbacks}() to configure the decoded audio format. @param set_volume: callback to apply audio volume, or NULL to apply volume in software. @version: LibVLC 2.0.0 or later. ''' return libvlc_audio_set_volume_callback(self, set_volume) def audio_set_format_callbacks(self, setup, cleanup): '''Set decoded audio format. This only works in combination with L{audio_set_callbacks}(). @param setup: callback to select the audio format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. ''' return libvlc_audio_set_format_callbacks(self, setup, cleanup) def audio_set_format(self, format, rate, channels): '''Set decoded audio format. This only works in combination with L{audio_set_callbacks}(), and is mutually exclusive with L{audio_set_format_callbacks}(). @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32"). @param rate: sample rate (expressed in Hz). @param channels: channels count. @version: LibVLC 2.0.0 or later. ''' return libvlc_audio_set_format(self, str_to_bytes(format), rate, channels) def get_length(self): '''Get the current movie length (in ms). @return: the movie length (in ms), or -1 if there is no media. 
''' return libvlc_media_player_get_length(self) def get_time(self): '''Get the current movie time (in ms). @return: the movie time (in ms), or -1 if there is no media. ''' return libvlc_media_player_get_time(self) def set_time(self, i_time): '''Set the movie time (in ms). This has no effect if no media is being played. Not all formats and protocols support this. @param i_time: the movie time (in ms). ''' return libvlc_media_player_set_time(self, i_time) def get_position(self): '''Get movie position as percentage between 0.0 and 1.0. @return: movie position, or -1. in case of error. ''' return libvlc_media_player_get_position(self) def set_position(self, f_pos): '''Set movie position as percentage between 0.0 and 1.0. This has no effect if playback is not enabled. This might not work depending on the underlying input format and protocol. @param f_pos: the position. ''' return libvlc_media_player_set_position(self, f_pos) def set_chapter(self, i_chapter): '''Set movie chapter (if applicable). @param i_chapter: chapter number to play. ''' return libvlc_media_player_set_chapter(self, i_chapter) def get_chapter(self): '''Get movie chapter. @return: chapter number currently playing, or -1 if there is no media. ''' return libvlc_media_player_get_chapter(self) def get_chapter_count(self): '''Get movie chapter count. @return: number of chapters in movie, or -1. ''' return libvlc_media_player_get_chapter_count(self) def will_play(self): '''Is the player able to play. @return: boolean \libvlc_return_bool. ''' return libvlc_media_player_will_play(self) def get_chapter_count_for_title(self, i_title): '''Get title chapter count. @param i_title: title. @return: number of chapters in title, or -1. ''' return libvlc_media_player_get_chapter_count_for_title(self, i_title) def set_title(self, i_title): '''Set movie title. @param i_title: title number to play. ''' return libvlc_media_player_set_title(self, i_title) def get_title(self): '''Get movie title. @return: title number currently playing, or -1. ''' return libvlc_media_player_get_title(self) def get_title_count(self): '''Get movie title count. @return: title number count, or -1. ''' return libvlc_media_player_get_title_count(self) def previous_chapter(self): '''Set previous chapter (if applicable). ''' return libvlc_media_player_previous_chapter(self) def next_chapter(self): '''Set next chapter (if applicable). ''' return libvlc_media_player_next_chapter(self) def get_rate(self): '''Get the requested movie play rate. @warning: Depending on the underlying media, the requested rate may be different from the real playback rate. @return: movie play rate. ''' return libvlc_media_player_get_rate(self) def set_rate(self, rate): '''Set movie play rate. @param rate: movie play rate to set. @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol). ''' return libvlc_media_player_set_rate(self, rate) def get_state(self): '''Get current movie state. @return: the current state of the media player (playing, paused, ...) See libvlc_state_t. ''' return libvlc_media_player_get_state(self) def get_fps(self): '''Get movie fps rate. @return: frames per second (fps) for this playing movie, or 0 if unspecified. ''' return libvlc_media_player_get_fps(self) def has_vout(self): '''How many video outputs does this media player have? @return: the number of video outputs. ''' return libvlc_media_player_has_vout(self) def is_seekable(self): '''Is this media player seekable? 
@return: true if the media player can seek \libvlc_return_bool. ''' return libvlc_media_player_is_seekable(self) def can_pause(self): '''Can this media player be paused? @return: true if the media player can pause \libvlc_return_bool. ''' return libvlc_media_player_can_pause(self) def program_scrambled(self): '''Check if the current program is scrambled. @return: true if the current program is scrambled \libvlc_return_bool. @version: LibVLC 2.2.0 or later. ''' return libvlc_media_player_program_scrambled(self) def next_frame(self): '''Display the next frame (if supported). ''' return libvlc_media_player_next_frame(self) def navigate(self, navigate): '''Navigate through DVD Menu. @param navigate: the Navigation mode. @version: libVLC 2.0.0 or later. ''' return libvlc_media_player_navigate(self, navigate) def set_video_title_display(self, position, timeout): '''Set if, and how, the video title will be shown when media is played. @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed. @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable). @version: libVLC 2.1.0 or later. ''' return libvlc_media_player_set_video_title_display(self, position, timeout) def toggle_fullscreen(self): '''Toggle fullscreen status on non-embedded video outputs. @warning: The same limitations applies to this function as to L{set_fullscreen}(). ''' return libvlc_toggle_fullscreen(self) def set_fullscreen(self, b_fullscreen): '''Enable or disable fullscreen. @warning: With most window managers, only a top-level windows can be in full-screen mode. Hence, this function will not operate properly if L{set_xwindow}() was used to embed the video in a non-top-level window. In that case, the embedding window must be reparented to the root window B{before} fullscreen mode is enabled. You will want to reparent it back to its normal parent when disabling fullscreen. @param b_fullscreen: boolean for fullscreen status. ''' return libvlc_set_fullscreen(self, b_fullscreen) def get_fullscreen(self): '''Get current fullscreen status. @return: the fullscreen status (boolean) \libvlc_return_bool. ''' return libvlc_get_fullscreen(self) def video_set_key_input(self, on): '''Enable or disable key press events handling, according to the LibVLC hotkeys configuration. By default and for historical reasons, keyboard events are handled by the LibVLC video widget. @note: On X11, there can be only one subscriber for key press and mouse click events per window. If your application has subscribed to those events for the X window ID of the video widget, then LibVLC will not be able to handle key presses and mouse clicks in any case. @warning: This function is only implemented for X11 and Win32 at the moment. @param on: true to handle key press events, false to ignore them. ''' return libvlc_video_set_key_input(self, on) def video_set_mouse_input(self, on): '''Enable or disable mouse click events handling. By default, those events are handled. This is needed for DVD menus to work, as well as a few video filters such as "puzzle". See L{video_set_key_input}(). @warning: This function is only implemented for X11 and Win32 at the moment. @param on: true to handle mouse click events, false to ignore them. ''' return libvlc_video_set_mouse_input(self, on) def video_get_scale(self): '''Get the current video scaling factor. See also L{video_set_scale}(). @return: the currently configured zoom factor, or 0. 
if the video is set to fit to the output window/drawable automatically. ''' return libvlc_video_get_scale(self) def video_set_scale(self, f_factor): '''Set the video scaling factor. That is the ratio of the number of pixels on screen to the number of pixels in the original decoded video in each dimension. Zero is a special value; it will adjust the video to the output window/drawable (in windowed mode) or the entire screen. Note that not all video outputs support scaling. @param f_factor: the scaling factor, or zero. ''' return libvlc_video_set_scale(self, f_factor) def video_get_aspect_ratio(self): '''Get current video aspect ratio. @return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{free}()). ''' return libvlc_video_get_aspect_ratio(self) def video_set_aspect_ratio(self, psz_aspect): '''Set new video aspect ratio. @param psz_aspect: new video aspect-ratio or NULL to reset to default @note Invalid aspect ratios are ignored. ''' return libvlc_video_set_aspect_ratio(self, str_to_bytes(psz_aspect)) def video_get_spu(self): '''Get current video subtitle. @return: the video subtitle selected, or -1 if none. ''' return libvlc_video_get_spu(self) def video_get_spu_count(self): '''Get the number of available video subtitles. @return: the number of available video subtitles. ''' return libvlc_video_get_spu_count(self) def video_set_spu(self, i_spu): '''Set new video subtitle. @param i_spu: video subtitle track to select (i_id from track description). @return: 0 on success, -1 if out of range. ''' return libvlc_video_set_spu(self, i_spu) def video_set_subtitle_file(self, psz_subtitle): '''Set new video subtitle file. @param psz_subtitle: new video subtitle file. @return: the success status (boolean). ''' return libvlc_video_set_subtitle_file(self, str_to_bytes(psz_subtitle)) def video_get_spu_delay(self): '''Get the current subtitle delay. Positive values means subtitles are being displayed later, negative values earlier. @return: time (in microseconds) the display of subtitles is being delayed. @version: LibVLC 2.0.0 or later. ''' return libvlc_video_get_spu_delay(self) def video_set_spu_delay(self, i_delay): '''Set the subtitle delay. This affects the timing of when the subtitle will be displayed. Positive values result in subtitles being displayed later, while negative values will result in subtitles being displayed earlier. The subtitle delay will be reset to zero each time the media changes. @param i_delay: time (in microseconds) the display of subtitles should be delayed. @return: 0 on success, -1 on error. @version: LibVLC 2.0.0 or later. ''' return libvlc_video_set_spu_delay(self, i_delay) def video_get_crop_geometry(self): '''Get current crop filter geometry. @return: the crop filter geometry or NULL if unset. ''' return libvlc_video_get_crop_geometry(self) def video_set_crop_geometry(self, psz_geometry): '''Set new crop filter geometry. @param psz_geometry: new crop filter geometry (NULL to unset). ''' return libvlc_video_set_crop_geometry(self, str_to_bytes(psz_geometry)) def video_get_teletext(self): '''Get current teletext page requested. @return: the current teletext page requested. ''' return libvlc_video_get_teletext(self) def video_set_teletext(self, i_page): '''Set new teletext page to retrieve. @param i_page: teletex page number requested. ''' return libvlc_video_set_teletext(self, i_page) def toggle_teletext(self): '''Toggle teletext transparent status on video output. 
''' return libvlc_toggle_teletext(self) def video_get_track_count(self): '''Get number of available video tracks. @return: the number of available video tracks (int). ''' return libvlc_video_get_track_count(self) def video_get_track(self): '''Get current video track. @return: the video track ID (int) or -1 if no active input. ''' return libvlc_video_get_track(self) def video_set_track(self, i_track): '''Set video track. @param i_track: the track ID (i_id field from track description). @return: 0 on success, -1 if out of range. ''' return libvlc_video_set_track(self, i_track) def video_take_snapshot(self, num, psz_filepath, i_width, i_height): '''Take a snapshot of the current video window. If i_width AND i_height is 0, original size is used. If i_width XOR i_height is 0, original aspect-ratio is preserved. @param num: number of video output (typically 0 for the first/only one). @param psz_filepath: the path where to save the screenshot to. @param i_width: the snapshot's width. @param i_height: the snapshot's height. @return: 0 on success, -1 if the video was not found. ''' return libvlc_video_take_snapshot(self, num, str_to_bytes(psz_filepath), i_width, i_height) def video_set_deinterlace(self, psz_mode): '''Enable or disable deinterlace filter. @param psz_mode: type of deinterlace filter, NULL to disable. ''' return libvlc_video_set_deinterlace(self, str_to_bytes(psz_mode)) def video_get_marquee_int(self, option): '''Get an integer marquee option value. @param option: marq option to get See libvlc_video_marquee_int_option_t. ''' return libvlc_video_get_marquee_int(self, option) def video_get_marquee_string(self, option): '''Get a string marquee option value. @param option: marq option to get See libvlc_video_marquee_string_option_t. ''' return libvlc_video_get_marquee_string(self, option) def video_set_marquee_int(self, option, i_val): '''Enable, disable or set an integer marquee option Setting libvlc_marquee_Enable has the side effect of enabling (arg !0) or disabling (arg 0) the marq filter. @param option: marq option to set See libvlc_video_marquee_int_option_t. @param i_val: marq option value. ''' return libvlc_video_set_marquee_int(self, option, i_val) def video_set_marquee_string(self, option, psz_text): '''Set a marquee string option. @param option: marq option to set See libvlc_video_marquee_string_option_t. @param psz_text: marq option value. ''' return libvlc_video_set_marquee_string(self, option, str_to_bytes(psz_text)) def video_get_logo_int(self, option): '''Get integer logo option. @param option: logo option to get, values of libvlc_video_logo_option_t. ''' return libvlc_video_get_logo_int(self, option) def video_set_logo_int(self, option, value): '''Set logo option as integer. Options that take a different type value are ignored. Passing libvlc_logo_enable as option value has the side effect of starting (arg !0) or stopping (arg 0) the logo filter. @param option: logo option to set, values of libvlc_video_logo_option_t. @param value: logo option value. ''' return libvlc_video_set_logo_int(self, option, value) def video_set_logo_string(self, option, psz_value): '''Set logo option as string. Options that take a different type value are ignored. @param option: logo option to set, values of libvlc_video_logo_option_t. @param psz_value: logo option value. ''' return libvlc_video_set_logo_string(self, option, str_to_bytes(psz_value)) def video_get_adjust_int(self, option): '''Get integer adjust option. @param option: adjust option to get, values of libvlc_video_adjust_option_t. 
@version: LibVLC 1.1.1 and later. ''' return libvlc_video_get_adjust_int(self, option) def video_set_adjust_int(self, option, value): '''Set adjust option as integer. Options that take a different type value are ignored. Passing libvlc_adjust_enable as option value has the side effect of starting (arg !0) or stopping (arg 0) the adjust filter. @param option: adust option to set, values of libvlc_video_adjust_option_t. @param value: adjust option value. @version: LibVLC 1.1.1 and later. ''' return libvlc_video_set_adjust_int(self, option, value) def video_get_adjust_float(self, option): '''Get float adjust option. @param option: adjust option to get, values of libvlc_video_adjust_option_t. @version: LibVLC 1.1.1 and later. ''' return libvlc_video_get_adjust_float(self, option) def video_set_adjust_float(self, option, value): '''Set adjust option as float. Options that take a different type value are ignored. @param option: adust option to set, values of libvlc_video_adjust_option_t. @param value: adjust option value. @version: LibVLC 1.1.1 and later. ''' return libvlc_video_set_adjust_float(self, option, value) def audio_output_set(self, psz_name): '''Selects an audio output module. @note: Any change will take be effect only after playback is stopped and restarted. Audio output cannot be changed while playing. @param psz_name: name of audio output, use psz_name of See L{AudioOutput}. @return: 0 if function succeded, -1 on error. ''' return libvlc_audio_output_set(self, str_to_bytes(psz_name)) def audio_output_device_enum(self): '''Gets a list of potential audio output devices, See L{audio_output_device_set}(). @note: Not all audio outputs support enumerating devices. The audio output may be functional even if the list is empty (NULL). @note: The list may not be exhaustive. @warning: Some audio output devices in the list might not actually work in some circumstances. By default, it is recommended to not specify any explicit audio device. @return: A NULL-terminated linked list of potential audio output devices. It must be freed it with L{audio_output_device_list_release}(). @version: LibVLC 2.2.0 or later. ''' return libvlc_audio_output_device_enum(self) def audio_output_device_set(self, module, device_id): '''Configures an explicit audio output device. If the module paramater is NULL, audio output will be moved to the device specified by the device identifier string immediately. This is the recommended usage. A list of adequate potential device strings can be obtained with L{audio_output_device_enum}(). However passing NULL is supported in LibVLC version 2.2.0 and later only; in earlier versions, this function would have no effects when the module parameter was NULL. If the module parameter is not NULL, the device parameter of the corresponding audio output, if it exists, will be set to the specified string. Note that some audio output modules do not have such a parameter (notably MMDevice and PulseAudio). A list of adequate potential device strings can be obtained with L{audio_output_device_list_get}(). @note: This function does not select the specified audio output plugin. L{audio_output_set}() is used for that purpose. @warning: The syntax for the device parameter depends on the audio output. Some audio output modules require further parameters (e.g. a channels map in the case of ALSA). @param module: If NULL, current audio output module. if non-NULL, name of audio output module. @param device_id: device identifier string. @return: Nothing. Errors are ignored (this is a design bug). 
''' return libvlc_audio_output_device_set(self, str_to_bytes(module), str_to_bytes(device_id)) def audio_output_device_get(self): '''Get the current audio output device identifier. This complements L{audio_output_device_set}(). @warning: The initial value for the current audio output device identifier may not be set or may be some unknown value. A LibVLC application should compare this value against the known device identifiers (e.g. those that were previously retrieved by a call to L{audio_output_device_enum} or L{audio_output_device_list_get}) to find the current audio output device. It is possible that the selected audio output device changes (an external change) without a call to L{audio_output_device_set}. That may make this method unsuitable to use if a LibVLC application is attempting to track dynamic audio device changes as they happen. @return: the current audio output device identifier NULL if no device is selected or in case of error (the result must be released with free() or L{free}()). @version: LibVLC 3.0.0 or later. ''' return libvlc_audio_output_device_get(self) def audio_toggle_mute(self): '''Toggle mute status. ''' return libvlc_audio_toggle_mute(self) def audio_get_mute(self): '''Get current mute status. @return: the mute status (boolean) if defined, -1 if undefined/unapplicable. ''' return libvlc_audio_get_mute(self) def audio_set_mute(self, status): '''Set mute status. @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute. ''' return libvlc_audio_set_mute(self, status) def audio_get_volume(self): '''Get current software audio volume. @return: the software volume in percents (0 = mute, 100 = nominal / 0dB). ''' return libvlc_audio_get_volume(self) def audio_set_volume(self, i_volume): '''Set current software audio volume. @param i_volume: the volume in percents (0 = mute, 100 = 0dB). @return: 0 if the volume was set, -1 if it was out of range. ''' return libvlc_audio_set_volume(self, i_volume) def audio_get_track_count(self): '''Get number of available audio tracks. @return: the number of available audio tracks (int), or -1 if unavailable. ''' return libvlc_audio_get_track_count(self) def audio_get_track(self): '''Get current audio track. @return: the audio track ID or -1 if no active input. ''' return libvlc_audio_get_track(self) def audio_set_track(self, i_track): '''Set current audio track. @param i_track: the track ID (i_id field from track description). @return: 0 on success, -1 on error. ''' return libvlc_audio_set_track(self, i_track) def audio_get_channel(self): '''Get current audio channel. @return: the audio channel See libvlc_audio_output_channel_t. ''' return libvlc_audio_get_channel(self) def audio_set_channel(self, channel): '''Set current audio channel. @param channel: the audio channel, See libvlc_audio_output_channel_t. @return: 0 on success, -1 on error. ''' return libvlc_audio_set_channel(self, channel) def audio_get_delay(self): '''Get current audio delay. @return: the audio delay (microseconds). @version: LibVLC 1.1.1 or later. ''' return libvlc_audio_get_delay(self) def audio_set_delay(self, i_delay): '''Set current audio delay. 
The audio delay will be reset to zero each time the media changes. @param i_delay: the audio delay (microseconds). @return: 0 on success, -1 on error. @version: LibVLC 1.1.1 or later. ''' return libvlc_audio_set_delay(self, i_delay) def set_equalizer(self, p_equalizer): '''Apply new equalizer settings to a media player. The equalizer is first created by invoking L{audio_equalizer_new}() or L{audio_equalizer_new_from_preset}(). It is possible to apply new equalizer settings to a media player whether the media player is currently playing media or not. Invoking this method will immediately apply the new equalizer settings to the audio output of the currently playing media if there is any. If there is no currently playing media, the new equalizer settings will be applied later if and when new media is played. Equalizer settings will automatically be applied to subsequently played media. To disable the equalizer for a media player invoke this method passing NULL for the p_equalizer parameter. The media player does not keep a reference to the supplied equalizer so it is safe for an application to release the equalizer reference any time after this method returns. @param p_equalizer: opaque equalizer handle, or NULL to disable the equalizer for this media player. @return: zero on success, -1 on error. @version: LibVLC 2.2.0 or later. ''' return libvlc_media_player_set_equalizer(self, p_equalizer) # LibVLC __version__ functions # def libvlc_errmsg(): '''A human-readable error message for the last LibVLC error in the calling thread. The resulting string is valid until another error occurs (at least until the next LibVLC call). @warning This will be NULL if there was no error. ''' f = _Cfunctions.get('libvlc_errmsg', None) or \ _Cfunction('libvlc_errmsg', (), None, ctypes.c_char_p) return f() def libvlc_clearerr(): '''Clears the LibVLC error status for the current thread. This is optional. By default, the error status is automatically overridden when a new error occurs, and destroyed when the thread exits. ''' f = _Cfunctions.get('libvlc_clearerr', None) or \ _Cfunction('libvlc_clearerr', (), None, None) return f() def libvlc_vprinterr(fmt, ap): '''Sets the LibVLC error status and message for the current thread. Any previous error is overridden. @param fmt: the format string. @param ap: the arguments. @return: a nul terminated string in any case. ''' f = _Cfunctions.get('libvlc_vprinterr', None) or \ _Cfunction('libvlc_vprinterr', ((1,), (1,),), None, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p) return f(fmt, ap) def libvlc_new(argc, argv): '''Create and initialize a libvlc instance. This functions accept a list of "command line" arguments similar to the main(). These arguments affect the LibVLC instance default configuration. @param argc: the number of arguments (should be 0). @param argv: list of arguments (should be NULL). @return: the libvlc instance or NULL in case of error. @version Arguments are meant to be passed from the command line to LibVLC, just like VLC media player does. The list of valid arguments depends on the LibVLC version, the operating system and platform, and set of available LibVLC plugins. Invalid or unsupported arguments will cause the function to fail (i.e. return NULL). Also, some arguments may alter the behaviour or otherwise interfere with other LibVLC functions. @warning There is absolutely no warranty or promise of forward, backward and cross-platform compatibility with regards to L{libvlc_new}() arguments. 
    We recommend that you do not use them, other than when debugging.
    '''
    f = _Cfunctions.get('libvlc_new', None) or \
        _Cfunction('libvlc_new', ((1,), (1,),), class_result(Instance),
                    ctypes.c_void_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p))
    return f(argc, argv)

def libvlc_release(p_instance):
    '''Decrement the reference count of a libvlc instance, and destroy it
    if it reaches zero.
    @param p_instance: the instance to destroy.
    '''
    f = _Cfunctions.get('libvlc_release', None) or \
        _Cfunction('libvlc_release', ((1,),), None,
                    None, Instance)
    return f(p_instance)

def libvlc_retain(p_instance):
    '''Increments the reference count of a libvlc instance.
    The initial reference count is 1 after L{libvlc_new}() returns.
    @param p_instance: the instance to reference.
    '''
    f = _Cfunctions.get('libvlc_retain', None) or \
        _Cfunction('libvlc_retain', ((1,),), None,
                    None, Instance)
    return f(p_instance)

def libvlc_add_intf(p_instance, name):
    '''Try to start a user interface for the libvlc instance.
    @param p_instance: the instance.
    @param name: interface name, or NULL for default.
    @return: 0 on success, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_add_intf', None) or \
        _Cfunction('libvlc_add_intf', ((1,), (1,),), None,
                    ctypes.c_int, Instance, ctypes.c_char_p)
    return f(p_instance, name)

def libvlc_set_user_agent(p_instance, name, http):
    '''Sets the application name. LibVLC passes this as the user agent string
    when a protocol requires it.
    @param p_instance: LibVLC instance.
    @param name: human-readable application name, e.g. "FooBar player 1.2.3".
    @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_set_user_agent', None) or \
        _Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
                    None, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, name, http)

def libvlc_set_app_id(p_instance, id, version, icon):
    '''Sets some meta-information about the application.
    See also L{libvlc_set_user_agent}().
    @param p_instance: LibVLC instance.
    @param id: Java-style application identifier, e.g. "com.acme.foobar".
    @param version: application version numbers, e.g. "1.2.3".
    @param icon: application icon name, e.g. "foobar".
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_set_app_id', None) or \
        _Cfunction('libvlc_set_app_id', ((1,), (1,), (1,), (1,),), None,
                    None, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, id, version, icon)

def libvlc_get_version():
    '''Retrieve libvlc version.
    Example: "1.1.0-git The Luggage".
    @return: a string containing the libvlc version.
    '''
    f = _Cfunctions.get('libvlc_get_version', None) or \
        _Cfunction('libvlc_get_version', (), None,
                    ctypes.c_char_p)
    return f()

def libvlc_get_compiler():
    '''Retrieve libvlc compiler version.
    Example: "gcc version 4.2.3 (Ubuntu 4.2.3-2ubuntu6)".
    @return: a string containing the libvlc compiler version.
    '''
    f = _Cfunctions.get('libvlc_get_compiler', None) or \
        _Cfunction('libvlc_get_compiler', (), None,
                    ctypes.c_char_p)
    return f()

def libvlc_get_changeset():
    '''Retrieve libvlc changeset.
    Example: "aa9bce0bc4".
    @return: a string containing the libvlc changeset.
    '''
    f = _Cfunctions.get('libvlc_get_changeset', None) or \
        _Cfunction('libvlc_get_changeset', (), None,
                    ctypes.c_char_p)
    return f()

def libvlc_free(ptr):
    '''Frees a heap allocation returned by a LibVLC function.
    If you know you're using the same underlying C run-time as the LibVLC
    implementation, then you can call ANSI C free() directly instead.
    @param ptr: the pointer.
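    A minimal usage sketch, assuming ``buf`` names a pointer that LibVLC
    documents as "must be released with free()"; the producing call here is
    hypothetical:
    @begincode
    buf = some_libvlc_call_returning_heap_memory()  # hypothetical producer
    if buf:
        libvlc_free(buf)
    @endcode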
    '''
    f = _Cfunctions.get('libvlc_free', None) or \
        _Cfunction('libvlc_free', ((1,),), None,
                    None, ctypes.c_void_p)
    return f(ptr)

def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data):
    '''Register for an event notification.
    @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to.
    @param i_event_type: the desired event to which we want to listen.
    @param f_callback: the function to call when i_event_type occurs.
    @param user_data: user provided data to carry with the event.
    @return: 0 on success, ENOMEM on error.
    '''
    f = _Cfunctions.get('libvlc_event_attach', None) or \
        _Cfunction('libvlc_event_attach', ((1,), (1,), (1,), (1,),), None,
                    ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, user_data)

def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
    '''Unregister an event notification.
    @param p_event_manager: the event manager.
    @param i_event_type: the desired event to which we want to unregister.
    @param f_callback: the function to call when i_event_type occurs.
    @param p_user_data: user provided data to carry with the event.
    '''
    f = _Cfunctions.get('libvlc_event_detach', None) or \
        _Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
                    None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, p_user_data)

def libvlc_event_type_name(event_type):
    '''Get an event's type name.
    @param event_type: the desired event.
    '''
    f = _Cfunctions.get('libvlc_event_type_name', None) or \
        _Cfunction('libvlc_event_type_name', ((1,),), None,
                    ctypes.c_char_p, ctypes.c_uint)
    return f(event_type)

def libvlc_log_get_context(ctx):
    '''Gets debugging information about a log message: the name of the VLC module
    emitting the message and the message location within the source code.
    The returned module name and file name will be NULL if unknown.
    The returned line number will similarly be zero if unknown.
    @param ctx: message context (as passed to the @ref libvlc_log_cb callback).
    @return: module module name storage (or NULL), file source code file name storage (or NULL), line source code file line number storage (or NULL).
    @version: LibVLC 2.1.0 or later.
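    A minimal usage sketch, assuming it is called from inside a logging
    callback (the LogCb prototype and its exact parameters are defined
    elsewhere in this module):
    @begincode
    def my_log_cb(data, level, ctx, fmt, args):
        module, fname, line = libvlc_log_get_context(ctx)
        # module and fname may be None, and line 0, when unknown
    @endcode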
    '''
    f = _Cfunctions.get('libvlc_log_get_object', None) or \
        _Cfunction('libvlc_log_get_object', ((1,), (2,), (2,), (1,),), None,
                    None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
    return f(ctx, id)

def libvlc_log_unset(p_instance):
    '''Unsets the logging callback for a LibVLC instance. This is rarely needed:
    the callback is implicitly unset when the instance is destroyed.
    This function will wait for any pending callbacks invocation to complete
    (causing a deadlock if called from within the callback).
    @param p_instance: libvlc instance.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_unset', None) or \
        _Cfunction('libvlc_log_unset', ((1,),), None,
                    None, Instance)
    return f(p_instance)

def libvlc_log_set(cb, data, p_instance):
    '''Sets the logging callback for a LibVLC instance.
    This function is thread-safe: it will wait for any pending callbacks
    invocation to complete.
    @param cb: callback function pointer.
    @param data: opaque data pointer for the callback function.
    @param p_instance: libvlc instance.
    @note Some log messages (especially debug) are emitted by LibVLC while it
    is being initialized. These messages cannot be captured with this interface.
    @warning A deadlock may occur if this function is called from the callback.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_set', None) or \
        _Cfunction('libvlc_log_set', ((1,), (1,), (1,),), None,
                    None, Instance, LogCb, ctypes.c_void_p)
    # the C function expects (instance, callback, data), per the ctypes
    # signature above
    return f(p_instance, cb, data)

def libvlc_log_set_file(p_instance, stream):
    '''Sets up logging to a file.
    @param p_instance: libvlc instance.
    @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{libvlc_log_unset}()).
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_log_set_file', None) or \
        _Cfunction('libvlc_log_set_file', ((1,), (1,),), None,
                    None, Instance, FILE_ptr)
    return f(p_instance, stream)

def libvlc_module_description_list_release(p_list):
    '''Release a list of module descriptions.
    @param p_list: the list to be released.
    '''
    f = _Cfunctions.get('libvlc_module_description_list_release', None) or \
        _Cfunction('libvlc_module_description_list_release', ((1,),), None,
                    None, ctypes.POINTER(ModuleDescription))
    return f(p_list)

def libvlc_audio_filter_list_get(p_instance):
    '''Returns a list of audio filters that are available.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    f = _Cfunctions.get('libvlc_audio_filter_list_get', None) or \
        _Cfunction('libvlc_audio_filter_list_get', ((1,),), None,
                    ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)

def libvlc_video_filter_list_get(p_instance):
    '''Returns a list of video filters that are available.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    f = _Cfunctions.get('libvlc_video_filter_list_get', None) or \
        _Cfunction('libvlc_video_filter_list_get', ((1,),), None,
                    ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)

def libvlc_clock():
    '''Return the current time as defined by LibVLC. The unit is the microsecond.
    Time increases monotonically (regardless of time zone changes and RTC
    adjustments). The origin is arbitrary but consistent across the whole
    system (e.g. the system uptime, the time since the system was booted).
    @note: On systems that support it, the POSIX monotonic clock is used.
    '''
    f = _Cfunctions.get('libvlc_clock', None) or \
        _Cfunction('libvlc_clock', (), None,
                    ctypes.c_int64)
    return f()

def libvlc_media_new_location(p_instance, psz_mrl):
    '''Create a media with a given media resource location, for instance a valid URL.
    @note: To refer to a local file with this function, the file://... URI
    syntax B{must} be used (see IETF RFC3986).
    We recommend using L{libvlc_media_new_path}() instead when dealing with
    local files.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_mrl: the media location.
    @return: the newly created media or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_new_location', None) or \
        _Cfunction('libvlc_media_new_location', ((1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, psz_mrl)

def libvlc_media_new_path(p_instance, path):
    '''Create a media for a certain file path.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param path: local filesystem path.
    @return: the newly created media or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_new_path', None) or \
        _Cfunction('libvlc_media_new_path', ((1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, path)

def libvlc_media_new_fd(p_instance, fd):
    '''Create a media for an already open file descriptor.
    The file descriptor shall be open for reading (or reading and writing).
    Regular file descriptors, pipe read descriptors and character device
    descriptors (including TTYs) are supported on all platforms.
    Block device descriptors are supported where available.
    Directory descriptors are supported on systems that provide fdopendir().
    Sockets are supported on all platforms where they are file descriptors,
    i.e. all except Windows.
    @note: This library will B{not} automatically close the file descriptor
    under any circumstance. Nevertheless, a file descriptor can usually only be
    rendered once in a media player. To render it a second time, the file
    descriptor should probably be rewound to the beginning with lseek().
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param fd: open file descriptor.
    @return: the newly created media or NULL on error.
    @version: LibVLC 1.1.5 and later.
    '''
    f = _Cfunctions.get('libvlc_media_new_fd', None) or \
        _Cfunction('libvlc_media_new_fd', ((1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, ctypes.c_int)
    return f(p_instance, fd)

def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
    '''Create a media with custom callbacks to read the data from.
    @param instance: LibVLC instance.
    @param open_cb: callback to open the custom bitstream input media.
    @param read_cb: callback to read data (must not be NULL).
    @param seek_cb: callback to seek, or NULL if seeking is not supported.
    @param close_cb: callback to close the media, or NULL if unnecessary.
    @param opaque: data pointer for the open callback.
    @return: the newly created media or NULL on error.
    @note If open_cb is NULL, the opaque pointer will be passed to read_cb,
    seek_cb and close_cb, and the stream size will be treated as unknown.
    @note The callbacks may be called asynchronously (from another thread).
    A single stream instance need not be reentrant.
    However the open_cb needs to be reentrant if the media is used by multiple
    player instances.
    @warning The callbacks may be used until all or any player instances
    that were supplied the media item are stopped.
    See L{libvlc_media_release}.
    @version: LibVLC 3.0.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_new_callbacks', None) or \
        _Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb, MediaCloseCb, ctypes.c_void_p)
    return f(instance, open_cb, read_cb, seek_cb, close_cb, opaque)

def libvlc_media_new_as_node(p_instance, psz_name):
    '''Create a media as an empty node with a given name.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_name: the name of the node.
    @return: the new empty media or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_new_as_node', None) or \
        _Cfunction('libvlc_media_new_as_node', ((1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, psz_name)

def libvlc_media_add_option(p_md, psz_options):
    '''Add an option to the media.
    This option will be used to determine how the media_player will read the
    media. This allows the use of VLC's advanced reading/streaming options on a
    per-media basis.
    @note: The options are listed in 'vlc --long-help' from the command line,
    e.g. "--sout-all". Keep in mind that available options and their semantics
    vary across LibVLC versions and builds.
    @warning: Not all options affect L{Media} objects: Specifically, due to
    architectural issues most audio and video options, such as text renderer
    options, have no effect on an individual media. These options must be set
    through L{libvlc_new}() instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    '''
    f = _Cfunctions.get('libvlc_media_add_option', None) or \
        _Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
                    None, Media, ctypes.c_char_p)
    return f(p_md, psz_options)

def libvlc_media_add_option_flag(p_md, psz_options, i_flags):
    '''Add an option to the media with configurable flags.
    This option will be used to determine how the media_player will read the
    media. This allows the use of VLC's advanced reading/streaming options on a
    per-media basis.
    The options are detailed in vlc --long-help, for instance "--sout-all".
    Note that not all options are usable on media: specifically, due to
    architectural issues, video-related options such as text renderer options
    cannot be set on a single media. They must be set on the whole libvlc
    instance instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    @param i_flags: the flags for this option.
    '''
    f = _Cfunctions.get('libvlc_media_add_option_flag', None) or \
        _Cfunction('libvlc_media_add_option_flag', ((1,), (1,), (1,),), None,
                    None, Media, ctypes.c_char_p, ctypes.c_uint)
    return f(p_md, psz_options, i_flags)

def libvlc_media_retain(p_md):
    '''Retain a reference to a media descriptor object (libvlc_media_t). Use
    L{libvlc_media_release}() to decrement the reference count of a
    media descriptor object.
    @param p_md: the media descriptor.
    '''
    f = _Cfunctions.get('libvlc_media_retain', None) or \
        _Cfunction('libvlc_media_retain', ((1,),), None,
                    None, Media)
    return f(p_md)

def libvlc_media_release(p_md):
    '''Decrement the reference count of a media descriptor object. If the
    reference count is 0, then L{libvlc_media_release}() will release the
    media descriptor object.
    It will send out a libvlc_MediaFreed event to all listeners.
    If the media descriptor object has been released it should not be used again.
    @param p_md: the media descriptor.
    '''
    f = _Cfunctions.get('libvlc_media_release', None) or \
        _Cfunction('libvlc_media_release', ((1,),), None,
                    None, Media)
    return f(p_md)

def libvlc_media_get_mrl(p_md):
    '''Get the media resource locator (mrl) from a media descriptor object.
    @param p_md: a media descriptor object.
    @return: string with mrl of media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_mrl', None) or \
        _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_duplicate(p_md):
    '''Duplicate a media descriptor object.
    @param p_md: a media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_duplicate', None) or \
        _Cfunction('libvlc_media_duplicate', ((1,),), class_result(Media),
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_get_meta(p_md, e_meta):
    '''Read the meta of the media.
    If the media has not yet been parsed this will return NULL.
    This method automatically calls L{libvlc_media_parse_async}(), so after
    calling it you may receive a libvlc_MediaMetaChanged event. If you prefer a
    synchronous version, ensure that you call L{libvlc_media_parse}() before
    get_meta().
    See L{libvlc_media_parse}
    See L{libvlc_media_parse_async}
    See libvlc_MediaMetaChanged.
    @param p_md: the media descriptor.
    @param e_meta: the meta to read.
    @return: the media's meta.
    '''
    f = _Cfunctions.get('libvlc_media_get_meta', None) or \
        _Cfunction('libvlc_media_get_meta', ((1,), (1,),), string_result,
                    ctypes.c_void_p, Media, Meta)
    return f(p_md, e_meta)

def libvlc_media_set_meta(p_md, e_meta, psz_value):
    '''Set the meta of the media (this function will not save the meta, call
    L{libvlc_media_save_meta} in order to save the meta).
    @param p_md: the media descriptor.
    @param e_meta: the meta to write.
    @param psz_value: the media's meta.
    '''
    f = _Cfunctions.get('libvlc_media_set_meta', None) or \
        _Cfunction('libvlc_media_set_meta', ((1,), (1,), (1,),), None,
                    None, Media, Meta, ctypes.c_char_p)
    return f(p_md, e_meta, psz_value)

def libvlc_media_save_meta(p_md):
    '''Save the meta previously set.
    @param p_md: the media descriptor.
    @return: true if the write operation was successful.
    '''
    f = _Cfunctions.get('libvlc_media_save_meta', None) or \
        _Cfunction('libvlc_media_save_meta', ((1,),), None,
                    ctypes.c_int, Media)
    return f(p_md)

def libvlc_media_get_state(p_md):
    '''Get current state of media descriptor object. Possible media states are
    defined in libvlc_structures.c (libvlc_NothingSpecial=0, libvlc_Opening,
    libvlc_Buffering, libvlc_Playing, libvlc_Paused, libvlc_Stopped,
    libvlc_Ended, libvlc_Error).
    See libvlc_state_t.
    @param p_md: a media descriptor object.
    @return: state of media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_state', None) or \
        _Cfunction('libvlc_media_get_state', ((1,),), None,
                    State, Media)
    return f(p_md)

def libvlc_media_get_stats(p_md, p_stats):
    '''Get the current statistics about the media.
    @param p_md: media descriptor object.
    @param p_stats: structure that contains the statistics about the media (this structure must be allocated by the caller).
    @return: true if the statistics are available, false otherwise \libvlc_return_bool.
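    A minimal usage sketch, assuming a Media ``md`` and the MediaStats
    structure defined elsewhere in this module (check that definition for the
    exact field names):
    @begincode
    stats = MediaStats()
    if libvlc_media_get_stats(md, ctypes.pointer(stats)):
        print(stats.i_read_bytes)  # field name is an assumption
    @endcode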
    '''
    f = _Cfunctions.get('libvlc_media_get_stats', None) or \
        _Cfunction('libvlc_media_get_stats', ((1,), (1,),), None,
                    ctypes.c_int, Media, ctypes.POINTER(MediaStats))
    return f(p_md, p_stats)

def libvlc_media_subitems(p_md):
    '''Get subitems of media descriptor object. This will increment the reference
    count of the supplied media descriptor object. Use
    L{libvlc_media_list_release}() to decrement the reference counting.
    @param p_md: media descriptor object.
    @return: list of media descriptor subitems or NULL.
    '''
    f = _Cfunctions.get('libvlc_media_subitems', None) or \
        _Cfunction('libvlc_media_subitems', ((1,),), class_result(MediaList),
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_event_manager(p_md):
    '''Get event manager from media descriptor object.
    NOTE: this function doesn't increment reference counting.
    @param p_md: a media descriptor object.
    @return: event manager object.
    '''
    f = _Cfunctions.get('libvlc_media_event_manager', None) or \
        _Cfunction('libvlc_media_event_manager', ((1,),), class_result(EventManager),
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_get_duration(p_md):
    '''Get duration (in ms) of media descriptor object item.
    @param p_md: media descriptor object.
    @return: duration of media item or -1 on error.
    '''
    f = _Cfunctions.get('libvlc_media_get_duration', None) or \
        _Cfunction('libvlc_media_get_duration', ((1,),), None,
                    ctypes.c_longlong, Media)
    return f(p_md)

def libvlc_media_parse(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    The method is synchronous.
    See L{libvlc_media_parse_async}
    See L{libvlc_media_get_meta}
    See libvlc_media_get_tracks_info.
    @param p_md: media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_parse', None) or \
        _Cfunction('libvlc_media_parse', ((1,),), None,
                    None, Media)
    return f(p_md)

def libvlc_media_parse_async(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    This method is the asynchronous version of L{libvlc_media_parse}().
    To track when this is over you can listen to the libvlc_MediaParsedChanged
    event. However if the media was already parsed you will not receive this
    event.
    See L{libvlc_media_parse}
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See libvlc_media_get_tracks_info.
    @param p_md: media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_parse_async', None) or \
        _Cfunction('libvlc_media_parse_async', ((1,),), None,
                    None, Media)
    return f(p_md)

def libvlc_media_parse_with_options(p_md, parse_flag):
    '''Parse the media asynchronously with options.
    This fetches (local or network) art, meta data and/or tracks information.
    This method is the extended version of L{libvlc_media_parse_async}().
    To track when this is over you can listen to the libvlc_MediaParsedChanged
    event. However if this function returns an error, you will not receive
    this event.
    It uses a flag to specify parse options (see libvlc_media_parse_flag_t).
    All these flags can be combined. By default, media is parsed if it's a
    local file.
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See L{libvlc_media_tracks_get}
    See libvlc_media_parse_flag_t.
    @param p_md: media descriptor object.
    @param parse_flag: parse options:
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
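    A minimal usage sketch, assuming a Media ``md``; the flag value 0x1
    (network parsing) is an assumption, so check the MediaParseFlag /
    libvlc_media_parse_flag_t definitions for the real constants:
    @begincode
    libvlc_media_parse_with_options(md, MediaParseFlag(0x1))
    @endcode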
    '''
    f = _Cfunctions.get('libvlc_media_parse_with_options', None) or \
        _Cfunction('libvlc_media_parse_with_options', ((1,), (1,),), None,
                    ctypes.c_int, Media, MediaParseFlag)
    return f(p_md, parse_flag)

def libvlc_media_is_parsed(p_md):
    '''Get Parsed status for media descriptor object.
    See libvlc_MediaParsedChanged.
    @param p_md: media descriptor object.
    @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool.
    '''
    f = _Cfunctions.get('libvlc_media_is_parsed', None) or \
        _Cfunction('libvlc_media_is_parsed', ((1,),), None,
                    ctypes.c_int, Media)
    return f(p_md)

def libvlc_media_set_user_data(p_md, p_new_user_data):
    '''Sets media descriptor's user_data. user_data is specialized data accessed
    by the host application, VLC.framework uses it as a pointer to a native
    object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    @param p_new_user_data: pointer to user data.
    '''
    f = _Cfunctions.get('libvlc_media_set_user_data', None) or \
        _Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
                    None, Media, ctypes.c_void_p)
    return f(p_md, p_new_user_data)

def libvlc_media_get_user_data(p_md):
    '''Get media descriptor's user_data. user_data is specialized data accessed
    by the host application, VLC.framework uses it as a pointer to a native
    object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_user_data', None) or \
        _Cfunction('libvlc_media_get_user_data', ((1,),), None,
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_tracks_get(p_md, tracks):
    '''Get media descriptor's elementary streams description.
    Note: you need to call L{libvlc_media_parse}() or play the media at least
    once before calling this function.
    Not doing this will result in an empty array.
    @param p_md: media descriptor object.
    @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed with L{libvlc_media_tracks_release}()).
    @return: the number of Elementary Streams (zero on error).
    @version: LibVLC 2.1.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_tracks_get', None) or \
        _Cfunction('libvlc_media_tracks_get', ((1,), (1,),), None,
                    ctypes.c_uint, Media, ctypes.POINTER(ctypes.POINTER(MediaTrack)))
    return f(p_md, tracks)

def libvlc_media_get_codec_description(i_type, i_codec):
    '''Get codec description from media elementary stream.
    @param i_type: i_type from L{MediaTrack}.
    @param i_codec: i_codec or i_original_fourcc from L{MediaTrack}.
    @return: codec description.
    @version: LibVLC 3.0.0 and later. See L{MediaTrack}.
    '''
    f = _Cfunctions.get('libvlc_media_get_codec_description', None) or \
        _Cfunction('libvlc_media_get_codec_description', ((1,), (1,),), None,
                    ctypes.c_char_p, TrackType, ctypes.c_uint32)
    return f(i_type, i_codec)

def libvlc_media_tracks_release(p_tracks, i_count):
    '''Release media descriptor's elementary streams description array.
    @param p_tracks: tracks info array to release.
    @param i_count: number of elements in the array.
    @version: LibVLC 2.1.0 and later.
    '''
    f = _Cfunctions.get('libvlc_media_tracks_release', None) or \
        _Cfunction('libvlc_media_tracks_release', ((1,), (1,),), None,
                    None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
    return f(p_tracks, i_count)

def libvlc_media_get_type(p_md):
    '''Get the media type of the media descriptor object.
    @param p_md: media descriptor object.
    @return: media type.
    @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
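    A minimal usage sketch, assuming a Media ``md``:
    @begincode
    mtype = libvlc_media_get_type(md)
    # compare mtype against the MediaType constants defined in this module
    @endcode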
    '''
    f = _Cfunctions.get('libvlc_media_get_type', None) or \
        _Cfunction('libvlc_media_get_type', ((1,),), None,
                    MediaType, Media)
    return f(p_md)

def libvlc_media_discoverer_new(p_inst, psz_name):
    '''Create a media discoverer object by name.
    After this object is created, you should attach to events in order to be
    notified of the discoverer state.
    You should also attach to media_list events in order to be notified of new
    items discovered.
    You need to call L{libvlc_media_discoverer_start}() in order to start the
    discovery.
    See L{libvlc_media_discoverer_media_list}
    See L{libvlc_media_discoverer_event_manager}
    See L{libvlc_media_discoverer_start}.
    @param p_inst: libvlc instance.
    @param psz_name: service name.
    @return: media discover object or NULL in case of error.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_new', None) or \
        _Cfunction('libvlc_media_discoverer_new', ((1,), (1,),), class_result(MediaDiscoverer),
                    ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_inst, psz_name)

def libvlc_media_discoverer_start(p_mdis):
    '''Start media discovery.
    To stop it, call L{libvlc_media_discoverer_stop}() or
    L{libvlc_media_discoverer_release}() directly.
    See L{libvlc_media_discoverer_stop}.
    @param p_mdis: media discover object.
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_start', None) or \
        _Cfunction('libvlc_media_discoverer_start', ((1,),), None,
                    ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_stop(p_mdis):
    '''Stop media discovery.
    See L{libvlc_media_discoverer_start}.
    @param p_mdis: media discover object.
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_stop', None) or \
        _Cfunction('libvlc_media_discoverer_stop', ((1,),), None,
                    None, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_release(p_mdis):
    '''Release media discover object. If the reference count reaches 0, then the
    object will be released.
    @param p_mdis: media service discover object.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_release', None) or \
        _Cfunction('libvlc_media_discoverer_release', ((1,),), None,
                    None, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_localized_name(p_mdis):
    '''Get the localized name of a media service discover object.
    @param p_mdis: media discover object.
    @return: localized name.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_localized_name', None) or \
        _Cfunction('libvlc_media_discoverer_localized_name', ((1,),), string_result,
                    ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_media_list(p_mdis):
    '''Get media service discover media list.
    @param p_mdis: media service discover object.
    @return: list of media items.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_media_list', None) or \
        _Cfunction('libvlc_media_discoverer_media_list', ((1,),), class_result(MediaList),
                    ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_event_manager(p_mdis):
    '''Get event manager from media service discover object.
    @param p_mdis: media service discover object.
    @return: event manager object.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_event_manager', None) or \
        _Cfunction('libvlc_media_discoverer_event_manager', ((1,),), class_result(EventManager),
                    ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_discoverer_is_running(p_mdis):
    '''Query if media service discover object is running.
    @param p_mdis: media service discover object.
    @return: true if running, false if not \libvlc_return_bool.
    '''
    f = _Cfunctions.get('libvlc_media_discoverer_is_running', None) or \
        _Cfunction('libvlc_media_discoverer_is_running', ((1,),), None,
                    ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)

def libvlc_media_library_new(p_instance):
    '''Create a new Media Library object.
    @param p_instance: the libvlc instance.
    @return: a new object or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_library_new', None) or \
        _Cfunction('libvlc_media_library_new', ((1,),), class_result(MediaLibrary),
                    ctypes.c_void_p, Instance)
    return f(p_instance)

def libvlc_media_library_release(p_mlib):
    '''Release media library object. This function decrements the
    reference count of the media library object. If it reaches 0, then the
    object will be released.
    @param p_mlib: media library object.
    '''
    f = _Cfunctions.get('libvlc_media_library_release', None) or \
        _Cfunction('libvlc_media_library_release', ((1,),), None,
                    None, MediaLibrary)
    return f(p_mlib)

def libvlc_media_library_retain(p_mlib):
    '''Retain a reference to a media library object. This function will
    increment the reference counting for this object. Use
    L{libvlc_media_library_release}() to decrement the reference count.
    @param p_mlib: media library object.
    '''
    f = _Cfunctions.get('libvlc_media_library_retain', None) or \
        _Cfunction('libvlc_media_library_retain', ((1,),), None,
                    None, MediaLibrary)
    return f(p_mlib)

def libvlc_media_library_load(p_mlib):
    '''Load media library.
    @param p_mlib: media library object.
    @return: 0 on success, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_media_library_load', None) or \
        _Cfunction('libvlc_media_library_load', ((1,),), None,
                    ctypes.c_int, MediaLibrary)
    return f(p_mlib)

def libvlc_media_library_media_list(p_mlib):
    '''Get media library subitems.
    @param p_mlib: media library object.
    @return: media list subitems.
    '''
    f = _Cfunctions.get('libvlc_media_library_media_list', None) or \
        _Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
                    ctypes.c_void_p, MediaLibrary)
    return f(p_mlib)

def libvlc_media_list_new(p_instance):
    '''Create an empty media list.
    @param p_instance: libvlc instance.
    @return: empty media list, or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_list_new', None) or \
        _Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
                    ctypes.c_void_p, Instance)
    return f(p_instance)

def libvlc_media_list_release(p_ml):
    '''Release media list created with L{libvlc_media_list_new}().
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    f = _Cfunctions.get('libvlc_media_list_release', None) or \
        _Cfunction('libvlc_media_list_release', ((1,),), None,
                    None, MediaList)
    return f(p_ml)

def libvlc_media_list_retain(p_ml):
    '''Retain reference to a media list.
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    f = _Cfunctions.get('libvlc_media_list_retain', None) or \
        _Cfunction('libvlc_media_list_retain', ((1,),), None,
                    None, MediaList)
    return f(p_ml)

def libvlc_media_list_set_media(p_ml, p_md):
    '''Associate media instance with this media list instance. If another media
    instance was present it will be released. The L{libvlc_media_list_lock}
    should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance to add.
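    A minimal usage sketch, assuming an Instance ``inst``; the path is
    illustrative and the final release assumes the list keeps its own
    reference, per the retain/release conventions used throughout this API:
    @begincode
    ml = libvlc_media_list_new(inst)
    md = libvlc_media_new_path(inst, b'/tmp/example.mp3')
    libvlc_media_list_set_media(ml, md)
    libvlc_media_release(md)
    @endcode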
    '''
    f = _Cfunctions.get('libvlc_media_list_set_media', None) or \
        _Cfunction('libvlc_media_list_set_media', ((1,), (1,),), None,
                    None, MediaList, Media)
    return f(p_ml, p_md)

def libvlc_media_list_media(p_ml):
    '''Get media instance from this media list instance. This action will
    increase the refcount on the media instance. The L{libvlc_media_list_lock}
    should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @return: media instance.
    '''
    f = _Cfunctions.get('libvlc_media_list_media', None) or \
        _Cfunction('libvlc_media_list_media', ((1,),), class_result(Media),
                    ctypes.c_void_p, MediaList)
    return f(p_ml)

def libvlc_media_list_add_media(p_ml, p_md):
    '''Add media instance to media list. The L{libvlc_media_list_lock} should be
    held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    f = _Cfunctions.get('libvlc_media_list_add_media', None) or \
        _Cfunction('libvlc_media_list_add_media', ((1,), (1,),), None,
                    ctypes.c_int, MediaList, Media)
    return f(p_ml, p_md)

def libvlc_media_list_insert_media(p_ml, p_md, i_pos):
    '''Insert media instance in media list on a position. The
    L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @param i_pos: position in array where to insert.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    f = _Cfunctions.get('libvlc_media_list_insert_media', None) or \
        _Cfunction('libvlc_media_list_insert_media', ((1,), (1,), (1,),), None,
                    ctypes.c_int, MediaList, Media, ctypes.c_int)
    return f(p_ml, p_md, i_pos)

def libvlc_media_list_remove_index(p_ml, i_pos):
    '''Remove media instance from media list on a position. The
    L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in the array of the item to remove.
    @return: 0 on success, -1 if the list is read-only or the item was not found.
    '''
    f = _Cfunctions.get('libvlc_media_list_remove_index', None) or \
        _Cfunction('libvlc_media_list_remove_index', ((1,), (1,),), None,
                    ctypes.c_int, MediaList, ctypes.c_int)
    return f(p_ml, i_pos)

def libvlc_media_list_count(p_ml):
    '''Get count on media list items. The L{libvlc_media_list_lock} should be
    held upon entering this function.
    @param p_ml: a media list instance.
    @return: number of items in media list.
    '''
    f = _Cfunctions.get('libvlc_media_list_count', None) or \
        _Cfunction('libvlc_media_list_count', ((1,),), None,
                    ctypes.c_int, MediaList)
    return f(p_ml)

def libvlc_media_list_item_at_index(p_ml, i_pos):
    '''Get the media instance in the media list at a position. The
    L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in the array of the item to return.
    @return: media instance at position i_pos, or NULL if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
    '''
    f = _Cfunctions.get('libvlc_media_list_item_at_index', None) or \
        _Cfunction('libvlc_media_list_item_at_index', ((1,), (1,),), class_result(Media),
                    ctypes.c_void_p, MediaList, ctypes.c_int)
    return f(p_ml, i_pos)

def libvlc_media_list_index_of_item(p_ml, p_md):
    '''Find the index position of a media instance in the media list. Warning:
    the function will return the first matched position. The
    L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance.
@return: position of media instance or -1 if media not found. ''' f = _Cfunctions.get('libvlc_media_list_index_of_item', None) or \ _Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None, ctypes.c_int, MediaList, Media) return f(p_ml, p_md) def libvlc_media_list_is_readonly(p_ml): '''This indicates if this media list is read-only from a user point of view. @param p_ml: media list instance. @return: 1 on readonly, 0 on readwrite \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_media_list_is_readonly', None) or \ _Cfunction('libvlc_media_list_is_readonly', ((1,),), None, ctypes.c_int, MediaList) return f(p_ml) def libvlc_media_list_lock(p_ml): '''Get lock on media list items. @param p_ml: a media list instance. ''' f = _Cfunctions.get('libvlc_media_list_lock', None) or \ _Cfunction('libvlc_media_list_lock', ((1,),), None, None, MediaList) return f(p_ml) def libvlc_media_list_unlock(p_ml): '''Release lock on media list items The L{libvlc_media_list_lock} should be held upon entering this function. @param p_ml: a media list instance. ''' f = _Cfunctions.get('libvlc_media_list_unlock', None) or \ _Cfunction('libvlc_media_list_unlock', ((1,),), None, None, MediaList) return f(p_ml) def libvlc_media_list_event_manager(p_ml): '''Get libvlc_event_manager from this media list instance. The p_event_manager is immutable, so you don't have to hold the lock. @param p_ml: a media list instance. @return: libvlc_event_manager. ''' f = _Cfunctions.get('libvlc_media_list_event_manager', None) or \ _Cfunction('libvlc_media_list_event_manager', ((1,),), class_result(EventManager), ctypes.c_void_p, MediaList) return f(p_ml) def libvlc_media_list_player_new(p_instance): '''Create new media_list_player. @param p_instance: libvlc instance. @return: media list player instance or NULL on error. ''' f = _Cfunctions.get('libvlc_media_list_player_new', None) or \ _Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer), ctypes.c_void_p, Instance) return f(p_instance) def libvlc_media_list_player_release(p_mlp): '''Release a media_list_player after use Decrement the reference count of a media player object. If the reference count is 0, then L{libvlc_media_list_player_release}() will release the media player object. If the media player object has been released, then it should not be used again. @param p_mlp: media list player instance. ''' f = _Cfunctions.get('libvlc_media_list_player_release', None) or \ _Cfunction('libvlc_media_list_player_release', ((1,),), None, None, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_retain(p_mlp): '''Retain a reference to a media player list object. Use L{libvlc_media_list_player_release}() to decrement reference count. @param p_mlp: media player list object. ''' f = _Cfunctions.get('libvlc_media_list_player_retain', None) or \ _Cfunction('libvlc_media_list_player_retain', ((1,),), None, None, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_event_manager(p_mlp): '''Return the event manager of this media_list_player. @param p_mlp: media list player instance. @return: the event manager. ''' f = _Cfunctions.get('libvlc_media_list_player_event_manager', None) or \ _Cfunction('libvlc_media_list_player_event_manager', ((1,),), class_result(EventManager), ctypes.c_void_p, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_set_media_player(p_mlp, p_mi): '''Replace media player in media_list_player with this instance. @param p_mlp: media list player instance. @param p_mi: media player instance. 
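    A minimal usage sketch, assuming an Instance ``inst``, a MediaPlayer ``mp``
    and a MediaList ``ml`` created earlier:
    @begincode
    mlp = libvlc_media_list_player_new(inst)
    libvlc_media_list_player_set_media_player(mlp, mp)
    libvlc_media_list_player_set_media_list(mlp, ml)
    libvlc_media_list_player_play(mlp)
    @endcode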
''' f = _Cfunctions.get('libvlc_media_list_player_set_media_player', None) or \ _Cfunction('libvlc_media_list_player_set_media_player', ((1,), (1,),), None, None, MediaListPlayer, MediaPlayer) return f(p_mlp, p_mi) def libvlc_media_list_player_set_media_list(p_mlp, p_mlist): '''Set the media list associated with the player. @param p_mlp: media list player instance. @param p_mlist: list of media. ''' f = _Cfunctions.get('libvlc_media_list_player_set_media_list', None) or \ _Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None, None, MediaListPlayer, MediaList) return f(p_mlp, p_mlist) def libvlc_media_list_player_play(p_mlp): '''Play media list. @param p_mlp: media list player instance. ''' f = _Cfunctions.get('libvlc_media_list_player_play', None) or \ _Cfunction('libvlc_media_list_player_play', ((1,),), None, None, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_pause(p_mlp): '''Toggle pause (or resume) media list. @param p_mlp: media list player instance. ''' f = _Cfunctions.get('libvlc_media_list_player_pause', None) or \ _Cfunction('libvlc_media_list_player_pause', ((1,),), None, None, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_is_playing(p_mlp): '''Is media list playing? @param p_mlp: media list player instance. @return: true for playing and false for not playing \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_media_list_player_is_playing', None) or \ _Cfunction('libvlc_media_list_player_is_playing', ((1,),), None, ctypes.c_int, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_get_state(p_mlp): '''Get current libvlc_state of media list player. @param p_mlp: media list player instance. @return: libvlc_state_t for media list player. ''' f = _Cfunctions.get('libvlc_media_list_player_get_state', None) or \ _Cfunction('libvlc_media_list_player_get_state', ((1,),), None, State, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_play_item_at_index(p_mlp, i_index): '''Play media list item at position index. @param p_mlp: media list player instance. @param i_index: index in media list to play. @return: 0 upon success -1 if the item wasn't found. ''' f = _Cfunctions.get('libvlc_media_list_player_play_item_at_index', None) or \ _Cfunction('libvlc_media_list_player_play_item_at_index', ((1,), (1,),), None, ctypes.c_int, MediaListPlayer, ctypes.c_int) return f(p_mlp, i_index) def libvlc_media_list_player_play_item(p_mlp, p_md): '''Play the given media item. @param p_mlp: media list player instance. @param p_md: the media instance. @return: 0 upon success, -1 if the media is not part of the media list. ''' f = _Cfunctions.get('libvlc_media_list_player_play_item', None) or \ _Cfunction('libvlc_media_list_player_play_item', ((1,), (1,),), None, ctypes.c_int, MediaListPlayer, Media) return f(p_mlp, p_md) def libvlc_media_list_player_stop(p_mlp): '''Stop playing media list. @param p_mlp: media list player instance. ''' f = _Cfunctions.get('libvlc_media_list_player_stop', None) or \ _Cfunction('libvlc_media_list_player_stop', ((1,),), None, None, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_next(p_mlp): '''Play next item from media list. @param p_mlp: media list player instance. @return: 0 upon success -1 if there is no next item. ''' f = _Cfunctions.get('libvlc_media_list_player_next', None) or \ _Cfunction('libvlc_media_list_player_next', ((1,),), None, ctypes.c_int, MediaListPlayer) return f(p_mlp) def libvlc_media_list_player_previous(p_mlp): '''Play previous item from media list. 
    @param p_mlp: media list player instance.
    @return: 0 upon success, -1 if there is no previous item.
    '''
    f = _Cfunctions.get('libvlc_media_list_player_previous', None) or \
        _Cfunction('libvlc_media_list_player_previous', ((1,),), None,
                    ctypes.c_int, MediaListPlayer)
    return f(p_mlp)

def libvlc_media_list_player_set_playback_mode(p_mlp, e_mode):
    '''Sets the playback mode for the playlist.
    @param p_mlp: media list player instance.
    @param e_mode: playback mode specification.
    '''
    f = _Cfunctions.get('libvlc_media_list_player_set_playback_mode', None) or \
        _Cfunction('libvlc_media_list_player_set_playback_mode', ((1,), (1,),), None,
                    None, MediaListPlayer, PlaybackMode)
    return f(p_mlp, e_mode)

def libvlc_media_player_new(p_libvlc_instance):
    '''Create an empty Media Player object.
    @param p_libvlc_instance: the libvlc instance in which the Media Player should be created.
    @return: a new media player object, or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_player_new', None) or \
        _Cfunction('libvlc_media_player_new', ((1,),), class_result(MediaPlayer),
                    ctypes.c_void_p, Instance)
    return f(p_libvlc_instance)

def libvlc_media_player_new_from_media(p_md):
    '''Create a Media Player object from a Media.
    @param p_md: the media. Afterwards the p_md can be safely destroyed.
    @return: a new media player object, or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_player_new_from_media', None) or \
        _Cfunction('libvlc_media_player_new_from_media', ((1,),), class_result(MediaPlayer),
                    ctypes.c_void_p, Media)
    return f(p_md)

def libvlc_media_player_release(p_mi):
    '''Release a media_player after use. Decrement the reference count of a
    media player object. If the reference count is 0, then
    L{libvlc_media_player_release}() will release the media player object. If
    the media player object has been released, then it should not be used
    again.
    @param p_mi: the Media Player to free.
    '''
    f = _Cfunctions.get('libvlc_media_player_release', None) or \
        _Cfunction('libvlc_media_player_release', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_retain(p_mi):
    '''Retain a reference to a media player object. Use
    L{libvlc_media_player_release}() to decrement reference count.
    @param p_mi: media player object.
    '''
    f = _Cfunctions.get('libvlc_media_player_retain', None) or \
        _Cfunction('libvlc_media_player_retain', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_set_media(p_mi, p_md):
    '''Set the media that will be used by the media_player. If any, the previous
    md will be released.
    @param p_mi: the Media Player.
    @param p_md: the Media. Afterwards the p_md can be safely destroyed.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_media', None) or \
        _Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
                    None, MediaPlayer, Media)
    return f(p_mi, p_md)

def libvlc_media_player_get_media(p_mi):
    '''Get the media used by the media_player.
    @param p_mi: the Media Player.
    @return: the media associated with p_mi, or NULL if no media is associated.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_media', None) or \
        _Cfunction('libvlc_media_player_get_media', ((1,),), class_result(Media),
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_event_manager(p_mi):
    '''Get the Event Manager from which the media player sends events.
    @param p_mi: the Media Player.
    @return: the event manager associated with p_mi.
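    A minimal usage sketch, assuming a MediaPlayer ``mp`` plus the Callback
    prototype and EventType constants defined elsewhere in this module; keep a
    reference to the callback so it is not garbage-collected:
    @begincode
    @Callback
    def on_end_reached(event, user_data):
        pass  # runs on a LibVLC thread; keep the work short
    em = libvlc_media_player_event_manager(mp)
    libvlc_event_attach(em, EventType.MediaPlayerEndReached, on_end_reached, None)
    @endcode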
    '''
    f = _Cfunctions.get('libvlc_media_player_event_manager', None) or \
        _Cfunction('libvlc_media_player_event_manager', ((1,),), class_result(EventManager),
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_is_playing(p_mi):
    '''is_playing.
    @param p_mi: the Media Player.
    @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
    '''
    f = _Cfunctions.get('libvlc_media_player_is_playing', None) or \
        _Cfunction('libvlc_media_player_is_playing', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_play(p_mi):
    '''Play.
    @param p_mi: the Media Player.
    @return: 0 if playback started (and was already started), or -1 on error.
    '''
    f = _Cfunctions.get('libvlc_media_player_play', None) or \
        _Cfunction('libvlc_media_player_play', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_set_pause(mp, do_pause):
    '''Pause or resume (no effect if there is no media).
    @param mp: the Media Player.
    @param do_pause: play/resume if zero, pause if non-zero.
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_pause', None) or \
        _Cfunction('libvlc_media_player_set_pause', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(mp, do_pause)

def libvlc_media_player_pause(p_mi):
    '''Toggle pause (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    f = _Cfunctions.get('libvlc_media_player_pause', None) or \
        _Cfunction('libvlc_media_player_pause', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_stop(p_mi):
    '''Stop (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    f = _Cfunctions.get('libvlc_media_player_stop', None) or \
        _Cfunction('libvlc_media_player_stop', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
    '''Set callbacks and private data to render decoded video to a custom area
    in memory.
    Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
    to configure the decoded format.
    @param mp: the media player.
    @param lock: callback to lock video memory (must not be NULL).
    @param unlock: callback to unlock video memory (or NULL if not needed).
    @param display: callback to display video (or NULL if not needed).
    @param opaque: private pointer for the three callbacks (as first parameter).
    @version: LibVLC 1.1.1 or later.
    '''
    f = _Cfunctions.get('libvlc_video_set_callbacks', None) or \
        _Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
    return f(mp, lock, unlock, display, opaque)

def libvlc_video_set_format(mp, chroma, width, height, pitch):
    '''Set decoded video chroma and dimensions.
    This only works in combination with L{libvlc_video_set_callbacks}(),
    and is mutually exclusive with L{libvlc_video_set_format_callbacks}().
    @param mp: the media player.
    @param chroma: a four-character string identifying the chroma (e.g. "RV32" or "YUYV").
    @param width: pixel width.
    @param height: pixel height.
    @param pitch: line pitch (in bytes).
    @version: LibVLC 1.1.1 or later.
    @bug: All pixel planes are expected to have the same pitch. To use the
    YCbCr color space with chrominance subsampling, consider using
    L{libvlc_video_set_format_callbacks}() instead.
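    A minimal usage sketch, assuming a MediaPlayer ``mp`` already configured
    with L{libvlc_video_set_callbacks}(); the chroma string and geometry are
    illustrative:
    @begincode
    w, h = 640, 480
    libvlc_video_set_format(mp, b'RV32', w, h, w * 4)  # 4 bytes per pixel
    @endcode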
    '''
    f = _Cfunctions.get('libvlc_video_set_format', None) or \
        _Cfunction('libvlc_video_set_format', ((1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint)
    return f(mp, chroma, width, height, pitch)

def libvlc_video_set_format_callbacks(mp, setup, cleanup):
    '''Set decoded video chroma and dimensions. This only works in combination
    with L{libvlc_video_set_callbacks}().
    @param mp: the media player.
    @param setup: callback to select the video format (cannot be NULL).
    @param cleanup: callback to release any allocated resources (or NULL).
    @version: LibVLC 2.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_video_set_format_callbacks', None) or \
        _Cfunction('libvlc_video_set_format_callbacks', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, VideoFormatCb, VideoCleanupCb)
    return f(mp, setup, cleanup)

def libvlc_media_player_set_nsobject(p_mi, drawable):
    '''Set the NSView handler where the media player should render its video output.
    Use the vout called "macosx".
    The drawable is an NSObject that follows the VLCOpenGLVideoViewEmbedding
    protocol:
    @begincode
    \@protocol VLCOpenGLVideoViewEmbedding <NSObject>
    - (void)addVoutSubview:(NSView *)view;
    - (void)removeVoutSubview:(NSView *)view;
    \@end
    @endcode
    Or it can be an NSView object.
    If you want to use it along with Qt4, see the QMacCocoaViewContainer. Then
    the following code should work:
    @begincode
    NSView *video = [[NSView alloc] init];
    QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
    L{libvlc_media_player_set_nsobject}(mp, video);
    [video release];
    @endcode
    You can find a live example in VLCVideoView in VLCKit.framework.
    @param p_mi: the Media Player.
    @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_nsobject', None) or \
        _Cfunction('libvlc_media_player_set_nsobject', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, drawable)

def libvlc_media_player_get_nsobject(p_mi):
    '''Get the NSView handler previously set with
    L{libvlc_media_player_set_nsobject}().
    @param p_mi: the Media Player.
    @return: the NSView handler or 0 if none was set.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_nsobject', None) or \
        _Cfunction('libvlc_media_player_get_nsobject', ((1,),), None,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_set_agl(p_mi, drawable):
    '''Set the agl handler where the media player should render its video output.
    @param p_mi: the Media Player.
    @param drawable: the agl handler.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_agl', None) or \
        _Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint32)
    return f(p_mi, drawable)

def libvlc_media_player_get_agl(p_mi):
    '''Get the agl handler previously set with L{libvlc_media_player_set_agl}().
    @param p_mi: the Media Player.
    @return: the agl handler or 0 if none was set.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_agl', None) or \
        _Cfunction('libvlc_media_player_get_agl', ((1,),), None,
                    ctypes.c_uint32, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_set_xwindow(p_mi, drawable):
    '''Set an X Window System drawable where the media player should render its
    video output. If LibVLC was built without X11 output support, then this has
    no effect.
    The specified identifier must correspond to an existing Input/Output class
    X11 window. Pixmaps are B{not} supported.
    The caller shall ensure that the X11 server is the same as the one the VLC
    instance has been configured with. This function must be called before
    video playback is started; otherwise it will only take effect after
    playback stop and restart.
    @param p_mi: the Media Player.
    @param drawable: the ID of the X window.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_xwindow', None) or \
        _Cfunction('libvlc_media_player_set_xwindow', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint32)
    return f(p_mi, drawable)

def libvlc_media_player_get_xwindow(p_mi):
    '''Get the X Window System window identifier previously set with
    L{libvlc_media_player_set_xwindow}(). Note that this will return the
    identifier even if VLC is not currently using it (for instance if it is
    playing an audio-only input).
    @param p_mi: the Media Player.
    @return: an X window ID, or 0 if none was set.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_xwindow', None) or \
        _Cfunction('libvlc_media_player_get_xwindow', ((1,),), None,
                    ctypes.c_uint32, MediaPlayer)
    return f(p_mi)

def libvlc_media_player_set_hwnd(p_mi, drawable):
    '''Set a Win32/Win64 API window handle (HWND) where the media player should
    render its video output. If LibVLC was built without Win32/Win64 API output
    support, then this has no effect.
    @param p_mi: the Media Player.
    @param drawable: windows handle of the drawable.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_hwnd', None) or \
        _Cfunction('libvlc_media_player_set_hwnd', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_void_p)
    return f(p_mi, drawable)

def libvlc_media_player_get_hwnd(p_mi):
    '''Get the Windows API window handle (HWND) previously set with
    L{libvlc_media_player_set_hwnd}(). The handle will be returned even if
    LibVLC is not currently outputting any video to it.
    @param p_mi: the Media Player.
    @return: a window handle or NULL if there are none.
    '''
    f = _Cfunctions.get('libvlc_media_player_get_hwnd', None) or \
        _Cfunction('libvlc_media_player_get_hwnd', ((1,),), None,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)

def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque):
    '''Set callbacks and private data for decoded audio.
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @param mp: the media player.
    @param play: callback to play audio samples (must not be NULL).
    @param pause: callback to pause playback (or NULL to ignore).
    @param resume: callback to resume playback (or NULL to ignore).
    @param flush: callback to flush audio buffers (or NULL to ignore).
    @param drain: callback to drain audio buffers (or NULL to ignore).
    @param opaque: private pointer for the audio callbacks (as first parameter).
    @version: LibVLC 2.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_set_callbacks', None) or \
        _Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                    None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p)
    return f(mp, play, pause, resume, flush, drain, opaque)

def libvlc_audio_set_volume_callback(mp, set_volume):
    '''Set callbacks and private data for decoded audio. This only works in
    combination with L{libvlc_audio_set_callbacks}().
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @param mp: the media player.
    @param set_volume: callback to apply audio volume, or NULL to apply volume in software.
    @version: LibVLC 2.0.0 or later.
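    A minimal usage sketch, assuming a MediaPlayer ``mp`` already configured
    with L{libvlc_audio_set_callbacks}(); the AudioSetVolumeCb parameter
    meanings shown here are an assumption, so check its definition in this
    module:
    @begincode
    @AudioSetVolumeCb
    def on_volume(opaque, volume, mute):
        pass  # apply ``volume``/``mute`` in the application
    libvlc_audio_set_volume_callback(mp, on_volume)
    @endcode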
''' f = _Cfunctions.get('libvlc_audio_set_volume_callback', None) or \ _Cfunction('libvlc_audio_set_volume_callback', ((1,), (1,),), None, None, MediaPlayer, AudioSetVolumeCb) return f(mp, set_volume) def libvlc_audio_set_format_callbacks(mp, setup, cleanup): '''Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(). @param mp: the media player. @param setup: callback to select the audio format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. ''' f = _Cfunctions.get('libvlc_audio_set_format_callbacks', None) or \ _Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,),), None, None, MediaPlayer, AudioSetupCb, AudioCleanupCb) return f(mp, setup, cleanup) def libvlc_audio_set_format(mp, format, rate, channels): '''Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(), and is mutually exclusive with L{libvlc_audio_set_format_callbacks}(). @param mp: the media player. @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32"). @param rate: sample rate (expressed in Hz). @param channels: channels count. @version: LibVLC 2.0.0 or later. ''' f = _Cfunctions.get('libvlc_audio_set_format', None) or \ _Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None, None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint) return f(mp, format, rate, channels) def libvlc_media_player_get_length(p_mi): '''Get the current movie length (in ms). @param p_mi: the Media Player. @return: the movie length (in ms), or -1 if there is no media. ''' f = _Cfunctions.get('libvlc_media_player_get_length', None) or \ _Cfunction('libvlc_media_player_get_length', ((1,),), None, ctypes.c_longlong, MediaPlayer) return f(p_mi) def libvlc_media_player_get_time(p_mi): '''Get the current movie time (in ms). @param p_mi: the Media Player. @return: the movie time (in ms), or -1 if there is no media. ''' f = _Cfunctions.get('libvlc_media_player_get_time', None) or \ _Cfunction('libvlc_media_player_get_time', ((1,),), None, ctypes.c_longlong, MediaPlayer) return f(p_mi) def libvlc_media_player_set_time(p_mi, i_time): '''Set the movie time (in ms). This has no effect if no media is being played. Not all formats and protocols support this. @param p_mi: the Media Player. @param i_time: the movie time (in ms). ''' f = _Cfunctions.get('libvlc_media_player_set_time', None) or \ _Cfunction('libvlc_media_player_set_time', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_longlong) return f(p_mi, i_time) def libvlc_media_player_get_position(p_mi): '''Get movie position as percentage between 0.0 and 1.0. @param p_mi: the Media Player. @return: movie position, or -1. in case of error. ''' f = _Cfunctions.get('libvlc_media_player_get_position', None) or \ _Cfunction('libvlc_media_player_get_position', ((1,),), None, ctypes.c_float, MediaPlayer) return f(p_mi) def libvlc_media_player_set_position(p_mi, f_pos): '''Set movie position as percentage between 0.0 and 1.0. This has no effect if playback is not enabled. This might not work depending on the underlying input format and protocol. @param p_mi: the Media Player. @param f_pos: the position. ''' f = _Cfunctions.get('libvlc_media_player_set_position', None) or \ _Cfunction('libvlc_media_player_set_position', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_float) return f(p_mi, f_pos) def libvlc_media_player_set_chapter(p_mi, i_chapter): '''Set movie chapter (if applicable). 
@param p_mi: the Media Player. @param i_chapter: chapter number to play. ''' f = _Cfunctions.get('libvlc_media_player_set_chapter', None) or \ _Cfunction('libvlc_media_player_set_chapter', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_int) return f(p_mi, i_chapter) def libvlc_media_player_get_chapter(p_mi): '''Get movie chapter. @param p_mi: the Media Player. @return: chapter number currently playing, or -1 if there is no media. ''' f = _Cfunctions.get('libvlc_media_player_get_chapter', None) or \ _Cfunction('libvlc_media_player_get_chapter', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_get_chapter_count(p_mi): '''Get movie chapter count. @param p_mi: the Media Player. @return: number of chapters in movie, or -1. ''' f = _Cfunctions.get('libvlc_media_player_get_chapter_count', None) or \ _Cfunction('libvlc_media_player_get_chapter_count', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_will_play(p_mi): '''Is the player able to play. @param p_mi: the Media Player. @return: boolean \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_media_player_will_play', None) or \ _Cfunction('libvlc_media_player_will_play', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_get_chapter_count_for_title(p_mi, i_title): '''Get title chapter count. @param p_mi: the Media Player. @param i_title: title. @return: number of chapters in title, or -1. ''' f = _Cfunctions.get('libvlc_media_player_get_chapter_count_for_title', None) or \ _Cfunction('libvlc_media_player_get_chapter_count_for_title', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int) return f(p_mi, i_title) def libvlc_media_player_set_title(p_mi, i_title): '''Set movie title. @param p_mi: the Media Player. @param i_title: title number to play. ''' f = _Cfunctions.get('libvlc_media_player_set_title', None) or \ _Cfunction('libvlc_media_player_set_title', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_int) return f(p_mi, i_title) def libvlc_media_player_get_title(p_mi): '''Get movie title. @param p_mi: the Media Player. @return: title number currently playing, or -1. ''' f = _Cfunctions.get('libvlc_media_player_get_title', None) or \ _Cfunction('libvlc_media_player_get_title', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_get_title_count(p_mi): '''Get movie title count. @param p_mi: the Media Player. @return: title number count, or -1. ''' f = _Cfunctions.get('libvlc_media_player_get_title_count', None) or \ _Cfunction('libvlc_media_player_get_title_count', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_previous_chapter(p_mi): '''Set previous chapter (if applicable). @param p_mi: the Media Player. ''' f = _Cfunctions.get('libvlc_media_player_previous_chapter', None) or \ _Cfunction('libvlc_media_player_previous_chapter', ((1,),), None, None, MediaPlayer) return f(p_mi) def libvlc_media_player_next_chapter(p_mi): '''Set next chapter (if applicable). @param p_mi: the Media Player. ''' f = _Cfunctions.get('libvlc_media_player_next_chapter', None) or \ _Cfunction('libvlc_media_player_next_chapter', ((1,),), None, None, MediaPlayer) return f(p_mi) def libvlc_media_player_get_rate(p_mi): '''Get the requested movie play rate. @warning: Depending on the underlying media, the requested rate may be different from the real playback rate. @param p_mi: the Media Player. @return: movie play rate. 
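    Example: a minimal sketch, assuming p_mi is an existing, playing
    MediaPlayer; uses L{libvlc_media_player_set_rate}() defined below.
    @begincode
    if libvlc_media_player_set_rate(p_mi, 1.5) == 0:
        print('playing at %.1fx' % libvlc_media_player_get_rate(p_mi))
    @endcode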
''' f = _Cfunctions.get('libvlc_media_player_get_rate', None) or \ _Cfunction('libvlc_media_player_get_rate', ((1,),), None, ctypes.c_float, MediaPlayer) return f(p_mi) def libvlc_media_player_set_rate(p_mi, rate): '''Set movie play rate. @param p_mi: the Media Player. @param rate: movie play rate to set. @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol). ''' f = _Cfunctions.get('libvlc_media_player_set_rate', None) or \ _Cfunction('libvlc_media_player_set_rate', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_float) return f(p_mi, rate) def libvlc_media_player_get_state(p_mi): '''Get current movie state. @param p_mi: the Media Player. @return: the current state of the media player (playing, paused, ...) See libvlc_state_t. ''' f = _Cfunctions.get('libvlc_media_player_get_state', None) or \ _Cfunction('libvlc_media_player_get_state', ((1,),), None, State, MediaPlayer) return f(p_mi) def libvlc_media_player_get_fps(p_mi): '''Get movie fps rate. @param p_mi: the Media Player. @return: frames per second (fps) for this playing movie, or 0 if unspecified. ''' f = _Cfunctions.get('libvlc_media_player_get_fps', None) or \ _Cfunction('libvlc_media_player_get_fps', ((1,),), None, ctypes.c_float, MediaPlayer) return f(p_mi) def libvlc_media_player_has_vout(p_mi): '''How many video outputs does this media player have? @param p_mi: the media player. @return: the number of video outputs. ''' f = _Cfunctions.get('libvlc_media_player_has_vout', None) or \ _Cfunction('libvlc_media_player_has_vout', ((1,),), None, ctypes.c_uint, MediaPlayer) return f(p_mi) def libvlc_media_player_is_seekable(p_mi): '''Is this media player seekable? @param p_mi: the media player. @return: true if the media player can seek \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_media_player_is_seekable', None) or \ _Cfunction('libvlc_media_player_is_seekable', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_can_pause(p_mi): '''Can this media player be paused? @param p_mi: the media player. @return: true if the media player can pause \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_media_player_can_pause', None) or \ _Cfunction('libvlc_media_player_can_pause', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_program_scrambled(p_mi): '''Check if the current program is scrambled. @param p_mi: the media player. @return: true if the current program is scrambled \libvlc_return_bool. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_media_player_program_scrambled', None) or \ _Cfunction('libvlc_media_player_program_scrambled', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_media_player_next_frame(p_mi): '''Display the next frame (if supported). @param p_mi: the media player. ''' f = _Cfunctions.get('libvlc_media_player_next_frame', None) or \ _Cfunction('libvlc_media_player_next_frame', ((1,),), None, None, MediaPlayer) return f(p_mi) def libvlc_media_player_navigate(p_mi, navigate): '''Navigate through DVD Menu. @param p_mi: the Media Player. @param navigate: the Navigation mode. @version: libVLC 2.0.0 or later. 
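    Example: a minimal sketch; 0 corresponds to libvlc_navigate_activate in
    the underlying libvlc_navigate_mode_t enumeration (other values move the
    DVD menu selection up/down/left/right).
    @begincode
    libvlc_media_player_navigate(p_mi, 0)  # activate the selected menu item
    @endcode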
''' f = _Cfunctions.get('libvlc_media_player_navigate', None) or \ _Cfunction('libvlc_media_player_navigate', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_uint) return f(p_mi, navigate) def libvlc_media_player_set_video_title_display(p_mi, position, timeout): '''Set if, and how, the video title will be shown when media is played. @param p_mi: the media player. @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed. @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable). @version: libVLC 2.1.0 or later. ''' f = _Cfunctions.get('libvlc_media_player_set_video_title_display', None) or \ _Cfunction('libvlc_media_player_set_video_title_display', ((1,), (1,), (1,),), None, None, MediaPlayer, Position, ctypes.c_int) return f(p_mi, position, timeout) def libvlc_track_description_list_release(p_track_description): '''Release (free) L{TrackDescription}. @param p_track_description: the structure to release. ''' f = _Cfunctions.get('libvlc_track_description_list_release', None) or \ _Cfunction('libvlc_track_description_list_release', ((1,),), None, None, ctypes.POINTER(TrackDescription)) return f(p_track_description) def libvlc_toggle_fullscreen(p_mi): '''Toggle fullscreen status on non-embedded video outputs. @warning: The same limitations applies to this function as to L{libvlc_set_fullscreen}(). @param p_mi: the media player. ''' f = _Cfunctions.get('libvlc_toggle_fullscreen', None) or \ _Cfunction('libvlc_toggle_fullscreen', ((1,),), None, None, MediaPlayer) return f(p_mi) def libvlc_set_fullscreen(p_mi, b_fullscreen): '''Enable or disable fullscreen. @warning: With most window managers, only a top-level windows can be in full-screen mode. Hence, this function will not operate properly if L{libvlc_media_player_set_xwindow}() was used to embed the video in a non-top-level window. In that case, the embedding window must be reparented to the root window B{before} fullscreen mode is enabled. You will want to reparent it back to its normal parent when disabling fullscreen. @param p_mi: the media player. @param b_fullscreen: boolean for fullscreen status. ''' f = _Cfunctions.get('libvlc_set_fullscreen', None) or \ _Cfunction('libvlc_set_fullscreen', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_int) return f(p_mi, b_fullscreen) def libvlc_get_fullscreen(p_mi): '''Get current fullscreen status. @param p_mi: the media player. @return: the fullscreen status (boolean) \libvlc_return_bool. ''' f = _Cfunctions.get('libvlc_get_fullscreen', None) or \ _Cfunction('libvlc_get_fullscreen', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_video_set_key_input(p_mi, on): '''Enable or disable key press events handling, according to the LibVLC hotkeys configuration. By default and for historical reasons, keyboard events are handled by the LibVLC video widget. @note: On X11, there can be only one subscriber for key press and mouse click events per window. If your application has subscribed to those events for the X window ID of the video widget, then LibVLC will not be able to handle key presses and mouse clicks in any case. @warning: This function is only implemented for X11 and Win32 at the moment. @param p_mi: the media player. @param on: true to handle key press events, false to ignore them. 
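    Example: a minimal sketch for an embedded video widget where the host
    toolkit, not LibVLC, should receive input events (see also
    L{libvlc_video_set_mouse_input}() below).
    @begincode
    libvlc_video_set_key_input(p_mi, 0)    # ignore key presses
    libvlc_video_set_mouse_input(p_mi, 0)  # ignore mouse clicks
    @endcode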
''' f = _Cfunctions.get('libvlc_video_set_key_input', None) or \ _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_uint) return f(p_mi, on) def libvlc_video_set_mouse_input(p_mi, on): '''Enable or disable mouse click events handling. By default, those events are handled. This is needed for DVD menus to work, as well as a few video filters such as "puzzle". See L{libvlc_video_set_key_input}(). @warning: This function is only implemented for X11 and Win32 at the moment. @param p_mi: the media player. @param on: true to handle mouse click events, false to ignore them. ''' f = _Cfunctions.get('libvlc_video_set_mouse_input', None) or \ _Cfunction('libvlc_video_set_mouse_input', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_uint) return f(p_mi, on) def libvlc_video_get_size(p_mi, num): '''Get the pixel dimensions of a video. @param p_mi: media player. @param num: number of the video (starting from, and most commonly 0). @return: px pixel width, py pixel height. ''' f = _Cfunctions.get('libvlc_video_get_size', None) or \ _Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None, ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)) return f(p_mi, num) def libvlc_video_get_cursor(p_mi, num): '''Get the mouse pointer coordinates over a video. Coordinates are expressed in terms of the decoded video resolution, B{not} in terms of pixels on the screen/viewport (to get the latter, you can query your windowing system directly). Either of the coordinates may be negative or larger than the corresponding dimension of the video, if the cursor is outside the rendering area. @warning: The coordinates may be out-of-date if the pointer is not located on the video rendering area. LibVLC does not track the pointer if it is outside of the video widget. @note: LibVLC does not support multiple pointers (it does of course support multiple input devices sharing the same pointer) at the moment. @param p_mi: media player. @param num: number of the video (starting from, and most commonly 0). @return: px abscissa, py ordinate. ''' f = _Cfunctions.get('libvlc_video_get_cursor', None) or \ _Cfunction('libvlc_video_get_cursor', ((1,), (1,), (2,), (2,),), None, ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)) return f(p_mi, num) def libvlc_video_get_scale(p_mi): '''Get the current video scaling factor. See also L{libvlc_video_set_scale}(). @param p_mi: the media player. @return: the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable automatically. ''' f = _Cfunctions.get('libvlc_video_get_scale', None) or \ _Cfunction('libvlc_video_get_scale', ((1,),), None, ctypes.c_float, MediaPlayer) return f(p_mi) def libvlc_video_set_scale(p_mi, f_factor): '''Set the video scaling factor. That is the ratio of the number of pixels on screen to the number of pixels in the original decoded video in each dimension. Zero is a special value; it will adjust the video to the output window/drawable (in windowed mode) or the entire screen. Note that not all video outputs support scaling. @param p_mi: the media player. @param f_factor: the scaling factor, or zero. ''' f = _Cfunctions.get('libvlc_video_set_scale', None) or \ _Cfunction('libvlc_video_set_scale', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_float) return f(p_mi, f_factor) def libvlc_video_get_aspect_ratio(p_mi): '''Get current video aspect ratio. @param p_mi: the media player. 
@return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()). ''' f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \ _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result, ctypes.c_void_p, MediaPlayer) return f(p_mi) def libvlc_video_set_aspect_ratio(p_mi, psz_aspect): '''Set new video aspect ratio. @param p_mi: the media player. @param psz_aspect: new video aspect-ratio or NULL to reset to default @note Invalid aspect ratios are ignored. ''' f = _Cfunctions.get('libvlc_video_set_aspect_ratio', None) or \ _Cfunction('libvlc_video_set_aspect_ratio', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_char_p) return f(p_mi, psz_aspect) def libvlc_video_get_spu(p_mi): '''Get current video subtitle. @param p_mi: the media player. @return: the video subtitle selected, or -1 if none. ''' f = _Cfunctions.get('libvlc_video_get_spu', None) or \ _Cfunction('libvlc_video_get_spu', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_video_get_spu_count(p_mi): '''Get the number of available video subtitles. @param p_mi: the media player. @return: the number of available video subtitles. ''' f = _Cfunctions.get('libvlc_video_get_spu_count', None) or \ _Cfunction('libvlc_video_get_spu_count', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_video_get_spu_description(p_mi): '''Get the description of available video subtitles. @param p_mi: the media player. @return: list containing description of available video subtitles. ''' f = _Cfunctions.get('libvlc_video_get_spu_description', None) or \ _Cfunction('libvlc_video_get_spu_description', ((1,),), None, ctypes.POINTER(TrackDescription), MediaPlayer) return f(p_mi) def libvlc_video_set_spu(p_mi, i_spu): '''Set new video subtitle. @param p_mi: the media player. @param i_spu: video subtitle track to select (i_id from track description). @return: 0 on success, -1 if out of range. ''' f = _Cfunctions.get('libvlc_video_set_spu', None) or \ _Cfunction('libvlc_video_set_spu', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int) return f(p_mi, i_spu) def libvlc_video_set_subtitle_file(p_mi, psz_subtitle): '''Set new video subtitle file. @param p_mi: the media player. @param psz_subtitle: new video subtitle file. @return: the success status (boolean). ''' f = _Cfunctions.get('libvlc_video_set_subtitle_file', None) or \ _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_char_p) return f(p_mi, psz_subtitle) def libvlc_video_get_spu_delay(p_mi): '''Get the current subtitle delay. Positive values means subtitles are being displayed later, negative values earlier. @param p_mi: media player. @return: time (in microseconds) the display of subtitles is being delayed. @version: LibVLC 2.0.0 or later. ''' f = _Cfunctions.get('libvlc_video_get_spu_delay', None) or \ _Cfunction('libvlc_video_get_spu_delay', ((1,),), None, ctypes.c_int64, MediaPlayer) return f(p_mi) def libvlc_video_set_spu_delay(p_mi, i_delay): '''Set the subtitle delay. This affects the timing of when the subtitle will be displayed. Positive values result in subtitles being displayed later, while negative values will result in subtitles being displayed earlier. The subtitle delay will be reset to zero each time the media changes. @param p_mi: media player. @param i_delay: time (in microseconds) the display of subtitles should be delayed. @return: 0 on success, -1 on error. @version: LibVLC 2.0.0 or later. 
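    Example: a minimal sketch; shifts subtitle display 500 ms later.
    @begincode
    if libvlc_video_set_spu_delay(p_mi, 500000) == 0:  # microseconds
        assert libvlc_video_get_spu_delay(p_mi) == 500000
    @endcode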
    '''
    f = _Cfunctions.get('libvlc_video_set_spu_delay', None) or \
        _Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return f(p_mi, i_delay)

def libvlc_video_get_title_description(p_mi):
    '''Get the description of available titles.
    @param p_mi: the media player.
    @return: list containing description of available titles.
    '''
    f = _Cfunctions.get('libvlc_video_get_title_description', None) or \
        _Cfunction('libvlc_video_get_title_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)

def libvlc_video_get_chapter_description(p_mi, i_title):
    '''Get the description of available chapters for specific title.
    @param p_mi: the media player.
    @param i_title: selected title.
    @return: list containing description of available chapter for title i_title.
    '''
    f = _Cfunctions.get('libvlc_video_get_chapter_description', None) or \
        _Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
    return f(p_mi, i_title)

def libvlc_video_get_crop_geometry(p_mi):
    '''Get current crop filter geometry.
    @param p_mi: the media player.
    @return: the crop filter geometry or NULL if unset.
    '''
    f = _Cfunctions.get('libvlc_video_get_crop_geometry', None) or \
        _Cfunction('libvlc_video_get_crop_geometry', ((1,),), string_result,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)

def libvlc_video_set_crop_geometry(p_mi, psz_geometry):
    '''Set new crop filter geometry.
    @param p_mi: the media player.
    @param psz_geometry: new crop filter geometry (NULL to unset).
    '''
    f = _Cfunctions.get('libvlc_video_set_crop_geometry', None) or \
        _Cfunction('libvlc_video_set_crop_geometry', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_geometry)

def libvlc_video_get_teletext(p_mi):
    '''Get current teletext page requested.
    @param p_mi: the media player.
    @return: the current teletext page requested.
    '''
    f = _Cfunctions.get('libvlc_video_get_teletext', None) or \
        _Cfunction('libvlc_video_get_teletext', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_video_set_teletext(p_mi, i_page):
    '''Set new teletext page to retrieve.
    @param p_mi: the media player.
    @param i_page: teletext page number requested.
    '''
    f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
        _Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_page)

def libvlc_toggle_teletext(p_mi):
    '''Toggle teletext transparent status on video output.
    @param p_mi: the media player.
    '''
    f = _Cfunctions.get('libvlc_toggle_teletext', None) or \
        _Cfunction('libvlc_toggle_teletext', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_video_get_track_count(p_mi):
    '''Get number of available video tracks.
    @param p_mi: media player.
    @return: the number of available video tracks (int).
    '''
    f = _Cfunctions.get('libvlc_video_get_track_count', None) or \
        _Cfunction('libvlc_video_get_track_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_video_get_track_description(p_mi):
    '''Get the description of available video tracks.
    @param p_mi: media player.
    @return: list with description of available video tracks, or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_video_get_track_description', None) or \
        _Cfunction('libvlc_video_get_track_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)

def libvlc_video_get_track(p_mi):
    '''Get current video track.
    @param p_mi: media player.
@return: the video track ID (int) or -1 if no active input. ''' f = _Cfunctions.get('libvlc_video_get_track', None) or \ _Cfunction('libvlc_video_get_track', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_video_set_track(p_mi, i_track): '''Set video track. @param p_mi: media player. @param i_track: the track ID (i_id field from track description). @return: 0 on success, -1 if out of range. ''' f = _Cfunctions.get('libvlc_video_set_track', None) or \ _Cfunction('libvlc_video_set_track', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int) return f(p_mi, i_track) def libvlc_video_take_snapshot(p_mi, num, psz_filepath, i_width, i_height): '''Take a snapshot of the current video window. If i_width AND i_height is 0, original size is used. If i_width XOR i_height is 0, original aspect-ratio is preserved. @param p_mi: media player instance. @param num: number of video output (typically 0 for the first/only one). @param psz_filepath: the path where to save the screenshot to. @param i_width: the snapshot's width. @param i_height: the snapshot's height. @return: 0 on success, -1 if the video was not found. ''' f = _Cfunctions.get('libvlc_video_take_snapshot', None) or \ _Cfunction('libvlc_video_take_snapshot', ((1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int) return f(p_mi, num, psz_filepath, i_width, i_height) def libvlc_video_set_deinterlace(p_mi, psz_mode): '''Enable or disable deinterlace filter. @param p_mi: libvlc media player. @param psz_mode: type of deinterlace filter, NULL to disable. ''' f = _Cfunctions.get('libvlc_video_set_deinterlace', None) or \ _Cfunction('libvlc_video_set_deinterlace', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_char_p) return f(p_mi, psz_mode) def libvlc_video_get_marquee_int(p_mi, option): '''Get an integer marquee option value. @param p_mi: libvlc media player. @param option: marq option to get See libvlc_video_marquee_int_option_t. ''' f = _Cfunctions.get('libvlc_video_get_marquee_int', None) or \ _Cfunction('libvlc_video_get_marquee_int', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_uint) return f(p_mi, option) def libvlc_video_get_marquee_string(p_mi, option): '''Get a string marquee option value. @param p_mi: libvlc media player. @param option: marq option to get See libvlc_video_marquee_string_option_t. ''' f = _Cfunctions.get('libvlc_video_get_marquee_string', None) or \ _Cfunction('libvlc_video_get_marquee_string', ((1,), (1,),), string_result, ctypes.c_void_p, MediaPlayer, ctypes.c_uint) return f(p_mi, option) def libvlc_video_set_marquee_int(p_mi, option, i_val): '''Enable, disable or set an integer marquee option Setting libvlc_marquee_Enable has the side effect of enabling (arg !0) or disabling (arg 0) the marq filter. @param p_mi: libvlc media player. @param option: marq option to set See libvlc_video_marquee_int_option_t. @param i_val: marq option value. ''' f = _Cfunctions.get('libvlc_video_set_marquee_int', None) or \ _Cfunction('libvlc_video_set_marquee_int', ((1,), (1,), (1,),), None, None, MediaPlayer, ctypes.c_uint, ctypes.c_int) return f(p_mi, option, i_val) def libvlc_video_set_marquee_string(p_mi, option, psz_text): '''Set a marquee string option. @param p_mi: libvlc media player. @param option: marq option to set See libvlc_video_marquee_string_option_t. @param psz_text: marq option value. 
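    Example: a minimal sketch; 0 and 1 correspond to libvlc_marquee_Enable
    and libvlc_marquee_Text in libvlc_video_marquee_option_t.
    @begincode
    libvlc_video_set_marquee_int(p_mi, 0, 1)           # enable the marq filter
    libvlc_video_set_marquee_string(p_mi, 1, 'Hello')  # overlay text
    @endcode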
    '''
    f = _Cfunctions.get('libvlc_video_set_marquee_string', None) or \
        _Cfunction('libvlc_video_set_marquee_string', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_text)

def libvlc_video_get_logo_int(p_mi, option):
    '''Get integer logo option.
    @param p_mi: libvlc media player instance.
    @param option: logo option to get, values of libvlc_video_logo_option_t.
    '''
    f = _Cfunctions.get('libvlc_video_get_logo_int', None) or \
        _Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)

def libvlc_video_set_logo_int(p_mi, option, value):
    '''Set logo option as integer. Options that take a different type value
    are ignored.
    Passing libvlc_logo_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the logo filter.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param value: logo option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_logo_int', None) or \
        _Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)

def libvlc_video_set_logo_string(p_mi, option, psz_value):
    '''Set logo option as string. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param psz_value: logo option value.
    '''
    f = _Cfunctions.get('libvlc_video_set_logo_string', None) or \
        _Cfunction('libvlc_video_set_logo_string', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_value)

def libvlc_video_get_adjust_int(p_mi, option):
    '''Get integer adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_get_adjust_int', None) or \
        _Cfunction('libvlc_video_get_adjust_int', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)

def libvlc_video_set_adjust_int(p_mi, option, value):
    '''Set adjust option as integer. Options that take a different type value
    are ignored.
    Passing libvlc_adjust_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the adjust filter.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_set_adjust_int', None) or \
        _Cfunction('libvlc_video_set_adjust_int', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)

def libvlc_video_get_adjust_float(p_mi, option):
    '''Get float adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_get_adjust_float', None) or \
        _Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None,
                    ctypes.c_float, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)

def libvlc_video_set_adjust_float(p_mi, option, value):
    '''Set adjust option as float. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    f = _Cfunctions.get('libvlc_video_set_adjust_float', None) or \
        _Cfunction('libvlc_video_set_adjust_float', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_float)
    return f(p_mi, option, value)

def libvlc_audio_output_list_get(p_instance):
    '''Gets the list of available audio output modules.
    @param p_instance: libvlc instance.
    @return: list of available audio outputs. It must be freed with
    L{libvlc_audio_output_list_release}(). In case of error, NULL is returned.
    '''
    f = _Cfunctions.get('libvlc_audio_output_list_get', None) or \
        _Cfunction('libvlc_audio_output_list_get', ((1,),), None,
                    ctypes.POINTER(AudioOutput), Instance)
    return f(p_instance)

def libvlc_audio_output_list_release(p_list):
    '''Frees the list of available audio output modules.
    @param p_list: list with audio outputs for release.
    '''
    f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \
        _Cfunction('libvlc_audio_output_list_release', ((1,),), None,
                    None, ctypes.POINTER(AudioOutput))
    return f(p_list)

def libvlc_audio_output_set(p_mi, psz_name):
    '''Selects an audio output module.
    @note: Any change will take effect only after playback is stopped and
    restarted. Audio output cannot be changed while playing.
    @param p_mi: media player.
    @param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
    @return: 0 if function succeeded, -1 on error.
    '''
    f = _Cfunctions.get('libvlc_audio_output_set', None) or \
        _Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_name)

def libvlc_audio_output_device_enum(mp):
    '''Gets a list of potential audio output devices,
    See L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support enumerating devices.
    The audio output may be functional even if the list is empty (NULL).
    @note: The list may not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param mp: media player.
    @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_enum', None) or \
        _Cfunction('libvlc_audio_output_device_enum', ((1,),), None,
                    ctypes.POINTER(AudioOutputDevice), MediaPlayer)
    return f(mp)

def libvlc_audio_output_device_list_get(p_instance, aout):
    '''Gets a list of audio output devices for a given audio output module,
    See L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support this. In particular, an empty (NULL)
    list of devices does B{not} imply that the specified audio output does
    not work.
    @note: The list might not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param p_instance: libvlc instance.
    @param aout: audio output name (as returned by L{libvlc_audio_output_list_get}()).
    @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.1.0 or later.
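    Example: a minimal sketch; walks the NULL-terminated linked list for an
    illustrative "pulse" output and releases it afterwards. The field names
    follow the AudioOutputDevice structure declared earlier in this module.
    @begincode
    devs = libvlc_audio_output_device_list_get(p_instance, 'pulse')
    dev = devs
    while dev:
        print(dev.contents.device, dev.contents.description)
        dev = dev.contents.next
    libvlc_audio_output_device_list_release(devs)
    @endcode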
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_list_get', None) or \
        _Cfunction('libvlc_audio_output_device_list_get', ((1,), (1,),), None,
                    ctypes.POINTER(AudioOutputDevice), Instance, ctypes.c_char_p)
    return f(p_instance, aout)

def libvlc_audio_output_device_list_release(p_list):
    '''Frees a list of available audio output devices.
    @param p_list: list with audio outputs for release.
    @version: LibVLC 2.1.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_list_release', None) or \
        _Cfunction('libvlc_audio_output_device_list_release', ((1,),), None,
                    None, ctypes.POINTER(AudioOutputDevice))
    return f(p_list)

def libvlc_audio_output_device_set(mp, module, device_id):
    '''Configures an explicit audio output device.
    If the module parameter is NULL, audio output will be moved to the device
    specified by the device identifier string immediately. This is the
    recommended usage.
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_enum}().
    However passing NULL is supported in LibVLC version 2.2.0 and later only;
    in earlier versions, this function would have no effects when the module
    parameter was NULL.
    If the module parameter is not NULL, the device parameter of the
    corresponding audio output, if it exists, will be set to the specified
    string. Note that some audio output modules do not have such a parameter
    (notably MMDevice and PulseAudio).
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_list_get}().
    @note: This function does not select the specified audio output plugin.
    L{libvlc_audio_output_set}() is used for that purpose.
    @warning: The syntax for the device parameter depends on the audio output.
    Some audio output modules require further parameters (e.g. a channels map
    in the case of ALSA).
    @param mp: media player.
    @param module: If NULL, current audio output module. if non-NULL, name of audio output module.
    @param device_id: device identifier string.
    @return: Nothing. Errors are ignored (this is a design bug).
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_set', None) or \
        _Cfunction('libvlc_audio_output_device_set', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p, ctypes.c_char_p)
    return f(mp, module, device_id)

def libvlc_audio_output_device_get(mp):
    '''Get the current audio output device identifier.
    This complements L{libvlc_audio_output_device_set}().
    @warning: The initial value for the current audio output device identifier
    may not be set or may be some unknown value. A LibVLC application should
    compare this value against the known device identifiers (e.g. those that
    were previously retrieved by a call to L{libvlc_audio_output_device_enum} or
    L{libvlc_audio_output_device_list_get}) to find the current audio output device.
    It is possible that the selected audio output device changes (an external
    change) without a call to L{libvlc_audio_output_device_set}. That may make this
    method unsuitable to use if a LibVLC application is attempting to track
    dynamic audio device changes as they happen.
    @param mp: media player.
    @return: the current audio output device identifier NULL if no device is selected or in case of error (the result must be released with free() or L{libvlc_free}()).
    @version: LibVLC 3.0.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_get', None) or \
        _Cfunction('libvlc_audio_output_device_get', ((1,),), None,
                    ctypes.c_char_p, MediaPlayer)
    return f(mp)

def libvlc_audio_toggle_mute(p_mi):
    '''Toggle mute status.
    @param p_mi: media player.
    @warning: Toggling mute atomically is not always possible: On some
    platforms, other processes can mute the VLC audio playback stream
    asynchronously. Thus, there is a small race condition where toggling will
    not work. See also the limitations of L{libvlc_audio_set_mute}().
    '''
    f = _Cfunctions.get('libvlc_audio_toggle_mute', None) or \
        _Cfunction('libvlc_audio_toggle_mute', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)

def libvlc_audio_get_mute(p_mi):
    '''Get current mute status.
    @param p_mi: media player.
    @return: the mute status (boolean) if defined, -1 if undefined or not applicable.
    '''
    f = _Cfunctions.get('libvlc_audio_get_mute', None) or \
        _Cfunction('libvlc_audio_get_mute', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute.
    @warning: This function does not always work. If there is no active audio
    playback stream, the mute status might not be available. If digital
    pass-through (S/PDIF, HDMI...) is in use, muting may not be applicable.
    Also some audio output plugins do not support muting at all.
    @note: To force silent playback, disable all audio tracks. This is more
    efficient and reliable than mute.
    '''
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)

def libvlc_audio_get_volume(p_mi):
    '''Get current software audio volume.
    @param p_mi: media player.
    @return: the software volume in percent (0 = mute, 100 = nominal / 0dB).
    '''
    f = _Cfunctions.get('libvlc_audio_get_volume', None) or \
        _Cfunction('libvlc_audio_get_volume', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_audio_set_volume(p_mi, i_volume):
    '''Set current software audio volume.
    @param p_mi: media player.
    @param i_volume: the volume in percent (0 = mute, 100 = 0dB).
    @return: 0 if the volume was set, -1 if it was out of range.
    '''
    f = _Cfunctions.get('libvlc_audio_set_volume', None) or \
        _Cfunction('libvlc_audio_set_volume', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_volume)

def libvlc_audio_get_track_count(p_mi):
    '''Get number of available audio tracks.
    @param p_mi: media player.
    @return: the number of available audio tracks (int), or -1 if unavailable.
    '''
    f = _Cfunctions.get('libvlc_audio_get_track_count', None) or \
        _Cfunction('libvlc_audio_get_track_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_audio_get_track_description(p_mi):
    '''Get the description of available audio tracks.
    @param p_mi: media player.
    @return: list with description of available audio tracks, or NULL.
    '''
    f = _Cfunctions.get('libvlc_audio_get_track_description', None) or \
        _Cfunction('libvlc_audio_get_track_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)

def libvlc_audio_get_track(p_mi):
    '''Get current audio track.
    @param p_mi: media player.
    @return: the audio track ID or -1 if no active input.
    '''
    f = _Cfunctions.get('libvlc_audio_get_track', None) or \
        _Cfunction('libvlc_audio_get_track', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)

def libvlc_audio_set_track(p_mi, i_track):
    '''Set current audio track.
    @param p_mi: media player.
    @param i_track: the track ID (i_id field from track description).
    @return: 0 on success, -1 on error.
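    Example: a minimal sketch; selects the first listed audio track. The
    field names follow the TrackDescription structure declared earlier in
    this module.
    @begincode
    desc = libvlc_audio_get_track_description(p_mi)
    if desc:
        libvlc_audio_set_track(p_mi, desc.contents.id)
        libvlc_track_description_list_release(desc)
    @endcode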
''' f = _Cfunctions.get('libvlc_audio_set_track', None) or \ _Cfunction('libvlc_audio_set_track', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int) return f(p_mi, i_track) def libvlc_audio_get_channel(p_mi): '''Get current audio channel. @param p_mi: media player. @return: the audio channel See libvlc_audio_output_channel_t. ''' f = _Cfunctions.get('libvlc_audio_get_channel', None) or \ _Cfunction('libvlc_audio_get_channel', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi) def libvlc_audio_set_channel(p_mi, channel): '''Set current audio channel. @param p_mi: media player. @param channel: the audio channel, See libvlc_audio_output_channel_t. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_audio_set_channel', None) or \ _Cfunction('libvlc_audio_set_channel', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int) return f(p_mi, channel) def libvlc_audio_get_delay(p_mi): '''Get current audio delay. @param p_mi: media player. @return: the audio delay (microseconds). @version: LibVLC 1.1.1 or later. ''' f = _Cfunctions.get('libvlc_audio_get_delay', None) or \ _Cfunction('libvlc_audio_get_delay', ((1,),), None, ctypes.c_int64, MediaPlayer) return f(p_mi) def libvlc_audio_set_delay(p_mi, i_delay): '''Set current audio delay. The audio delay will be reset to zero each time the media changes. @param p_mi: media player. @param i_delay: the audio delay (microseconds). @return: 0 on success, -1 on error. @version: LibVLC 1.1.1 or later. ''' f = _Cfunctions.get('libvlc_audio_set_delay', None) or \ _Cfunction('libvlc_audio_set_delay', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_int64) return f(p_mi, i_delay) def libvlc_audio_equalizer_get_preset_count(): '''Get the number of equalizer presets. @return: number of presets. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_preset_count', None) or \ _Cfunction('libvlc_audio_equalizer_get_preset_count', (), None, ctypes.c_uint) return f() def libvlc_audio_equalizer_get_preset_name(u_index): '''Get the name of a particular equalizer preset. This name can be used, for example, to prepare a preset label or menu in a user interface. @param u_index: index of the preset, counting from zero. @return: preset name, or NULL if there is no such preset. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_preset_name', None) or \ _Cfunction('libvlc_audio_equalizer_get_preset_name', ((1,),), None, ctypes.c_char_p, ctypes.c_uint) return f(u_index) def libvlc_audio_equalizer_get_band_count(): '''Get the number of distinct frequency bands for an equalizer. @return: number of frequency bands. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_band_count', None) or \ _Cfunction('libvlc_audio_equalizer_get_band_count', (), None, ctypes.c_uint) return f() def libvlc_audio_equalizer_get_band_frequency(u_index): '''Get a particular equalizer band frequency. This value can be used, for example, to create a label for an equalizer band control in a user interface. @param u_index: index of the band, counting from zero. @return: equalizer band frequency (Hz), or -1 if there is no such band. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None) or \ _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None, ctypes.c_float, ctypes.c_uint) return f(u_index) def libvlc_audio_equalizer_new(): '''Create a new default equalizer, with all frequency values zeroed. 
The new equalizer can subsequently be applied to a media player by invoking L{libvlc_media_player_set_equalizer}(). The returned handle should be freed via L{libvlc_audio_equalizer_release}() when it is no longer needed. @return: opaque equalizer handle, or NULL on error. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_new', None) or \ _Cfunction('libvlc_audio_equalizer_new', (), None, ctypes.c_void_p) return f() def libvlc_audio_equalizer_new_from_preset(u_index): '''Create a new equalizer, with initial frequency values copied from an existing preset. The new equalizer can subsequently be applied to a media player by invoking L{libvlc_media_player_set_equalizer}(). The returned handle should be freed via L{libvlc_audio_equalizer_release}() when it is no longer needed. @param u_index: index of the preset, counting from zero. @return: opaque equalizer handle, or NULL on error. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_new_from_preset', None) or \ _Cfunction('libvlc_audio_equalizer_new_from_preset', ((1,),), None, ctypes.c_void_p, ctypes.c_uint) return f(u_index) def libvlc_audio_equalizer_release(p_equalizer): '''Release a previously created equalizer instance. The equalizer was previously created by using L{libvlc_audio_equalizer_new}() or L{libvlc_audio_equalizer_new_from_preset}(). It is safe to invoke this method with a NULL p_equalizer parameter for no effect. @param p_equalizer: opaque equalizer handle, or NULL. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_release', None) or \ _Cfunction('libvlc_audio_equalizer_release', ((1,),), None, None, ctypes.c_void_p) return f(p_equalizer) def libvlc_audio_equalizer_set_preamp(p_equalizer, f_preamp): '''Set a new pre-amplification value for an equalizer. The new equalizer settings are subsequently applied to a media player by invoking L{libvlc_media_player_set_equalizer}(). The supplied amplification value will be clamped to the -20.0 to +20.0 range. @param p_equalizer: valid equalizer handle, must not be NULL. @param f_preamp: preamp value (-20.0 to 20.0 Hz). @return: zero on success, -1 on error. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_set_preamp', None) or \ _Cfunction('libvlc_audio_equalizer_set_preamp', ((1,), (1,),), None, ctypes.c_int, ctypes.c_void_p, ctypes.c_float) return f(p_equalizer, f_preamp) def libvlc_audio_equalizer_get_preamp(p_equalizer): '''Get the current pre-amplification value from an equalizer. @param p_equalizer: valid equalizer handle, must not be NULL. @return: preamp value (Hz). @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_preamp', None) or \ _Cfunction('libvlc_audio_equalizer_get_preamp', ((1,),), None, ctypes.c_float, ctypes.c_void_p) return f(p_equalizer) def libvlc_audio_equalizer_set_amp_at_index(p_equalizer, f_amp, u_band): '''Set a new amplification value for a particular equalizer frequency band. The new equalizer settings are subsequently applied to a media player by invoking L{libvlc_media_player_set_equalizer}(). The supplied amplification value will be clamped to the -20.0 to +20.0 range. @param p_equalizer: valid equalizer handle, must not be NULL. @param f_amp: amplification value (-20.0 to 20.0 Hz). @param u_band: index, counting from zero, of the frequency band to set. @return: zero on success, -1 on error. @version: LibVLC 2.2.0 or later. 
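    Example: a minimal sketch; boosts the preamp and the lowest frequency
    band of a fresh equalizer, applies it to media player p_mi, then releases
    the handle (safe once applied, see L{libvlc_media_player_set_equalizer}).
    @begincode
    eq = libvlc_audio_equalizer_new()
    libvlc_audio_equalizer_set_preamp(eq, 6.0)
    libvlc_audio_equalizer_set_amp_at_index(eq, 10.0, 0)
    libvlc_media_player_set_equalizer(p_mi, eq)
    libvlc_audio_equalizer_release(eq)
    @endcode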
''' f = _Cfunctions.get('libvlc_audio_equalizer_set_amp_at_index', None) or \ _Cfunction('libvlc_audio_equalizer_set_amp_at_index', ((1,), (1,), (1,),), None, ctypes.c_int, ctypes.c_void_p, ctypes.c_float, ctypes.c_uint) return f(p_equalizer, f_amp, u_band) def libvlc_audio_equalizer_get_amp_at_index(p_equalizer, u_band): '''Get the amplification value for a particular equalizer frequency band. @param p_equalizer: valid equalizer handle, must not be NULL. @param u_band: index, counting from zero, of the frequency band to get. @return: amplification value (Hz); NaN if there is no such frequency band. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_amp_at_index', None) or \ _Cfunction('libvlc_audio_equalizer_get_amp_at_index', ((1,), (1,),), None, ctypes.c_float, ctypes.c_void_p, ctypes.c_uint) return f(p_equalizer, u_band) def libvlc_media_player_set_equalizer(p_mi, p_equalizer): '''Apply new equalizer settings to a media player. The equalizer is first created by invoking L{libvlc_audio_equalizer_new}() or L{libvlc_audio_equalizer_new_from_preset}(). It is possible to apply new equalizer settings to a media player whether the media player is currently playing media or not. Invoking this method will immediately apply the new equalizer settings to the audio output of the currently playing media if there is any. If there is no currently playing media, the new equalizer settings will be applied later if and when new media is played. Equalizer settings will automatically be applied to subsequently played media. To disable the equalizer for a media player invoke this method passing NULL for the p_equalizer parameter. The media player does not keep a reference to the supplied equalizer so it is safe for an application to release the equalizer reference any time after this method returns. @param p_mi: opaque media player handle. @param p_equalizer: opaque equalizer handle, or NULL to disable the equalizer for this media player. @return: zero on success, -1 on error. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_media_player_set_equalizer', None) or \ _Cfunction('libvlc_media_player_set_equalizer', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_void_p) return f(p_mi, p_equalizer) def libvlc_vlm_release(p_instance): '''Release the vlm instance related to the given L{Instance}. @param p_instance: the instance. ''' f = _Cfunctions.get('libvlc_vlm_release', None) or \ _Cfunction('libvlc_vlm_release', ((1,),), None, None, Instance) return f(p_instance) def libvlc_vlm_add_broadcast(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop): '''Add a broadcast, with one input. @param p_instance: the instance. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error. 
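    Example: a minimal sketch; the media name, input MRL and sout chain are
    illustrative only, and p_instance is an existing Instance. Passing None
    for ppsz_options is assumed to map to a NULL option array.
    @begincode
    libvlc_vlm_add_broadcast(p_instance, 'ch1', 'file:///tmp/movie.mp4',
                             '#standard{access=http,mux=ts,dst=:8080}',
                             0, None, 1, 0)
    libvlc_vlm_play_media(p_instance, 'ch1')
    @endcode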
''' f = _Cfunctions.get('libvlc_vlm_add_broadcast', None) or \ _Cfunction('libvlc_vlm_add_broadcast', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int) return f(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop) def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux): '''Add a vod, with one input. @param p_instance: the instance. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_add_vod', None) or \ _Cfunction('libvlc_vlm_add_vod', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p) return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux) def libvlc_vlm_del_media(p_instance, psz_name): '''Delete a media (VOD or broadcast). @param p_instance: the instance. @param psz_name: the media to delete. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_del_media', None) or \ _Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p) return f(p_instance, psz_name) def libvlc_vlm_set_enabled(p_instance, psz_name, b_enabled): '''Enable or disable a media (VOD or broadcast). @param p_instance: the instance. @param psz_name: the media to work on. @param b_enabled: the new status. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_set_enabled', None) or \ _Cfunction('libvlc_vlm_set_enabled', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, b_enabled) def libvlc_vlm_set_output(p_instance, psz_name, psz_output): '''Set the output for a media. @param p_instance: the instance. @param psz_name: the media to work on. @param psz_output: the output MRL (the parameter to the "sout" variable). @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_set_output', None) or \ _Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p) return f(p_instance, psz_name, psz_output) def libvlc_vlm_set_input(p_instance, psz_name, psz_input): '''Set a media's input MRL. This will delete all existing inputs and add the specified one. @param p_instance: the instance. @param psz_name: the media to work on. @param psz_input: the input MRL. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_set_input', None) or \ _Cfunction('libvlc_vlm_set_input', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p) return f(p_instance, psz_name, psz_input) def libvlc_vlm_add_input(p_instance, psz_name, psz_input): '''Add a media's input MRL. This will add the specified one. @param p_instance: the instance. @param psz_name: the media to work on. @param psz_input: the input MRL. @return: 0 on success, -1 on error. 
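    Example: a minimal sketch; appends a second illustrative input to the
    broadcast created above and loops the whole playlist.
    @begincode
    libvlc_vlm_add_input(p_instance, 'ch1', 'file:///tmp/trailer.mp4')
    libvlc_vlm_set_loop(p_instance, 'ch1', 1)
    @endcode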
''' f = _Cfunctions.get('libvlc_vlm_add_input', None) or \ _Cfunction('libvlc_vlm_add_input', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p) return f(p_instance, psz_name, psz_input) def libvlc_vlm_set_loop(p_instance, psz_name, b_loop): '''Set a media's loop status. @param p_instance: the instance. @param psz_name: the media to work on. @param b_loop: the new status. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_set_loop', None) or \ _Cfunction('libvlc_vlm_set_loop', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, b_loop) def libvlc_vlm_set_mux(p_instance, psz_name, psz_mux): '''Set a media's vod muxer. @param p_instance: the instance. @param psz_name: the media to work on. @param psz_mux: the new muxer. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_set_mux', None) or \ _Cfunction('libvlc_vlm_set_mux', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p) return f(p_instance, psz_name, psz_mux) def libvlc_vlm_change_media(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop): '''Edit the parameters of a media. This will delete all existing inputs and add the specified one. @param p_instance: the instance. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_change_media', None) or \ _Cfunction('libvlc_vlm_change_media', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int) return f(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop) def libvlc_vlm_play_media(p_instance, psz_name): '''Play the named broadcast. @param p_instance: the instance. @param psz_name: the name of the broadcast. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_play_media', None) or \ _Cfunction('libvlc_vlm_play_media', ((1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p) return f(p_instance, psz_name) def libvlc_vlm_stop_media(p_instance, psz_name): '''Stop the named broadcast. @param p_instance: the instance. @param psz_name: the name of the broadcast. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_stop_media', None) or \ _Cfunction('libvlc_vlm_stop_media', ((1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p) return f(p_instance, psz_name) def libvlc_vlm_pause_media(p_instance, psz_name): '''Pause the named broadcast. @param p_instance: the instance. @param psz_name: the name of the broadcast. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_pause_media', None) or \ _Cfunction('libvlc_vlm_pause_media', ((1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p) return f(p_instance, psz_name) def libvlc_vlm_seek_media(p_instance, psz_name, f_percentage): '''Seek in the named broadcast. @param p_instance: the instance. @param psz_name: the name of the broadcast. @param f_percentage: the percentage to seek to. @return: 0 on success, -1 on error. 
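    Example: a minimal sketch; jumps to the middle of the running broadcast
    created in the earlier examples.
    @begincode
    libvlc_vlm_seek_media(p_instance, 'ch1', 0.5)
    @endcode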
''' f = _Cfunctions.get('libvlc_vlm_seek_media', None) or \ _Cfunction('libvlc_vlm_seek_media', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_float) return f(p_instance, psz_name, f_percentage) def libvlc_vlm_show_media(p_instance, psz_name): '''Return information about the named media as a JSON string representation. This function is mainly intended for debugging use, if you want programmatic access to the state of a vlm_media_instance_t, please use the corresponding libvlc_vlm_get_media_instance_xxx -functions. Currently there are no such functions available for vlm_media_t though. @param p_instance: the instance. @param psz_name: the name of the media, if the name is an empty string, all media is described. @return: string with information about named media, or NULL on error. ''' f = _Cfunctions.get('libvlc_vlm_show_media', None) or \ _Cfunction('libvlc_vlm_show_media', ((1,), (1,),), string_result, ctypes.c_void_p, Instance, ctypes.c_char_p) return f(p_instance, psz_name) def libvlc_vlm_get_media_instance_position(p_instance, psz_name, i_instance): '''Get vlm_media instance position by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: position as float or -1. on error. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_position', None) or \ _Cfunction('libvlc_vlm_get_media_instance_position', ((1,), (1,), (1,),), None, ctypes.c_float, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_time(p_instance, psz_name, i_instance): '''Get vlm_media instance time by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: time as integer or -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_time', None) or \ _Cfunction('libvlc_vlm_get_media_instance_time', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_length(p_instance, psz_name, i_instance): '''Get vlm_media instance length by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: length of media item or -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_length', None) or \ _Cfunction('libvlc_vlm_get_media_instance_length', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_rate(p_instance, psz_name, i_instance): '''Get vlm_media instance playback rate by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: playback rate or -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_rate', None) or \ _Cfunction('libvlc_vlm_get_media_instance_rate', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_title(p_instance, psz_name, i_instance): '''Get vlm_media instance title number by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: title as number or -1 on error. @bug: will always return 0. 
''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_title', None) or \ _Cfunction('libvlc_vlm_get_media_instance_title', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_chapter(p_instance, psz_name, i_instance): '''Get vlm_media instance chapter number by name or instance id. @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: chapter as number or -1 on error. @bug: will always return 0. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_chapter', None) or \ _Cfunction('libvlc_vlm_get_media_instance_chapter', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_media_instance_seekable(p_instance, psz_name, i_instance): '''Is libvlc instance seekable ? @param p_instance: a libvlc instance. @param psz_name: name of vlm media instance. @param i_instance: instance id. @return: 1 if seekable, 0 if not, -1 if media does not exist. @bug: will always return 0. ''' f = _Cfunctions.get('libvlc_vlm_get_media_instance_seekable', None) or \ _Cfunction('libvlc_vlm_get_media_instance_seekable', ((1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int) return f(p_instance, psz_name, i_instance) def libvlc_vlm_get_event_manager(p_instance): '''Get libvlc_event_manager from a vlm media. The p_event_manager is immutable, so you don't have to hold the lock. @param p_instance: a libvlc instance. @return: libvlc_event_manager. ''' f = _Cfunctions.get('libvlc_vlm_get_event_manager', None) or \ _Cfunction('libvlc_vlm_get_event_manager', ((1,),), class_result(EventManager), ctypes.c_void_p, Instance) return f(p_instance) # 4 function(s) blacklisted: # libvlc_audio_output_get_device_type # libvlc_audio_output_set_device_type # libvlc_printerr # libvlc_set_exit_handler # 29 function(s) not wrapped as methods: # libvlc_audio_equalizer_get_amp_at_index # libvlc_audio_equalizer_get_band_count # libvlc_audio_equalizer_get_band_frequency # libvlc_audio_equalizer_get_preamp # libvlc_audio_equalizer_get_preset_count # libvlc_audio_equalizer_get_preset_name # libvlc_audio_equalizer_new # libvlc_audio_equalizer_new_from_preset # libvlc_audio_equalizer_release # libvlc_audio_equalizer_set_amp_at_index # libvlc_audio_equalizer_set_preamp # libvlc_audio_output_device_list_release # libvlc_audio_output_list_release # libvlc_clearerr # libvlc_clock # libvlc_errmsg # libvlc_event_type_name # libvlc_free # libvlc_get_changeset # libvlc_get_compiler # libvlc_get_version # libvlc_log_get_context # libvlc_log_get_object # libvlc_media_get_codec_description # libvlc_media_tracks_release # libvlc_module_description_list_release # libvlc_new # libvlc_track_description_list_release # libvlc_vprinterr # Start of footer.py # # Backward compatibility def callbackmethod(callback): """Now obsolete @callbackmethod decorator.""" return callback # libvlc_free is not present in some versions of libvlc. If it is not # in the library, then emulate it by calling libc.free if not hasattr(dll, 'libvlc_free'): # need to find the free function in the C runtime. This is # platform specific. # For Linux and MacOSX libc_path = find_library('c') if libc_path: libc = ctypes.CDLL(libc_path) libvlc_free = libc.free else: # On win32, it is impossible to guess the proper lib to call # (msvcrt, mingw...). 
Just ignore the call: it will memleak, # but not prevent to run the application. def libvlc_free(p): pass # ensure argtypes is right, because default type of int won't work # on 64-bit systems libvlc_free.argtypes = [ ctypes.c_void_p ] # Version functions def _dot2int(v): '''(INTERNAL) Convert 'i.i.i[.i]' str to int. ''' t = [int(i) for i in v.split('.')] if len(t) == 3: t.append(0) elif len(t) != 4: raise ValueError('"i.i.i[.i]": %r' % (v,)) if min(t) < 0 or max(t) > 255: raise ValueError('[0..255]: %r' % (v,)) i = t.pop(0) while t: i = (i << 8) + t.pop(0) return i def hex_version(): """Return the version of these bindings in hex or 0 if unavailable. """ try: return _dot2int(__version__.split('-')[-1]) except (NameError, ValueError): return 0 def libvlc_hex_version(): """Return the libvlc version in hex or 0 if unavailable. """ try: return _dot2int(bytes_to_str(libvlc_get_version()).split()[0]) except ValueError: return 0 def debug_callback(event, *args, **kwds): '''Example callback, useful for debugging. ''' l = ['event %s' % (event.type,)] if args: l.extend(map(str, args)) if kwds: l.extend(sorted('%s=%s' % t for t in kwds.items())) print('Debug callback (%s)' % ', '.join(l)) if __name__ == '__main__': try: from msvcrt import getch except ImportError: import termios import tty def getch(): # getchar(), getc(stdin) #PYCHOK flake fd = sys.stdin.fileno() old = termios.tcgetattr(fd) try: tty.setraw(fd) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old) return ch def end_callback(event): print('End of media stream (event %s)' % event.type) sys.exit(0) echo_position = False def pos_callback(event, player): if echo_position: sys.stdout.write('\r%s to %.2f%% (%.2f%%)' % (event.type, event.u.new_position * 100, player.get_position() * 100)) sys.stdout.flush() def print_version(): """Print libvlc version""" try: print('Build date: %s (%#x)' % (build_date, hex_version())) print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version())) print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler())) if plugin_path: print('Plugin path: %s' % plugin_path) except: print('Error: %s' % sys.exc_info()[1]) if sys.argv[1:] and sys.argv[1] not in ('-h', '--help'): movie = os.path.expanduser(sys.argv[1]) if not os.access(movie, os.R_OK): print('Error: %s file not readable' % movie) sys.exit(1) instance = Instance("--sub-source marq") try: media = instance.media_new(movie) except NameError: print('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1], __version__, libvlc_get_version())) sys.exit(1) player = instance.media_player_new() player.set_media(media) player.play() # Some marquee examples. Marquee requires '--sub-source marq' in the # Instance() call above. See <http://www.videolan.org/doc/play-howto/en/ch04.html> player.video_set_marquee_int(VideoMarqueeOption.Enable, 1) player.video_set_marquee_int(VideoMarqueeOption.Size, 24) # pixels player.video_set_marquee_int(VideoMarqueeOption.Position, Position.Bottom) if False: # only one marquee can be specified player.video_set_marquee_int(VideoMarqueeOption.Timeout, 5000) # millisec, 0==forever t = media.get_mrl() # movie else: # update marquee text periodically player.video_set_marquee_int(VideoMarqueeOption.Timeout, 0) # millisec, 0==forever player.video_set_marquee_int(VideoMarqueeOption.Refresh, 1000) # millisec (or sec?) ##t = '$L / $D or $P at $T' t = '%Y-%m-%d %H:%M:%S' player.video_set_marquee_string(VideoMarqueeOption.Text, str_to_bytes(t)) # Some event manager examples. 
Note, the callback can be any Python # callable and does not need to be decorated. Optionally, specify # any number of positional and/or keyword arguments to be passed # to the callback (in addition to the first one, an Event instance). event_manager = player.event_manager() event_manager.event_attach(EventType.MediaPlayerEndReached, end_callback) event_manager.event_attach(EventType.MediaPlayerPositionChanged, pos_callback, player) def mspf(): """Milliseconds per frame.""" return int(1000 // (player.get_fps() or 25)) def print_info(): """Print information about the media""" try: print_version() media = player.get_media() print('State: %s' % player.get_state()) print('Media: %s' % bytes_to_str(media.get_mrl())) print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count())) print('Current time: %s/%s' % (player.get_time(), media.get_duration())) print('Position: %s' % player.get_position()) print('FPS: %s (%d ms)' % (player.get_fps(), mspf())) print('Rate: %s' % player.get_rate()) print('Video size: %s' % str(player.video_get_size(0))) # num=0 print('Scale: %s' % player.video_get_scale()) print('Aspect ratio: %s' % player.video_get_aspect_ratio()) #print('Window:' % player.get_hwnd() except Exception: print('Error: %s' % sys.exc_info()[1]) def sec_forward(): """Go forward one sec""" player.set_time(player.get_time() + 1000) def sec_backward(): """Go backward one sec""" player.set_time(player.get_time() - 1000) def frame_forward(): """Go forward one frame""" player.set_time(player.get_time() + mspf()) def frame_backward(): """Go backward one frame""" player.set_time(player.get_time() - mspf()) def print_help(): """Print help""" print('Single-character commands:') for k, m in sorted(keybindings.items()): m = (m.__doc__ or m.__name__).splitlines()[0] print(' %s: %s.' % (k, m.rstrip('.'))) print('0-9: go to that fraction of the movie') def quit_app(): """Stop and exit""" sys.exit(0) def toggle_echo_position(): """Toggle echoing of media position""" global echo_position echo_position = not echo_position keybindings = { ' ': player.pause, '+': sec_forward, '-': sec_backward, '.': frame_forward, ',': frame_backward, 'f': player.toggle_fullscreen, 'i': print_info, 'p': toggle_echo_position, 'q': quit_app, '?': print_help, } print('Press q to quit, ? to get help.%s' % os.linesep) while True: k = getch() print('> %s' % k) if k in keybindings: keybindings[k]() elif k.isdigit(): # jump to fraction of the movie. player.set_position(float('0.'+k)) else: print('Usage: %s <movie_filename>' % sys.argv[0]) print('Once launched, type ? for help.') print('') print_version()
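# A minimal, hedged sketch of the VLM broadcast API wrapped above. It is not
# part of the upstream bindings: the media name, input path and sout chain
# below are made-up placeholders, and the function is defined only, never
# called on import.
def _vlm_broadcast_sketch(movie='/tmp/input.mp4'):
    inst = Instance()
    # one broadcast named 'demo', streamed over HTTP as an MPEG-TS
    libvlc_vlm_add_broadcast(
        inst, str_to_bytes('demo'), str_to_bytes('file://' + movie),
        str_to_bytes('#standard{access=http,mux=ts,dst=:8080}'),
        0, [], 1, 0)
    libvlc_vlm_play_media(inst, str_to_bytes('demo'))
    print(libvlc_vlm_show_media(inst, str_to_bytes('demo')))  # JSON state dump
    libvlc_vlm_stop_media(inst, str_to_bytes('demo'))
    libvlc_vlm_del_media(inst, str_to_bytes('demo'))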
mit
pombredanne/pants
src/python/pants/backend/jvm/subsystems/zinc_language_mixin.py
10
1890
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) class ZincLanguageMixin(object): """A mixin for subsystems for languages compiled with Zinc.""" @classmethod def register_options(cls, register): super(ZincLanguageMixin, cls).register_options(register) # NB: This option is fingerprinted because the default value is not included in a target's # fingerprint. This also has the effect of invalidating only the relevant tasks: ZincCompile # in this case. register('--strict-deps', advanced=True, default=False, fingerprint=True, type=bool, help='The default for the "strict_deps" argument for targets of this language.') register('--fatal-warnings', advanced=True, type=bool, fingerprint=True, help='The default for the "fatal_warnings" argument for targets of this language.') register('--zinc-file-manager', advanced=True, default=True, type=bool, fingerprint=True, help='Use zinc provided file manager to ensure transactional rollback.') @property def strict_deps(self): """When True, limits compile time deps to those that are directly declared by a target. :rtype: bool """ return self.get_options().strict_deps @property def fatal_warnings(self): """If true, make warnings fatal for targets that do not specify fatal_warnings. :rtype: bool """ return self.get_options().fatal_warnings @property def zinc_file_manager(self): """If false, the default file manager will be used instead of the zinc provided one. :rtype: bool """ return self.get_options().zinc_file_manager
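# Hedged illustration (not part of this file): in Pants, a concrete language
# subsystem mixes ZincLanguageMixin into a Subsystem so the options above are
# registered under that language's scope. The class name below is only an
# example of the pattern, not an import that exists here:
#
#   class ScalaPlatform(ZincLanguageMixin, Subsystem):
#     options_scope = 'scala'
#
# A compile task can then read ScalaPlatform.global_instance().strict_deps as
# the default for targets that do not set strict_deps themselves.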
apache-2.0
Nihn/Diamond-1
src/diamond/handler/sentry.py
28
10750
# coding=utf-8
"""
Diamond handler that checks whether values are too high or too low and, if
so, sends an alert to a Sentry server

This handler requires the Python module Raven:
http://raven.readthedocs.org/en/latest/index.html

To work, this handler needs a configuration similar to:

[[SentryHandler]]
# Create a new project in Sentry and copy the DSN here:
dsn = http://user:pass@hostname/id

[[[load]]]
name = Load Average
# check for load average of the last 15 minutes
path = loadavg.15
max = 8.5

[[[free_memory]]]
name = Free Memory
path = memory.MemFree
min = 66020000
"""

import logging
import re

from Handler import Handler
from diamond.collector import get_hostname
from configobj import Section

try:
    import raven.handlers.logging
except ImportError:
    raven = None

__author__ = 'Bruno Clermont'
__email__ = '[email protected]'


class InvalidRule(ValueError):
    """
    invalid rule
    """
    pass


class BaseResult(object):
    """
    Base class for a Rule minimum/maximum check result
    """
    adjective = None

    def __init__(self, value, threshold):
        """
        @type value: float
        @param value: metric value
        @type threshold: float
        @param threshold: value that triggers a warning
        """
        self.value = value
        self.threshold = threshold

    @property
    def verbose_message(self):
        """return a more complete message"""
        if self.threshold is None:
            return 'No threshold'
        return '%.1f is %s than %.1f' % (self.value, self.adjective,
                                         self.threshold)

    @property
    def _is_error(self):
        raise NotImplementedError('_is_error')

    @property
    def is_error(self):
        """
        for some reason python do this:
        >>> 1.0 > None
        True
        >>> 1.0 < None
        False
        so we just check if min/max is not None before return _is_error
        """
        if self.threshold is None:
            return False
        return self._is_error

    def __str__(self):
        name = self.__class__.__name__.lower()
        if self.threshold is None:
            return '%s: %.1f no threshold' % (name, self.value)
        return '%.1f (%s: %.1f)' % (self.value, name, self.threshold)


class Minimum(BaseResult):
    """
    Minimum result
    """
    adjective = 'lower'

    @property
    def _is_error(self):
        """if it's too low"""
        return self.value < self.threshold


class Maximum(BaseResult):
    """
    Maximum result
    """
    adjective = 'higher'

    @property
    def _is_error(self):
        """if it's too high"""
        return self.value > self.threshold


class Rule(object):
    """
    Alert rule
    """

    def __init__(self, name, path, min=None, max=None):
        """
        @type name: string
        @param name: rule name, used to identify this rule in Sentry
        @type path: string
        @param path: un-compiled regular expression of the path of the rule
        @type min: string of float/int, int or float. will be converted to
            float
        @param min: optional minimal value; if the value goes below it, send
            an alert to Sentry
        @type max: string of float/int, int or float. will be converted to
            float
        @param max: optional maximal value; if the value goes over it, send
            an alert to Sentry
        """
        self.name = name
        # counters that can be used to debug rule
        self.counter_errors = 0
        self.counter_pass = 0

        # force min and max to be float
        try:
            self.min = float(min)
        except TypeError:
            self.min = None
        try:
            self.max = float(max)
        except TypeError:
            self.max = None
        if self.min is None and self.max is None:
            raise InvalidRule("%s: %s: both min and max are unset or invalid"
                              % (name, path))
        if self.min is not None and self.max is not None:
            if self.min > self.max:
                raise InvalidRule("min %.1f is larger than max %.1f" % (
                    self.min, self.max))

        # compile path regular expression
        self.regexp = re.compile(r'(?P<prefix>.*)\.(?P<path>%s)$' % path)

    def process(self, metric, handler):
        """
        process a single diamond metric
        @type metric: diamond.metric.Metric
        @param metric: metric to process
        @type handler: diamond.handler.sentry.SentryHandler
        @param handler: configured Sentry graphite handler
        @rtype None
        """
        match = self.regexp.match(metric.path)
        if match:
            minimum = Minimum(metric.value, self.min)
            maximum = Maximum(metric.value, self.max)
            if minimum.is_error or maximum.is_error:
                self.counter_errors += 1
                message = "%s Warning on %s: %.1f" % (self.name,
                                                      handler.hostname,
                                                      metric.value)
                culprit = "%s %s" % (handler.hostname, match.group('path'))
                handler.raven_logger.error(message, extra={
                    'culprit': culprit,
                    'data': {
                        'metric prefix': match.group('prefix'),
                        'metric path': match.group('path'),
                        'minimum check': minimum.verbose_message,
                        'maximum check': maximum.verbose_message,
                        'metric original path': metric.path,
                        'metric value': metric.value,
                        'metric precision': metric.precision,
                        'metric timestamp': metric.timestamp,
                        'minimum threshold': self.min,
                        'maximum threshold': self.max,
                        'path regular expression': self.regexp.pattern,
                        'total errors': self.counter_errors,
                        'total pass': self.counter_pass,
                        'hostname': handler.hostname
                    }
                })
            else:
                self.counter_pass += 1

    def __repr__(self):
        return '%s: min:%s max:%s %s' % (self.name, self.min, self.max,
                                         self.regexp.pattern)


class SentryHandler(Handler):
    """
    Diamond handler that checks if a metric goes too low or too high
    """
    # valid key name in rules sub-section
    VALID_RULES_KEYS = ('name', 'path', 'min', 'max')

    def __init__(self, config=None):
        """
        @type config: configobj.ConfigObj
        """
        Handler.__init__(self, config)

        if not raven:
            # this availability check (with the logger and enabled flag)
            # belongs here on the handler, not in BaseResult.__init__
            self.log.error('raven.handlers.logging import failed. '
                           'Handler disabled')
            self.enabled = False
            return

        # init sentry/raven
        self.sentry_log_handler = raven.handlers.logging.SentryHandler(
            self.config['dsn'])
        self.raven_logger = logging.getLogger(self.__class__.__name__)
        self.raven_logger.addHandler(self.sentry_log_handler)
        self.configure_sentry_errors()

        self.rules = self.compile_rules()
        self.hostname = get_hostname(self.config)
        if not len(self.rules):
            self.log.warning("No rules, this graphite handler is unused")

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler
        """
        config = super(SentryHandler, self).get_default_config_help()
        config.update({
            'dsn': '',
        })
        return config

    def get_default_config(self):
        """
        Return the default config for the handler
        """
        config = super(SentryHandler, self).get_default_config()
        config.update({
            'dsn': '',
        })
        return config

    def compile_rules(self):
        """
        Compile alert rules
        @rtype list of Rules
        """
        output = []
        # validate configuration, skip invalid section
        for key_name, section in self.config.items():
            rule = self.compile_section(section)
            if rule is not None:
                output.append(rule)
        return output

    def compile_section(self, section):
        """
        Validate if a section is a valid rule
        @type section: configobj.Section
        @param section: section to validate
        @rtype Rule or None
        @return None if invalid
        """
        if section.__class__ != Section:
            # not a section, just skip
            return

        # name and path are mandatory
        keys = section.keys()
        for key in ('name', 'path'):
            if key not in keys:
                self.log.warning("section %s misses key '%s', ignoring",
                                 section.name, key)
                return

        # just warn if invalid key in section
        for key in keys:
            if key not in self.VALID_RULES_KEYS:
                self.log.warning("invalid key %s in section %s",
                                 key, section.name)

        # need at least a min or a max
        if 'min' not in keys and 'max' not in keys:
            self.log.warning("neither 'min' nor 'max' is defined in %s, "
                             "ignoring", section.name)
            return

        # add rule to the list
        kwargs = {
            'name': section['name'],
            'path': section['path']
        }
        for argument in ('min', 'max'):
            try:
                kwargs[argument] = section[argument]
            except KeyError:
                pass

        # init rule
        try:
            return Rule(**kwargs)
        except InvalidRule, err:
            self.log.error(str(err))

    def configure_sentry_errors(self):
        """
        Configure sentry.errors to use the same loggers as the root handler
        @rtype: None
        """
        sentry_errors_logger = logging.getLogger('sentry.errors')
        root_logger = logging.getLogger()
        for handler in root_logger.handlers:
            sentry_errors_logger.addHandler(handler)

    def process(self, metric):
        """
        process a single metric
        @type metric: diamond.metric.Metric
        @param metric: metric to process
        @rtype None
        """
        for rule in self.rules:
            rule.process(metric, self)

    def __repr__(self):
        return "SentryHandler '%s' %d rules" % (
            self.sentry_log_handler.client.servers, len(self.rules))
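# Hedged self-check sketch, outside Diamond's normal flow: it exercises Rule
# and the result classes above directly with made-up values and no Sentry
# server, and only runs when invoked explicitly.
def _rule_selfcheck():
    rule = Rule('Load Average', r'loadavg\.15', max=8.5)
    # metric paths are matched with the implicit '<prefix>.<path>$' regexp
    assert rule.regexp.match('servers.web1.loadavg.15') is not None
    # values above max trip Maximum.is_error, values below pass
    assert Maximum(9.0, rule.max).is_error
    assert not Maximum(3.0, rule.max).is_error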
mit
richardbeare/ITK
Wrapping/Generators/Doc/doxy2swig.py
2
11932
#!/usr/bin/env python """Doxygen XML to SWIG docstring converter. Converts Doxygen generated XML files into a file containing docstrings that can be used by SWIG-1.3.x. Note that you need to get SWIG version > 1.3.23 or use Robin Dunn's docstring patch to be able to use the resulting output. Usage: doxy2swig.py input.xml output.i input.xml is your doxygen generated XML file and output.i is where the output will be written (the file will be clobbered). """ # This code is implemented using Mark Pilgrim's code as a guideline: # http://www.faqs.org/docs/diveintopython/kgp_divein.html # # Author: Prabhu Ramachandran # License: BSD style from xml.dom import minidom import re import textwrap import sys import types import os.path def my_open_read(source): if hasattr(source, "read"): return source else: return open(source) def my_open_write(dest, mode='w'): if hasattr(dest, "write"): return dest else: return open(dest, mode) class Doxy2SWIG: """Converts Doxygen generated XML files into a file containing docstrings that can be used by SWIG-1.3.x that have support for feature("docstring"). Once the data is parsed it is stored in self.pieces. """ def __init__(self, src): """Initialize the instance given a source object (file or filename). """ f = my_open_read(src) self.my_dir = os.path.dirname(f.name) self.xmldoc = minidom.parse(f).documentElement f.close() self.pieces = [] self.pieces.append('\n// File: %s\n'%\ os.path.basename(f.name)) self.space_re = re.compile(r'\s+') self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)') self.multi = 0 self.ignores = ('inheritancegraph', 'param', 'listofallmembers', 'innerclass', 'name', 'declname', 'incdepgraph', 'invincdepgraph', 'programlisting', 'type', 'references', 'referencedby', 'location', 'collaborationgraph', 'reimplements', 'reimplementedby', 'derivedcompoundref', 'basecompoundref') #self.generics = [] def generate(self): """Parses the file set in the initialization. The resulting data is stored in `self.pieces`. """ self.parse(self.xmldoc) def parse(self, node): """Parse a given node. This function in turn calls the `parse_<nodeType>` functions which handle the respective nodes. """ pm = getattr(self, "parse_%s"%node.__class__.__name__) pm(node) def parse_Document(self, node): self.parse(node.documentElement) def parse_Text(self, node): txt = node.data txt = txt.replace('\\', r'\\\\') txt = txt.replace('"', r'\"') # ignore pure whitespace m = self.space_re.match(txt) if m and len(m.group()) == len(txt): pass else: self.add_text(textwrap.fill(txt)) def parse_Element(self, node): """Parse an `ELEMENT_NODE`. This calls specific `do_<tagName>` handers for different elements. If no handler is available the `generic_parse` method is called. All tagNames specified in `self.ignores` are simply ignored. """ name = node.tagName ignores = self.ignores if name in ignores: return attr = "do_%s" % name if hasattr(self, attr): handlerMethod = getattr(self, attr) handlerMethod(node) else: self.generic_parse(node) #if name not in self.generics: self.generics.append(name) def add_text(self, value): """Adds text corresponding to `value` into `self.pieces`.""" listTypes = (list, tuple) if type(value) in listTypes: self.pieces.extend(value) else: self.pieces.append(value) def get_specific_nodes(self, node, names): """Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and child `ELEMENT_NODEs`, that have a `tagName` equal to the name. 
""" nodes = [(x.tagName, x) for x in node.childNodes \ if x.nodeType == x.ELEMENT_NODE and \ x.tagName in names] return dict(nodes) def generic_parse(self, node, pad=0): """A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes are processed. Defaults to 0. """ npiece = 0 if pad: npiece = len(self.pieces) if pad == 2: self.add_text('\n') for n in node.childNodes: self.parse(n) if pad: if len(self.pieces) > npiece: self.add_text('\n') def space_parse(self, node): self.add_text(' ') self.generic_parse(node) do_ref = space_parse do_emphasis = space_parse do_bold = space_parse do_computeroutput = space_parse do_formula = space_parse def do_compoundname(self, node): self.add_text('\n\n') data = node.firstChild.data self.add_text('%%feature("docstring") %s "\n'%data) def do_compounddef(self, node): kind = node.attributes['kind'].value if kind in ('class', 'struct'): prot = node.attributes['prot'].value if prot != 'public': return names = ('compoundname', 'briefdescription', 'detaileddescription', 'includes') first = self.get_specific_nodes(node, names) for n in names: if n in first: self.parse(first[n]) self.add_text(['";','\n']) for n in node.childNodes: if n not in first.values(): self.parse(n) elif kind in ('file', 'namespace'): nodes = node.getElementsByTagName('sectiondef') for n in nodes: self.parse(n) def do_includes(self, node): self.add_text('C++ includes: ') self.generic_parse(node, pad=1) def do_parameterlist(self, node): self.add_text(['\n', '\n', 'Parameters:', '\n']) self.generic_parse(node, pad=1) def do_para(self, node): self.add_text('\n') self.generic_parse(node, pad=1) def do_parametername(self, node): self.add_text('\n') self.add_text("%s: "%node.firstChild.data) def do_parameterdefinition(self, node): self.generic_parse(node, pad=1) def do_detaileddescription(self, node): self.generic_parse(node, pad=1) def do_briefdescription(self, node): self.generic_parse(node, pad=1) def do_memberdef(self, node): prot = node.attributes['prot'].value id = node.attributes['id'].value kind = node.attributes['kind'].value tmp = node.parentNode.parentNode.parentNode compdef = tmp.getElementsByTagName('compounddef')[0] cdef_kind = compdef.attributes['kind'].value if prot == 'public': first = self.get_specific_nodes(node, ('definition', 'name')) name = first['name'].firstChild.data if name[:8] == 'operator': # Don't handle operators yet. return defn = first['definition'].firstChild.data self.add_text('\n') self.add_text('%feature("docstring") ') anc = node.parentNode.parentNode if cdef_kind in ('file', 'namespace'): ns_node = anc.getElementsByTagName('innernamespace') if not ns_node and cdef_kind == 'namespace': ns_node = anc.getElementsByTagName('compoundname') if ns_node: ns = ns_node[0].firstChild.data self.add_text(' %s::%s "\n%s'%(ns, name, defn)) else: self.add_text(' %s "\n%s'%(name, defn)) elif cdef_kind in ('class', 'struct'): # Get the full function name. 
anc_node = anc.getElementsByTagName('compoundname') cname = anc_node[0].firstChild.data self.add_text(' %s::%s "\n%s'%(cname, name, defn)) for n in node.childNodes: if n not in first.values(): self.parse(n) self.add_text(['";', '\n']) def do_definition(self, node): data = node.firstChild.data self.add_text('%s "\n%s'%(data, data)) def do_sectiondef(self, node): kind = node.attributes['kind'].value if kind in ('public-func', 'func'): self.generic_parse(node) def do_simplesect(self, node): kind = node.attributes['kind'].value if kind in ('date', 'rcs', 'version'): pass elif kind == 'warning': self.add_text(['\n', 'WARNING: ']) self.generic_parse(node) elif kind == 'see': self.add_text('\n') self.add_text('See: ') self.generic_parse(node) else: self.generic_parse(node) def do_argsstring(self, node): self.generic_parse(node, pad=1) def do_member(self, node): kind = node.attributes['kind'].value refid = node.attributes['refid'].value if kind == 'function' and refid[:9] == 'namespace': self.generic_parse(node) def do_doxygenindex(self, node): self.multi = 1 comps = node.getElementsByTagName('compound') for c in comps: refid = c.attributes['refid'].value fname = refid + '.xml' if not os.path.exists(fname): fname = os.path.join(self.my_dir, fname) print("parsing file: %s"%fname) p = Doxy2SWIG(fname) p.generate() self.pieces.extend(self.clean_pieces(p.pieces)) def write(self, fname, mode='w'): o = my_open_write(fname, mode) if self.multi: o.write("".join(self.pieces)) else: o.write("".join(self.clean_pieces(self.pieces))) o.close() def clean_pieces(self, pieces): """Cleans the list of strings given as `pieces`. It replaces multiple newlines by a maximum of 2 and returns a new list. It also wraps the paragraphs nicely. """ ret = [] count = 0 for i in pieces: if i == '\n': count = count + 1 else: if i == '";': if count: ret.append('\n') elif count > 2: ret.append('\n\n') elif count: ret.append('\n'*count) count = 0 ret.append(i) _data = "".join(ret) ret = [] for i in _data.split('\n\n'): if i == 'Parameters:': ret.extend(['Parameters:\n-----------', '\n\n']) elif i.find('// File:') > -1: # leave comments alone. ret.extend([i, '\n']) else: _tmp = textwrap.fill(i.strip()) _tmp = self.lead_spc.sub(r'\1"\2', _tmp) ret.extend([_tmp, '\n\n']) return ret def main(input, output): p = Doxy2SWIG(input) p.generate() p.write(output) if __name__ == '__main__': if len(sys.argv) != 3: print(__doc__) sys.exit(1) main(sys.argv[1], sys.argv[2])
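# Hedged usage sketch beyond the CLI entry point above; the file names are
# placeholders for doxygen's real XML output, not paths this module defines:
#
#   p = Doxy2SWIG('xml/index.xml')  # an index file fans out via do_doxygenindex
#   p.generate()
#   p.write('itk_doc.i')            # emits %feature("docstring") blocks for SWIG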
apache-2.0
styx0x6/gremlins
lib/thirdparty/requests/sessions.py
115
26216
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os from collections import Mapping from datetime import datetime from .auth import _basic_auth_str from .compat import cookielib, OrderedDict, urljoin, urlparse from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .packages.urllib3._collections import RecentlyUsedContainer from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url, rewind_body ) from .status_codes import codes # formerly defined here, reexposed here for backward compatibility from .models import REDIRECT_STATI REDIRECT_CACHE_SIZE = 1000 def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs): """Receives a Response. Returns a generator of Responses.""" i = 0 hist = [] # keep track of history while resp.is_redirect: prepared_request = req.copy() if i > 0: # Update history and keep track of redirects. hist.append(resp) new_hist = list(hist) resp.history = new_hist try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) # Release the connection back into the pool. 
resp.close() url = resp.headers['location'] # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) # The scheme should be lower case... parsed = urlparse(url) url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) # Cache the url, unless it redirects to itself. if resp.is_permanent_redirect and req.url != prepared_request.url: self.redirect_cache[req.url] = prepared_request.url self.rebuild_method(prepared_request, resp) # https://github.com/kennethreitz/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/kennethreitz/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. rewindable = ( prepared_request._body_position is not None and ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. req = prepared_request resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) i += 1 yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if 'Authorization' in headers: # If we get redirected to a new host, we should strip out any # authentication headers. original_parsed = urlparse(response.request.url) redirect_parsed = urlparse(url) if (original_parsed.hostname != redirect_parsed.hostname): del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) return def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. 
Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() if proxies is not None else {} if self.trust_env and not should_bypass_proxies(url): environ_proxies = get_environ_proxies(url) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # http://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', 'max_redirects', ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL client certificate default. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. 
By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) # Only store 1000 redirects to prevent using infinite memory self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
:rtype: requests.Response """ # Create the Request. req = Request( method = method.upper(), url = url, headers = headers, files = files, data = data or {}, json = json, params = params or {}, auth = auth, cookies = cookies, hooks = hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { 'timeout': timeout, 'allow_redirects': allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): """Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('GET', url, **kwargs) def options(self, url, **kwargs): """Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): """Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return self.request('HEAD', url, **kwargs) def post(self, url, data=None, json=None, **kwargs): """Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('POST', url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): """Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): """Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): """Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """ Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. 
kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks # Resolve URL in redirect cache, if available. if allow_redirects: checked_urls = set() while request.url in self.redirect_cache: checked_urls.add(request.url) new_url = self.redirect_cache.get(request.url) if new_url in checked_urls: break request.url = new_url # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = datetime.utcnow() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) r.elapsed = datetime.utcnow() - start # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. env_proxies = get_environ_proxies(url) or {} for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url) def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by key length. 
""" self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) state['redirect_cache'] = dict(self.redirect_cache) return state def __setstate__(self, state): redirect_cache = state.pop('redirect_cache', {}) for attr, value in state.items(): setattr(self, attr, value) self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE) for redirect, to in redirect_cache.items(): self.redirect_cache[redirect] = to def session(): """ Returns a :class:`Session` for context-management. :rtype: Session """ return Session()
agpl-3.0
NicolasLM/sauna
sauna/plugins/ext/memcached.py
2
3980
import re
import socket

from sauna.plugins import (Plugin, bytes_to_human, human_to_bytes,
                           PluginRegister)

my_plugin = PluginRegister('Memcached')


@my_plugin.plugin()
class Memcached(Plugin):

    def __init__(self, config):
        super().__init__(config)
        self.config = {
            'host': config.get('host', 'localhost'),
            'port': config.get('port', 11211),
            'timeout': config.get('timeout', 5)
        }
        self._stats = None

    @my_plugin.check()
    def accepting_connections(self, check_config):
        try:
            accept_connections = self.stats['accepting_conns'] == 1
        except OSError as e:
            return (Plugin.STATUS_CRIT,
                    'Memcached is not accepting connections: {}'.format(e))
        if accept_connections:
            return Plugin.STATUS_OK, 'Memcached is accepting connections'
        else:
            return Plugin.STATUS_CRIT, 'Memcached is not accepting connections'

    @my_plugin.check()
    def bytes(self, check_config):
        status = self._value_to_status_less(self.stats['bytes'], check_config,
                                            human_to_bytes)
        output = 'Memcached memory: {}'.format(
            bytes_to_human(self.stats['bytes'])
        )
        return status, output

    @my_plugin.check()
    def used_percent(self, check_config):
        used_percent = int(
            self.stats['bytes'] * 100 / self.stats['limit_maxbytes']
        )
        status = self._value_to_status_less(used_percent, check_config,
                                            self._strip_percent_sign)
        output = 'Memcached memory used: {}% of {}'.format(
            used_percent,
            bytes_to_human(self.stats['limit_maxbytes'])
        )
        return status, output

    @my_plugin.check()
    def current_items(self, check_config):
        status = self._value_to_status_less(self.stats['curr_items'],
                                            check_config)
        output = 'Memcached holds {} items'.format(self.stats['curr_items'])
        return status, output

    @property
    def stats(self):
        if not self._stats:
            self._stats = self._raw_stats_to_dict(
                self._fetch_memcached_stats()
            )
        return self._stats

    @classmethod
    def _raw_stats_to_dict(cls, stats_data):
        """Convert raw memcached output to a dict of stats."""
        stats_string = stats_data.decode('ascii')
        stats_string = stats_string.replace('\r\n', '\n')
        matches = re.findall(r'^STAT (\w+) (\d+)$', stats_string,
                             flags=re.MULTILINE)
        return {match[0]: int(match[1]) for match in matches}

    def _fetch_memcached_stats(self):
        """Connect to Memcached and retrieve stats."""
        data = bytes()
        with socket.create_connection((self.config['host'],
                                       self.config['port']),
                                      timeout=self.config['timeout']) as s:
            s.sendall(b'stats\r\n')
            while True:
                buffer = bytearray(4096)
                bytes_received = s.recv_into(buffer)
                if bytes_received == 0:
                    # Remote host closed connection
                    break
                # keep only the bytes actually received, not the whole
                # zero-padded buffer
                data += buffer[:bytes_received]
                if b'\r\nEND\r\n' in data:
                    # End of the stats command
                    break
        return data

    @staticmethod
    def config_sample():
        return '''
        # Memcached
        - type: Memcached
          checks:
            - type: bytes
              warn: 128M
              crit: 256M
            - type: used_percent
              warn: 80%
              crit: 90%
            - type: current_items
              warn: 10000
              crit: 20000
            - type: accepting_connections
          config:
            host: localhost
            port: 11211
        '''
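# Hedged sketch: driving the plugin above directly, assuming a memcached
# instance reachable on localhost:11211; sauna normally instantiates it from
# its YAML configuration instead.
def _memcached_selfcheck():
    plugin = Memcached({'host': 'localhost', 'port': 11211})
    status, output = plugin.accepting_connections({})
    print(status, output)  # e.g. STATUS_OK 'Memcached is accepting connections'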
bsd-2-clause
dablak/boto
boto/iam/connection.py
7
53721
# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010-2011, Eucalyptus Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import boto import boto.jsonresponse from boto.compat import json from boto.resultset import ResultSet from boto.iam.summarymap import SummaryMap from boto.connection import AWSQueryConnection ASSUME_ROLE_POLICY_DOCUMENT = json.dumps({ 'Statement': [{'Principal': {'Service': ['ec2.amazonaws.com']}, 'Effect': 'Allow', 'Action': ['sts:AssumeRole']}]}) class IAMConnection(AWSQueryConnection): APIVersion = '2010-05-08' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', debug=0, https_connection_factory=None, path='/', security_token=None, validate_certs=True, profile_name=None): super(IAMConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, https_connection_factory, path, security_token, validate_certs=validate_certs, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] def get_response(self, action, params, path='/', parent=None, verb='POST', list_marker='Set'): """ Utility method to handle calls to IAM and parsing of responses. """ if not parent: parent = self response = self.make_request(action, params, path, verb) body = response.read() boto.log.debug(body) if response.status == 200: if body: e = boto.jsonresponse.Element(list_marker=list_marker, pythonize_name=True) h = boto.jsonresponse.XmlHandler(e, parent) h.parse(body) return e else: # Support empty responses, e.g. deleting a SAML provider # according to the official documentation. return {} else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) # # Group methods # def get_all_groups(self, path_prefix='/', marker=None, max_items=None): """ List the groups that have the specified path prefix. :type path_prefix: string :param path_prefix: If provided, only groups whose paths match the provided prefix will be returned. :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. 
        :type max_items: int
        :param max_items: Use this only when paginating results to indicate
            the maximum number of groups you want in the response.
        """
        params = {}
        if path_prefix:
            params['PathPrefix'] = path_prefix
        if marker:
            params['Marker'] = marker
        if max_items:
            params['MaxItems'] = max_items
        return self.get_response('ListGroups', params,
                                 list_marker='Groups')

    def get_group(self, group_name, marker=None, max_items=None):
        """
        Return a list of users that are in the specified group.

        :type group_name: string
        :param group_name: The name of the group whose information should
            be returned.

        :type marker: string
        :param marker: Use this only when paginating results and only
            in follow-up request after you've received a response
            where the results are truncated.  Set this to the value of
            the Marker element in the response you just received.

        :type max_items: int
        :param max_items: Use this only when paginating results to indicate
            the maximum number of groups you want in the response.
        """
        params = {'GroupName': group_name}
        if marker:
            params['Marker'] = marker
        if max_items:
            params['MaxItems'] = max_items
        return self.get_response('GetGroup', params, list_marker='Users')

    def create_group(self, group_name, path='/'):
        """
        Create a group.

        :type group_name: string
        :param group_name: The name of the new group

        :type path: string
        :param path: The path to the group (Optional).  Defaults to /.
        """
        params = {'GroupName': group_name,
                  'Path': path}
        return self.get_response('CreateGroup', params)

    def delete_group(self, group_name):
        """
        Delete a group. The group must not contain any Users or
        have any attached policies.

        :type group_name: string
        :param group_name: The name of the group to delete.
        """
        params = {'GroupName': group_name}
        return self.get_response('DeleteGroup', params)

    def update_group(self, group_name, new_group_name=None, new_path=None):
        """
        Updates name and/or path of the specified group.

        :type group_name: string
        :param group_name: The name of the group to update.

        :type new_group_name: string
        :param new_group_name: If provided, the name of the group will be
            changed to this name.

        :type new_path: string
        :param new_path: If provided, the path of the group will be
            changed to this path.
        """
        params = {'GroupName': group_name}
        if new_group_name:
            params['NewGroupName'] = new_group_name
        if new_path:
            params['NewPath'] = new_path
        return self.get_response('UpdateGroup', params)

    def add_user_to_group(self, group_name, user_name):
        """
        Add a user to a group.

        :type group_name: string
        :param group_name: The name of the group.

        :type user_name: string
        :param user_name: The user to be added to the group.
        """
        params = {'GroupName': group_name,
                  'UserName': user_name}
        return self.get_response('AddUserToGroup', params)

    def remove_user_from_group(self, group_name, user_name):
        """
        Remove a user from a group.

        :type group_name: string
        :param group_name: The name of the group.

        :type user_name: string
        :param user_name: The user to remove from the group.
        """
        params = {'GroupName': group_name,
                  'UserName': user_name}
        return self.get_response('RemoveUserFromGroup', params)

    def put_group_policy(self, group_name, policy_name, policy_json):
        """
        Adds or updates the specified policy document for the specified group.

        :type group_name: string
        :param group_name: The name of the group the policy is associated
            with.

        :type policy_name: string
        :param policy_name: The name of the policy document.

        :type policy_json: string
        :param policy_json: The policy document.
""" params = {'GroupName': group_name, 'PolicyName': policy_name, 'PolicyDocument': policy_json} return self.get_response('PutGroupPolicy', params, verb='POST') def get_all_group_policies(self, group_name, marker=None, max_items=None): """ List the names of the policies associated with the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {'GroupName': group_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListGroupPolicies', params, list_marker='PolicyNames') def get_group_policy(self, group_name, policy_name): """ Retrieves the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to get. """ params = {'GroupName': group_name, 'PolicyName': policy_name} return self.get_response('GetGroupPolicy', params, verb='POST') def delete_group_policy(self, group_name, policy_name): """ Deletes the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to delete. """ params = {'GroupName': group_name, 'PolicyName': policy_name} return self.get_response('DeleteGroupPolicy', params, verb='POST') def get_all_users(self, path_prefix='/', marker=None, max_items=None): """ List the users that have the specified path prefix. :type path_prefix: string :param path_prefix: If provided, only users whose paths match the provided prefix will be returned. :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {'PathPrefix': path_prefix} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListUsers', params, list_marker='Users') # # User methods # def create_user(self, user_name, path='/'): """ Create a user. :type user_name: string :param user_name: The name of the new user :type path: string :param path: The path in which the user will be created. Defaults to /. """ params = {'UserName': user_name, 'Path': path} return self.get_response('CreateUser', params) def delete_user(self, user_name): """ Delete a user including the user's path, GUID and ARN. If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type user_name: string :param user_name: The name of the user to delete. """ params = {'UserName': user_name} return self.get_response('DeleteUser', params) def get_user(self, user_name=None): """ Retrieve information about the specified user. 
        If the user_name is not specified, the user_name is determined
        implicitly based on the AWS Access Key ID used to sign the request.

        :type user_name: string
        :param user_name: The name of the user to retrieve.
            If not specified, defaults to user making request.
        """
        params = {}
        if user_name:
            params['UserName'] = user_name
        return self.get_response('GetUser', params)

    def update_user(self, user_name, new_user_name=None, new_path=None):
        """
        Updates name and/or path of the specified user.

        :type user_name: string
        :param user_name: The name of the user.

        :type new_user_name: string
        :param new_user_name: If provided, the username of the user will be
            changed to this username.

        :type new_path: string
        :param new_path: If provided, the path of the user will be changed
            to this path.
        """
        params = {'UserName': user_name}
        if new_user_name:
            params['NewUserName'] = new_user_name
        if new_path:
            params['NewPath'] = new_path
        return self.get_response('UpdateUser', params)

    def get_all_user_policies(self, user_name, marker=None, max_items=None):
        """
        List the names of the policies associated with the specified user.

        :type user_name: string
        :param user_name: The name of the user the policy is associated with.

        :type marker: string
        :param marker: Use this only when paginating results and only
            in follow-up request after you've received a response
            where the results are truncated.  Set this to the value of
            the Marker element in the response you just received.

        :type max_items: int
        :param max_items: Use this only when paginating results to indicate
            the maximum number of groups you want in the response.
        """
        params = {'UserName': user_name}
        if marker:
            params['Marker'] = marker
        if max_items:
            params['MaxItems'] = max_items
        return self.get_response('ListUserPolicies', params,
                                 list_marker='PolicyNames')

    def put_user_policy(self, user_name, policy_name, policy_json):
        """
        Adds or updates the specified policy document for the specified user.

        :type user_name: string
        :param user_name: The name of the user the policy is associated with.

        :type policy_name: string
        :param policy_name: The name of the policy document.

        :type policy_json: string
        :param policy_json: The policy document.
        """
        params = {'UserName': user_name,
                  'PolicyName': policy_name,
                  'PolicyDocument': policy_json}
        return self.get_response('PutUserPolicy', params, verb='POST')

    def get_user_policy(self, user_name, policy_name):
        """
        Retrieves the specified policy document for the specified user.

        :type user_name: string
        :param user_name: The name of the user the policy is associated with.

        :type policy_name: string
        :param policy_name: The policy document to get.
        """
        params = {'UserName': user_name,
                  'PolicyName': policy_name}
        return self.get_response('GetUserPolicy', params, verb='POST')

    def delete_user_policy(self, user_name, policy_name):
        """
        Deletes the specified policy document for the specified user.

        :type user_name: string
        :param user_name: The name of the user the policy is associated with.

        :type policy_name: string
        :param policy_name: The policy document to delete.
        """
        params = {'UserName': user_name,
                  'PolicyName': policy_name}
        return self.get_response('DeleteUserPolicy', params, verb='POST')

    def get_groups_for_user(self, user_name, marker=None, max_items=None):
        """
        List the groups that a specified user belongs to.

        :type user_name: string
        :param user_name: The name of the user to list groups for.

        :type marker: string
        :param marker: Use this only when paginating results and only
            in follow-up request after you've received a response
            where the results are truncated.
Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {'UserName': user_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListGroupsForUser', params, list_marker='Groups') # # Access Keys # def get_all_access_keys(self, user_name, marker=None, max_items=None): """ Get all access keys associated with an account. :type user_name: string :param user_name: The username of the user :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {'UserName': user_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListAccessKeys', params, list_marker='AccessKeyMetadata') def create_access_key(self, user_name=None): """ Create a new AWS Secret Access Key and corresponding AWS Access Key ID for the specified user. The default status for new keys is Active If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type user_name: string :param user_name: The username of the user """ params = {'UserName': user_name} return self.get_response('CreateAccessKey', params) def update_access_key(self, access_key_id, status, user_name=None): """ Changes the status of the specified access key from Active to Inactive or vice versa. This action can be used to disable a user's key as part of a key rotation workflow. If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type access_key_id: string :param access_key_id: The ID of the access key. :type status: string :param status: Either Active or Inactive. :type user_name: string :param user_name: The username of user (optional). """ params = {'AccessKeyId': access_key_id, 'Status': status} if user_name: params['UserName'] = user_name return self.get_response('UpdateAccessKey', params) def delete_access_key(self, access_key_id, user_name=None): """ Delete an access key associated with a user. If the user_name is not specified, it is determined implicitly based on the AWS Access Key ID used to sign the request. :type access_key_id: string :param access_key_id: The ID of the access key to be deleted. :type user_name: string :param user_name: The username of the user """ params = {'AccessKeyId': access_key_id} if user_name: params['UserName'] = user_name return self.get_response('DeleteAccessKey', params) # # Signing Certificates # def get_all_signing_certs(self, marker=None, max_items=None, user_name=None): """ Get all signing certificates associated with an account. If the user_name is not specified, it is determined implicitly based on the AWS Access Key ID used to sign the request. :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. 
:type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. :type user_name: string :param user_name: The username of the user """ params = {} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items if user_name: params['UserName'] = user_name return self.get_response('ListSigningCertificates', params, list_marker='Certificates') def update_signing_cert(self, cert_id, status, user_name=None): """ Change the status of the specified signing certificate from Active to Inactive or vice versa. If the user_name is not specified, it is determined implicitly based on the AWS Access Key ID used to sign the request. :type cert_id: string :param cert_id: The ID of the signing certificate :type status: string :param status: Either Active or Inactive. :type user_name: string :param user_name: The username of the user """ params = {'CertificateId': cert_id, 'Status': status} if user_name: params['UserName'] = user_name return self.get_response('UpdateSigningCertificate', params) def upload_signing_cert(self, cert_body, user_name=None): """ Uploads an X.509 signing certificate and associates it with the specified user. If the user_name is not specified, it is determined implicitly based on the AWS Access Key ID used to sign the request. :type cert_body: string :param cert_body: The body of the signing certificate. :type user_name: string :param user_name: The username of the user """ params = {'CertificateBody': cert_body} if user_name: params['UserName'] = user_name return self.get_response('UploadSigningCertificate', params, verb='POST') def delete_signing_cert(self, cert_id, user_name=None): """ Delete a signing certificate associated with a user. If the user_name is not specified, it is determined implicitly based on the AWS Access Key ID used to sign the request. :type user_name: string :param user_name: The username of the user :type cert_id: string :param cert_id: The ID of the certificate. """ params = {'CertificateId': cert_id} if user_name: params['UserName'] = user_name return self.get_response('DeleteSigningCertificate', params) # # Server Certificates # def list_server_certs(self, path_prefix='/', marker=None, max_items=None): """ Lists the server certificates that have the specified path prefix. If none exist, the action returns an empty list. :type path_prefix: string :param path_prefix: If provided, only certificates whose paths match the provided prefix will be returned. :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {} if path_prefix: params['PathPrefix'] = path_prefix if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListServerCertificates', params, list_marker='ServerCertificateMetadataList') # Preserves backwards compatibility. # TODO: Look into deprecating this eventually? get_all_server_certs = list_server_certs def update_server_cert(self, cert_name, new_cert_name=None, new_path=None): """ Updates the name and/or the path of the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate that you want to update. 
:type new_cert_name: string :param new_cert_name: The new name for the server certificate. Include this only if you are updating the server certificate's name. :type new_path: string :param new_path: If provided, the path of the certificate will be changed to this path. """ params = {'ServerCertificateName': cert_name} if new_cert_name: params['NewServerCertificateName'] = new_cert_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateServerCertificate', params) def upload_server_cert(self, cert_name, cert_body, private_key, cert_chain=None, path=None): """ Uploads a server certificate entity for the AWS Account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded. :type cert_name: string :param cert_name: The name for the server certificate. Do not include the path in this value. :type cert_body: string :param cert_body: The contents of the public key certificate in PEM-encoded format. :type private_key: string :param private_key: The contents of the private key in PEM-encoded format. :type cert_chain: string :param cert_chain: The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain. :type path: string :param path: The path for the server certificate. """ params = {'ServerCertificateName': cert_name, 'CertificateBody': cert_body, 'PrivateKey': private_key} if cert_chain: params['CertificateChain'] = cert_chain if path: params['Path'] = path return self.get_response('UploadServerCertificate', params, verb='POST') def get_server_certificate(self, cert_name): """ Retrieves information about the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate you want to retrieve information about. """ params = {'ServerCertificateName': cert_name} return self.get_response('GetServerCertificate', params) def delete_server_cert(self, cert_name): """ Delete the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate you want to delete. """ params = {'ServerCertificateName': cert_name} return self.get_response('DeleteServerCertificate', params) # # MFA Devices # def get_all_mfa_devices(self, user_name, marker=None, max_items=None): """ Get all MFA devices associated with an account. :type user_name: string :param user_name: The username of the user :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response. """ params = {'UserName': user_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListMFADevices', params, list_marker='MFADevices') def enable_mfa_device(self, user_name, serial_number, auth_code_1, auth_code_2): """ Enables the specified MFA device and associates it with the specified user. :type user_name: string :param user_name: The username of the user :type serial_number: string :param serial_number: The serial number which uniquely identifies the MFA device. :type auth_code_1: string :param auth_code_1: An authentication code emitted by the device. 
        :type auth_code_2: string
        :param auth_code_2: A subsequent authentication code emitted by
            the device.
        """
        params = {'UserName': user_name,
                  'SerialNumber': serial_number,
                  'AuthenticationCode1': auth_code_1,
                  'AuthenticationCode2': auth_code_2}
        return self.get_response('EnableMFADevice', params)

    def deactivate_mfa_device(self, user_name, serial_number):
        """
        Deactivates the specified MFA device and removes it from
        association with the user.

        :type user_name: string
        :param user_name: The username of the user.

        :type serial_number: string
        :param serial_number: The serial number which uniquely identifies
            the MFA device.
        """
        params = {'UserName': user_name,
                  'SerialNumber': serial_number}
        return self.get_response('DeactivateMFADevice', params)

    def resync_mfa_device(self, user_name, serial_number,
                          auth_code_1, auth_code_2):
        """
        Synchronizes the specified MFA device with the AWS servers.

        :type user_name: string
        :param user_name: The username of the user.

        :type serial_number: string
        :param serial_number: The serial number which uniquely identifies
            the MFA device.

        :type auth_code_1: string
        :param auth_code_1: An authentication code emitted by the device.

        :type auth_code_2: string
        :param auth_code_2: A subsequent authentication code emitted by
            the device.
        """
        params = {'UserName': user_name,
                  'SerialNumber': serial_number,
                  'AuthenticationCode1': auth_code_1,
                  'AuthenticationCode2': auth_code_2}
        return self.get_response('ResyncMFADevice', params)

    #
    # Login Profiles
    #

    def get_login_profiles(self, user_name):
        """
        Retrieves the login profile for the specified user.

        :type user_name: string
        :param user_name: The username of the user.
        """
        params = {'UserName': user_name}
        return self.get_response('GetLoginProfile', params)

    def create_login_profile(self, user_name, password):
        """
        Creates a login profile for the specified user, giving the user
        the ability to access AWS services and the AWS Management Console.

        :type user_name: string
        :param user_name: The name of the user.

        :type password: string
        :param password: The new password for the user.
        """
        params = {'UserName': user_name,
                  'Password': password}
        return self.get_response('CreateLoginProfile', params)

    def delete_login_profile(self, user_name):
        """
        Deletes the login profile associated with the specified user.

        :type user_name: string
        :param user_name: The name of the user whose login profile
            should be deleted.
        """
        params = {'UserName': user_name}
        return self.get_response('DeleteLoginProfile', params)

    def update_login_profile(self, user_name, password):
        """
        Resets the password associated with the user's login profile.

        :type user_name: string
        :param user_name: The name of the user.

        :type password: string
        :param password: The new password for the user.
        """
        params = {'UserName': user_name,
                  'Password': password}
        return self.get_response('UpdateLoginProfile', params)

    def create_account_alias(self, alias):
        """
        Creates a new alias for the AWS account.

        For more information on account id aliases, please see
        http://goo.gl/ToB7G

        :type alias: string
        :param alias: The alias to attach to the account.
        """
        params = {'AccountAlias': alias}
        return self.get_response('CreateAccountAlias', params)

    def delete_account_alias(self, alias):
        """
        Deletes an alias for the AWS account.

        For more information on account id aliases, please see
        http://goo.gl/ToB7G

        :type alias: string
        :param alias: The alias to remove from the account.
        """
        params = {'AccountAlias': alias}
        return self.get_response('DeleteAccountAlias', params)

    def get_account_alias(self):
        """
        Get the alias for the current account.
        This is referred to in the docs as list_account_aliases, but
        it seems you can only have one account alias currently.

        For more information on account id aliases, please see
        http://goo.gl/ToB7G
        """
        return self.get_response('ListAccountAliases', {},
                                 list_marker='AccountAliases')

    def get_signin_url(self, service='ec2'):
        """
        Get the URL where IAM users can use their login profile to sign in
        to this account's console.

        :type service: string
        :param service: Default service to go to in the console.
        """
        alias = self.get_account_alias()

        if not alias:
            raise Exception('No alias associated with this account. '
                            'Please use iam.create_account_alias() first.')

        if self.host == 'iam.us-gov.amazonaws.com':
            return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
                alias, service)
        else:
            return "https://%s.signin.aws.amazon.com/console/%s" % (
                alias, service)

    def get_account_summary(self):
        """
        Get a summary of IAM entity usage and quotas for the current
        account: counts of users, groups, roles, MFA devices, server
        certificates and the corresponding account limits, returned
        as a SummaryMap.
        """
        return self.get_object('GetAccountSummary', {}, SummaryMap)

    #
    # IAM Roles
    #

    def add_role_to_instance_profile(self, instance_profile_name, role_name):
        """
        Adds the specified role to the specified instance profile.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to update.

        :type role_name: string
        :param role_name: Name of the role to add.
        """
        return self.get_response(
            'AddRoleToInstanceProfile',
            {'InstanceProfileName': instance_profile_name,
             'RoleName': role_name})

    def create_instance_profile(self, instance_profile_name, path=None):
        """
        Creates a new instance profile.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to create.

        :type path: string
        :param path: The path to the instance profile.
        """
        params = {'InstanceProfileName': instance_profile_name}
        if path is not None:
            params['Path'] = path
        return self.get_response('CreateInstanceProfile', params)

    def create_role(self, role_name, assume_role_policy_document=None,
                    path=None):
        """
        Creates a new role for your AWS account.

        The policy grants permission to an EC2 instance to assume the role.
        The policy is URL-encoded according to RFC 3986. Currently, only
        EC2 instances can assume roles.

        :type role_name: string
        :param role_name: Name of the role to create.

        :type assume_role_policy_document: string
        :param assume_role_policy_document: The policy that grants an
            entity permission to assume the role.

        :type path: string
        :param path: The path to the role.
        """
        params = {'RoleName': role_name}
        if assume_role_policy_document is None:
            # This is the only valid assume_role_policy_document currently,
            # so it is used as a default value if no
            # assume_role_policy_document is provided.
            params['AssumeRolePolicyDocument'] = ASSUME_ROLE_POLICY_DOCUMENT
        else:
            params['AssumeRolePolicyDocument'] = assume_role_policy_document
        if path is not None:
            params['Path'] = path
        return self.get_response('CreateRole', params)

    def delete_instance_profile(self, instance_profile_name):
        """
        Deletes the specified instance profile. The instance profile must
        not have an associated role.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to delete.
        """
        return self.get_response(
            'DeleteInstanceProfile',
            {'InstanceProfileName': instance_profile_name})

    def delete_role(self, role_name):
        """
        Deletes the specified role.
The role must not have any policies attached. :type role_name: string :param role_name: Name of the role to delete. """ return self.get_response('DeleteRole', {'RoleName': role_name}) def delete_role_policy(self, role_name, policy_name): """ Deletes the specified policy associated with the specified role. :type role_name: string :param role_name: Name of the role associated with the policy. :type policy_name: string :param policy_name: Name of the policy to delete. """ return self.get_response( 'DeleteRolePolicy', {'RoleName': role_name, 'PolicyName': policy_name}) def get_instance_profile(self, instance_profile_name): """ Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. :type instance_profile_name: string :param instance_profile_name: Name of the instance profile to get information about. """ return self.get_response('GetInstanceProfile', {'InstanceProfileName': instance_profile_name}) def get_role(self, role_name): """ Retrieves information about the specified role, including the role's path, GUID, ARN, and the policy granting permission to EC2 to assume the role. :type role_name: string :param role_name: Name of the role associated with the policy. """ return self.get_response('GetRole', {'RoleName': role_name}) def get_role_policy(self, role_name, policy_name): """ Retrieves the specified policy document for the specified role. :type role_name: string :param role_name: Name of the role associated with the policy. :type policy_name: string :param policy_name: Name of the policy to get. """ return self.get_response('GetRolePolicy', {'RoleName': role_name, 'PolicyName': policy_name}) def list_instance_profiles(self, path_prefix=None, marker=None, max_items=None): """ Lists the instance profiles that have the specified path prefix. If there are none, the action returns an empty list. :type path_prefix: string :param path_prefix: The path prefix for filtering the results. For example: /application_abc/component_xyz/, which would get all instance profiles whose path starts with /application_abc/component_xyz/. :type marker: string :param marker: Use this parameter only when paginating results, and only in a subsequent request after you've received a response where the results are truncated. Set it to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this parameter only when paginating results to indicate the maximum number of user names you want in the response. """ params = {} if path_prefix is not None: params['PathPrefix'] = path_prefix if marker is not None: params['Marker'] = marker if max_items is not None: params['MaxItems'] = max_items return self.get_response('ListInstanceProfiles', params, list_marker='InstanceProfiles') def list_instance_profiles_for_role(self, role_name, marker=None, max_items=None): """ Lists the instance profiles that have the specified associated role. If there are none, the action returns an empty list. :type role_name: string :param role_name: The name of the role to list instance profiles for. :type marker: string :param marker: Use this parameter only when paginating results, and only in a subsequent request after you've received a response where the results are truncated. Set it to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this parameter only when paginating results to indicate the maximum number of user names you want in the response. 
""" params = {'RoleName': role_name} if marker is not None: params['Marker'] = marker if max_items is not None: params['MaxItems'] = max_items return self.get_response('ListInstanceProfilesForRole', params, list_marker='InstanceProfiles') def list_role_policies(self, role_name, marker=None, max_items=None): """ Lists the names of the policies associated with the specified role. If there are none, the action returns an empty list. :type role_name: string :param role_name: The name of the role to list policies for. :type marker: string :param marker: Use this parameter only when paginating results, and only in a subsequent request after you've received a response where the results are truncated. Set it to the value of the marker element in the response you just received. :type max_items: int :param max_items: Use this parameter only when paginating results to indicate the maximum number of user names you want in the response. """ params = {'RoleName': role_name} if marker is not None: params['Marker'] = marker if max_items is not None: params['MaxItems'] = max_items return self.get_response('ListRolePolicies', params, list_marker='PolicyNames') def list_roles(self, path_prefix=None, marker=None, max_items=None): """ Lists the roles that have the specified path prefix. If there are none, the action returns an empty list. :type path_prefix: string :param path_prefix: The path prefix for filtering the results. :type marker: string :param marker: Use this parameter only when paginating results, and only in a subsequent request after you've received a response where the results are truncated. Set it to the value of the marker element in the response you just received. :type max_items: int :param max_items: Use this parameter only when paginating results to indicate the maximum number of user names you want in the response. """ params = {} if path_prefix is not None: params['PathPrefix'] = path_prefix if marker is not None: params['Marker'] = marker if max_items is not None: params['MaxItems'] = max_items return self.get_response('ListRoles', params, list_marker='Roles') def put_role_policy(self, role_name, policy_name, policy_document): """ Adds (or updates) a policy document associated with the specified role. :type role_name: string :param role_name: Name of the role to associate the policy with. :type policy_name: string :param policy_name: Name of the policy document. :type policy_document: string :param policy_document: The policy document. """ return self.get_response('PutRolePolicy', {'RoleName': role_name, 'PolicyName': policy_name, 'PolicyDocument': policy_document}) def remove_role_from_instance_profile(self, instance_profile_name, role_name): """ Removes the specified role from the specified instance profile. :type instance_profile_name: string :param instance_profile_name: Name of the instance profile to update. :type role_name: string :param role_name: Name of the role to remove. """ return self.get_response('RemoveRoleFromInstanceProfile', {'InstanceProfileName': instance_profile_name, 'RoleName': role_name}) def update_assume_role_policy(self, role_name, policy_document): """ Updates the policy that grants an entity permission to assume a role. Currently, only an Amazon EC2 instance can assume a role. :type role_name: string :param role_name: Name of the role to update. :type policy_document: string :param policy_document: The policy that grants an entity permission to assume the role. 
""" return self.get_response('UpdateAssumeRolePolicy', {'RoleName': role_name, 'PolicyDocument': policy_document}) def create_saml_provider(self, saml_metadata_document, name): """ Creates an IAM entity to describe an identity provider (IdP) that supports SAML 2.0. The SAML provider that you create with this operation can be used as a principal in a role's trust policy to establish a trust relationship between AWS and a SAML identity provider. You can create an IAM role that supports Web-based single sign-on (SSO) to the AWS Management Console or one that supports API access to AWS. When you create the SAML provider, you upload an a SAML metadata document that you get from your IdP and that includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP. This operation requires `Signature Version 4`_. For more information, see `Giving Console Access Using SAML`_ and `Creating Temporary Security Credentials for SAML Federation`_ in the Using Temporary Credentials guide. :type saml_metadata_document: string :param saml_metadata_document: An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP. For more information, see `Creating Temporary Security Credentials for SAML Federation`_ in the Using Temporary Security Credentials guide. :type name: string :param name: The name of the provider to create. """ params = { 'SAMLMetadataDocument': saml_metadata_document, 'Name': name, } return self.get_response('CreateSAMLProvider', params) def list_saml_providers(self): """ Lists the SAML providers in the account. This operation requires `Signature Version 4`_. """ return self.get_response('ListSAMLProviders', {}) def get_saml_provider(self, saml_provider_arn): """ Returns the SAML provider metadocument that was uploaded when the provider was created or updated. This operation requires `Signature Version 4`_. :type saml_provider_arn: string :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML provider to get information about. """ params = {'SAMLProviderArn': saml_provider_arn } return self.get_response('GetSAMLProvider', params) def update_saml_provider(self, saml_provider_arn, saml_metadata_document): """ Updates the metadata document for an existing SAML provider. This operation requires `Signature Version 4`_. :type saml_provider_arn: string :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML provider to update. :type saml_metadata_document: string :param saml_metadata_document: An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP. 
""" params = { 'SAMLMetadataDocument': saml_metadata_document, 'SAMLProviderArn': saml_provider_arn, } return self.get_response('UpdateSAMLProvider', params) def delete_saml_provider(self, saml_provider_arn): """ Deletes a SAML provider. Deleting the provider does not update any roles that reference the SAML provider as a principal in their trust policies. Any attempt to assume a role that references a SAML provider that has been deleted will fail. This operation requires `Signature Version 4`_. :type saml_provider_arn: string :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML provider to delete. """ params = {'SAMLProviderArn': saml_provider_arn } return self.get_response('DeleteSAMLProvider', params)
mit
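# --- Hedged usage sketch for the IAMConnection class above (illustrative,
# not part of boto itself). Assumes AWS credentials are available through
# the usual boto mechanisms (environment variables or ~/.boto); the user
# and policy names are hypothetical placeholders.
import boto

iam = boto.connect_iam()
iam.create_user('deploy')                      # hypothetical user name
key = iam.create_access_key('deploy')
iam.put_user_policy('deploy', 's3-read-only',  # hypothetical policy name
                    '{"Statement": [{"Effect": "Allow", '
                    '"Action": "s3:Get*", "Resource": "*"}]}')
print(iam.get_groups_for_user('deploy'))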
kelseyoo14/Wander
venv_2_7/lib/python2.7/site-packages/setuptools/command/develop.py
450
6610
from distutils.util import convert_path from distutils import log from distutils.errors import DistutilsError, DistutilsOptionError import os import glob from pkg_resources import Distribution, PathMetadata, normalize_path from setuptools.command.easy_install import easy_install from setuptools.compat import PY3 import setuptools class develop(easy_install): """Set up package for development""" description = "install package in 'development mode'" user_options = easy_install.user_options + [ ("uninstall", "u", "Uninstall this source package"), ("egg-path=", None, "Set the path to be used in the .egg-link file"), ] boolean_options = easy_install.boolean_options + ['uninstall'] command_consumes_arguments = False # override base def run(self): if self.uninstall: self.multi_version = True self.uninstall_link() else: self.install_for_development() self.warn_deprecated_options() def initialize_options(self): self.uninstall = None self.egg_path = None easy_install.initialize_options(self) self.setup_path = None self.always_copy_from = '.' # always copy eggs installed in curdir def finalize_options(self): ei = self.get_finalized_command("egg_info") if ei.broken_egg_info: template = "Please rename %r to %r before using 'develop'" args = ei.egg_info, ei.broken_egg_info raise DistutilsError(template % args) self.args = [ei.egg_name] easy_install.finalize_options(self) self.expand_basedirs() self.expand_dirs() # pick up setup-dir .egg files only: no .egg-info self.package_index.scan(glob.glob('*.egg')) self.egg_link = os.path.join(self.install_dir, ei.egg_name + '.egg-link') self.egg_base = ei.egg_base if self.egg_path is None: self.egg_path = os.path.abspath(ei.egg_base) target = normalize_path(self.egg_base) egg_path = normalize_path(os.path.join(self.install_dir, self.egg_path)) if egg_path != target: raise DistutilsOptionError( "--egg-path must be a relative path from the install" " directory to " + target ) # Make a distribution for the package's source self.dist = Distribution( target, PathMetadata(target, os.path.abspath(ei.egg_info)), project_name=ei.egg_name ) p = self.egg_base.replace(os.sep, '/') if p != os.curdir: p = '../' * (p.count('/') + 1) self.setup_path = p p = normalize_path(os.path.join(self.install_dir, self.egg_path, p)) if p != normalize_path(os.curdir): raise DistutilsOptionError( "Can't get a consistent path to setup script from" " installation directory", p, normalize_path(os.curdir)) def install_for_development(self): if PY3 and getattr(self.distribution, 'use_2to3', False): # If we run 2to3 we can not do this inplace: # Ensure metadata is up-to-date self.reinitialize_command('build_py', inplace=0) self.run_command('build_py') bpy_cmd = self.get_finalized_command("build_py") build_path = normalize_path(bpy_cmd.build_lib) # Build extensions self.reinitialize_command('egg_info', egg_base=build_path) self.run_command('egg_info') self.reinitialize_command('build_ext', inplace=0) self.run_command('build_ext') # Fixup egg-link and easy-install.pth ei_cmd = self.get_finalized_command("egg_info") self.egg_path = build_path self.dist.location = build_path # XXX self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info) else: # Without 2to3 inplace works fine: self.run_command('egg_info') # Build extensions in-place self.reinitialize_command('build_ext', inplace=1) self.run_command('build_ext') self.install_site_py() # ensure that target dir is site-safe if setuptools.bootstrap_install_from: self.easy_install(setuptools.bootstrap_install_from) setuptools.bootstrap_install_from = 
None # create an .egg-link in the installation dir, pointing to our egg log.info("Creating %s (link to %s)", self.egg_link, self.egg_base) if not self.dry_run: f = open(self.egg_link, "w") f.write(self.egg_path + "\n" + self.setup_path) f.close() # postprocess the installed distro, fixing up .pth, installing scripts, # and handling requirements self.process_distribution(None, self.dist, not self.no_deps) def uninstall_link(self): if os.path.exists(self.egg_link): log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) egg_link_file = open(self.egg_link) contents = [line.rstrip() for line in egg_link_file] egg_link_file.close() if contents not in ([self.egg_path], [self.egg_path, self.setup_path]): log.warn("Link points to %s: uninstall aborted", contents) return if not self.dry_run: os.unlink(self.egg_link) if not self.dry_run: self.update_pth(self.dist) # remove any .pth link to us if self.distribution.scripts: # XXX should also check for entry point scripts! log.warn("Note: you must uninstall or replace scripts manually!") def install_egg_scripts(self, dist): if dist is not self.dist: # Installing a dependency, so fall back to normal behavior return easy_install.install_egg_scripts(self, dist) # create wrapper scripts in the script dir, pointing to dist.scripts # new-style... self.install_wrapper_scripts(dist) # ...and old-style for script_name in self.distribution.scripts or []: script_path = os.path.abspath(convert_path(script_name)) script_name = os.path.basename(script_path) f = open(script_path, 'rU') script_text = f.read() f.close() self.install_script(dist, script_name, script_text, script_path)
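# --- Hedged usage sketch (illustrative, not part of setuptools): the
# develop command above is what `python setup.py develop` and
# `pip install -e .` ultimately invoke. Driving it programmatically
# through distutils' run_setup, assuming a setup.py in the current dir:
from distutils.core import run_setup

# Equivalent to running `python setup.py develop` in the project root;
# writes an .egg-link into the install dir pointing back at this checkout.
dist = run_setup('setup.py', script_args=['develop'])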
artistic-2.0
shakamunyi/sahara
sahara/service/api/v2/job_binaries.py
1
2190
# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context from sahara.service.edp.job_binaries import manager as jb_manager conductor = c.API def create_job_binary(values): return conductor.job_binary_create(context.ctx(), values) def get_job_binaries(**kwargs): return conductor.job_binary_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_binary(id): return conductor.job_binary_get(context.ctx(), id) def update_job_binary(id, values): return conductor.job_binary_update(context.ctx(), id, values) def delete_job_binary(id): conductor.job_binary_destroy(context.ctx(), id) def create_job_binary_internal(values): return conductor.job_binary_internal_create(context.ctx(), values) def get_job_binary_internals(**kwargs): return conductor.job_binary_internal_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_binary_internal(id): return conductor.job_binary_internal_get(context.ctx(), id) def delete_job_binary_internal(id): conductor.job_binary_internal_destroy(context.ctx(), id) def get_job_binary_internal_data(id): return conductor.job_binary_internal_get_raw_data(context.ctx(), id) def update_job_binary_internal(id, values): return conductor.job_binary_internal_update(context.ctx(), id, values) def get_job_binary_data(id): job_binary = conductor.job_binary_get(context.ctx(), id) return jb_manager.JOB_BINARIES.get_job_binary(job_binary.type). \ get_raw_data(job_binary, with_context=True)
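# --- Hedged usage sketch (illustrative, not part of sahara): the module
# above is a thin pass-through to the conductor, so it must run inside an
# initialized sahara context. The keys of the values dict and the .id
# attribute of the result are assumptions based on sahara's job binary
# schema.
from sahara.service.api.v2 import job_binaries as jb_api

binary = jb_api.create_job_binary({
    'name': 'wordcount.jar',                   # hypothetical name
    'url': 'swift://container/wordcount.jar',  # hypothetical URL
})
print(jb_api.get_job_binary(binary.id))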
apache-2.0
camptocamp/mapproxy
mapproxy/seed/script.py
6
15430
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import errno
import os
import re
import signal
import sys
import time

import logging
from logging.config import fileConfig
from subprocess import Popen

from optparse import OptionParser, OptionValueError

from mapproxy.config.loader import load_configuration, ConfigurationError
from mapproxy.seed.config import load_seed_tasks_conf
from mapproxy.seed.seeder import seed, SeedInterrupted
from mapproxy.seed.cleanup import cleanup
from mapproxy.seed.util import (format_seed_task, format_cleanup_task,
                                ProgressLog, ProgressStore)
from mapproxy.seed.cachelock import CacheLocker
from mapproxy.compat import raw_input

SECONDS_PER_DAY = 60 * 60 * 24
SECONDS_PER_MINUTE = 60


def setup_logging(logging_conf=None):
    if logging_conf is not None:
        fileConfig(logging_conf, {'here': './'})

    mapproxy_log = logging.getLogger('mapproxy')
    mapproxy_log.setLevel(logging.WARN)

    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "[%(asctime)s] %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    mapproxy_log.addHandler(ch)


def check_duration(option, opt, value, parser):
    try:
        setattr(parser.values, option.dest, parse_duration(value))
    except ValueError:
        raise OptionValueError(
            "option %s: invalid duration value: %r, "
            "expected (10s, 15m, 0.5h, 3d, etc)" % (opt, value),
        )


def parse_duration(string):
    # the dot must be escaped, otherwise any character would be accepted
    # between the digits (e.g. '1x5m' would parse instead of failing)
    match = re.match(r'^(\d*\.?\d+)(s|m|h|d)', string)
    if not match:
        raise ValueError('invalid duration, not in format: 10s, 0.5h, etc.')
    duration = float(match.group(1))
    unit = match.group(2)
    if unit == 's':
        return duration
    duration *= 60
    if unit == 'm':
        return duration
    duration *= 60
    if unit == 'h':
        return duration
    duration *= 24
    return duration


class SeedScript(object):
    usage = "usage: %prog [options] seed_conf"
    parser = OptionParser(usage)
    parser.add_option("-q", "--quiet",
                      action="count", dest="quiet", default=0,
                      help="reduce number of messages to stdout, repeat to "
                           "disable progress output")
    parser.add_option("-s", "--seed-conf",
                      dest="seed_file", default=None,
                      help="seed configuration")
    parser.add_option("-f", "--proxy-conf",
                      dest="conf_file", default=None,
                      help="proxy configuration")
    parser.add_option("-c", "--concurrency", type="int",
                      dest="concurrency", default=2,
                      help="number of parallel seed processes")
    parser.add_option("-n", "--dry-run",
                      action="store_true", dest="dry_run", default=False,
                      help="do not seed, just print output")
    parser.add_option("-l", "--skip-geoms-for-last-levels",
                      type="int", dest="geom_levels", default=0,
                      metavar="N",
                      help="do not check for intersections between tiles"
                           " and seed geometries on the last N levels")
    parser.add_option("--summary",
                      action="store_true", dest="summary", default=False,
                      help="print summary with all seeding tasks and exit."
                           " does not seed anything.")
    parser.add_option("-i", "--interactive",
                      action="store_true", dest="interactive", default=False,
                      help="print each task description and ask if it "
                           "should be seeded")
    parser.add_option("--seed", action="append", dest="seed_names",
                      metavar='task1,task2,...',
                      help="seed only the named tasks. cleanup is disabled "
                           "unless --cleanup is used. use ALL to select "
                           "all tasks")
    parser.add_option("--cleanup", action="append", dest="cleanup_names",
                      metavar='task1,task2,...',
                      help="cleanup only the named tasks. seeding is "
                           "disabled unless --seed is used. use ALL to "
                           "select all tasks")
    parser.add_option("--use-cache-lock", action="store_true", default=False,
                      help="use locking to prevent multiple mapproxy-seed "
                           "calls to seed the same cache")
    parser.add_option("--continue", dest='continue_seed',
                      action="store_true", default=False,
                      help="continue an aborted seed progress")
    parser.add_option("--progress-file", dest='progress_file', default=None,
                      help="filename for storing the seed progress "
                           "(for --continue option)")
    parser.add_option("--duration", dest="duration",
                      help="stop seeding after (120s, 15m, 4h, 0.5d, etc)",
                      type=str, action="callback", callback=check_duration)
    parser.add_option("--reseed-file", dest="reseed_file",
                      help="start of last re-seed",
                      metavar="FILE", default=None)
    parser.add_option("--reseed-interval", dest="reseed_interval",
                      help="only start seeding if --reseed-file is older "
                           "than --reseed-interval",
                      metavar="DURATION",
                      type=str, action="callback", callback=check_duration,
                      default=None)
    parser.add_option("--log-config", dest='logging_conf', default=None,
                      help="logging configuration")

    def __call__(self):
        (options, args) = self.parser.parse_args()

        if len(args) != 1 and not options.seed_file:
            self.parser.print_help()
            sys.exit(1)

        if not options.seed_file:
            if len(args) != 1:
                self.parser.error('missing seed_conf file as last argument '
                                  'or --seed-conf option')
            else:
                options.seed_file = args[0]

        if not options.conf_file:
            self.parser.error('missing mapproxy configuration '
                              '-f/--proxy-conf')

        setup_logging(options.logging_conf)

        if options.duration:
            # calls with --duration are handled in call_with_duration
            sys.exit(self.call_with_duration(options, args))

        try:
            mapproxy_conf = load_configuration(options.conf_file, seed=True)
        except ConfigurationError as ex:
            print("ERROR: " + '\n\t'.join(str(ex).split('\n')))
            sys.exit(2)

        if options.use_cache_lock:
            cache_locker = CacheLocker('.mapproxy_seed.lck')
        else:
            cache_locker = None

        if not sys.stdout.isatty() and options.quiet == 0:
            # disable verbose output for non-ttys
            options.quiet = 1

        progress = None
        if options.continue_seed or options.progress_file:
            if not options.progress_file:
                options.progress_file = '.mapproxy_seed_progress'
            progress = ProgressStore(options.progress_file,
                                     continue_seed=options.continue_seed)

        if options.reseed_file:
            if not os.path.exists(options.reseed_file):
                # create --reseed-file if missing
                with open(options.reseed_file, 'w'):
                    pass
            else:
                if progress and not os.path.exists(options.progress_file):
                    # we have an existing --reseed-file but no
                    # --progress-file, meaning the last seed call
                    # was completed
                    if options.reseed_interval and (
                        os.path.getmtime(options.reseed_file) >
                            (time.time() - options.reseed_interval)
                    ):
                        print("no need for re-seeding")
                        sys.exit(1)
                    os.utime(options.reseed_file,
                             (time.time(), time.time()))

        with mapproxy_conf:
            try:
                seed_conf = load_seed_tasks_conf(options.seed_file,
                                                 mapproxy_conf)
                seed_names, cleanup_names = self.task_names(seed_conf,
                                                            options)
                seed_tasks = seed_conf.seeds(seed_names)
                cleanup_tasks = 
seed_conf.cleanups(cleanup_names) except ConfigurationError as ex: print("error in configuration: " + '\n\t'.join(str(ex).split('\n'))) sys.exit(2) if options.summary: print('========== Seeding tasks ==========') for task in seed_tasks: print(format_seed_task(task)) print('========== Cleanup tasks ==========') for task in cleanup_tasks: print(format_cleanup_task(task)) return 0 try: if options.interactive: seed_tasks, cleanup_tasks = self.interactive(seed_tasks, cleanup_tasks) if seed_tasks: print('========== Seeding tasks ==========') print('Start seeding process (%d task%s)' % ( len(seed_tasks), 's' if len(seed_tasks) > 1 else '')) logger = ProgressLog(verbose=options.quiet==0, silent=options.quiet>=2, progress_store=progress) seed(seed_tasks, progress_logger=logger, dry_run=options.dry_run, concurrency=options.concurrency, cache_locker=cache_locker, skip_geoms_for_last_levels=options.geom_levels) if cleanup_tasks: print('========== Cleanup tasks ==========') print('Start cleanup process (%d task%s)' % ( len(cleanup_tasks), 's' if len(cleanup_tasks) > 1 else '')) logger = ProgressLog(verbose=options.quiet==0, silent=options.quiet>=2, progress_store=progress) cleanup(cleanup_tasks, verbose=options.quiet==0, dry_run=options.dry_run, concurrency=options.concurrency, progress_logger=logger, skip_geoms_for_last_levels=options.geom_levels) except SeedInterrupted: print('\ninterrupted...') return 3 except KeyboardInterrupt: print('\nexiting...') return 2 if progress: progress.remove() def task_names(self, seed_conf, options): seed_names = cleanup_names = [] if options.seed_names: seed_names = split_comma_seperated_option(options.seed_names) if seed_names == ['ALL']: seed_names = None else: avail_seed_names = seed_conf.seed_tasks_names() missing = set(seed_names).difference(avail_seed_names) if missing: print('unknown seed tasks: %s' % (', '.join(missing), )) print('available seed tasks: %s' % (', '.join(avail_seed_names), )) sys.exit(1) elif not options.cleanup_names: seed_names = None # seed all if options.cleanup_names: cleanup_names = split_comma_seperated_option(options.cleanup_names) if cleanup_names == ['ALL']: cleanup_names = None else: avail_cleanup_names = seed_conf.cleanup_tasks_names() missing = set(cleanup_names).difference(avail_cleanup_names) if missing: print('unknown cleanup tasks: %s' % (', '.join(missing), )) print('available cleanup tasks: %s' % (', '.join(avail_cleanup_names), )) sys.exit(1) elif not options.seed_names: cleanup_names = None # cleanup all return seed_names, cleanup_names def call_with_duration(self, options, args): # --duration is implemented by calling mapproxy-seed again in a separate # process (but without --duration) and terminating that process # after --duration argv = sys.argv[:] for i, arg in enumerate(sys.argv): if arg == '--duration': argv = sys.argv[:i] + sys.argv[i+2:] break elif arg.startswith('--duration='): argv = sys.argv[:i] + sys.argv[i+1:] break # call mapproxy-seed again, poll status, terminate after --duration cmd = Popen(args=argv) start = time.time() while True: if (time.time() - start) > options.duration: try: cmd.send_signal(signal.SIGINT) # try to stop with sigint # send sigterm after 10 seconds for _ in range(10): time.sleep(1) if cmd.poll() is not None: break else: cmd.terminate() except OSError as ex: if ex.errno != errno.ESRCH: # no such process raise return 0 if cmd.poll() is not None: return cmd.returncode try: time.sleep(1) except KeyboardInterrupt: # force termination start = 0 def interactive(self, seed_tasks, 
cleanup_tasks): selected_seed_tasks = [] print('========== Select seeding tasks ==========') for task in seed_tasks: print(format_seed_task(task)) if ask_yes_no_question(' Seed this task (y/n)? '): selected_seed_tasks.append(task) seed_tasks = selected_seed_tasks selected_cleanup_tasks = [] print('========== Select cleanup tasks ==========') for task in cleanup_tasks: print(format_cleanup_task(task)) if ask_yes_no_question(' Cleanup this task (y/n)? '): selected_cleanup_tasks.append(task) cleanup_tasks = selected_cleanup_tasks return seed_tasks, cleanup_tasks def main(): return SeedScript()() def ask_yes_no_question(question): while True: resp = raw_input(question).lower() if resp in ('y', 'yes'): return True elif resp in ('n', 'no'): return False def split_comma_seperated_option(option): """ >>> split_comma_seperated_option(['foo,bar', 'baz']) ['foo', 'bar', 'baz'] """ result = [] if option: for args in option: result.extend(args.split(',')) return result if __name__ == '__main__': main()
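# --- Quick self-check for parse_duration defined above (illustrative);
# the expected values follow directly from the s/m/h/d multipliers and
# the module's own constants.
assert parse_duration('120s') == 120.0
assert parse_duration('15m') == 15 * SECONDS_PER_MINUTE
assert parse_duration('4h') == 4 * 60 * SECONDS_PER_MINUTE
assert parse_duration('0.5d') == 0.5 * SECONDS_PER_DAY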
apache-2.0