repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
hujiajie/chromium-crosswalk | build/android/devil/utils/zip_utils.py | 69 | 1212 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import zipfile
def WriteToZipFile(zip_file, path, arc_path):
"""Recursively write |path| to |zip_file| as |arc_path|.
zip_file: An open instance of zipfile.ZipFile.
path: An absolute path to the file or directory to be zipped.
arc_path: A relative path within the zip file to which the file or directory
located at |path| should be written.
"""
if os.path.isdir(path):
for dir_path, _, file_names in os.walk(path):
dir_arc_path = os.path.join(arc_path, os.path.relpath(dir_path, path))
logging.debug('dir: %s -> %s', dir_path, dir_arc_path)
zip_file.write(dir_path, dir_arc_path, zipfile.ZIP_STORED)
for f in file_names:
file_path = os.path.join(dir_path, f)
file_arc_path = os.path.join(dir_arc_path, f)
logging.debug('file: %s -> %s', file_path, file_arc_path)
zip_file.write(file_path, file_arc_path, zipfile.ZIP_DEFLATED)
else:
logging.debug('file: %s -> %s', path, arc_path)
zip_file.write(path, arc_path, zipfile.ZIP_DEFLATED)
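# A minimal usage sketch (not part of the original module; the paths and
# archive name below are hypothetical):
#   with zipfile.ZipFile('/tmp/out.zip', 'w') as zf:
#     WriteToZipFile(zf, '/tmp/some_dir', 'some_dir')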
| bsd-3-clause |
dyninc/dns_lg | ldnsx.py | 1 | 36283 | # Copyright (c) 2011, Xelerance
# Author: Christopher Olah <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Xelerance nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Easy DNS (including DNSSEC) via ldns.
ldns is a great library. It is a powerful tool for
working with DNS. python-ldns is a straight-up clone of the C
interface, however that is not a very good interface for Python. Its
documentation is incomplete, some functions don't work as
described, and some objects don't have a full Python API.
ldnsx aims to fix this. It wraps around the ldns python bindings,
working around its limitations and providing a well-documented, more
pythonic interface.
**WARNING:**
**API subject to change.** No backwards compatibility guarantee. Write software using this version at your own risk!
Examples
--------
Query the default resolver for google.com's A records. Print the response
packet.
>>> import ldnsx
>>> resolver = ldnsx.resolver()
>>> print resolver.query("google.com","A")
Print the root NS records from f.root-servers.net if we get a
response, else print an error message.
>>> import ldnsx
>>> pkt = ldnsx.resolver("f.root-servers.net").query(".", "NS")
>>> if pkt:
>>> for rr in pkt.answer():
>>> print rr
>>> else:
>>> print "response not received"
"""
import time, sys, calendar, warnings, socket
try:
import ldns
except ImportError:
print >> sys.stderr, "ldnsx requires the ldns-python sub-package from http://www.nlnetlabs.nl/projects/ldns/"
print >> sys.stderr, "Fedora/CentOS: yum install ldns-python"
print >> sys.stderr, "Debian/Ubuntu: apt-get install python-ldns"
print >> sys.stderr, "openSUSE: zypper in python-ldns"
sys.exit(1)
__version__ = "0.1"
def isValidIP(ipaddr):
"""Return 4 for a valid IPv4 address, 6 for a valid IPv6 address, 0 otherwise."""
try:
socket.inet_pton(socket.AF_INET, ipaddr)
return 4
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, ipaddr)
return 6
except socket.error:
return 0
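# Illustrative behaviour (addresses are documentation examples):
#   isValidIP("192.0.2.1") -> 4
#   isValidIP("2001:db8::1") -> 6
#   isValidIP("not-an-ip") -> 0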
def query(name, rr_type, rr_class="IN", flags=["RD"], tries = 3, res=None):
"""Convenience function. Creates a resolver and then queries it. Refer to resolver.query()
* name -- domain to query for
* rr_type -- rr_type to query for
* flags -- flags for query (list of strings)
* tries -- number of times to retry the query on failure
* res -- configuration for the resolver as a dict, arg list/tuple, or nameserver string -- see resolver()
"""
if isinstance(res, list) or isinstance(res, tuple):
res = resolver(*res)
elif isinstance(res, dict):
res = resolver(**res)
else:
res = resolver(res)
return res.query(name, rr_type, rr_class, flags, tries)
def get_rrs(name, rr_type, rr_class="IN", tries = 3, strict = False, res=None, **kwds):
"""Convenience function. Gets RRs for name of type rr_type trying tries times.
If strict, it raises an exception on failure; otherwise it returns [].
* name -- domain to query for
* rr_type -- rr_type to query for
* tries -- number of times to retry the query on failure
* strict -- if the query fails, do we return [] or raise an exception?
* res -- configuration for the resolver as a dict, arg list/tuple, or nameserver string -- see resolver()
* kwds -- query filters, refer to packet.answer()
"""
if isinstance(res, list) or isinstance(res, tuple):
res = resolver(*res)
elif isinstance(res, dict):
res = resolver(**res)
else:
res = resolver(res)
if "|" in rr_type:
pkt = res.query(name, "ANY", rr_class=rr_class, tries=tries)
else:
pkt = res.query(name, rr_type, rr_class=rr_class, tries=tries)
if pkt:
if rr_type in ["", "ANY", "*"]:
return pkt.answer( **kwds)
else:
return pkt.answer(rr_type=rr_type, **kwds)
else:
if strict:
raise Exception("LDNS couldn't complete query")
else:
return []
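# A hedged usage sketch (the domain is illustrative):
#   for rr in get_rrs("example.com", "NS"):
#     print rr.name_server()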
def secure_query(name, rr_type, rr_class="IN", flags=["RD"], tries = 1, flex=False, res=None):
"""Convenience function. Creates a resolver and then does a DNSSEC query. Refer to resolver.query()
* name -- domain to query for
* rr_type -- rr_type to query for
* flags -- flags for query (list of strings)
* tries -- number of times to retry the query on failure
* flex -- if we can't verify data, exception or warning?
* res -- configuration for the resolver as a dict, arg list/tuple, or nameserver string -- see resolver()"""
if isinstance(res, list) or isinstance(res, tuple):
res = resolver(*res)
elif isinstance(res, dict):
res = resolver(**res)
else:
res = resolver(res)
pkt = res.query(name, rr_type, rr_class, flags, tries)
if pkt.rcode() == "SERVFAIL":
raise Exception("%s lookup failed (server error or dnssec validation failed)" % name)
if pkt.rcode() == "NXDOMAIN":
if "AD" in pkt.flags():
raise Exception("%s lookup failed (non-existence proven by DNSSEC)" % hostname )
else:
raise Exception("%s lookup failed" % hostname )
if pkt.rcode() == "NOERROR":
if "AD" not in pkt.flags():
if not flex:
raise Exception("DNS lookup was insecure")
else:
warnings.warn("DNS lookup was insecure")
return pkt
else:
raise Exception("unknown ldns error, %s" % pkt.rcode())
class resolver:
""" A wrapper around ldns.ldns_resolver.
**Examples**
Making resolvers is easy!
>>> from ldnsx import resolver
>>> resolver() # from /etc/resolv.conf
<resolver: 192.168.111.9>
>>> resolver("") # resolver with no nameservers
<resolver: >
>>> resolver("193.110.157.135") #resolver pointing to ip addr
<resolver: 193.110.157.135>
>>> resolver("f.root-servers.net") # resolver pointing ip address(es) resolved from name
<resolver: 2001:500:2f::f, 192.5.5.241>
>>> resolver("193.110.157.135, 193.110.157.136")
>>> # resolver pointing to multiple ip addr, first takes precedence.
<resolver: 193.110.157.136, 193.110.157.135>
So is playing around with their nameservers!
>>> import ldnsx
>>> res = ldnsx.resolver("192.168.1.1")
>>> res.add_nameserver("192.168.1.2")
>>> res.add_nameserver("192.168.1.3")
>>> res.nameservers_ip()
["192.168.1.1","192.168.1.2","192.168.1.3"]
And querying!
>>> from ldnsx import resolver
>>> res= resolver()
>>> res.query("cow.com","A")
;; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 7663
;; flags: qr rd ra ; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;; cow.com. IN A
;; ANSWER SECTION:
cow.com. 300 IN A 208.87.34.18
;; AUTHORITY SECTION:
;; ADDITIONAL SECTION:
;; Query time: 313 msec
;; SERVER: 192.168.111.9
;; WHEN: Fri Jun 3 11:01:02 2011
;; MSG SIZE rcvd: 41
"""
def __init__(self, ns = None, dnssec = False, tcp = False, port = 53):
"""resolver constructor
* ns -- the nameserver/comma delimited nameserver list
defaults to settings from /etc/resolv.conf
* dnssec -- should the resolver try and use dnssec or not?
* tcp -- should the resolver use TCP
'auto' is a deprecated workaround for old ldns problems
* port -- the port to use, must be the same for all nameservers
"""
# We construct based on a file and dump the nameservers rather than using
# ldns_resolver_new() to avoid environment/configuration/magic specific
# bugs.
self._ldns_resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf")
if ns is not None:
self.drop_nameservers()
nm_list = ns.split(',')
nm_list = map(lambda s: s.strip(), nm_list)
nm_list = filter(lambda s: s != "", nm_list)
nm_list.reverse()
for nm in nm_list:
self.add_nameserver(nm)
# Configure DNSSEC, tcp and port
self.set_dnssec(dnssec)
if tcp == 'auto':
self.autotcp = True
self._ldns_resolver.set_usevc(False)
else:
self.autotcp = False
self._ldns_resolver.set_usevc(tcp)
self._ldns_resolver.set_port(port)
def query(self, name, rr_type, rr_class="IN", flags=["RD"], tries = 3):
"""Run a query on the resolver.
* name -- name to query for
* rr_type -- the record type to query for
* rr_class -- the class to query for, defaults to IN (Internet)
* flags -- the flags to send the query with
* tries -- the number of times to attempt the query in case of packet loss, etc.
**Examples**
Let's get some A records!
>>> google_a_records = resolver.query("google.com","A").answer()
Using DNSSEC is easy :)
>>> dnssec_pkt = ldnsx.resolver(dnssec=True).query("xelerance.com")
We let you use strings to make things easy, but if you prefer stay close to DNS...
>>> AAAA = 28
>>> resolver.query("ipv6.google.com", AAAA)
**More about rr_type**
rr_type must be a supported resource record type. There are a large number of RR types:
=========== =================================== ==================
TYPE Value and meaning Reference
=========== =================================== ==================
A 1 a host address [RFC1035]
NS 2 an authoritative name server [RFC1035]
...
AAAA 28 IP6 Address [RFC3596]
...
DS 43 Delegation Signer [RFC4034][RFC3658]
...
DNSKEY 48 DNSKEY [RFC4034][RFC3755]
...
Unassigned 32770-65279
Private use 65280-65534
Reserved 65535
=========== =================================== ==================
(From http://www.iana.org/assignments/dns-parameters)
RR types are given as a string (eg. "A"). In the case of Unassigned/Private use/Reserved ones,
they are given as "TYPEXXXXX" where XXXXX is the number. ie. RR type 65280 is "TYPE65280". You
may also pass the integer, but you will always be given the string.
If the version of ldnsx you are using is old, it is possible that there could be new rr_types that
we don't recognize a mnemonic for. You can still use the number XXX or the string "TYPEXXX". To
determine what rr_type mnemonics we support, please refer to resolver.supported_rr_types()
"""
# Determine rr_type int
if rr_type in _rr_types:
_rr_type = _rr_types[rr_type]
elif isinstance(rr_type,int):
_rr_type = rr_type
elif isinstance(rr_type,str) and rr_type[0:4] == "TYPE":
try:
_rr_type = int(rr_type[4:])
except ValueError:
raise Exception("%s is a bad RR type. TYPEXXXXX: XXXXX must be a number" % rr_type)
else:
raise Exception("ldnsx (version %s) does not support the RR type %s." % (__version__, str(rr_type)) )
# Determine rr_class int
if rr_class == "IN": _rr_class = ldns.LDNS_RR_CLASS_IN
elif rr_class == "CH": _rr_class = ldns.LDNS_RR_CLASS_CH
elif rr_class == "HS": _rr_class = ldns.LDNS_RR_CLASS_HS
else:
raise Exception("ldnsx (version %s) does not support the RR class %s." % (__version__, str(rr_class)) )
# Determine flags int
_flags = 0
if "QR" in flags: _flags |= ldns.LDNS_QR
if "AA" in flags: _flags |= ldns.LDNS_AA
if "TC" in flags: _flags |= ldns.LDNS_TC
if "RD" in flags: _flags |= ldns.LDNS_RD
if "CD" in flags: _flags |= ldns.LDNS_CD
if "RA" in flags: _flags |= ldns.LDNS_RA
if "AD" in flags: _flags |= ldns.LDNS_AD
# Query
if tries == 0: return None
try:
pkt = self._ldns_resolver.query(name, _rr_type, _rr_class, _flags)
except KeyboardInterrupt: # Since so much time is spent waiting on ldns, this is a very common place for Ctrl-C to land
raise
except: # Since the ldns exception is not very descriptive...
raise Exception("ldns backend ran into problems. Likely, the name you were querying for, %s, was invalid." % name)
#Deal with failed queries
if not pkt:
if tries <= 1:
return None
else:
# One of the major causes of None packets is truncation of packets.
# When autotcp is set, we are in a flexible enough position to try and use tcp
# to get around this.
# Either way, we want to replace the resolver, since resolvers will sometimes
# just freeze up.
if self.autotcp:
self = resolver( ",".join(self.nameservers_ip()),tcp=True, dnssec = self._ldns_resolver.dnssec())
self.autotcp = True
pkt = self.query(name, rr_type, rr_class=rr_class, flags=flags, tries = tries-1)
self._ldns_resolver.set_usevc(False)
return pkt
else:
self = resolver( ",".join(self.nameservers_ip()), tcp = self._ldns_resolver.usevc(), dnssec = self._ldns_resolver.dnssec() )
time.sleep(1) # It could be that things are failing because of a brief outage
return self.query(name, rr_type, rr_class=rr_class, flags=flags, tries = tries-1)
elif self.autotcp:
pkt = packet(pkt)
if "TC" in pkt.flags():
self._ldns_resolver.set_usevc(True)
pkt2 = self.query(name, rr_type, rr_class=rr_class, flags=flags, tries = tries-1)
self._ldns_resolver.set_usevc(False)
if pkt2: return packet(pkt2)
return pkt
return packet(pkt)
#ret = []
#for rr in pkt.answer().rrs():
# ret.append([str(rr.owner()),rr.ttl(),rr.get_class_str(),rr.get_type_str()]+[str(rdf) for rdf in rr.rdfs()])
#return ret
def supported_rr_types(self):
""" Returns the supported DNS resource record types.
Refer to resolver.query() for thorough documentation of resource
record types or refer to:
http://www.iana.org/assignments/dns-parameters
"""
return _rr_types.keys()
def AXFR(self,name):
"""AXFR for name
* name -- name to AXFR for
This function is a generator. As it AXFRs it will yield you the records.
**Example**
Let's get a list of the tlds (gotta catch em all!):
>>> tlds = []
>>> for rr in resolver("f.root-servers.net").AXFR("."):
>>> if rr.rr_type() == "NS":
>>> tlds.append(rr.owner())
"""
# Wrapping the name in ldns_dname seems unnecessary on some systems, but is required on others. Avoid bugs.
status = self._ldns_resolver.axfr_start(ldns.ldns_dname(name), ldns.LDNS_RR_CLASS_IN)
if status != ldns.LDNS_STATUS_OK:
raise Exception("Starting AXFR failed. Error: %s" % ldns.ldns_get_errorstr_by_id(status))
pres = self._ldns_resolver.axfr_next()
while pres:
yield resource_record(pres)
pres = self._ldns_resolver.axfr_next()
def nameservers_ip(self):
""" returns a list of the resolvers nameservers (as IP addr)
"""
nm_stack2 =[]
nm_str_stack2=[]
nm = self._ldns_resolver.pop_nameserver()
while nm:
nm_stack2.append(nm)
nm_str_stack2.append(str(nm))
nm = self._ldns_resolver.pop_nameserver()
for nm in nm_stack2:
self._ldns_resolver.push_nameserver(nm)
nm_str_stack2.reverse()
return nm_str_stack2
def add_nameserver(self,ns):
""" Add a nameserver, IPv4/IPv6/name.
"""
if isValidIP(ns) == 4:
address = ldns.ldns_rdf_new_frm_str(ldns.LDNS_RDF_TYPE_A,ns)
self._ldns_resolver.push_nameserver(address)
elif isValidIP(ns) == 6:
address = ldns.ldns_rdf_new_frm_str(ldns.LDNS_RDF_TYPE_AAAA,ns)
self._ldns_resolver.push_nameserver(address)
else:
resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf")
#address = resolver.get_addr_by_name(ns)
address = resolver.get_addr_by_name(ldns.ldns_dname(ns))
if not address:
address = resolver.get_addr_by_name(ldns.ldns_dname(ns))
if not address:
raise Exception("Failed to resolve address for %s" % ns)
for rr in address.rrs():
self._ldns_resolver.push_nameserver_rr(rr)
def drop_nameservers(self):
"""Drops all nameservers.
This function causes the resolver to forget all nameservers.
"""
while self._ldns_resolver.pop_nameserver():
pass
def set_nameservers(self, nm_list):
"""Takes a list of nameservers and sets the resolver to use them
"""
self.drop_nameservers()
for nm in nm_list:
self.add_nameserver(nm)
def __repr__(self):
return "<resolver: %s>" % ", ".join(self.nameservers_ip())
__str__ = __repr__
def set_dnssec(self,new_dnssec_status):
"""Set whether the resolver uses DNSSEC.
"""
self._ldns_resolver.set_dnssec(new_dnssec_status)
class packet:
def _construct_rr_filter(self, **kwds):
def match(pattern, target):
if pattern[0] in ["<",">","!"]:
rel = pattern[0]
pattern=pattern[1:]
elif pattern[0:2] in ["<=","=>"]:
rel = pattern[0:2]
pattern=pattern[2:]
else:
rel = "="
for val in pattern.split("|"):
if {"<" : target < val,
">" : target > val,
"!" : target != val,
"=" : target == val,
">=": target >= val,
"<=": target <= val}[rel]:
return True
return False
def f(rr):
for key in kwds.keys():
if ( ( isinstance(kwds[key], list) and str(rr[key]) not in map(str,kwds[key]) )
or ( not isinstance(kwds[key], list) and not match(str(kwds[key]), str(rr[key])))):
return False
return True
return f
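# Illustrative matcher semantics (values hypothetical): match("A|NS", "NS")
# is True; match("!NS", "NS") is False. Note that relational operators like
# ">=100" compare lexicographically on strings, not numerically.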
def __init__(self, pkt):
self._ldns_pkt = pkt
def __repr__(self):
return str(self._ldns_pkt)
__str__ = __repr__
def rcode(self):
"""Returns the rcode.
Example returned value: "NOERROR"
possible rcodes (via ldns): "FORMERR", "MASK", "NOERROR",
"NOTAUTH", "NOTIMPL", "NOTZONE", "NXDOMAIN",
"NXRSET", "REFUSED", "SERVFAIL", "SHIFT",
"YXDOMAIN", "YXRRSET"
Refer to http://www.iana.org/assignments/dns-parameters
section: DNS RCODEs
"""
return self._ldns_pkt.rcode2str()
def opcode(self):
"""Returns the rcode.
Example returned value: "QUERY"
"""
return self._ldns_pkt.opcode2str()
def query_time(self):
"""Returns query time (in ms)."""
return self._ldns_pkt.querytime()
def answer_from(self):
"""Returns the server that answered."""
return str(self._ldns_pkt.answerfrom())
def flags(self):
"""Return packet flags (as list of strings).
Example returned value: ['QR', 'RA', 'RD']
**What are the flags?**
======== ==== ===================== =========
Bit Flag Description Reference
======== ==== ===================== =========
bit 5 AA Authoritative Answer [RFC1035]
bit 6 TC Truncated Response [RFC1035]
bit 7 RD Recursion Desired [RFC1035]
bit 8 RA Recursion Available [RFC1035]
bit 9 Reserved
bit 10 AD Authentic Data [RFC4035]
bit 11 CD Checking Disabled [RFC4035]
======== ==== ===================== =========
(from http://www.iana.org/assignments/dns-parameters)
There is also QR. It is mentioned in other sources,
though not the above page. QR being false means that
the packet is a query; QR being true means that it is
a response.
"""
ret = []
if self._ldns_pkt.aa(): ret += ["AA"]
if self._ldns_pkt.ad(): ret += ["AD"]
if self._ldns_pkt.cd(): ret += ["CD"]
if self._ldns_pkt.qr(): ret += ["QR"]
if self._ldns_pkt.ra(): ret += ["RA"]
if self._ldns_pkt.rd(): ret += ["RD"]
if self._ldns_pkt.tc(): ret += ["TC"]
return ret
def answer(self, **filters):
"""Returns the answer section.
* filters -- a filtering mechanism
Since a very common desire is to filter the resource records in a packet
section, we provide a special tool for doing this: filters. They are a
lot like regular python filters, but more convenient. If you set a
field equal to some value, you will only receive resource records for which
it holds true.
**Examples**
>>> res = ldnsx.resolver()
>>> pkt = res.query("google.ca","A")
>>> pkt.answer()
[google.ca. 28 IN A 74.125.91.99
, google.ca. 28 IN A 74.125.91.105
, google.ca. 28 IN A 74.125.91.147
, google.ca. 28 IN A 74.125.91.103
, google.ca. 28 IN A 74.125.91.104
, google.ca. 28 IN A 74.125.91.106
]
To understand filters, consider the following:
>>> pkt = ldnsx.query("cow.com","ANY")
>>> pkt.answer()
[cow.com. 276 IN A 208.87.32.75
, cow.com. 3576 IN NS sell.internettraffic.com.
, cow.com. 3576 IN NS buy.internettraffic.com.
, cow.com. 3576 IN SOA buy.internettraffic.com. hostmaster.hostingnet.com. 1308785320 10800 3600 604800 3600
]
>>> pkt.answer(rr_type="A")
[cow.com. 276 IN A 208.87.32.75
]
>>> pkt.answer(rr_type="A|NS")
[cow.com. 276 IN A 208.87.32.75
, cow.com. 3576 IN NS sell.internettraffic.com.
, cow.com. 3576 IN NS buy.internettraffic.com.
]
>>> pkt.answer(rr_type="!NS")
[cow.com. 276 IN A 208.87.32.75
, cow.com. 3576 IN SOA buy.internettraffic.com. hostmaster.hostingnet.com. 1308785320 10800 3600 604800 3600
]
fields are the same as when indexing a resource record.
note: ordering is alphabetical.
"""
ret = [resource_record(rr) for rr in self._ldns_pkt.answer().rrs()]
return filter(self._construct_rr_filter(**filters), ret)
def authority(self, **filters):
"""Returns the authority section.
* filters -- a filtering mechanism
Since a very common desire is to filter the resource records in a packet
section, we provide a special tool for doing this: filters. They are a
lot like regular python filters, but more convenient. If you set a
field equal to some value, you will only receive resource records for which
it holds true. See answer() for details.
**Examples**
>>> res = ldnsx.resolver()
>>> pkt = res.query("google.ca","A")
>>> pkt.authority()
[google.ca. 251090 IN NS ns3.google.com.
, google.ca. 251090 IN NS ns1.google.com.
, google.ca. 251090 IN NS ns2.google.com.
, google.ca. 251090 IN NS ns4.google.com.
]
"""
ret = [resource_record(rr) for rr in self._ldns_pkt.authority().rrs()]
return filter(self._construct_rr_filter(**filters), ret)
def additional(self, **filters):
"""Returns the additional section.
* filters -- a filtering mechanism
Since a very common desire is to filter the resource records in a packet
section, we provide a special tool for doing this: filters. They are a
lot like regular python filters, but more convenient. If you set a
field equal to some value, you will only receive resource records for which
it holds true. See answer() for details.
**Examples**
>>> res = ldnsx.resolver()
>>> pkt = res.query("google.ca","A")
>>> pkt.additional()
[ns3.google.com. 268778 IN A 216.239.36.10
, ns1.google.com. 262925 IN A 216.239.32.10
, ns2.google.com. 255659 IN A 216.239.34.10
, ns4.google.com. 264489 IN A 216.239.38.10
]
"""
ret = [resource_record(rr) for rr in self._ldns_pkt.additional().rrs()]
return filter(self._construct_rr_filter(**filters), ret)
def question(self, **filters):
"""Returns the question section.
* filters -- a filtering mechanism
Since a very common desire is to filter the resource records in a packet
section, we provide a special tool for doing this: filters. They are a
lot like regular python filters, but more convenient. If you set a
field equal to some value, you will only receive resource records for which
it holds true. See answer() for details.
"""
ret = [resource_record(rr) for rr in self._ldns_pkt.question().rrs()]
return filter(self._construct_rr_filter(**filters), ret)
class resource_record:
_rdfs = None
_iter_pos = None
def __init__(self, rr):
self._ldns_rr = rr
self._rdfs = [str(rr.owner()),rr.ttl(),rr.get_class_str(),rr.get_type_str()]+[str(rdf) for rdf in rr.rdfs()]
def __repr__(self):
return str(self._ldns_rr)
__str__ = __repr__
def __iter__(self):
self._iter_pos = 0
return self
def next(self):
if self._iter_pos < len(self._rdfs):
self._iter_pos += 1
return self._rdfs[self._iter_pos-1]
else:
raise StopIteration
def __len__(self):
try:
return len(self._rdfs)
except TypeError:
return 0
def __getitem__(self, n):
if isinstance(n, int):
return self._rdfs[n]
elif isinstance(n, str):
n = n.lower()
if n in ["owner"]:
return self.owner()
elif n in ["rr_type", "rr type", "type"]:
return self.rr_type()
elif n in ["rr_class", "rr class", "class"]:
return self.rr_class()
elif n in ["covered_type", "covered type", "type2"]:
return self.covered_type()
elif n in ["ttl"]:
return self.ttl()
elif n in ["ip"]:
return self.ip()
elif n in ["alg", "algorithm"]:
return self.alg()
elif n in ["protocol"]:
return self.protocol()
elif n in ["flags"]:
return self.flags()
else:
raise Exception("ldnsx (version %s) does not recognize the rr field %s" % (__version__,n) )
else:
raise TypeError("bad type %s for index resource record" % type(n) )
#def rdfs(self):
# return self._rdfs.clone()
def owner(self):
"""Get the RR's owner"""
return str(self._ldns_rr.owner())
def rr_type(self):
"""Get a RR's type """
return self._ldns_rr.get_type_str()
def covered_type(self):
"""Get an RRSIG RR's covered type"""
if self.rr_type() == "RRSIG":
return self[4]
else:
return ""
def rr_class(self):
"""Get the RR's collapse"""
return self._ldns_rr.get_class_str()
def ttl(self):
"""Get the RR's TTL"""
return self._ldns_rr.ttl()
def inception(self, out_format="UTC"):
"""returns the inception time in format out_format, defaulting to a UTC string.
options for out_format are:
UTC -- a UTC string eg. 20110712192610 (2011/07/12 19:26:10)
unix -- number of seconds since the epoch, Jan 1, 1970
relative -- seconds from now until the inception time (negative if in the past)
struct_time -- the format used by python's time library
"""
# Something very strange is going on with inception/expiration dates in DNS.
# According to RFC 4034 section 3.1.5 (http://tools.ietf.org/html/rfc4034#page-9)
# the inception/expiration fields should be in seconds since Jan 1, 1970, the Unix
# epoch (as is standard in unix). Yet all the packets I've seen provide UTC encoded
# as a string instead, eg. "20110712192610" which is 2011/07/12 19:26:10.
#
# It turns out that this is a standard thing that ldns is doing before the data gets
# to us.
if self.rr_type() == "RRSIG":
if out_format.lower() in ["utc", "utc str", "utc_str"]:
return self[9]
elif out_format.lower() in ["unix", "posix", "ctime"]:
return calendar.timegm(time.strptime(self[9], "%Y%m%d%H%M%S"))
elif out_format.lower() in ["relative"]:
return calendar.timegm(time.strptime(self[9], "%Y%m%d%H%M%S")) - time.time()
elif out_format.lower() in ["struct_time", "time.struct_time"]:
return time.strptime(self[9], "%Y%m%d%H%M%S")
else:
raise Exception("unrecognized time format")
else:
return ""
def expiration(self, out_format="UTC"):
"""get expiration time. see inception() for more information"""
if self.rr_type() == "RRSIG":
if out_format.lower() in ["utc", "utc str", "utc_str"]:
return self[8]
elif out_format.lower() in ["unix", "posix", "ctime"]:
return calendar.timegm(time.strptime(self[8], "%Y%m%d%H%M%S"))
elif out_format.lower() in ["relative"]:
return calendar.timegm(time.strptime(self[8], "%Y%m%d%H%M%S")) - time.time()
elif out_format.lower() in ["struct_time", "time.struct_time"]:
return time.strptime(self[8], "%Y%m%d%H%M%S")
else:
raise Exception("unrecognized time format")
else:
return ""
def ip(self):
""" IP address from A/AAAA record"""
if self.rr_type() in ["A", "AAAA"]:
return self[4]
else:
raise Exception("ldnsx does not support ip for records other than A/AAAA")
def name_server(self):
""" Name Server from NS record"""
if self.rr_type() in ["NS"]:
return self[4]
else:
raise Exception("ldnsx does not support name server for records other than NS")
def mx_exchange(self):
""" Exchange from MX record"""
if self.rr_type() in ["MX"]:
return self[5]
else:
raise Exception("ldnsx does not support exchange for records other than MX")
def mx_priority(self):
""" Exchange Priority from MX record"""
if self.rr_type() in ["MX"]:
return self[4]
else:
raise Exception("ldnsx does not support exchange for records other than MX")
def txt_value(self):
""" TXT field from TXT record"""
if self.rr_type() in ["TXT"]:
return self[4]
else:
raise Exception("ldnsx does not support text value for records other than TXT")
def soa_maintainer(self):
""" Maintainer Name from SOA record"""
if self.rr_type() in ["SOA"]:
return self[5]
else:
raise Exception("ldnsx does not support maintainer name for records other than SOA")
def soa_master(self):
""" Master Server Name from SOA record"""
if self.rr_type() in ["SOA"]:
return self[4]
else:
raise Exception("ldnsx does not support master server name for records other than SOA")
def soa_serial(self):
""" Serial Number from SOA record"""
if self.rr_type() in ["SOA"]:
return self[6]
else:
raise Exception("ldnsx does not support serial number for records other than SOA")
def soa_refresh(self):
""" Refresh Interval from SOA record"""
if self.rr_type() in ["SOA"]:
return self[7]
else:
raise Exception("ldnsx does not support refresh interval for records other than SOA")
def soa_retry(self):
""" Retry Interval from SOA record"""
if self.rr_type() in ["SOA"]:
return self[8]
else:
raise Exception("ldnsx does not support retry interval for records other than SOA")
def soa_expire(self):
""" Expire Interval from SOA record"""
if self.rr_type() in ["SOA"]:
return self[9]
else:
raise Exception("ldnsx does not support expire interval for records other than SOA")
def soa_negative_ttl(self):
""" Negative TTL from SOA record"""
if self.rr_type() in ["SOA"]:
return self[10]
else:
raise Exception("ldnsx does not support negative TTL for records other than SOA")
def cname(self):
""" Canonical Name field from CNAME record"""
if self.rr_type() in ["CNAME"]:
return self[4]
else:
raise Exception("ldnsx does not support canonical name for records other than CNAME")
def alg(self):
"""Returns algorithm of RRSIG/DNSKEY/DS"""
t = self.rr_type()
if t == "RRSIG":
return int(self[5])
elif t == "DNSKEY":
return int(self[6])
elif t == "DS":
return int(self[5])
else:
return -1
def protocol(self):
""" Returns proticol of the DNSKEY"""
t = self.rr_type()
if t == "DNSKEY":
return int(self[5])
else:
return -1
def flags(self, number = False):
"""Return RR flags for DNSKEY """
t = self.rr_type()
if t == "DNSKEY":
ret = []
n = int(self[4])
if number:
return n
else:
for m in range(16):
if 2**(15-m) & n:
if m == 7: ret.append("ZONE")
elif m == 8: ret.append("REVOKE")
elif m ==15: ret.append("SEP")
else: ret.append(m)
else:
return []
_rr_types={
"A" : ldns.LDNS_RR_TYPE_A,
"A6" : ldns.LDNS_RR_TYPE_A6,
"AAAA" : ldns.LDNS_RR_TYPE_AAAA,
"AFSDB": ldns.LDNS_RR_TYPE_AFSDB,
"ANY" : ldns.LDNS_RR_TYPE_ANY,
"APL" : ldns.LDNS_RR_TYPE_APL,
"ATMA" : ldns.LDNS_RR_TYPE_ATMA,
"AXFR" : ldns.LDNS_RR_TYPE_AXFR,
"CERT" : ldns.LDNS_RR_TYPE_CERT,
"CNAME": ldns.LDNS_RR_TYPE_CNAME,
"COUNT": ldns.LDNS_RR_TYPE_COUNT,
"DHCID": ldns.LDNS_RR_TYPE_DHCID,
"DLV" : ldns.LDNS_RR_TYPE_DLV,
"DNAME": ldns.LDNS_RR_TYPE_DNAME,
"DNSKEY": ldns.LDNS_RR_TYPE_DNSKEY,
"DS" : ldns.LDNS_RR_TYPE_DS,
"EID" : ldns.LDNS_RR_TYPE_EID,
"FIRST": ldns.LDNS_RR_TYPE_FIRST,
"GID" : ldns.LDNS_RR_TYPE_GID,
"GPOS" : ldns.LDNS_RR_TYPE_GPOS,
"HINFO": ldns.LDNS_RR_TYPE_HINFO,
"IPSECKEY": ldns.LDNS_RR_TYPE_IPSECKEY,
"ISDN" : ldns.LDNS_RR_TYPE_ISDN,
"IXFR" : ldns.LDNS_RR_TYPE_IXFR,
"KEY" : ldns.LDNS_RR_TYPE_KEY,
"KX" : ldns.LDNS_RR_TYPE_KX,
"LAST" : ldns.LDNS_RR_TYPE_LAST,
"LOC" : ldns.LDNS_RR_TYPE_LOC,
"MAILA": ldns.LDNS_RR_TYPE_MAILA,
"MAILB": ldns.LDNS_RR_TYPE_MAILB,
"MB" : ldns.LDNS_RR_TYPE_MB,
"MD" : ldns.LDNS_RR_TYPE_MD,
"MF" : ldns.LDNS_RR_TYPE_MF,
"MG" : ldns.LDNS_RR_TYPE_MG,
"MINFO": ldns.LDNS_RR_TYPE_MINFO,
"MR" : ldns.LDNS_RR_TYPE_MR,
"MX" : ldns.LDNS_RR_TYPE_MX,
"NAPTR": ldns.LDNS_RR_TYPE_NAPTR,
"NIMLOC": ldns.LDNS_RR_TYPE_NIMLOC,
"NS" : ldns.LDNS_RR_TYPE_NS,
"NSAP" : ldns.LDNS_RR_TYPE_NSAP,
"NSAP_PTR" : ldns.LDNS_RR_TYPE_NSAP_PTR,
"NSEC" : ldns.LDNS_RR_TYPE_NSEC,
"NSEC3": ldns.LDNS_RR_TYPE_NSEC3,
"NSEC3PARAMS" : ldns.LDNS_RR_TYPE_NSEC3PARAMS,
"NULL" : ldns.LDNS_RR_TYPE_NULL,
"NXT" : ldns.LDNS_RR_TYPE_NXT,
"OPT" : ldns.LDNS_RR_TYPE_OPT,
"PTR" : ldns.LDNS_RR_TYPE_PTR,
"PX" : ldns.LDNS_RR_TYPE_PX,
"RP" : ldns.LDNS_RR_TYPE_RP,
"RRSIG": ldns.LDNS_RR_TYPE_RRSIG,
"RT" : ldns.LDNS_RR_TYPE_RT,
"SIG" : ldns.LDNS_RR_TYPE_SIG,
"SINK" : ldns.LDNS_RR_TYPE_SINK,
"SOA" : ldns.LDNS_RR_TYPE_SOA,
"SRV" : ldns.LDNS_RR_TYPE_SRV,
"SSHFP": ldns.LDNS_RR_TYPE_SSHFP,
"TSIG" : ldns.LDNS_RR_TYPE_TSIG,
"TXT" : ldns.LDNS_RR_TYPE_TXT,
"UID" : ldns.LDNS_RR_TYPE_UID,
"UINFO": ldns.LDNS_RR_TYPE_UINFO,
"UNSPEC": ldns.LDNS_RR_TYPE_UNSPEC,
"WKS" : ldns.LDNS_RR_TYPE_WKS,
"X25" : ldns.LDNS_RR_TYPE_X25
}
| bsd-3-clause |
davidcusatis/horizon | openstack_dashboard/dashboards/identity/mappings/tables.py | 13 | 2814 | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
class CreateMappingLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Mapping")
url = "horizon:identity:mappings:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_mapping"),)
class EditMappingLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:identity:mappings:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_mapping"),)
class DeleteMappingsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Mapping",
u"Delete Mappings",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Mapping",
u"Deleted Mappings",
count
)
policy_rules = (("identity", "identity:delete_mapping"),)
def delete(self, request, obj_id):
api.keystone.mapping_delete(request, obj_id)
class MappingFilterAction(tables.FilterAction):
def filter(self, table, mappings, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [mapping for mapping in mappings
if q in mapping.id.lower()]
def get_rules_as_json(mapping):
rules = getattr(mapping, 'rules', None)
if rules:
rules = json.dumps(rules, indent=4)
return safestring.mark_safe(rules)
class MappingsTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('Mapping ID'))
description = tables.Column(get_rules_as_json,
verbose_name=_('Rules'))
class Meta(object):
name = "idp_mappings"
verbose_name = _("Attribute Mappings")
row_actions = (EditMappingLink, DeleteMappingsAction)
table_actions = (MappingFilterAction, CreateMappingLink,
DeleteMappingsAction)
| apache-2.0 |
divio/django | tests/custom_pk/fields.py | 379 | 1731 | import random
import string
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MyWrapper(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
class MyAutoField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 10
super(MyAutoField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def from_db_value(self, value, expression, connection, context):
if not value:
return
return MyWrapper(value)
def get_db_prep_save(self, value, connection):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
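# A hedged usage sketch (the model name is hypothetical):
#   class Employee(models.Model):
#       code = MyAutoField(primary_key=True)
# Saving an Employee with no code set generates a random 10-letter
# MyWrapper value in pre_save().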
| bsd-3-clause |
duydb2/ZTC | atc/django-atc-profile-storage/atc_profile_storage/views.py | 1 | 4558 | from atc_profile_storage.models import Profile
from atc_profile_storage.serializers import ProfileSerializer
from functools import wraps
from rest_framework.exceptions import APIException
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.parsers import JSONParser
class BadGateway(APIException):
status_code = 502
default_detail = 'Could not connect to ATC gateway.'
def serviced(method):
'''
A decorator to check whether the service is available.
Raises a BadGateway exception on failure to connect to the atc gateway.
'''
@wraps(method)
def decorator(cls, request, *args, **kwargs):
service = None
# service = atcdClient()
# if service is None:
# raise BadGateway()
return method(cls, request, service, *args, **kwargs)
return decorator
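# Note: the atcd connectivity check in `serviced` is currently stubbed out
# (service is always None); if re-enabled as sketched in the comments above,
# a failed atcdClient() connection would surface as BadGateway (HTTP 502).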
class ProfilesApi(APIView):
@serviced
def get(self, request, service):
profiles = Profile.objects.all()
serializer = ProfileSerializer(profiles, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@serviced
def post(self, request, service):
data = request.DATA
profiles = Profile.objects.all()
profiles.delete()
serializer = ProfileSerializer(data=data, many=True)
if not serializer.is_valid():
raise ParseError(detail=serializer.errors)
serializer.save()
return Response(
serializer.data,
status=status.HTTP_201_CREATED
)
@serviced
def delete(self, request, service, pk=None):
profiles = Profile.objects.all()
profiles.delete()
return Response(
status=status.HTTP_204_NO_CONTENT
)
class ProfileApi(APIView):
def get_object(self, pk, create=None):
""" get exist object if not, create """
try:
profile = Profile.objects.get(pk=pk)
except Profile.DoesNotExist as e:
if create:
profile = Profile.objects.create(id=pk, name='profile id=%s'%pk, content={u'up': [], u'down':[]})
else:
return Response(
e.message,
status=status.HTTP_404_NOT_FOUND
)
return profile
@serviced
def get(self, request, service, pk=None, format=None):
profile = self.get_object(pk, create=True)
serializer = ProfileSerializer(profile)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@serviced
def post(self, request, service, pk=None, format=None):
profile = self.get_object(pk, create=True)
data = request.DATA
serializer = ProfileSerializer(profile, data=data)
if not serializer.is_valid():
raise ParseError(detail=serializer.errors)
serializer.save()
return Response(
serializer.data,
status=status.HTTP_201_CREATED
)
@serviced
def delete(self, request, service, pk=None):
profile = self.get_object(pk)
if profile:
profile.delete()
return Response(
status=status.HTTP_204_NO_CONTENT
)
# class JSONResponse(HttpResponse):
# def __init__(self, data, **kwargs):
# content = JSONRenderer().render(data)
# kwargs['content_type'] = 'application/json'
# super(JSONResponse, self).__init__(content, **kwargs)
#
#
# @csrf_exempt
# def profile_list(request):
# if request.method == 'GET':
# return JSONResponse(serializer.data)
# elif request.method == 'POST':
# return HttpResponse(status=405)
# else:
# return HttpResponse(status=405)
#
#
# @csrf_exempt
# def profile_detail(request, pk):
# try:
# profile = Profile.objects.get(pk=pk)
# except Profile.DoesNotExist:
# return HttpResponse(status=404)
#
# if request.method == 'GET':
# serializer = ProfileSerializer(profile)
# return JSONResponse(serializer.data)
#
# elif request.method == 'POST':
# data = JSONParser().parse(request)
# serializer = ProfileSerializer(profile, data=data)
# if serializer.is_valid():
# serializer.save()
# return JSONResponse(serializer.data)
# return JSONResponse(serializer.errors, status=400)
#
# elif request.method == 'DELETE':
# profile.delete()
# return HttpResponse(status=204)
#
# else:
# return HttpResponse(status=405)
| bsd-3-clause |
bandit145/PyVenture | PyVentureClass.py | 1 | 2904 | class Class01: # class file for PyVentureGen
#Did this because classes are cool and declutter the program like a b0ss
#PY_BASE is for the gen function in the main program. I need to actually work on the game code itself soon - 3/2/2016 Bove
PY_BASE = ("""
#PyVenture by Philip Bove|
#Created on 2/8/2016 |
# |
# This is to start you along your journey of making text-based cmd line adventures|
# Just run this and start |
#------------------------|
def start(): #my beginning function
basegame()
def basegame():
inventory = {} #dict pls
choices = ['choice0', 'choice1', 'choice2', 'choice3', 'choice4', 'choice5'] #choices, will probably expand
print ('output')
selection = input('> ' )
if selection == choices[0]:
print('stuff here') #sectioning game into functions, all choices will call a new function segment at some point
choice0(inventory)
elif selection == choices[1]:
print('output')
elif selection == choices[2]:
print('output')
elif selection == choices[3]:
print('output')
elif selection == choices[4]:
print('output')
elif selection == choices[5]:
print('output.')
else: """#break this into seperate end(): function to be called whenever you die?
"""
print('output')
restart = input('Would you like to continue? y/n > ')
if restart == 'y':
start()
elif restart == 'n':
print('See you next time!')
""")
# appendmeth is for appending already created files.
def appendmeth(self,file,count):
f = open(file,"a")
f.write("""
def choice"""+str(count)+"""(inventory"""+str(count)+"""):
choices"""+str(count)+""" = ['choice0','choice1','choice2','choice3','choice4','choice5'] #Enter choices user choices here
Sel"""+str(count)+""" = input( 'put prompt here')
if sel"""+str(count)+""" == choices"""+str(count)+"""[0]:
print('stuff here') #sectioning game into functions, all choices will call a new function segment at some point
choice0()
elif sel"""+str(count)+""" == choices"""+str(count)+"""[1]:
print('output')
elif sel"""+str(count)+""" == choices"""+str(count)+"""[2]:
print('output')
elif sel"""+str(count)+""" == choices"""+str(count)+"""[3]:
print('output')
elif sel"""+str(count)+""" == choices"""+str(count)+"""[4]:
print('output')
elif sel"""+str(count)+""" == choices"""+str(count)+"""[5]:
print('output.')
else:
print('no recognized input') #loop back?
""")#Write the the code thing here
#returnf opens the file in append mode and returns the file object so the main program can append the file again.
def returnf(self,file):
f = open(file,"a")
return f
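# A hedged usage sketch (the file name is hypothetical):
#   gen = Class01()
#   gen.appendmeth("adventure.py", 1)  # appends a choice1() handler
#   f = gen.returnf("adventure.py")    # reopen for further appends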
| mit |
nan86150/ImageFusion | lib/python2.7/site-packages/PIL/FliImagePlugin.py | 41 | 3432 | #
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
# 95-09-01 fl Created
# 97-01-03 fl Fixed parser, setup decoder tile
# 98-07-15 fl Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import Image, ImageFile, ImagePalette
import string
def i16(c):
return ord(c[0]) + (ord(c[1])<<8)
def i32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16) + (ord(c[3])<<24)
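# Little-endian decode, e.g. i16("\x11\xaf") == 0xaf11 (an FLI magic number).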
#
# decoder
def _accept(prefix):
return i16(prefix[4:6]) in [0xAF11, 0xAF12]
##
# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
# method to load individual frames.
class FliImageFile(ImageFile.ImageFile):
format = "FLI"
format_description = "Autodesk FLI/FLC Animation"
def _open(self):
# HEAD
s = self.fp.read(128)
magic = i16(s[4:6])
if magic not in [0xAF11, 0xAF12]:
raise SyntaxError, "not an FLI/FLC file"
# image characteristics
self.mode = "P"
self.size = i16(s[8:10]), i16(s[10:12])
# animation speed
duration = i32(s[16:20])
if magic == 0xAF11:
duration = (duration * 1000) / 70
self.info["duration"] = duration
# look for palette
palette = map(lambda a: (a,a,a), range(256))
s = self.fp.read(16)
self.__offset = 128
if i16(s[4:6]) == 0xF100:
# prefix chunk; ignore it
self.__offset = self.__offset + i32(s)
s = self.fp.read(16)
if i16(s[4:6]) == 0xF1FA:
# look for palette chunk
s = self.fp.read(6)
if i16(s[4:6]) == 11:
self._palette(palette, 2)
elif i16(s[4:6]) == 4:
self._palette(palette, 0)
palette = map(lambda (r,g,b): chr(r)+chr(g)+chr(b), palette)
self.palette = ImagePalette.raw("RGB", string.join(palette, ""))
# set things up to decode first frame
self.frame = -1
self.__fp = self.fp
self.seek(0)
def _palette(self, palette, shift):
# load palette
i = 0
for e in range(i16(self.fp.read(2))):
s = self.fp.read(2)
i = i + ord(s[0])
n = ord(s[1])
if n == 0:
n = 256
s = self.fp.read(n * 3)
for n in range(0, len(s), 3):
r = ord(s[n]) << shift
g = ord(s[n+1]) << shift
b = ord(s[n+2]) << shift
palette[i] = (r, g, b)
i = i + 1
def seek(self, frame):
if frame != self.frame + 1:
raise ValueError, "cannot seek to frame %d" % frame
self.frame = frame
# move to next frame
self.fp = self.__fp
self.fp.seek(self.__offset)
s = self.fp.read(4)
if not s:
raise EOFError
framesize = i32(s)
self.decodermaxblock = framesize
self.tile = [("fli", (0,0)+self.size, self.__offset, None)]
self.__offset = self.__offset + framesize
def tell(self):
return self.frame
#
# registry
Image.register_open("FLI", FliImageFile, _accept)
Image.register_extension("FLI", ".fli")
Image.register_extension("FLI", ".flc")
| mit |
matthewoliver/swift | swift/container/server.py | 2 | 28572 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
import traceback
import math
from swift import gettext_ as _
from eventlet import Timeout
import swift.common.db
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.replicator import ContainerReplicatorRpc
from swift.common.db import DatabaseAlreadyExists
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.request_helpers import get_param, \
split_and_validate_path, is_sys_or_user_meta
from swift.common.utils import get_logger, hash_path, public, \
Timestamp, storage_directory, validate_sync_to, \
config_true_value, timing_stats, replication, \
override_bytes_from_content_type, get_log_line
from swift.common.constraints import valid_timestamp, check_utf8, check_drive
from swift.common import constraints
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import HTTP_NO_CONTENT, HTTP_NOT_FOUND, is_success
from swift.common.middleware import listing_formats
from swift.common.storage_policy import POLICIES
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPException
def gen_resp_headers(info, is_deleted=False):
"""
Convert container info dict to headers.
"""
# backend headers are always included
headers = {
'X-Backend-Timestamp': Timestamp(info.get('created_at', 0)).internal,
'X-Backend-PUT-Timestamp': Timestamp(info.get(
'put_timestamp', 0)).internal,
'X-Backend-DELETE-Timestamp': Timestamp(
info.get('delete_timestamp', 0)).internal,
'X-Backend-Status-Changed-At': Timestamp(
info.get('status_changed_at', 0)).internal,
'X-Backend-Storage-Policy-Index': info.get('storage_policy_index', 0),
}
if not is_deleted:
# base container info on deleted containers is not exposed to client
headers.update({
'X-Container-Object-Count': info.get('object_count', 0),
'X-Container-Bytes-Used': info.get('bytes_used', 0),
'X-Timestamp': Timestamp(info.get('created_at', 0)).normal,
'X-PUT-Timestamp': Timestamp(
info.get('put_timestamp', 0)).normal,
})
return headers
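# Illustrative (values hypothetical): gen_resp_headers({'object_count': 2,
# 'bytes_used': 14}) includes 'X-Container-Object-Count': 2 and
# 'X-Container-Bytes-Used': 14 alongside the X-Backend-* timestamp headers.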
class ContainerController(BaseStorageServer):
"""WSGI Controller for the container server."""
# Ensure these are all lowercase
save_headers = ['x-container-read', 'x-container-write',
'x-container-sync-key', 'x-container-sync-to']
server_type = 'container-server'
def __init__(self, conf, logger=None):
super(ContainerController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='container-server')
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.root = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = float(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.replicator_rpc = ContainerReplicatorRpc(
self.root, DATADIR, ContainerBroker, self.mount_check,
logger=self.logger)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
if 'allow_versions' in conf:
self.logger.warning('Option allow_versions is deprecated. '
'Configure the versioned_writes middleware in '
'the proxy-server instead. This option will '
'be ignored in a future release.')
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
def _get_container_broker(self, drive, part, account, container, **kwargs):
"""
Get a DB broker for the container.
:param drive: drive that holds the container
:param part: partition the container is in
:param account: account name
:param container: container name
:returns: ContainerBroker object
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
kwargs.setdefault('account', account)
kwargs.setdefault('container', container)
kwargs.setdefault('logger', self.logger)
return ContainerBroker(db_path, **kwargs)
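# Illustrative layout (drive, partition, and hash are hypothetical): for
# drive 'sdb1' and partition '1017', a container whose name hash ends in
# 'f3e' maps to /srv/node/sdb1/containers/1017/f3e/<hash>/<hash>.db, where
# the suffix directory is the last three characters of the hash.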
def get_and_validate_policy_index(self, req):
"""
Validate that the index supplied maps to a policy.
:returns: policy index from request, or None if not present
:raises HTTPBadRequest: if the supplied index is bogus
"""
policy_index = req.headers.get('X-Backend-Storage-Policy-Index', None)
if policy_index is None:
return None
try:
policy_index = int(policy_index)
except ValueError:
raise HTTPBadRequest(
request=req, content_type="text/plain",
body=("Invalid X-Storage-Policy-Index %r" % policy_index))
policy = POLICIES.get_by_index(policy_index)
if policy is None:
raise HTTPBadRequest(
request=req, content_type="text/plain",
body=("Invalid X-Storage-Policy-Index %r" % policy_index))
return int(policy)
def account_update(self, req, account, container, broker):
"""
Update the account server(s) with latest container info.
:param req: swob.Request object
:param account: account name
:param container: container name
:param broker: container DB broker object
:returns: if all the account requests return a 404 error code,
HTTPNotFound response object,
if the account cannot be updated due to a malformed header,
an HTTPBadRequest response object,
otherwise None.
"""
account_hosts = [h.strip() for h in
req.headers.get('X-Account-Host', '').split(',')]
account_devices = [d.strip() for d in
req.headers.get('X-Account-Device', '').split(',')]
account_partition = req.headers.get('X-Account-Partition', '')
if len(account_hosts) != len(account_devices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Account update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': req.headers.get('X-Account-Host', ''),
'devices': req.headers.get('X-Account-Device', '')})
return HTTPBadRequest(req=req)
if account_partition:
# zip is lazy on py3, but we need a list, so force evaluation.
# On py2 it's an extra list copy, but the list is so small
# (one element per replica in account ring, usually 3) that it
# doesn't matter.
updates = list(zip(account_hosts, account_devices))
else:
updates = []
account_404s = 0
for account_host, account_device in updates:
account_ip, account_port = account_host.rsplit(':', 1)
new_path = '/' + '/'.join([account, container])
info = broker.get_info()
account_headers = HeaderKeyDict({
'x-put-timestamp': info['put_timestamp'],
'x-delete-timestamp': info['delete_timestamp'],
'x-object-count': info['object_count'],
'x-bytes-used': info['bytes_used'],
'x-trans-id': req.headers.get('x-trans-id', '-'),
'X-Backend-Storage-Policy-Index': info['storage_policy_index'],
'user-agent': 'container-server %s' % os.getpid(),
'referer': req.as_referer()})
if req.headers.get('x-account-override-deleted', 'no').lower() == \
'yes':
account_headers['x-account-override-deleted'] = 'yes'
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
account_ip, account_port, account_device,
account_partition, 'PUT', new_path, account_headers)
with Timeout(self.node_timeout):
account_response = conn.getresponse()
account_response.read()
if account_response.status == HTTP_NOT_FOUND:
account_404s += 1
elif not is_success(account_response.status):
self.logger.error(_(
'ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s'),
{'ip': account_ip, 'port': account_port,
'device': account_device,
'status': account_response.status,
'reason': account_response.reason})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)'),
{'ip': account_ip, 'port': account_port,
'device': account_device})
if updates and account_404s == len(updates):
return HTTPNotFound(req=req)
else:
return None
def _update_sync_store(self, broker, method):
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s during %s' %
(broker.db_file, method))
@public
@timing_stats()
def DELETE(self, req):
"""Handle HTTP DELETE request."""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
req_timestamp = valid_timestamp(req)
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
# policy index is only relevant for delete_obj (and transitively for
# auto create accounts)
obj_policy_index = self.get_and_validate_policy_index(req) or 0
broker = self._get_container_broker(drive, part, account, container)
if account.startswith(self.auto_create_account_prefix) and obj and \
not os.path.exists(broker.db_file):
try:
broker.initialize(req_timestamp.internal, obj_policy_index)
except DatabaseAlreadyExists:
pass
if not os.path.exists(broker.db_file):
return HTTPNotFound()
if obj: # delete object
broker.delete_object(obj, req.headers.get('x-timestamp'),
obj_policy_index)
return HTTPNoContent(request=req)
else:
# delete container
if not broker.empty():
return HTTPConflict(request=req)
existed = Timestamp(broker.get_info()['put_timestamp']) and \
not broker.is_deleted()
broker.delete_db(req_timestamp.internal)
if not broker.is_deleted():
return HTTPConflict(request=req)
self._update_sync_store(broker, 'DELETE')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if existed:
return HTTPNoContent(request=req)
return HTTPNotFound()
def _update_or_create(self, req, broker, timestamp, new_container_policy,
requested_policy_index):
"""
Create new database broker or update timestamps for existing database.
:param req: the swob request object
:param broker: the broker instance for the container
:param timestamp: internalized timestamp
:param new_container_policy: the storage policy index to use
when creating the container
:param requested_policy_index: the storage policy index sent in the
request, may be None
:returns: created, a bool, if database did not previously exist
"""
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, new_container_policy)
except DatabaseAlreadyExists:
pass
else:
return True # created
recreated = broker.is_deleted()
if recreated:
# only set storage policy on deleted containers
broker.set_storage_policy_index(new_container_policy,
timestamp=timestamp)
elif requested_policy_index is not None:
# validate requested policy with existing container
if requested_policy_index != broker.storage_policy_index:
raise HTTPConflict(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
broker.update_put_timestamp(timestamp)
if broker.is_deleted():
raise HTTPConflict(request=req)
if recreated:
broker.update_status_changed_at(timestamp)
return recreated
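    def _demo_update_or_create(self, req, broker):
        # Illustrative sketch (hypothetical helper, not in the upstream
        # source): the typical call made by PUT below; returns True when the
        # database was created or recreated, False for a plain update.
        return self._update_or_create(
            req, broker, Timestamp(time.time()).internal,
            new_container_policy=int(POLICIES.default),
            requested_policy_index=None)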
@public
@timing_stats()
def PUT(self, req):
"""Handle HTTP PUT request."""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
if obj: # put container object
            # obj PUT expects the policy_index header; the default is for
            # legacy support during upgrade.
obj_policy_index = requested_policy_index or 0
if account.startswith(self.auto_create_account_prefix) and \
not os.path.exists(broker.db_file):
try:
broker.initialize(req_timestamp.internal, obj_policy_index)
except DatabaseAlreadyExists:
pass
if not os.path.exists(broker.db_file):
return HTTPNotFound()
broker.put_object(obj, req_timestamp.internal,
int(req.headers['x-size']),
req.headers['x-content-type'],
req.headers['x-etag'], 0,
obj_policy_index,
req.headers.get('x-content-type-timestamp'),
req.headers.get('x-meta-timestamp'))
return HTTPCreated(request=req)
else: # put container
if requested_policy_index is None:
# use the default index sent by the proxy if available
new_container_policy = req.headers.get(
'X-Backend-Storage-Policy-Default', int(POLICIES.default))
else:
new_container_policy = requested_policy_index
created = self._update_or_create(req, broker,
req_timestamp.internal,
new_container_policy,
requested_policy_index)
metadata = {}
metadata.update(
(key, (value, req_timestamp.internal))
for key, value in req.headers.items()
if key.lower() in self.save_headers or
is_sys_or_user_meta('container', key))
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
metadata['X-Container-Sync-To'][0] != \
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
if metadata:
self._update_sync_store(broker, 'PUT')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if created:
return HTTPCreated(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
else:
return HTTPAccepted(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
@public
@timing_stats(sample_rate=0.1)
def HEAD(self, req):
"""Handle HTTP HEAD request."""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
out_content_type = listing_formats.get_listing_content_type(req)
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=headers)
headers.update(
(key, value)
for key, (value, timestamp) in broker.metadata.items()
if value != '' and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)))
headers['Content-Type'] = out_content_type
resp = HTTPNoContent(request=req, headers=headers, charset='utf-8')
resp.last_modified = math.ceil(float(headers['X-PUT-Timestamp']))
return resp
def update_data_record(self, record):
"""
Perform any mutations to container listing records that are common to
all serialization formats, and returns it as a dict.
Converts created time to iso timestamp.
Replaces size with 'swift_bytes' content type parameter.
:params record: object entry record
:returns: modified record
"""
(name, created, size, content_type, etag) = record[:5]
if content_type is None:
return {'subdir': name.decode('utf8')}
response = {'bytes': size, 'hash': etag, 'name': name.decode('utf8'),
'content_type': content_type}
response['last_modified'] = Timestamp(created).isoformat
override_bytes_from_content_type(response, logger=self.logger)
return response
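    def _demo_update_data_record(self):
        # Illustrative sketch (hypothetical helper, not in the upstream
        # source): a plain object row in, a listing dict out.
        record = ('obj1', '1403044243.23556', 14, 'text/plain',
                  'd41d8cd98f00b204e9800998ecf8427e')
        return self.update_data_record(record)
        # -> {'name': u'obj1', 'bytes': 14, 'content_type': 'text/plain',
        #     'hash': 'd41d8cd98f00b204e9800998ecf8427e',
        #     'last_modified': <ISO 8601 form of the created time>}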
@public
@timing_stats()
def GET(self, req):
"""Handle HTTP GET request."""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
path = get_param(req, 'path')
prefix = get_param(req, 'prefix')
delimiter = get_param(req, 'delimiter')
if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
# delimiters can be made more flexible later
return HTTPPreconditionFailed(body='Bad delimiter')
marker = get_param(req, 'marker', '')
end_marker = get_param(req, 'end_marker')
limit = constraints.CONTAINER_LISTING_LIMIT
given_limit = get_param(req, 'limit')
reverse = config_true_value(get_param(req, 'reverse'))
if given_limit and given_limit.isdigit():
limit = int(given_limit)
if limit > constraints.CONTAINER_LISTING_LIMIT:
return HTTPPreconditionFailed(
request=req,
body='Maximum limit is %d'
% constraints.CONTAINER_LISTING_LIMIT)
out_content_type = listing_formats.get_listing_content_type(req)
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=resp_headers)
container_list = broker.list_objects_iter(
limit, marker, end_marker, prefix, delimiter, path,
storage_policy_index=info['storage_policy_index'], reverse=reverse)
return self.create_listing(req, out_content_type, info, resp_headers,
broker.metadata, container_list, container)
def create_listing(self, req, out_content_type, info, resp_headers,
metadata, container_list, container):
for key, (value, timestamp) in metadata.items():
if value and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)):
resp_headers[key] = value
listing = [self.update_data_record(record)
for record in container_list]
if out_content_type.endswith('/xml'):
body = listing_formats.container_to_xml(listing, container)
elif out_content_type.endswith('/json'):
body = json.dumps(listing)
else:
body = listing_formats.listing_to_text(listing)
ret = Response(request=req, headers=resp_headers, body=body,
content_type=out_content_type, charset='utf-8')
ret.last_modified = math.ceil(float(resp_headers['X-PUT-Timestamp']))
if not ret.body:
ret.status_int = HTTP_NO_CONTENT
return ret
@public
@replication
@timing_stats(sample_rate=0.01)
def REPLICATE(self, req):
"""
        Handle HTTP REPLICATE request (json-encoded RPC calls for replication).
"""
post_args = split_and_validate_path(req, 3)
drive, partition, hash = post_args
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
try:
args = json.load(req.environ['wsgi.input'])
except ValueError as err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
ret = self.replicator_rpc.dispatch(post_args, args)
ret.request = req
return ret
@public
@timing_stats()
def POST(self, req):
"""Handle HTTP POST request."""
drive, part, account, container = split_and_validate_path(req, 4)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
if not check_drive(self.root, drive, self.mount_check):
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container)
if broker.is_deleted():
return HTTPNotFound(request=req)
broker.update_put_timestamp(req_timestamp.internal)
metadata = {}
metadata.update(
(key, (value, req_timestamp.internal))
for key, value in req.headers.items()
if key.lower() in self.save_headers or
is_sys_or_user_meta('container', key))
if metadata:
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
metadata['X-Container-Sync-To'][0] != \
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
self._update_sync_store(broker, 'POST')
return HTTPNoContent(request=req)
def __call__(self, env, start_response):
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s %(path)s '),
{'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
if self.log_requests:
trans_time = time.time() - start_time
log_message = get_log_line(req, res, trans_time, '')
if req.method.upper() == 'REPLICATE':
self.logger.debug(log_message)
else:
self.logger.info(log_message)
return res(env, start_response)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf)
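# Illustrative sketch (hypothetical values, not part of the upstream module):
# paste.deploy passes the [DEFAULT] section as global_conf and the app's own
# section entries as keyword arguments.
def _demo_app_factory():
    return app_factory({'devices': '/srv/node', 'mount_check': 'false'},
                       log_requests='true')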
| apache-2.0 |
goddardl/cortex | python/IECore/registerRunTimeTyped.py | 7 | 7239 | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
## Registers a type id for an extension class. This makes TypeId.className
# available and also checks that no other type is trying to use the same id.
# It raises a RuntimeError if a conflicting type is already registered.
def __registerTypeId( typeId, typeName, baseTypeId ) :
assert( type( typeId ) is IECore.TypeId )
assert( type( typeName ) is str )
assert( type( baseTypeId ) is IECore.TypeId )
# check this type hasn't been registered already
if hasattr( IECore.TypeId, typeName ):
if getattr( IECore.TypeId, typeName ) != typeId:
raise RuntimeError( "Type \"%s\" is already registered." % typeName )
return
if typeId in IECore.TypeId.values :
raise RuntimeError( "TypeId \"%d\" is already registered as \"%s\"." % (typeId, IECore.TypeId.values[typeId] ) )
# update the TypeId enum
setattr( IECore.TypeId, typeName, typeId )
IECore.TypeId.values[ int( typeId ) ] = typeId
# register the new type id
IECore.RunTimeTyped.registerType( typeId, typeName, baseTypeId )
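# An illustrative sketch (hypothetical id and name, not part of the module) :
# after a successful call the new id becomes available as an enum attribute.
def __demoRegisterTypeId() :

	__registerTypeId( IECore.TypeId( 300123 ), "DemoType", IECore.TypeId.RunTimeTyped )
	assert getattr( IECore.TypeId, "DemoType" ) == IECore.TypeId( 300123 )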
__nextDynamicRunTimeTypedId = None
## This function adds the necessary function definitions to a python
# class for it to properly implement the RunTimeTyped interface. It should
# be called once for all python classes inheriting from RunTimeTyped. It also
# calls registerTypeId() for you.
# typId is optional and if not defined, this function will associate a dynamic Id
# in the range FirstDynamicTypeId and LastDynamicTypeId from TypeIds.h.
# It's necessary to specify type Id for Object derived class or anything that
# is serializable.
# If typeName is not specified then the name of the class itself is used - you may wish
# to provide an explicit typeName in order to prefix the name with a module name.
def registerRunTimeTyped( typ, typId = None, typeName = None ) :
if typeName is None :
typeName = typ.__name__
runTypedBaseClass = filter( lambda c: issubclass( c, IECore.RunTimeTyped ), typ.__bases__ )[0]
# constants below are the same as in TypeIds.h
FirstDynamicTypeId = 300000
LastDynamicTypeId = 399999
	# check if we are overwriting a previous registration.
if not hasattr( IECore.TypeId, typeName ) :
if typId is None :
global __nextDynamicRunTimeTypedId
if __nextDynamicRunTimeTypedId is None :
__nextDynamicRunTimeTypedId = FirstDynamicTypeId
elif __nextDynamicRunTimeTypedId > LastDynamicTypeId:
raise Exception, "Too many dynamic RunTimeTyped registered classes! You must change TypeIds.h and rebuild Cortex."
typId = __nextDynamicRunTimeTypedId
__nextDynamicRunTimeTypedId += 1
__registerTypeId( IECore.TypeId( typId ), typeName, IECore.TypeId( runTypedBaseClass.staticTypeId() ) )
else :
# check if the new type Id is compatible with the previously registered one.
prevTypId = getattr( IECore.TypeId, typeName )
if prevTypId in xrange( FirstDynamicTypeId, LastDynamicTypeId+1 ) :
if not typId is None :
raise Exception, "Trying to set a type ID for %s previously registered as a dynamic type Id!" % typeName
else :
if typId is None :
raise Exception, "Trying to re-register type %s as dynamic type Id!" % typeName
elif typId != prevTypId :
raise Exception, "Trying to re-register %s under different type Id: %s != %s" % ( typeName, str(typId), prevTypId )
# necessary when the typeid is defined in IECore/TypeIds.h and bound in TypeIdBinding.cpp, but then
# the class for that typeid is implemented in python (currently ClassParameter does this).
if IECore.RunTimeTyped.typeNameFromTypeId( prevTypId )=="" :
IECore.RunTimeTyped.registerType( prevTypId, typeName, IECore.TypeId( runTypedBaseClass.staticTypeId() ) )
# Retrieve the correct value from the enum
tId = getattr( IECore.TypeId, typeName )
# add the typeId and typeName method overrides
typ.typeId = lambda x : tId
typ.typeName = lambda x: typeName
# add the staticTypeId, staticTypeName, baseTypeId, and baseTypeName overrides
typ.staticTypeId = staticmethod( lambda : tId )
typ.staticTypeName = staticmethod( lambda : typeName )
typ.baseTypeId = staticmethod( lambda : runTypedBaseClass.staticTypeId() )
typ.baseTypeName = staticmethod( lambda : runTypedBaseClass.staticTypeName() )
# add the inheritsFrom method override
def inheritsFrom( t, baseClass ) :
if type( t ) is str :
if type( baseClass ) is list :
for base in baseClass :
if base.staticTypeName() == t :
return True
else:
if baseClass.staticTypeName() == t :
return True
elif type(t) is IECore.TypeId :
if type( baseClass ) is list :
for base in baseClass :
if base.staticTypeId() == t :
return True
else:
if baseClass.staticTypeId() == t :
return True
else:
raise TypeError( "Invalid type specifier ( %s )" % str( t ) )
if type( baseClass ) is list :
for base in baseClass:
if base.inheritsFrom( t ):
return True
else:
return baseClass.inheritsFrom( t )
return False
typ.inheritsFrom = staticmethod( lambda t : inheritsFrom( t, runTypedBaseClass ) )
# add the isInstanceOf method override
def isInstanceOf( self, t, baseClass ) :
if type( t ) is str :
if self.staticTypeName() == t :
return True
elif type( t ) is IECore.TypeId :
if self.staticTypeId() == t :
return True
else :
raise TypeError( "Invalid type specifier ( %s )" % str( t ) )
return inheritsFrom( t, baseClass )
typ.isInstanceOf = lambda self, t : isInstanceOf( self, t, runTypedBaseClass )
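# An illustrative sketch (hypothetical class, not part of the module) : a
# python class derived from Parameterised, registered with a dynamically
# allocated type id.
def __demoRegisterRunTimeTyped() :

	class Demo( IECore.Parameterised ) :

		def __init__( self ) :

			IECore.Parameterised.__init__( self, "a demo" )

	registerRunTimeTyped( Demo, typeName = "Demo" )
	assert Demo.inheritsFrom( "Parameterised" )
	return Demo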
| bsd-3-clause |
simonkuang/grpc | tools/run_tests/artifacts/artifact_targets.py | 1 | 15032 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
docker_base_image=None,
extra_docker_args=None,
verbose_success=False):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if docker_base_image is not None:
docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
verbose_success=verbose_success)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0,
verbose_success=False):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost,
verbose_success=verbose_success)
return jobspec
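def _demo_jobspec():
    """Illustrative sketch (not part of the upstream script): a docker-based
    artifact build job, using paths that appear elsewhere in this file."""
    return create_docker_jobspec(
        'csharp_ext_linux_x64',
        'tools/dockerfile/grpc_artifact_linux_x64',
        'tools/run_tests/artifacts/build_artifact_csharp.sh',
        environ={'CONFIG': 'opt'})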
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_version):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Raspberry Pi build
environ['PYTHON'] = '/usr/local/bin/python{}'.format(
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
# A QEMU bug causes submodule update to hang, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 5,
docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
extra_docker_args=extra_args)
elif self.platform == 'linux':
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
# Platform autodetection for the manylinux1 image breaks so we set the
# defines ourselves.
# TODO(atash) get better platform-detection support in core so we don't
# need to do this manually...
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
self.arch,
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60,
docker_base_image='quay.io/pypa/manylinux1_i686'
if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
elif self.platform == 'windows':
if 'Python27' in self.py_version or 'Python34' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
# Ruby build uses docker internally and docker cannot be nested.
# We are using a custom workspace instead.
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
use_workspace=True,
timeout_seconds=45 * 60)
class CSharpExtArtifact:
"""Builds C# native extension library"""
def __init__(self, platform, arch):
self.name = 'csharp_ext_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'csharp', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.arch == 'android':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_android_ndk',
'tools/run_tests/artifacts/build_artifact_csharp_android.sh',
environ={})
elif self.platform == 'windows':
cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
cmake_arch_option
],
use_workspace=True)
else:
environ = {
'CONFIG': 'opt',
'EMBED_OPENSSL': 'true',
'EMBED_ZLIB': 'true',
'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'LDFLAGS': ''
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
'tools/run_tests/artifacts/build_artifact_csharp.sh',
environ=environ)
else:
archflag = _ARCH_FLAG_MAP[self.arch]
environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['LDFLAGS'] += ' %s' % archflag
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp.sh'],
environ=environ,
use_workspace=True)
def __str__(self):
return self.name
class PHPArtifact:
"""Builds PHP PECL package"""
def __init__(self, platform, arch):
self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'php', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
else:
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
use_workspace=True)
class ProtocArtifact:
"""Builds protoc and protoc-plugin artifacts"""
def __init__(self, platform, arch):
self.name = 'protoc_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'protoc', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform != 'windows':
cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
if self.platform != 'macos':
ldflags += ' -static-libgcc -static-libstdc++ -s'
environ = {
'CONFIG': 'opt',
'CXXFLAGS': cxxflags,
'LDFLAGS': ldflags,
'PROTOBUF_LDFLAGS_EXTRA': ldflags
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_protoc',
'tools/run_tests/artifacts/build_artifact_protoc.sh',
environ=environ)
else:
environ[
'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_protoc.sh'],
environ=environ,
timeout_seconds=60 * 60,
use_workspace=True)
else:
generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
return create_jobspec(
self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
environ={'generator': generator},
use_workspace=True)
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return ([
Cls(platform, arch)
for Cls in (CSharpExtArtifact, ProtocArtifact)
for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
] + [
CSharpExtArtifact('linux', 'android'),
PythonArtifact('linux', 'x86', 'cp27-cp27m'),
PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
PythonArtifact('linux', 'x86', 'cp34-cp34m'),
PythonArtifact('linux', 'x86', 'cp35-cp35m'),
PythonArtifact('linux', 'x86', 'cp36-cp36m'),
PythonArtifact('linux_extra', 'armv7', '2.7'),
PythonArtifact('linux_extra', 'armv7', '3.4'),
PythonArtifact('linux_extra', 'armv7', '3.5'),
PythonArtifact('linux_extra', 'armv7', '3.6'),
PythonArtifact('linux_extra', 'armv6', '2.7'),
PythonArtifact('linux_extra', 'armv6', '3.4'),
PythonArtifact('linux_extra', 'armv6', '3.5'),
PythonArtifact('linux_extra', 'armv6', '3.6'),
PythonArtifact('linux', 'x64', 'cp27-cp27m'),
PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
PythonArtifact('linux', 'x64', 'cp34-cp34m'),
PythonArtifact('linux', 'x64', 'cp35-cp35m'),
PythonArtifact('linux', 'x64', 'cp36-cp36m'),
PythonArtifact('macos', 'x64', 'python2.7'),
PythonArtifact('macos', 'x64', 'python3.4'),
PythonArtifact('macos', 'x64', 'python3.5'),
PythonArtifact('macos', 'x64', 'python3.6'),
PythonArtifact('windows', 'x86', 'Python27_32bits'),
PythonArtifact('windows', 'x86', 'Python34_32bits'),
PythonArtifact('windows', 'x86', 'Python35_32bits'),
PythonArtifact('windows', 'x86', 'Python36_32bits'),
PythonArtifact('windows', 'x64', 'Python27'),
PythonArtifact('windows', 'x64', 'Python34'),
PythonArtifact('windows', 'x64', 'Python35'),
PythonArtifact('windows', 'x64', 'Python36'),
RubyArtifact('linux', 'x64'),
RubyArtifact('macos', 'x64'),
PHPArtifact('linux', 'x64'),
PHPArtifact('macos', 'x64')
])
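# Illustrative sketch (hypothetical helper, not part of the upstream script):
# driver scripts typically consume targets() by filtering on labels.
def _demo_select_targets(label='python'):
    return [t for t in targets() if label in t.labels]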
| apache-2.0 |
conversationai/wikidetox | experimental/conversation_go_awry/feature_extraction/utils/politeness_with_spacy/features/vectorizer.py | 1 | 6272 | from __future__ import unicode_literals
import os
import cPickle
import string
import nltk
from itertools import chain
from collections import defaultdict
from spacy.en import English
from nltk.stem.wordnet import WordNetLemmatizer
import json
import re
# local import
from politeness_strategies import get_politeness_strategy_features
# Will need access to local dir
# for support files
LOCAL_DIR = os.path.split(__file__)[0]
def get_unigrams_and_bigrams(document):
"""
Grabs unigrams and bigrams from document
sentences. NLTK does the work.
"""
# Get unigram list per sentence:
unigram_lists = [[y for y in t] for t in map(lambda x: nltk.word_tokenize(x), document['sentences'])]
# Generate bigrams from all sentences:
bigrams = [tuple([y for y in t]) for l in map(lambda x: nltk.bigrams(x), unigram_lists) for t in l ]
# Chain unigram lists
unigrams = [x for l in unigram_lists for x in l]
return unigrams, bigrams
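def _demo_unigrams_and_bigrams():
    # Illustrative sketch (hypothetical document, not part of the module):
    # 'sentences' is normally filled in by PolitenessFeatureVectorizer.preprocess.
    doc = {'sentences': ["Could you please help ?"]}
    unigrams, bigrams = get_unigrams_and_bigrams(doc)
    # unigrams -> ['Could', 'you', 'please', 'help', '?']
    # bigrams  -> [('Could', 'you'), ('you', 'please'), ('please', 'help'), ('help', '?')]
    return unigrams, bigrams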
class PolitenessFeatureVectorizer:
"""
    Returns document features based on:
        - unigrams and bigrams
        - politeness strategies
          (inspired by B&L, modeled using dependency parses)
"""
UNIGRAMS_FILENAME = os.path.join(LOCAL_DIR, "featunigrams.p")
BIGRAMS_FILENAME = os.path.join(LOCAL_DIR, "featbigrams.p")
def __init__(self):
"""
Load pickled lists of unigram and bigram features
These lists can be generated using the training set
and PolitenessFeatureVectorizer.generate_bow_features
"""
self.unigrams = cPickle.load(open(self.UNIGRAMS_FILENAME))
self.bigrams = cPickle.load(open(self.BIGRAMS_FILENAME))
def features(self, document):
"""
document must be a dict of the following format--
{
'text': "text str",
}
"""
feature_dict = {}
# Add unigram, bigram features:
feature_dict.update(self._get_term_features(document))
# Add politeness strategy features:
feature_dict.update(get_politeness_strategy_features(document))
return feature_dict
def _get_term_features(self, document):
# One binary feature per ngram in
# in self.unigrams and self.bigrams
unigrams, bigrams = document['unigrams'], document['bigrams']
# Add unigrams to document for later use
unigrams, bigrams = set(unigrams), set(bigrams)
f = {}
f.update(dict(map(lambda x: ("UNIGRAM_" + str(x), 1 if x in unigrams else 0), self.unigrams)))
f.update(dict(map(lambda x: ("BIGRAM_" + str(x), 1 if x in bigrams else 0), self.bigrams)))
return f
@staticmethod
def preprocess(documents):
nlp = English()
for document in documents:
document['sentences'] = nltk.sent_tokenize(document['text'])
document['parses'] = []
document['pos_tags'] = []
for s in document['sentences']:
                # spaCy includes punctuation in dependency parsing,
                # which would lead to errors in feature extraction
bak = s
s = ""
for x in bak:
if x in string.punctuation:
s += " "
else:
s += x
s = ' '.join(s.split())
doc = nlp(s)
cur = []
pos_tags = []
for sent in doc.sents:
pos = sent.start
for tok in sent:
ele = "%s(%s-%d, %s-%d)"%(tok.dep_, tok.head.text, tok.head.i + 1 - pos, tok.text, tok.i + 1 - pos)
pos_tags.append(tok.tag_)
cur.append(ele)
document['parses'].append(cur)
document['pos_tags'].append(pos_tags)
document['unigrams'], document['bigrams'] = get_unigrams_and_bigrams(document)
return documents
@staticmethod
def generate_bow_features(documents, min_unigram_count=20, min_bigram_count=20):
"""
Given a list of documents, compute and store list of unigrams and bigrams
with a frequency > min_unigram_count and min_bigram_count, respectively.
This method must be called prior to the first vectorizer instantiation.
documents -
each document must be a dict
{
'text': 'text'
}
"""
unigram_counts, bigram_counts = defaultdict(int), defaultdict(int)
# Count unigrams and bigrams:
for d in documents:
unigrams = set(d['unigrams'])
bigrams = set(d['bigrams'])
# Count
for w in unigrams:
unigram_counts[w] += 1
for w in bigrams:
bigram_counts[w] += 1
# Keep only ngrams that pass frequency threshold:
unigram_features = filter(lambda x: unigram_counts[x] > min_unigram_count, unigram_counts.iterkeys())
bigram_features = filter(lambda x: bigram_counts[x] > min_bigram_count, bigram_counts.iterkeys())
# Save results:
cPickle.dump(unigram_features, open(PolitenessFeatureVectorizer.UNIGRAMS_FILENAME, 'w'))
cPickle.dump(bigram_features, open(PolitenessFeatureVectorizer.BIGRAMS_FILENAME, 'w'))
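# Illustrative sketch (hypothetical helper, not part of the module): the
# intended call order -- the ngram lists must be generated and pickled before
# the first vectorizer is instantiated.
def _demo_training_setup(documents):
    documents = PolitenessFeatureVectorizer.preprocess(documents)
    PolitenessFeatureVectorizer.generate_bow_features(documents)
    return PolitenessFeatureVectorizer()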
def alphas(s):
bak = s
s = ""
for x in bak:
if x.isalpha():
s += x
return s
if __name__ == "__main__":
"""
Extract features from test documents
"""
from test_documents import TEST_DOCUMENTS
vectorizer = PolitenessFeatureVectorizer()
documents = TEST_DOCUMENTS
documents = PolitenessFeatureVectorizer.preprocess(documents)
for doc in documents:
f = vectorizer.features(doc)
# Print summary of features that are present
print "\n===================="
print "Text: ", doc['text']
print "\tUnigrams, Bigrams: %d" % len(filter(lambda x: f[x] > 0 and ("UNIGRAM_" in x or "BIGRAM_" in x), f.iterkeys()))
print "\tPoliteness Strategies: \n\t\t%s" % "\n\t\t".join(filter(lambda x: f[x] > 0 and "feature_politeness_" in x, f.iterkeys()))
print "\n"
| apache-2.0 |
sklnet/openhdf-enigma2 | lib/python/Plugins/SystemPlugins/WirelessLan/plugin.py | 9 | 16398 | from enigma import eTimer, eEnv
from Screens.Screen import Screen
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap,MultiPixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.MenuList import MenuList
from Components.config import config, getConfigListEntry, ConfigYesNo, NoSave, ConfigSubsection, ConfigText, ConfigSelection, ConfigPassword
from Components.ConfigList import ConfigListScreen
from Components.Network import iNetwork
from Components.Console import Console
from Plugins.Plugin import PluginDescriptor
from os import system, path as os_path, listdir
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
from Tools.HardwareInfo import HardwareInfo
from Wlan import iWlan, wpaSupplicant, iStatus, getWlanConfigName
from time import time
from os import system
from re import escape as re_escape
plugin_path = eEnv.resolve("${libdir}/enigma2/python/Plugins/SystemPlugins/WirelessLan")
list = ["Unencrypted", "WEP", "WPA", "WPA/WPA2", "WPA2"]
weplist = ["ASCII", "HEX"]
config.plugins.wlan = ConfigSubsection()
config.plugins.wlan.essid = NoSave(ConfigText(default = "", fixed_size = False))
config.plugins.wlan.hiddenessid = NoSave(ConfigYesNo(default = False))
config.plugins.wlan.encryption = NoSave(ConfigSelection(list, default = "WPA2"))
config.plugins.wlan.wepkeytype = NoSave(ConfigSelection(weplist, default = "ASCII"))
config.plugins.wlan.psk = NoSave(ConfigPassword(default = "", fixed_size = False))
class WlanStatus(Screen):
skin = """
<screen name="WlanStatus" position="center,center" size="560,400" title="Wireless network status" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="LabelBSSID" render="Label" position="10,60" size="200,25" valign="left" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelESSID" render="Label" position="10,100" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelQuality" render="Label" position="10,140" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelSignal" render="Label" position="10,180" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelBitrate" render="Label" position="10,220" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelEnc" render="Label" position="10,260" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="BSSID" render="Label" position="220,60" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="ESSID" render="Label" position="220,100" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="quality" render="Label" position="220,140" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="signal" render="Label" position="220,180" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="bitrate" render="Label" position="220,220" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="enc" render="Label" position="220,260" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<ePixmap pixmap="skin_default/div-h.png" position="0,350" zPosition="1" size="560,2" />
<widget source="IFtext" render="Label" position="10,355" size="120,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1" />
<widget source="IF" render="Label" position="120,355" size="400,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1" />
<widget source="Statustext" render="Label" position="10,375" size="115,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1"/>
<widget name="statuspic" pixmaps="skin_default/buttons/button_green.png,skin_default/buttons/button_green_off.png" position="130,380" zPosition="10" size="15,16" transparent="1" alphatest="on"/>
</screen>"""
def __init__(self, session, iface):
Screen.__init__(self, session)
self.session = session
self.iface = iface
self["LabelBSSID"] = StaticText(_('Accesspoint:'))
self["LabelESSID"] = StaticText(_('SSID:'))
self["LabelQuality"] = StaticText(_('Link quality:'))
self["LabelSignal"] = StaticText(_('Signal strength:'))
self["LabelBitrate"] = StaticText(_('Bitrate:'))
self["LabelEnc"] = StaticText(_('Encryption:'))
self["BSSID"] = StaticText()
self["ESSID"] = StaticText()
self["quality"] = StaticText()
self["signal"] = StaticText()
self["bitrate"] = StaticText()
self["enc"] = StaticText()
self["IFtext"] = StaticText()
self["IF"] = StaticText()
self["Statustext"] = StaticText()
self["statuspic"] = MultiPixmap()
self["statuspic"].hide()
self["key_red"] = StaticText(_("Close"))
self.resetList()
self.updateStatusbar()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions", "ShortcutActions"],
{
"ok": self.exit,
"back": self.exit,
"red": self.exit,
}, -1)
self.timer = eTimer()
self.timer.timeout.get().append(self.resetList)
self.onShown.append(lambda: self.timer.start(8000))
self.onLayoutFinish.append(self.layoutFinished)
self.onClose.append(self.cleanup)
def cleanup(self):
iStatus.stopWlanConsole()
def layoutFinished(self):
self.setTitle(_("Wireless network state"))
def resetList(self):
iStatus.getDataForInterface(self.iface,self.getInfoCB)
def getInfoCB(self,data,status):
if data is not None:
if data is True:
if status is not None:
if status[self.iface]["essid"] == "off":
essid = _("No Connection")
else:
essid = status[self.iface]["essid"]
if status[self.iface]["accesspoint"] == "Not-Associated":
accesspoint = _("Not associated")
essid = _("No Connection")
else:
accesspoint = status[self.iface]["accesspoint"]
if self.has_key("BSSID"):
self["BSSID"].setText(accesspoint)
if self.has_key("ESSID"):
self["ESSID"].setText(essid)
quality = status[self.iface]["quality"]
if self.has_key("quality"):
self["quality"].setText(quality)
if status[self.iface]["bitrate"] == '0':
bitrate = _("Unsupported")
else:
bitrate = str(status[self.iface]["bitrate"]) + " Mb/s"
if self.has_key("bitrate"):
self["bitrate"].setText(bitrate)
signal = status[self.iface]["signal"]
if self.has_key("signal"):
self["signal"].setText(signal)
if status[self.iface]["encryption"] == "off":
if accesspoint == "Not-Associated":
encryption = _("Disabled")
else:
encryption = _("off or wpa2 on")
else:
encryption = _("Enabled")
if self.has_key("enc"):
self["enc"].setText(encryption)
self.updateStatusLink(status)
def exit(self):
self.timer.stop()
self.close(True)
def updateStatusbar(self):
wait_txt = _("Please wait...")
self["BSSID"].setText(wait_txt)
self["ESSID"].setText(wait_txt)
self["quality"].setText(wait_txt)
self["signal"].setText(wait_txt)
self["bitrate"].setText(wait_txt)
self["enc"].setText(wait_txt)
self["IFtext"].setText(_("Network:"))
self["IF"].setText(iNetwork.getFriendlyAdapterName(self.iface))
self["Statustext"].setText(_("Link:"))
def updateStatusLink(self,status):
if status is not None:
if status[self.iface]["essid"] == "off" or status[self.iface]["accesspoint"] == "Not-Associated" or status[self.iface]["accesspoint"] == False:
self["statuspic"].setPixmapNum(1)
else:
self["statuspic"].setPixmapNum(0)
self["statuspic"].show()
class WlanScan(Screen):
skin = """
<screen name="WlanScan" position="center,center" size="560,400" title="Select a wireless network" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="list" render="Listbox" position="5,40" size="550,300" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (0, 0), size = (550, 30), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the essid
MultiContentEntryText(pos = (0, 30), size = (175, 20), font=1, flags = RT_HALIGN_LEFT, text = 5), # index 5 is the interface
MultiContentEntryText(pos = (175, 30), size = (175, 20), font=1, flags = RT_HALIGN_LEFT, text = 4), # index 0 is the encryption
MultiContentEntryText(pos = (350, 0), size = (200, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 0 is the signal
MultiContentEntryText(pos = (350, 30), size = (200, 20), font=1, flags = RT_HALIGN_LEFT, text = 3), # index 0 is the maxrate
MultiContentEntryPixmapAlphaTest(pos = (0, 52), size = (550, 2), png = 6), # index 6 is the div pixmap
],
"fonts": [gFont("Regular", 28),gFont("Regular", 18)],
"itemHeight": 54
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,340" zPosition="1" size="560,2" />
<widget source="info" render="Label" position="0,350" size="560,50" font="Regular;24" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, iface):
Screen.__init__(self, session)
self.session = session
self.iface = iface
self.skin_path = plugin_path
self.oldInterfaceState = iNetwork.getAdapterAttribute(self.iface, "up")
self.APList = None
self.newAPList = None
self.WlanList = None
self.cleanList = None
self.oldlist = {}
self.listLength = None
self.divpng = LoadPixmap(path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
self.rescanTimer = eTimer()
self.rescanTimer.callback.append(self.rescanTimerFired)
self["info"] = StaticText()
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Connect"))
self["key_yellow"] = StaticText()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.select,
"back": self.cancel,
}, -1)
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.cancel,
"green": self.select,
})
iWlan.setInterface(self.iface)
self.w = iWlan.getInterface()
self.onLayoutFinish.append(self.layoutFinished)
self.getAccessPoints(refresh = False)
def layoutFinished(self):
self.setTitle(_("Select a wireless network"))
def select(self):
cur = self["list"].getCurrent()
if cur is not None:
iWlan.stopGetNetworkList()
self.rescanTimer.stop()
del self.rescanTimer
if cur[0] is not None:
self.close(cur[0])
else:
self.close(None)
else:
iWlan.stopGetNetworkList()
self.rescanTimer.stop()
del self.rescanTimer
self.close(None)
def cancel(self):
iWlan.stopGetNetworkList()
self.rescanTimer.stop()
del self.rescanTimer
self.close(None)
def rescanTimerFired(self):
self.rescanTimer.stop()
self.updateAPList()
def buildEntryComponent(self, essid, bssid, encrypted, iface, maxrate, signal):
encryption = encrypted and _("Yes") or _("No")
return essid, bssid, _("Signal: ") + str(signal), _("Max. bitrate: ") + str(maxrate), _("Encrypted: ") + encryption, _("Interface: ") + str(iface), self.divpng
def updateAPList(self):
newList = []
newList = self.getAccessPoints(refresh = True)
self.newAPList = []
tmpList = []
newListIndex = None
currentListEntry = None
currentListIndex = None
for ap in self.oldlist.keys():
data = self.oldlist[ap]['data']
if data is not None:
tmpList.append(data)
if len(tmpList):
for entry in tmpList:
self.newAPList.append(self.buildEntryComponent( entry[0], entry[1], entry[2], entry[3], entry[4], entry[5] ))
currentListEntry = self["list"].getCurrent()
if currentListEntry is not None:
idx = 0
for entry in self.newAPList:
if entry[0] == currentListEntry[0]:
newListIndex = idx
idx +=1
self['list'].setList(self.newAPList)
if newListIndex is not None:
self["list"].setIndex(newListIndex)
self["list"].updateList(self.newAPList)
self.listLength = len(self.newAPList)
self.buildWlanList()
self.setInfo()
def getAccessPoints(self, refresh = False):
self.APList = []
self.cleanList = []
aps = iWlan.getNetworkList()
if aps is not None:
print "[WirelessLan.py] got Accespoints!"
tmpList = []
compList = []
for ap in aps:
a = aps[ap]
if a['active']:
tmpList.append( (a['essid'], a['bssid']) )
compList.append( (a['essid'], a['bssid'], a['encrypted'], a['iface'], a['maxrate'], a['signal']) )
for entry in tmpList:
if entry[0] == "":
for compentry in compList:
if compentry[1] == entry[1]:
compList.remove(compentry)
for entry in compList:
self.cleanList.append( ( entry[0], entry[1], entry[2], entry[3], entry[4], entry[5] ) )
if not self.oldlist.has_key(entry[0]):
self.oldlist[entry[0]] = { 'data': entry }
else:
self.oldlist[entry[0]]['data'] = entry
for entry in self.cleanList:
self.APList.append(self.buildEntryComponent( entry[0], entry[1], entry[2], entry[3], entry[4], entry[5] ))
if refresh is False:
self['list'].setList(self.APList)
self.listLength = len(self.APList)
self.setInfo()
self.rescanTimer.start(5000)
return self.cleanList
def setInfo(self):
length = self.getLength()
if length == 0:
self["info"].setText(_("No wireless networks found! Searching..."))
else:
self["info"].setText(ngettext("%d wireless network found!", "%d wireless networks found!", length) % length)
def buildWlanList(self):
self.WlanList = []
for entry in self['list'].list:
self.WlanList.append( (entry[0], entry[0]) )
def getLength(self):
return self.listLength
def getWlanList(self):
if self.WlanList is None:
self.buildWlanList()
return self.WlanList
def WlanStatusScreenMain(session, iface):
session.open(WlanStatus, iface)
def callFunction(iface):
iWlan.setInterface(iface)
i = iWlan.getWirelessInterfaces()
if iface in i or iNetwork.isWirelessInterface(iface):
return WlanStatusScreenMain
return None
def configStrings(iface):
driver = iNetwork.detectWlanModule(iface)
ret = ""
if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -B -dd -D" + driver + " || true\n"
ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
return ret
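# An illustrative sketch (hypothetical interface name, not part of the
# plugin): configStrings() yields the extra interface stanza lines for a
# wireless device -- a wpa_supplicant pre-up line and a wpa_cli pre-down line.
def demoConfigStrings():
	return configStrings("wlan0")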
def Plugins(**kwargs):
return PluginDescriptor(name=_("Wireless LAN"), description=_("Connect to a wireless network"), where = PluginDescriptor.WHERE_NETWORKSETUP, needsRestart = False, fnc={"ifaceSupported": callFunction, "configStrings": configStrings, "WlanPluginEntry": lambda x: _("Wireless network configuration...")})
| gpl-2.0 |
slohse/ansible | lib/ansible/modules/system/mount.py | 13 | 21486 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, inc
# Written by Seth Vidal
# based on the mount modules from salt and puppet
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- This module controls active and configured mount points in C(/etc/fstab).
author:
- Ansible Core Team
- Seth Vidal
version_added: "0.6"
options:
path:
description:
- Path to the mount point (e.g. C(/mnt/files)).
- Before 2.3 this option was only usable as I(dest), I(destfile) and
I(name).
required: true
aliases: [ name ]
src:
description:
    - Device to be mounted on I(path). Required when I(state) is set to
      C(present) or C(mounted).
fstype:
description:
- Filesystem type. Required when I(state) is C(present) or C(mounted).
opts:
description:
- Mount options (see fstab(5), or vfstab(4) on Solaris).
dump:
description:
    - Dump (see fstab(5)). Note that if set to C(null) and I(state) is set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Has no effect on Solaris systems.
default: 0
passno:
description:
    - Passno (see fstab(5)). Note that if set to C(null) and I(state) is set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Deprecated on Solaris systems.
default: 0
state:
description:
- If C(mounted), the device will be actively mounted and appropriately
configured in I(fstab). If the mount point is not present, the mount
point will be created.
- If C(unmounted), the device will be unmounted without changing I(fstab).
- C(present) only specifies that the device is to be configured in
I(fstab) and does not trigger or require a mount.
- C(absent) specifies that the device mount's entry will be removed from
I(fstab) and will also unmount the device and remove the mount
point.
required: true
choices: [ absent, mounted, present, unmounted ]
fstab:
description:
- File to use instead of C(/etc/fstab). You shouldn't use this option
unless you really know what you are doing. This might be useful if
you need to configure mountpoints in a chroot environment. OpenBSD
does not allow specifying alternate fstab files with mount so do not
use this on OpenBSD with any state that operates on the live
filesystem.
default: /etc/fstab (/etc/vfstab on Solaris)
boot:
description:
- Determines if the filesystem should be mounted on boot.
- Only applies to Solaris systems.
type: bool
default: 'yes'
version_added: '2.2'
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
type: bool
default: "no"
version_added: '2.5'
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as
default, but I(name) still works as well.
'''
EXAMPLES = '''
# Before 2.3, option 'name' was used instead of 'path'
- name: Mount DVD read-only
mount:
path: /mnt/dvd
src: /dev/sr0
fstype: iso9660
opts: ro,noauto
state: present
- name: Mount up device by label
mount:
path: /srv/disk
src: LABEL=SOME_LABEL
fstype: ext4
state: present
- name: Mount up device by UUID
mount:
path: /home
src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077
fstype: xfs
opts: noatime
state: present
- name: Unmount a mounted volume
mount:
path: /tmp/mnt-pnt
state: unmounted
- name: Mount and bind a volume
mount:
path: /system/new_volume/boot
src: /boot
opts: bind
state: mounted
fstype: none
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.ismount import ismount
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def write_fstab(module, lines, path):
if module.params['backup']:
module.backup_local(path)
fs_w = open(path, 'w')
for l in lines:
fs_w.write(l)
fs_w.flush()
fs_w.close()
def _escape_fstab(v):
"""Escape invalid characters in fstab fields.
space (040)
ampersand (046)
backslash (134)
"""
if isinstance(v, int):
return v
else:
return(
v.
replace('\\', '\\134').
replace(' ', '\\040').
replace('&', '\\046'))
def set_mount(module, args):
"""Set/change a mount point location in fstab."""
to_write = []
exists = False
changed = False
escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
if get_platform() == 'SunOS':
new_line = (
'%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n')
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
# Check if we found the correct line
if (
ld['name'] != escaped_args['name'] or (
# In the case of swap, check the src instead
'src' in args and
ld['name'] == 'none' and
ld['fstype'] == 'swap' and
ld['src'] != args['src'])):
to_write.append(line)
continue
# If we got here we found a match - let's check if there is any
# difference
exists = True
args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno')
if get_platform() == 'SunOS':
args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts')
for t in args_to_check:
if ld[t] != escaped_args[t]:
ld[t] = escaped_args[t]
changed = True
if changed:
to_write.append(new_line % ld)
else:
to_write.append(line)
if not exists:
to_write.append(new_line % escaped_args)
changed = True
if changed and not module.check_mode:
write_fstab(module, to_write, args['fstab'])
return (args['name'], changed)
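def _demo_set_mount(module):
    # Illustrative sketch (hypothetical helper, not part of the upstream
    # module): the argument dict set_mount() expects on Linux; values taken
    # from the EXAMPLES section above.
    args = dict(src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077', name='/home',
                fstype='xfs', opts='noatime', dump='0', passno='0',
                fstab='/etc/fstab')
    return set_mount(module, args)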
def unset_mount(module, args):
"""Remove a mount point from fstab."""
to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
if (
ld['name'] != escaped_name or (
# In the case of swap, check the src instead
'src' in args and
ld['name'] == 'none' and
ld['fstype'] == 'swap' and
ld['src'] != args['src'])):
to_write.append(line)
continue
# If we got here we found a match - continue and mark changed
changed = True
if changed and not module.check_mode:
write_fstab(module, to_write, args['fstab'])
return (args['name'], changed)
def _set_fstab_args(fstab_file):
result = []
if (
fstab_file and
fstab_file != '/etc/fstab' and
get_platform().lower() != 'sunos'):
if get_platform().lower().endswith('bsd'):
result.append('-F')
else:
result.append('-T')
result.append(fstab_file)
return result
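# Example outputs (hypothetical path): _set_fstab_args('/tmp/fstab.test')
# returns ['-T', '/tmp/fstab.test'] on Linux, ['-F', '/tmp/fstab.test'] on
# the BSDs, and [] on SunOS or whenever the default /etc/fstab is in use.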
def mount(module, args):
"""Mount up a path or remount if needed."""
mount_bin = module.get_bin_path('mount', required=True)
name = args['name']
cmd = [mount_bin]
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [name]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out + err
def umount(module, path):
"""Unmount a path."""
umount_bin = module.get_bin_path('umount', required=True)
cmd = [umount_bin, path]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out + err
def remount(module, args):
"""Try to use 'remount' first and fallback to (u)mount if unsupported."""
mount_bin = module.get_bin_path('mount', required=True)
cmd = [mount_bin]
# Multiplatform remount opts
if get_platform().lower().endswith('bsd'):
cmd += ['-u']
else:
cmd += ['-o', 'remount']
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [args['name']]
out = err = ''
try:
if get_platform().lower().endswith('bsd'):
# Note: Forcing BSDs to do umount/mount due to BSD remount not
# working as expected (suspect bug in the BSD mount command)
# Interested contributor could rework this to use mount options on
# the CLI instead of relying on fstab
# https://github.com/ansible/ansible-modules-core/issues/5591
rc = 1
else:
rc, out, err = module.run_command(cmd)
    except Exception:
rc = 1
msg = ''
if rc != 0:
msg = out + err
rc, msg = umount(module, args['name'])
if rc == 0:
rc, msg = mount(module, args)
return rc, msg
# Note if we wanted to put this into module_utils we'd have to get permission
# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923
# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439
# and @abadger to relicense from GPLv3+
def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None):
"""Return whether the dest is bind mounted
:arg module: The AnsibleModule (used for helper functions)
:arg dest: The directory to be mounted under. This is the primary means
of identifying whether the destination is mounted.
:kwarg src: The source directory. If specified, this is used to help
ensure that we are detecting that the correct source is mounted there.
:kwarg fstype: The filesystem type. If specified this is also used to
help ensure that we are detecting the right mount.
:kwarg linux_mounts: Cached list of mounts for Linux.
:returns: True if the dest is mounted with src otherwise False.
"""
is_mounted = False
if get_platform() == 'Linux' and linux_mounts is not None:
if src is None:
# That's for unmounted/absent
if dest in linux_mounts:
is_mounted = True
else:
if dest in linux_mounts:
is_mounted = linux_mounts[dest]['src'] == src
else:
bin_path = module.get_bin_path('mount', required=True)
cmd = '%s -l' % bin_path
rc, out, err = module.run_command(cmd)
mounts = []
if len(out):
mounts = to_native(out).strip().split('\n')
for mnt in mounts:
arguments = mnt.split()
if (
(arguments[0] == src or src is None) and
arguments[2] == dest and
(arguments[4] == fstype or fstype is None)):
is_mounted = True
if is_mounted:
break
return is_mounted
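# For reference (illustrative output, not captured from a real host): the
# `mount -l` lines parsed above look like
#   /dev/sda1 on /boot type ext4 (rw,relatime)
# so fields 0, 2 and 4 carry src, dest and fstype respectively.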
def get_linux_mounts(module, mntinfo_file="/proc/self/mountinfo"):
"""Gather mount information"""
try:
f = open(mntinfo_file)
except IOError:
return
lines = map(str.strip, f.readlines())
try:
f.close()
except IOError:
module.fail_json(msg="Cannot close file %s" % mntinfo_file)
mntinfo = {}
for line in lines:
fields = line.split()
record = {
'id': int(fields[0]),
'parent_id': int(fields[1]),
'root': fields[3],
'dst': fields[4],
'opts': fields[5],
'fs': fields[-3],
'src': fields[-2]
}
mntinfo[record['id']] = record
mounts = {}
for mnt in mntinfo.values():
if mnt['parent_id'] != 1 and mnt['parent_id'] in mntinfo:
m = mntinfo[mnt['parent_id']]
if (
len(m['root']) > 1 and
mnt['root'].startswith("%s/" % m['root'])):
                # Omit the parent's root in the child's root
# == Example:
# 140 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw
# 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
# == Expected result:
# src=/tmp/aaa
mnt['root'] = mnt['root'][len(m['root']):]
# Prepend the parent's dst to the child's root
# == Example:
# 42 60 0:35 / /tmp rw - tmpfs tmpfs rw
# 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw
# == Expected result:
# src=/tmp/aaa
if m['dst'] != '/':
mnt['root'] = "%s%s" % (m['dst'], mnt['root'])
src = mnt['root']
else:
src = mnt['src']
record = {
'dst': mnt['dst'],
'src': src,
'opts': mnt['opts'],
'fs': mnt['fs']
}
mounts[mnt['dst']] = record
return mounts
def main():
module = AnsibleModule(
argument_spec=dict(
boot=dict(type='bool', default=True),
dump=dict(type='str'),
fstab=dict(type='str'),
fstype=dict(type='str'),
path=dict(type='path', required=True, aliases=['name']),
opts=dict(type='str'),
passno=dict(type='str'),
src=dict(type='path'),
backup=dict(default=False, type='bool'),
state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted']),
),
supports_check_mode=True,
required_if=(
['state', 'mounted', ['src', 'fstype']],
['state', 'present', ['src', 'fstype']],
),
)
# solaris args:
# name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
# linux args:
# name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
# Note: Do not modify module.params['fstab'] as we need to know if the user
# explicitly specified it in mount() and remount()
if get_platform().lower() == 'sunos':
args = dict(
name=module.params['path'],
opts='-',
passno='-',
fstab=module.params['fstab'],
boot='yes'
)
if args['fstab'] is None:
args['fstab'] = '/etc/vfstab'
else:
args = dict(
name=module.params['path'],
opts='defaults',
dump='0',
passno='0',
fstab=module.params['fstab']
)
if args['fstab'] is None:
args['fstab'] = '/etc/fstab'
# FreeBSD doesn't have any 'default' so set 'rw' instead
if get_platform() == 'FreeBSD':
args['opts'] = 'rw'
linux_mounts = []
# Cache all mounts here in order we have consistent results if we need to
# call is_bind_mounted() multiple times
if get_platform() == 'Linux':
linux_mounts = get_linux_mounts(module)
if linux_mounts is None:
args['warnings'] = (
'Cannot open file /proc/self/mountinfo. '
'Bind mounts might be misinterpreted.')
# Override defaults with user specified params
for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
if module.params[key] is not None:
args[key] = module.params[key]
# If fstab file does not exist, we first need to create it. This mainly
# happens when fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
open(args['fstab'], 'a').close()
# absent:
# Remove from fstab and unmounted.
# unmounted:
# Do not change fstab state, but unmount.
# present:
# Add to fstab, do not change mount state.
# mounted:
# Add to fstab if not there and make sure it is mounted. If it has
# changed in fstab then remount it.
state = module.params['state']
name = module.params['path']
changed = False
if state == 'absent':
name, changed = unset_mount(module, args)
if changed and not module.check_mode:
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
os.rmdir(name)
except (OSError, IOError) as e:
module.fail_json(msg="Error rmdir %s: %s" % (name, to_native(e)))
elif state == 'unmounted':
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
if not module.check_mode:
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
changed = True
elif state == 'mounted':
if not os.path.exists(name) and not module.check_mode:
try:
os.makedirs(name)
except (OSError, IOError) as e:
module.fail_json(
msg="Error making dir %s: %s" % (name, to_native(e)))
name, changed = set_mount(module, args)
res = 0
if (
ismount(name) or
is_bind_mounted(
module, linux_mounts, name, args['src'], args['fstype'])):
if changed and not module.check_mode:
res, msg = remount(module, args)
changed = True
else:
changed = True
if not module.check_mode:
res, msg = mount(module, args)
if res:
module.fail_json(msg="Error mounting %s: %s" % (name, msg))
elif state == 'present':
name, changed = set_mount(module, args)
else:
module.fail_json(msg='Unexpected position reached')
module.exit_json(changed=changed, **args)
if __name__ == '__main__':
main()
| gpl-3.0 |
jeffersonfparil/GTWAS_POOL_RADseq_SIM | simulateTRANS1.py | 2 | 1274 | #!/usr/bin/env python
#generate the numeric transcript data
import os, subprocess, sys, math
import numpy as np
from Bio import SeqIO
from Bio import Seq
from Bio.SeqRecord import SeqRecord
workDIR = sys.argv[1]
transcript = sys.argv[2]
nIND = int(sys.argv[3])
name = sys.argv[4]
#maybe there are better ways to define these:
readLength_MIN = 75
readLength_MAX = 200
readStart_ZIG = 0.10
os.chdir(workDIR)
#(1) reading the base or reference transcriptome:
with open(transcript) as handler:
rna_sequences = list(SeqIO.FastaIO.SimpleFastaParser(handler))
nTRANS = len(SeqIO.index(transcript,'fasta'))
#(2) simulating transcript absolute abundance: (do I need to connect these transcripts with the QTL at this stage???!!!! NOPE! The phenotype model we'll use here is simply additive! At first at least ;-P)
absolute_BASE = np.ceil(np.random.beta(5, 5, size=nTRANS)*100).astype('int')
absolute_GENO = np.ceil(np.random.chisquare(0.01, size=(nIND, nTRANS))*100).astype('int')
absolute_GENO = np.nan_to_num(absolute_GENO)
absolute_ABUNDANCE = absolute_BASE + absolute_GENO
np.savetxt("Simulated_Lolium_perenne_TRANSCRIPT_BASE.data", absolute_BASE, fmt="%i", delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_TRANSCRIPT_GENO.data", absolute_GENO, fmt="%i", delimiter="\t") | gpl-3.0 |
lxn2/mxnet | example/reinforcement-learning/ddpg/strategies.py | 15 | 1705 | import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
where Wt denotes the Wiener process.
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
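    # Note: evolve_state() is an Euler discretization of the OU SDE in the
    # class docstring with dt = 1, i.e.
    #   x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, I),
    # so each call advances the exploration noise by one step.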
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
ostree/plaso | plaso/parsers/firefox_cache.py | 3 | 15620 | # -*- coding: utf-8 -*-
"""Implements a parser for Firefox cache 1 and 2 files."""
import collections
import logging
import os
import construct
import pyparsing
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = 'Petter Bjelland ([email protected])'
class FirefoxCacheEvent(time_events.PosixTimeEvent):
"""Convenience class for a Firefox cache record event."""
DATA_TYPE = u'firefox:cache:record'
def __init__(self, timestamp, timestamp_type, cache_record_values):
"""Initializes the event object.
Args:
timestamp: The POSIX timestamp value.
timestamp_description: A description string for the timestamp value.
cache_record_values: A dictionary object containing the cache record
values.
"""
super(FirefoxCacheEvent, self).__init__(timestamp, timestamp_type)
for key, value in iter(cache_record_values.items()):
setattr(self, key, value)
class BaseFirefoxCacheParser(interface.SingleFileBaseParser):
"""Parses Firefox cache files."""
# pylint: disable=abstract-method
DESCRIPTION = u'Parser for Firefox Cache files.'
_MAXIMUM_URL_LENGTH = 65536
_REQUEST_METHODS = frozenset([
u'CONNECT', u'DELETE', u'GET', u'HEAD', u'OPTIONS', u'PATCH', u'POST',
u'PUT', u'TRACE'])
def _ParseHTTPHeaders(self, header_data, offset, display_name):
"""Extract relevant information from HTTP header.
Args:
header_data: binary string containing the HTTP header data.
offset: the offset of the cache record.
display_name: the display name.
"""
try:
http_header_start = header_data.index(b'request-method')
except ValueError:
logging.debug(u'No request method in header: "{0:s}"'.format(header_data))
return None, None
# HTTP request and response headers.
http_headers = header_data[http_header_start::]
header_parts = http_headers.split(b'\x00')
# TODO: check len(header_parts).
request_method = header_parts[1]
if request_method not in self._REQUEST_METHODS:
safe_headers = header_data.decode(u'ascii', errors=u'replace')
logging.debug((
u'[{0:s}] {1:s}:{2:d}: Unknown HTTP method \'{3:s}\'. Response '
u'headers: \'{4:s}\'').format(
self.NAME, display_name, offset, request_method, safe_headers))
try:
response_head_start = http_headers.index(b'response-head')
except ValueError:
logging.debug(u'No response head in header: "{0:s}"'.format(header_data))
return request_method, None
# HTTP response headers.
response_head = http_headers[response_head_start::]
response_head_parts = response_head.split(b'\x00')
# Response code, followed by other response header key-value pairs,
# separated by newline.
# TODO: check len(response_head_parts).
response_head_text = response_head_parts[1]
response_head_text_parts = response_head_text.split(b'\r\n')
# The first line contains response code.
# TODO: check len(response_head_text_parts).
response_code = response_head_text_parts[0]
if not response_code.startswith(b'HTTP'):
safe_headers = header_data.decode(u'ascii', errors=u'replace')
logging.debug((
u'[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. '
u'Response headers: \'{3:s}\'.').format(
self.NAME, display_name, offset, safe_headers))
return request_method, response_code
def _ValidateCacheRecordHeader(self, cache_record_header):
"""Determines whether the cache record header is valid.
Args:
cache_record_header: the cache record header (instance of
construct.Struct).
Returns:
A boolean value indicating the cache record header is valid.
"""
return (
cache_record_header.request_size > 0 and
cache_record_header.request_size < self._MAXIMUM_URL_LENGTH and
cache_record_header.major == 1 and
cache_record_header.last_fetched > 0 and
cache_record_header.fetch_count > 0)
class FirefoxCacheParser(BaseFirefoxCacheParser):
"""Parses Firefox 32 or later cache files."""
NAME = u'firefox_cache'
CACHE_VERSION = 2
# Cache 2 filenames are SHA-1 hex digests.
# TODO: change into regexp.
_CACHE_FILENAME = pyparsing.Word(pyparsing.hexnums, exact=40)
# The last four bytes of a file gives the size of the cached content.
_LENGTH = construct.UBInt32(u'length')
_CACHE_RECORD_HEADER_STRUCT = construct.Struct(
u'record_header',
construct.UBInt32(u'major'),
construct.UBInt32(u'fetch_count'),
construct.UBInt32(u'last_fetched'),
construct.UBInt32(u'last_modified'),
construct.UBInt32(u'frequency'),
construct.UBInt32(u'expire_time'),
construct.UBInt32(u'request_size'))
_CHUNK_SIZE = 512 * 1024
def _GetStartOfMetadata(self, file_object):
"""Determine the byte offset of the cache record metadata in cache file.
This method is inspired by the work of James Habben:
https://github.com/JamesHabben/FirefoxCache2
Args:
file_object: The file containing the cache record.
"""
file_object.seek(-4, os.SEEK_END)
try:
length = self._LENGTH.parse_stream(file_object)
except (IOError, construct.FieldError):
raise IOError(u'Could not find metadata offset in Firefox cache file.')
# Firefox splits the content into chunks.
hash_chunks, remainder = divmod(length, self._CHUNK_SIZE)
if remainder != 0:
hash_chunks += 1
# Each chunk in the cached record is padded with two bytes.
return length + (hash_chunks * 2)
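  # Worked example of the arithmetic above (illustrative numbers): a cached
  # body of 600 KiB (614400 bytes) spans ceil(614400 / 524288) = 2 chunks,
  # so the metadata begins at 614400 + 2 * 2 = 614404 bytes into the file.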
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Firefox cache file-like object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
# TODO: determine if the minimum file size is really 4 bytes.
if file_object.get_size() < 4:
raise errors.UnableToParseFile(u'Not a Firefox cache2 file.')
file_entry = parser_mediator.GetFileEntry()
try:
# Match cache2 filename (SHA-1 hex of cache record key).
self._CACHE_FILENAME.parseString(file_entry.name)
except pyparsing.ParseException:
raise errors.UnableToParseFile(u'Not a Firefox cache2 file.')
if file_object.get_size() == 0:
raise errors.UnableToParseFile(u'Empty file.')
meta_start = self._GetStartOfMetadata(file_object)
file_object.seek(meta_start, os.SEEK_SET)
# Skip the first 4 bytes of metadata which contains a hash value of
# the cached content.
file_object.seek(4, os.SEEK_CUR)
try:
cache_record_header = self._CACHE_RECORD_HEADER_STRUCT.parse_stream(
file_object)
except (IOError, construct.FieldError):
raise errors.UnableToParseFile(u'Not a Firefox cache2 file.')
if not self._ValidateCacheRecordHeader(cache_record_header):
raise errors.UnableToParseFile(u'Not a valid Firefox cache2 record.')
url = file_object.read(cache_record_header.request_size)
header_data = file_object.read()
display_name = parser_mediator.GetDisplayName()
request_method, response_code = self._ParseHTTPHeaders(
header_data, meta_start, display_name)
cache_record_values = {
u'fetch_count': cache_record_header.fetch_count,
u'frequency': cache_record_header.frequency,
u'major': cache_record_header.major,
u'request_method': request_method,
u'request_size': cache_record_header.request_size,
u'response_code': response_code,
u'version': self.CACHE_VERSION,
u'url': url}
event_object = FirefoxCacheEvent(
cache_record_header.last_fetched,
eventdata.EventTimestamp.LAST_VISITED_TIME, cache_record_values)
parser_mediator.ProduceEvent(event_object)
if cache_record_header.last_modified:
event_object = FirefoxCacheEvent(
cache_record_header.last_modified,
eventdata.EventTimestamp.WRITTEN_TIME, cache_record_values)
parser_mediator.ProduceEvent(event_object)
if cache_record_header.expire_time:
event_object = FirefoxCacheEvent(
cache_record_header.expire_time,
eventdata.EventTimestamp.EXPIRATION_TIME, cache_record_values)
parser_mediator.ProduceEvent(event_object)
class FirefoxOldCacheParser(BaseFirefoxCacheParser):
"""Parses Firefox 31 or earlier cache files."""
NAME = u'firefox_old_cache'
CACHE_VERSION = 1
# Initial size of Firefox 4 and later cache files.
_INITIAL_CACHE_FILE_SIZE = 4 * 1024 * 1024
# Smallest possible block size in Firefox cache files.
  _MINIMUM_BLOCK_SIZE = 256
_CACHE_RECORD_HEADER_STRUCT = construct.Struct(
u'record_header',
construct.UBInt16(u'major'),
construct.UBInt16(u'minor'),
construct.UBInt32(u'location'),
construct.UBInt32(u'fetch_count'),
construct.UBInt32(u'last_fetched'),
construct.UBInt32(u'last_modified'),
construct.UBInt32(u'expire_time'),
construct.UBInt32(u'data_size'),
construct.UBInt32(u'request_size'),
construct.UBInt32(u'info_size'))
_CACHE_RECORD_HEADER_SIZE = _CACHE_RECORD_HEADER_STRUCT.sizeof()
# TODO: change into regexp.
_CACHE_FILENAME = (
pyparsing.Word(pyparsing.hexnums, exact=5) +
pyparsing.Word(u'm', exact=1) +
pyparsing.Word(pyparsing.nums, exact=2))
FIREFOX_CACHE_CONFIG = collections.namedtuple(
u'firefox_cache_config',
u'block_size first_record_offset')
def _GetFirefoxConfig(self, file_object, display_name):
"""Determine cache file block size.
Args:
file_object: A file-like object.
display_name: the display name.
Raises:
UnableToParseFile: if no valid cache record could be found.
"""
# There ought to be a valid record within the first 4 MiB. We use this
# limit to prevent reading large invalid files.
to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE)
while file_object.get_offset() < to_read:
offset = file_object.get_offset()
try:
# We have not yet determined the block size, so we use the smallest
# possible size.
fetched, _, _ = self._NextRecord(
            file_object, display_name, self._MINIMUM_BLOCK_SIZE)
record_size = (
self._CACHE_RECORD_HEADER_SIZE + fetched.request_size +
fetched.info_size)
if record_size >= 4096:
# _CACHE_003_
block_size = 4096
elif record_size >= 1024:
# _CACHE_002_
block_size = 1024
else:
# _CACHE_001_
block_size = 256
return self.FIREFOX_CACHE_CONFIG(block_size, offset)
except IOError:
logging.debug(u'[{0:s}] {1:s}:{2:d}: Invalid record.'.format(
self.NAME, display_name, offset))
raise errors.UnableToParseFile(
u'Could not find a valid cache record. Not a Firefox cache file.')
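  # Illustration of the block-size heuristic above: a record whose header,
  # request and info sections total e.g. 1500 bytes falls in the 1024..4095
  # range, so it must come from a _CACHE_002_ file (1 KiB blocks).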
def _NextRecord(self, file_object, display_name, block_size):
"""Provide the next cache record.
Args:
file_object: A file-like object.
display_name: the display name.
block_size: the block size.
Returns:
A tuple containing the fetched, modified and expire event objects
(instances of EventObject) or None.
"""
offset = file_object.get_offset()
try:
cache_record_header = self._CACHE_RECORD_HEADER_STRUCT.parse_stream(
file_object)
except (IOError, construct.FieldError):
raise IOError(u'Unable to parse stream.')
if not self._ValidateCacheRecordHeader(cache_record_header):
# Move reader to next candidate block.
file_offset = block_size - self._CACHE_RECORD_HEADER_SIZE
file_object.seek(file_offset, os.SEEK_CUR)
raise IOError(u'Not a valid Firefox cache record.')
# The last byte in a request is null.
url = file_object.read(cache_record_header.request_size)[:-1]
# HTTP response header, even elements are keys, odd elements values.
header_data = file_object.read(cache_record_header.info_size)
request_method, response_code = self._ParseHTTPHeaders(
header_data, offset, display_name)
# A request can span multiple blocks, so we use modulo.
file_offset = file_object.get_offset() - offset
_, remainder = divmod(file_offset, block_size)
# Move reader to next candidate block. Include the null-byte skipped above.
file_object.seek(block_size - remainder, os.SEEK_CUR)
cache_record_values = {
u'data_size': cache_record_header.data_size,
u'fetch_count': cache_record_header.fetch_count,
u'info_size': cache_record_header.info_size,
u'location': cache_record_header.location,
u'major': cache_record_header.major,
u'minor': cache_record_header.minor,
u'request_method': request_method,
u'request_size': cache_record_header.request_size,
u'response_code': response_code,
u'version': self.CACHE_VERSION,
u'url': url}
fetched = FirefoxCacheEvent(
cache_record_header.last_fetched,
eventdata.EventTimestamp.LAST_VISITED_TIME, cache_record_values)
if cache_record_header.last_modified:
modified = FirefoxCacheEvent(
cache_record_header.last_modified,
eventdata.EventTimestamp.WRITTEN_TIME, cache_record_values)
else:
modified = None
if cache_record_header.expire_time:
expire = FirefoxCacheEvent(
cache_record_header.expire_time,
eventdata.EventTimestamp.EXPIRATION_TIME, cache_record_values)
else:
expire = None
return fetched, modified, expire
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Firefox cache file-like object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_entry = parser_mediator.GetFileEntry()
display_name = parser_mediator.GetDisplayName()
try:
# Match cache filename. Five hex characters + 'm' + two digit
# number, e.g. '01ABCm02'. 'm' is for metadata. Cache files with 'd'
# instead contain data only.
self._CACHE_FILENAME.parseString(file_entry.name)
except pyparsing.ParseException:
if not file_entry.name.startswith(u'_CACHE_00'):
raise errors.UnableToParseFile(u'Not a Firefox cache1 file.')
firefox_config = self._GetFirefoxConfig(file_object, display_name)
file_object.seek(firefox_config.first_record_offset)
while file_object.get_offset() < file_object.get_size():
try:
fetched, modified, expire = self._NextRecord(
file_object, display_name, firefox_config.block_size)
parser_mediator.ProduceEvent(fetched)
if modified:
parser_mediator.ProduceEvent(modified)
if expire:
parser_mediator.ProduceEvent(expire)
except IOError:
        file_offset = file_object.get_offset() - self._MINIMUM_BLOCK_SIZE
logging.debug((
u'[{0:s}] Invalid cache record in file: {1:s} at offset: '
u'{2:d}.').format(self.NAME, display_name, file_offset))
manager.ParsersManager.RegisterParsers([
FirefoxCacheParser, FirefoxOldCacheParser])
| apache-2.0 |
michael-pacheco/dota2-predictor | tools/metadata.py | 2 | 1505 | """ Module responsible for parsing metadata """
import json
import logging
METADATA_JSON_PATH = 'metadata.json'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _load_metadata():
""" Loads the metadata JSON
Returns:
JSON containing the metadata
"""
global METADATA_JSON_PATH
with open(METADATA_JSON_PATH, 'r') as metadata_file:
metadata_json = json.load(metadata_file)
return metadata_json
def get_last_patch():
""" Fetches the last patch info
Returns:
dictionary containing info of the last patch
"""
metadata_json = _load_metadata()
return metadata_json['patches'][0]
def get_patch(patch_name):
""" Fetches the patch info named patch_name
Args:
patch_name: patch identifier
Returns:
dictionary containing info of the wanted patch
"""
global logger
metadata_json = _load_metadata()
for entry in metadata_json['patches']:
if entry['patch_name'] == patch_name:
logger.info('Found patch %s', patch_name)
return entry
logger.error('Could not find patch %s', patch_name)
def get_hero_dict():
""" Returns a dictionary where the key is the hero ID and the value is the hero's name
Returns:
dictionary of (hero_ID, name)
"""
metadata_json = _load_metadata()
hero_dict = dict()
for entry in metadata_json['heroes']:
hero_dict[entry['id']] = entry['name']
return hero_dict
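# Minimal usage sketch (assumes metadata.json sits in the working directory;
# the example values are illustrative, only the keys are read by this module):
#
# patch = get_last_patch() # e.g. {'patch_name': '7.06', ...}
# heroes = get_hero_dict() # e.g. {1: 'Anti-Mage', ...}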
| mit |
shusenl/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
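# A sketch of the Youngs & Cramer combination verified above, in the textbook
# pairwise form (T = sum of samples, S = sum of squared deviations, n = count;
# this is not a quote of the sklearn implementation):
#   T = T1 + T2
#   n = n1 + n2
#   S = S1 + S2 + n1 / (n2 * n) * ((n2 / n1) * T1 - T2) ** 2
# from which mean = T / n and var = S / n (ddof=0, matching A.var above).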
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
JianfengXu/crosswalk-test-suite | cordova/cordova-webapp-android-tests/webapp/comm.py | 3 | 16228 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Cici,Li<[email protected]>
# Lin, Wanming <[email protected]>
import os
import sys
import commands
import shutil
import glob
import fnmatch
import re
import json
import stat  # needed by overwriteCopy() for stat.S_IMODE
reload(sys)
sys.setdefaultencoding("utf-8")
script_path = os.path.realpath(__file__)
const_path = os.path.dirname(script_path)
tool_path = const_path + "/../tools/"
plugin_tool = const_path + "/../tools/cordova-plugin-crosswalk-webview/"
testapp_path = "/tmp/cordova-sampleapp/"
def setUp():
global ARCH, MODE, CORDOVA_VERSION, device, CROSSWALK_VERSION
device = os.environ.get('DEVICE_ID')
if not device:
print (" get env error\n")
sys.exit(1)
f_arch = open(const_path + "/../arch.txt", 'r')
arch_tmp = f_arch.read()
if arch_tmp.strip("\n\t") == "arm":
ARCH = "arm"
elif arch_tmp.strip("\n\t") == "x86":
ARCH = "x86"
else:
print (
" get arch error, the content of arch.txt should be 'arm' or 'x86'\n")
sys.exit(1)
f_arch.close()
f_mode = open(const_path + "/../mode.txt", 'r')
mode_tmp = f_mode.read()
if mode_tmp.strip("\n\t") == "shared":
MODE = "shared"
elif mode_tmp.strip("\n\t") == "embedded":
MODE = "embedded"
else:
print (
" get mode error, the content of mode.txt should be 'shared' or 'embedded'\n")
sys.exit(1)
f_mode.close()
f_version = open(const_path + "/../cordova-version", 'r')
if f_version.read().strip("\n\t") != "3.6":
CORDOVA_VERSION = "4.x"
else:
CORDOVA_VERSION = "3.6"
f_version.close()
if CORDOVA_VERSION == "4.x":
with open(const_path + "/../VERSION", "rt") as pkg_version_file:
pkg_version_raw = pkg_version_file.read()
pkg_version_file.close()
pkg_version_json = json.loads(pkg_version_raw)
CROSSWALK_VERSION = pkg_version_json["main-version"]
def create(appname, pkgname, mode, sourcecodepath, replace_index_list, self):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Create project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova create %s %s %s" % (appname, pkgname, appname)
else:
if mode == "shared":
cmd = "cordova/bin/create %s %s %s --xwalk-shared-library" % (
appname, pkgname, appname)
else:
cmd = "cordova/bin/create %s %s %s" % (appname, pkgname, appname)
createstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, createstatus[0])
print "\nGenerate project %s ----------------> OK\n" % appname
result = commands.getstatusoutput("ls")
self.assertIn(appname, result[1])
project_root = os.path.join(tool_path, appname)
if CORDOVA_VERSION == "4.x":
os.chdir(project_root)
if not replace_key(os.path.join(project_root, 'config.xml'),
'<widget android-activityName="%s"' % appname, '<widget'):
print "replace key '<widget' failed."
return False
if not replace_key(os.path.join(project_root, 'config.xml'),
' <allow-navigation href="*" />\n</widget>', '</widget>'):
print "replace key '</widget>' failed."
return False
print "Add android platforms to this project --------------> START"
cordova_platform_cmd = "cordova platform add android"
platformstatus = commands.getstatusoutput(cordova_platform_cmd)
self.assertEquals(0, platformstatus[0])
print "Install Crosswalk WebView Plugin --------------> START"
plugin_install_webview = "cordova plugin add %s --variable XWALK_VERSION=\"%s\"" % (plugin_tool, CROSSWALK_VERSION)
if mode == "shared":
plugin_install_cmd = plugin_install_webview + " --variable XWALK_MODE=\"shared\""
else:
plugin_install_cmd = plugin_install_webview + " --variable XWALK_MODE=\"embedded\""
pluginstatus = commands.getstatusoutput(plugin_install_cmd)
self.assertEquals(0, pluginstatus[0])
if replace_index_list is not None and len(replace_index_list) >= 2:
index_file_path = os.path.join(project_root, "www", "index.html")
key = replace_index_list[0]
content = replace_index_list[1]
if not replace_key(index_file_path, content, key):
print "replace key: " + key + " failed."
return False
if sourcecodepath is not None:
do_remove(glob.glob(os.path.join(project_root, "www")))
do_copy(sourcecodepath, os.path.join(tool_path, appname, "www"))
else:
if replace_index_list is not None and len(replace_index_list) >= 2:
index_file_path = os.path.join(
project_root,
"assets",
"www",
"index.html")
key = replace_index_list[0]
content = replace_index_list[1]
if not replace_key(index_file_path, content, key):
print "replace key: " + key + " failed."
return False
if sourcecodepath is not None:
do_remove(glob.glob(os.path.join(project_root, "assets", "www")))
do_copy(
sourcecodepath,
os.path.join(
tool_path,
appname,
"assets",
"www"))
def buildGoogleApp(appname, sourcecodepath, self):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Build project %s ----------------> START" % appname
if sourcecodepath is None:
print "sourcecodepath can't be none"
return False
if checkContains(appname, "CIRC"):
cordova_app = os.path.join(tool_path, "circ")
create_cmd = "cca create " + appname + " --link-to circ/package"
elif checkContains(appname, "EH"):
cordova_app = os.path.join(tool_path, "workshop-cca-eh")
create_cmd = "cca create " + appname + " --link-to workshop-cca-eh/workshop/step4"
if os.path.exists(cordova_app):
do_remove(glob.glob(cordova_app))
if not do_copy(sourcecodepath, cordova_app):
return False
print create_cmd
buildstatus = commands.getstatusoutput(create_cmd)
self.assertEquals(0, buildstatus[0])
os.chdir(os.path.join(tool_path, appname))
print "Add android platforms to this project --------------> START"
add_android_cmd = "cca platform add android"
addstatus = commands.getstatusoutput(add_android_cmd)
self.assertEquals(0, addstatus[0])
print "uninstall webview default plugin from this project --------------> START"
plugin_uninstall_webview = "cordova plugin remove cordova-plugin-crosswalk-webview"
uninstallStatus = commands.getstatusoutput(plugin_uninstall_webview)
self.assertEquals(0, uninstallStatus[0])
print "Install Crosswalk WebView Plugin --------------> START"
plugin_install_webview = "cordova plugin add %s --variable XWALK_VERSION=\"%s\"" % (plugin_tool, CROSSWALK_VERSION)
if MODE == "shared":
plugin_install_cmd = plugin_install_webview + " --variable XWALK_MODE=\"shared\""
else:
plugin_install_cmd = plugin_install_webview + " --variable XWALK_MODE=\"embedded\""
pluginstatus = commands.getstatusoutput(plugin_install_cmd)
self.assertEquals(0, pluginstatus[0])
build_cmd = "cca build android"
buildstatus = commands.getstatusoutput(build_cmd)
self.assertEquals(0, buildstatus[0])
os.chdir(
os.path.join(
tool_path,
appname,
"platforms",
"android",
"build",
"outputs",
"apk"))
result = commands.getstatusoutput("ls")
self.assertIn(".apk", result[1])
print result[1]
if "android" in result[1]:
self.assertIn("android", result[1])
else:
self.assertIn(appname, result[1])
def build(appname, isDebug, self):
os.chdir(os.path.join(tool_path, appname))
print "Build project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova build android"
if isDebug == True:
print "build debug app"
cmd = "cordova build android --debug"
else:
cmd = "./cordova/build"
if isDebug == True:
print "build debug app"
cmd = "./cordova/build --debug"
print cmd
buildstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, buildstatus[0])
print "\nBuild project %s ----------------> OK\n" % appname
if CORDOVA_VERSION == "4.x":
os.chdir(
os.path.join(
tool_path,
appname,
"platforms",
"android",
"build",
"outputs",
"apk"))
else:
os.chdir(os.path.join(tool_path, appname, "bin"))
result = commands.getstatusoutput("ls")
self.assertIn(".apk", result[1])
print result[1]
if "android" in result[1]:
self.assertIn("android", result[1])
else:
self.assertIn(appname, result[1])
def run(appname, self):
os.chdir(os.path.join(tool_path, appname))
print "Run project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova run android"
else:
cmd = "./cordova/run"
print cmd
runstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, runstatus[0])
self.assertIn("LAUNCH SUCCESS", runstatus[1])
print "\nRun project %s ----------------> OK\n" % appname
def app_install(appname, pkgname, self):
print "Install APK ----------------> START"
os.chdir(testapp_path)
apk_file = commands.getstatusoutput("ls | grep %s" % appname)[1]
if apk_file == "":
print "Error: No app: %s found in directory: %s" % (appname, testapp_path)
cmd_inst = "adb -s " + device + " install -r " + apk_file
inststatus = commands.getstatusoutput(cmd_inst)
self.assertEquals(0, inststatus[0])
print "Install APK ----------------> OK"
self.assertTrue(check_app_installed(pkgname, self))
def checkContains(origin_str=None, key_str=None):
if origin_str.upper().find(key_str.upper()) >= 0:
return True
return False
def check_app_installed(pkgname, self):
print "Check if app is installed ----------------> START"
cmd_find = "adb -s " + device + \
" shell pm list packages |grep %s" % pkgname
pmstatus = commands.getstatusoutput(cmd_find)
if pmstatus[0] == 0:
print "App is installed."
return True
else:
print "App is uninstalled."
return False
def app_launch(appname, pkgname, self):
print "Launch APK ----------------> START"
cmd = "adb -s " + device + " shell am start -n %s/.%s" % (pkgname, appname)
launchstatus = commands.getstatusoutput(cmd)
self.assertNotIn("error", launchstatus[1].lower())
print "Launch APK ----------------> OK"
# Find whether the app have launched
def check_app_launched(pkgname, self):
cmd_acti = "adb -s " + device + " shell ps | grep %s" % pkgname
launched = commands.getstatusoutput(cmd_acti)
if launched[0] != 0:
print "App haven't launched."
return False
else:
print "App is have launched."
return True
def app_stop(pkgname, self):
print "Stop APK ----------------> START"
cmd = "adb -s " + device + " shell am force-stop %s" % pkgname
stopstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, stopstatus[0])
print "Stop APK ----------------> OK"
def app_uninstall(pkgname, self):
print "Uninstall APK ----------------> START"
cmd_uninst = "adb -s " + device + " uninstall %s" % (pkgname)
unistatus = commands.getstatusoutput(cmd_uninst)
self.assertEquals(0, unistatus[0])
print "Uninstall APK ----------------> OK"
def replace_key(file_path, content, key):
print "Replace value ----------------> START"
f = open(file_path, "r")
f_content = f.read()
f.close()
pos = f_content.find(key)
if pos != -1:
f_content = f_content.replace(key, content)
f = open(file_path, "w")
f.write(f_content)
f.close()
else:
print "Fail to replace: %s with: %s in file: %s" % (content, key, file_path)
return False
print "Replace value ----------------> OK"
return True
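# Note on replace_key(): str.find() above only verifies that the key exists;
# str.replace() then swaps *every* occurrence of the key, not just the first.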
def do_remove(target_file_list=None):
for i_file in target_file_list:
print "Removing %s" % i_file
try:
if os.path.isdir(i_file):
shutil.rmtree(i_file)
else:
os.remove(i_file)
except Exception as e:
print "Fail to remove file %s: %s" % (i_file, e)
return False
return True
def do_copy(src_item=None, dest_item=None):
print "Copying %s to %s" % (src_item, dest_item)
try:
if os.path.isdir(src_item):
overwriteCopy(src_item, dest_item, symlinks=True)
else:
if not os.path.exists(os.path.dirname(dest_item)):
print "Create non-existent dir: %s" % os.path.dirname(dest_item)
os.makedirs(os.path.dirname(dest_item))
shutil.copy2(src_item, dest_item)
except Exception as e:
print "Fail to copy file %s: %s" % (src_item, e)
return False
return True
def overwriteCopy(src, dest, symlinks=False, ignore=None):
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copystat(src, dest)
sub_list = os.listdir(src)
if ignore:
excl = ignore(src, sub_list)
sub_list = [x for x in sub_list if x not in excl]
for i_sub in sub_list:
s_path = os.path.join(src, i_sub)
d_path = os.path.join(dest, i_sub)
if symlinks and os.path.islink(s_path):
if os.path.lexists(d_path):
os.remove(d_path)
os.symlink(os.readlink(s_path), d_path)
try:
s_path_s = os.lstat(s_path)
s_path_mode = stat.S_IMODE(s_path_s.st_mode)
os.lchmod(d_path, s_path_mode)
except Exception:
pass
elif os.path.isdir(s_path):
overwriteCopy(s_path, d_path, symlinks, ignore)
else:
shutil.copy2(s_path, d_path)
| bsd-3-clause |
ilo10/deeppy | deeppy/dataset/cifar10.py | 17 | 2196 | import os
import pickle
import numpy as np
from ..base import float_, int_
from .dataset import Dataset
_URLS = [
'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
]
_SHA1S = [
'874905e36347c8536514d0a26261acf3bff89bc7',
]
class CIFAR10(Dataset):
'''
The CIFAR-10 dataset [1]
http://www.cs.toronto.edu/~kriz/cifar.html
References:
[1]: Learning Multiple Layers of Features from Tiny Images, Alex
Krizhevsky, 2009.
'''
def __init__(self, data_root='datasets'):
self.name = 'cifar10'
self.n_classes = 10
self.n_test = 10000
self.n_train = 50000
self.img_shape = (3, 32, 32)
self.data_dir = os.path.join(data_root, self.name)
self._install()
self._data = self._load()
def data(self, flat=False, dp_dtypes=False):
x_train, y_train, x_test, y_test = self._data
if dp_dtypes:
x_train = x_train.astype(float_)
y_train = y_train.astype(int_)
x_test = x_test.astype(float_)
y_test = y_test.astype(int_)
if flat:
x_train = np.reshape(x_train, (x_train.shape[0], -1))
x_test = np.reshape(x_test, (x_test.shape[0], -1))
return x_train, y_train, x_test, y_test
def _install(self):
self._download(_URLS, _SHA1S)
self._unpack()
def _load(self):
dirpath = os.path.join(self.data_dir, 'cifar-10-batches-py')
filenames = ['data_batch_1', 'data_batch_2', 'data_batch_3',
'data_batch_4', 'data_batch_5', 'test_batch']
x = []
y = []
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as f:
dic = pickle.load(f)
x.append(dic['data'])
y.append(dic['labels'])
x_train = np.vstack(x[:5])
y_train = np.hstack(y[:5])
x_test = np.array(x[5])
y_test = np.array(y[5])
x_train = np.reshape(x_train, (self.n_train,) + self.img_shape)
x_test = np.reshape(x_test, (self.n_test,) + self.img_shape)
return x_train, y_train, x_test, y_test
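# Example usage (a sketch): the constructor downloads and unpacks the CIFAR-10
# archive on first use, so the first call needs network access.
#
#     dataset = CIFAR10(data_root='datasets')
#     x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
#     # x_train.shape == (50000, 3072); without flat=True it is (50000, 3, 32, 32)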
| mit |
Graghav/surabi | ADMIN/venv/lib/python2.7/site-packages/eve/tests/default_values.py | 23 | 7872 | import unittest
from eve.defaults import build_defaults, resolve_default_values
class TestBuildDefaults(unittest.TestCase):
def test_schemaless_dict(self):
schema = {
"address": {
'type': 'dict'
}
}
self.assertEqual({}, build_defaults(schema))
def test_simple(self):
schema = {
"name": {'type': 'string'},
"email": {'type': 'string', 'default': "[email protected]"}
}
res = build_defaults(schema)
self.assertEqual({'email': '[email protected]'}, res)
def test_nested_one_level(self):
schema = {
"address": {
'type': 'dict',
'schema': {
'street': {'type': 'string'},
'country': {'type': 'string', 'default': 'wonderland'}
}
}
}
res = build_defaults(schema)
self.assertEqual({'address': {'country': 'wonderland'}}, res)
def test_empty_defaults_multiple_level(self):
schema = {
'subscription': {
'type': 'dict',
'schema': {
'type': {'type': 'string'},
'when': {
'type': 'dict',
'schema': {
'timestamp': {'type': 'int'},
'repr': {'type': 'string'}
}
}
}
}
}
res = build_defaults(schema)
self.assertEqual({}, res)
def test_nested_multilevel(self):
schema = {
"subscription": {
'type': 'dict',
'schema': {
'type': {'type': 'string'},
'when': {
'type': 'dict',
'schema': {
'timestamp': {'type': 'int', 'default': 0},
'repr': {'type': 'string', 'default': '0'}
}
}
}
}
}
res = build_defaults(schema)
self.assertEqual(
{'subscription': {'when': {'timestamp': 0, 'repr': '0'}}},
res)
def test_default_in_list_schema(self):
schema = {
"one": {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'title': {
'type': 'string',
'default': 'M.'
}
}
}
},
"two": {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {'type': 'string'}
}
}
}
}
res = build_defaults(schema)
self.assertEqual({"one": [{'title': 'M.'}]}, res)
def test_default_in_list_without_schema(self):
schema = {
"one": {
'type': 'list',
'schema': {
'type': 'string',
'default': 'item'
}
}
}
res = build_defaults(schema)
self.assertEqual({"one": ['item']}, res)
def test_lists_of_lists_with_default(self):
schema = {
'twisting': {
'type': 'list', # list of groups
'required': True,
'schema': {
'type': 'list', # list of signals (in one group)
'schema': {
'type': 'string',
'default': 'listoflist',
}
}
}
}
res = build_defaults(schema)
self.assertEqual({'twisting': [['listoflist']]}, res)
def test_lists_of_lists_without_default(self):
schema = {
'twisting': {
'type': 'list', # list of groups
'required': True,
'schema': {
'type': 'list', # list of signals (in one group)
'schema': {
'type': 'ObjectId',
'required': True
}
}
}
}
res = build_defaults(schema)
self.assertEqual({}, res)
def test_lists_of_lists_with_a_dict(self):
schema = {
'twisting': {
'type': 'list', # list of groups
'required': True,
'schema': {
'type': 'list', # list of signals (in one group)
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'default': 'me'
}
},
}
}
}
}
res = build_defaults(schema)
self.assertEqual({'twisting': [[{'name': 'me'}]]}, res)
class TestResolveDefaultValues(unittest.TestCase):
def test_one_level(self):
document = {'name': 'john'}
defaults = {'email': 'noemail'}
resolve_default_values(document, defaults)
self.assertEqual({'name': 'john', 'email': 'noemail'}, document)
def test_multilevel(self):
document = {'name': 'myname', 'one': {'hey': 'jude'}}
defaults = {'one': {'two': {'three': 'banana'}}}
resolve_default_values(document, defaults)
expected = {
'name': 'myname',
'one': {
'hey': 'jude',
'two': {'three': 'banana'}
}
}
self.assertEqual(expected, document)
def test_value_instead_of_dict(self):
document = {'name': 'john'}
defaults = {'name': {'first': 'john'}}
resolve_default_values(document, defaults)
self.assertEqual(document, defaults)
def test_lists(self):
document = {"one": [{"name": "john"}, {}]}
defaults = {"one": [{"title": "M."}]}
resolve_default_values(document, defaults)
expected = {"one": [
{"name": "john", "title": "M."},
{"title": "M."}]}
self.assertEqual(expected, document)
def test_list_of_list_single_value(self):
document = {'one': [[], []]}
defaults = {'one': [['listoflist']]}
resolve_default_values(document, defaults)
# This functionality is not supported, no change in the document
expected = {'one': [[], []]}
assert expected == document
def test_list_empty_list_as_default(self):
        # test that a default value of [] for a list does not cause an
        # IndexError (#417).
document = {'a': ['b']}
defaults = {'a': []}
resolve_default_values(document, defaults)
expected = {'a': ['b']}
assert expected == document
def test_list_of_strings_as_default(self):
document = {}
defaults = {'a': ['b']}
resolve_default_values(document, defaults)
expected = {'a': ['b']}
assert expected == document
# overwrite defaults
document = {'a': ['c', 'd']}
defaults = {'a': ['b']}
resolve_default_values(document, defaults)
expected = {'a': ['c', 'd']}
assert expected == document
def test_list_of_list_dict_value(self):
document = {'one': [[{}], [{}]]}
defaults = {'one': [[{'name': 'banana'}]]}
resolve_default_values(document, defaults)
expected = {'one': [[{'name': 'banana'}], [{'name': 'banana'}]]}
assert expected == document
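# Summarizing the two helpers under test (a sketch): build_defaults() extracts
# the declared defaults from a Cerberus-style schema, and
# resolve_default_values() merges them into an incoming document in place.
#
#     defaults = build_defaults({'email': {'type': 'string', 'default': 'n/a'}})
#     doc = {'name': 'john'}
#     resolve_default_values(doc, defaults)  # doc == {'name': 'john', 'email': 'n/a'}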
| apache-2.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/distutils/file_util.py | 171 | 7795 | """distutils.file_util
Utility functions for operating on single files.
"""
__revision__ = "$Id$"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = {None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking'}
def _copy_file_contents(src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'.
Both must be filenames. Any error opening either file, reading from
'src', or writing to 'dst', raises DistutilsFileError. Data is
read/written in chunks of 'buffer_size' bytes (default 16k). No attempt
is made to handle anything apart from regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError("could not open '%s': %s" % (src, errstr))
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError(
"could not delete '%s': %s" % (dst, errstr))
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError(
"could not create '%s': %s" % (dst, errstr))
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError(
"could not read from '%s': %s" % (src, errstr))
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError(
"could not write to '%s': %s" % (dst, errstr))
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
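# Example (a sketch; paths are hypothetical): a larger buffer can help when
# copying big files.
#
#     _copy_file_contents('/tmp/big.iso', '/tmp/big.iso.bak',
#                         buffer_size=1024*1024)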
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
link=None, verbose=1, dry_run=0):
"""Copy a file 'src' to 'dst'.
If 'dst' is a directory, then 'src' is copied there with the same name;
otherwise, it must be a filename. (If the file exists, it will be
ruthlessly clobbered.) If 'preserve_mode' is true (the default),
the file's mode (type and permission bits, or whatever is analogous on
the current platform) is copied. If 'preserve_times' is true (the
default), the last-modified and last-access times are copied as well.
If 'update' is true, 'src' will only be copied if 'dst' does not exist,
or if 'dst' does exist but is older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
    File contents are copied with '_copy_file_contents()'; hard and
    symbolic links are created with 'os.link()' and 'os.symlink()'.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
    # changing it (i.e. it's not already a hard/soft link to src, OR
    # (not update) and (src newer than dst)).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError(
"can't copy '%s': doesn't exist or not a regular file" % src)
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
if verbose >= 1:
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError("invalid value '%s' for 'link' argument" % link)
if verbose >= 1:
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
if link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <[email protected]>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
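# Example (a sketch; paths are hypothetical):
#
#     # Copy into a directory, preserving mode/times, skipping if up to date;
#     # returns the destination name and whether a copy happened.
#     dest, copied = copy_file('README.txt', '/tmp/docs', update=1)
#
#     # On platforms that support it, hard-link instead of copying:
#     copy_file('README.txt', '/tmp/docs/README.hard', link='hard')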
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst, verbose=1, dry_run=0):
"""Move a file 'src' to 'dst'.
If 'dst' is a directory, the file will be moved into it with the same
name; otherwise, 'src' is just renamed to 'dst'. Return the new
full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
if verbose >= 1:
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError("can't move '%s': not a regular file" % src)
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError(
"can't move '%s': destination '%s' already exists" %
(src, dst))
if not isdir(dirname(dst)):
raise DistutilsFileError(
"can't move '%s': destination '%s' not a valid path" % \
(src, dst))
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError(
"couldn't move '%s' to '%s': %s" % (src, dst, msg))
if copy_it:
copy_file(src, dst, verbose=verbose)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError(
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") %
(src, dst, src, msg))
return dst
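# Example (a sketch; paths are hypothetical): when dst is a directory the file
# keeps its basename, and cross-device moves fall back to copy + delete.
#
#     new_path = move_file('/tmp/build/app.log', '/var/log/app')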
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
try:
for line in contents:
f.write(line + "\n")
finally:
f.close()
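# Example (a sketch): write_file() appends the newlines itself.
#
#     write_file('/tmp/names.txt', ['alice', 'bob'])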
| mit |
ghedsouza/django | django/db/backends/sqlite3/schema.py | 18 | 13024 | import codecs
import contextlib
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
def __enter__(self):
with self.connection.cursor() as c:
# Some SQLite schema alterations need foreign key constraints to be
# disabled. This is the default in SQLite but can be changed with a
# build flag and might change in future, so can't be relied upon.
# Enforce it here for the duration of the transaction.
c.execute('PRAGMA foreign_keys')
self._initial_pragma_fk = c.fetchone()[0]
c.execute('PRAGMA foreign_keys = 0')
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
with self.connection.cursor() as c:
# Restore initial FK setting - PRAGMA values can't be parametrized
c.execute('PRAGMA foreign_keys = %s' % int(self._initial_pragma_fk))
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, (Decimal, float, int)):
return str(value)
elif isinstance(value, str):
return "'%s'" % value.replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character:
# value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
value = bytes(value)
hex_encoder = codecs.getencoder('hex_codec')
value_hex, _length = hex_encoder(value)
# Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
return "X'%s'" % value_hex.decode('ascii')
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None):
"""
Shortcut to transform a model from old_model into new_model
The essential steps are:
1. rename the model's existing table, e.g. "app_model" to "app_model__old"
2. create a table with the updated definition called "app_model"
3. copy the data from the old renamed table to the new table
4. delete the "app_model__old" table
"""
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
# to an altered field.
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
# Work out the new fields dict / mapping
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if getattr(create_field, 'primary_key', False) or (
alter_field and getattr(alter_field[1], 'primary_key', False)):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
if create_field:
body[create_field.name] = create_field
# Choose a default and insert it into the copy map
if not create_field.many_to_many and create_field.concrete:
mapping[create_field.column] = self.quote_value(
self.effective_default(create_field)
)
# Add in any altered fields
if alter_field:
old_field, new_field = alter_field
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
if delete_field:
del body[delete_field.name]
del mapping[delete_field.column]
# Remove any implicit M2M tables
if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created:
return self.delete_model(delete_field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body = copy.deepcopy(body)
# Work out the new value of unique_together, taking renames into
# account
unique_together = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
index_together = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
indexes = model._meta.indexes
if delete_field:
indexes = [
index for index in indexes
if delete_field.name not in index.fields
]
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': unique_together,
'index_together': index_together,
'indexes': indexes,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# We need to modify model._meta.db_table, but everything explodes
# if the change isn't reversed before the end of this method. This
# context manager helps us avoid that situation.
@contextlib.contextmanager
def altered_table_name(model, temporary_table_name):
original_table_name = model._meta.db_table
model._meta.db_table = temporary_table_name
yield
model._meta.db_table = original_table_name
with altered_table_name(model, model._meta.db_table + "__old"):
# Rename the old table to make way for the new
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Create a new table with the updated schema. We remove things
# from the deferred SQL that match our table name, too
self.deferred_sql = [x for x in self.deferred_sql if temp_model._meta.db_table not in x]
self.create_model(temp_model)
# Copy data from the old table into the new table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(y for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model, handle_autom2m=False)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super().delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_field=field)
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_field=field)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Perform a "physical" (non-ManyToMany) field update."""
# Alter by remaking table
self._remake_table(model, alter_field=(old_field, new_field))
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_field=(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
),
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.remote_field.through)
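# A rough sketch of the SQL sequence _remake_table() effectively issues for a
# hypothetical table "app_model" (quoting, column lists and deferred index SQL
# are elided):
#
#     ALTER TABLE "app_model" RENAME TO "app_model__old";
#     CREATE TABLE "app_model" (...);  -- updated definition
#     INSERT INTO "app_model" (...) SELECT ... FROM "app_model__old";
#     DROP TABLE "app_model__old";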
| bsd-3-clause |
xushuwei202/Vintageous | ex/ex_location.py | 9 | 3136 | import sublime
def get_line_nr(view, point):
"""Return 1-based line number for `point`.
"""
return view.rowcol(point)[0] + 1
# TODO: Move this to sublime_lib; make it accept a point or a region.
def find_eol(view, point):
return view.line(point).end()
# TODO: Move this to sublime_lib; make it accept a point or a region.
def find_bol(view, point):
return view.line(point).begin()
# TODO: make this return None for failures.
def find_line(view, start=0, end=-1, target=0):
"""Do binary search to find :target: line number.
Return: If `target` is found, `Region` comprising entire line no. `target`.
If `target`is not found, `-1`.
"""
# Don't bother if sought line is beyond buffer boundaries.
if target < 0 or target > view.rowcol(view.size())[0] + 1:
return -1
if end == -1:
end = view.size()
lo, hi = start, end
while lo <= hi:
        middle = lo + (hi - lo) // 2  # floor division: buffer points must be ints
if get_line_nr(view, middle) < target:
lo = find_eol(view, middle) + 1
elif get_line_nr(view, middle) > target:
hi = find_bol(view, middle) - 1
else:
return view.full_line(middle)
return -1
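# Example (a sketch; `view` is the active sublime.View): fetch the text of
# line 10, if it exists.
#
#     r = find_line(view, target=10)  # Region covering line 10, or -1
#     if r != -1:
#         text = view.substr(r)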
def search_in_range(view, what, start, end, flags=0):
match = view.find(what, start, flags)
if match and ((match.begin() >= start) and (match.end() <= end)):
return True
def find_last_match(view, what, start, end, flags=0):
"""Find last occurrence of `what` between `start`, `end`.
"""
match = view.find(what, start, flags)
new_match = None
while match:
new_match = view.find(what, match.end(), flags)
if new_match and new_match.end() <= end:
match = new_match
else:
return match
def reverse_search(view, what, start=0, end=-1, flags=0):
"""Do binary search to find `what` walking backwards in the buffer.
"""
if end == -1:
end = view.size()
end = find_eol(view, view.line(end).a)
last_match = None
lo, hi = start, end
while True:
        middle = (lo + hi) // 2  # floor division: buffer points must be ints
line = view.line(middle)
middle, eol = find_bol(view, line.a), find_eol(view, line.a)
if search_in_range(view, what, middle, hi, flags):
lo = middle
elif search_in_range(view, what, lo, middle - 1, flags):
            hi = middle - 1
# Don't search forever the same line.
if last_match and line.contains(last_match):
match = find_last_match(view, what, lo, hi, flags=flags)
return view.rowcol(match.begin())[0] + 1
last_match = sublime.Region(line.begin(), line.end())
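# Example (a sketch): 1-based line number of the last 'TODO' before point 500.
#
#     line_nr = reverse_search(view, r'TODO', start=0, end=500)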
def search(view, what, start_line=None, flags=0):
# TODO: don't make start_line default to the first sel's begin(). It's
# confusing. ???
if start_line:
start = view.text_point(start_line, 0)
else:
start = view.sel()[0].begin()
reg = view.find(what, start, flags)
    if reg is not None:
row = (view.rowcol(reg.begin())[0] + 1)
else:
row = calculate_relative_ref(view, '.', start_line=start_line)
return row
| mit |
onecrayon/PopClip-Extensions | source/OneNote/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
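# Illustrative wildcard matches (a sketch; truthy means accepted):
#
#     _dnsname_match('*.example.com', 'www.example.com')      # truthy
#     _dnsname_match('*.example.com', 'example.com')          # falsy: '*' must match a label
#     _dnsname_match('www*.example.com', 'www1.example.com')  # truthy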
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| mit |
lxml/lxml | doc/rest2html.py | 1 | 1812 | #!/usr/bin/python
"""
A minimal front end to the Docutils Publisher, producing HTML with
Pygments syntax highlighting.
"""
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except Exception:
pass
# set up Pygments
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES, cssclass='syntax')
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
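# With the directive registered above, a reStructuredText source can request
# highlighting like this (a sketch):
#
#     .. sourcecode:: python
#
#         print "hello"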
# run the generation
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| bsd-3-clause |
theheros/kbengine | kbe/src/lib/python/Lib/test/test_itertools.py | 48 | 69382 | import unittest
from test import support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
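# Quick sanity examples for the helpers above (a sketch):
#
#     take(3, count())  # -> [0, 1, 2]
#     prod([2, 3, 4])   # -> 24
#     fact(5)           # -> 120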
class TestBasicOps(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None) # too many args
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.assertEqual(next(pickle.loads(pickle.dumps(c))), value)
        # check proper internal error handling for large 'step' sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
        self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
def test_zip(self):
# XXX This is rather silly now that builtin zip() calls zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
# Check tuple re-use (implementation detail)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
                self.fail('Did not raise TypeError in: ' + stmt)
# Check tuple re-use (implementation detail)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
        # Formerly this code would fail in debug mode
        # with an Undetected Error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
def tupleize(*args):
return args
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
self.assertRaises(TypeError, islice, range(10))
self.assertRaises(TypeError, islice, range(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, range(10), -5, 10, 1)
self.assertRaises(ValueError, islice, range(10), 1, -5, -1)
self.assertRaises(ValueError, islice, range(10), 1, 10, -1)
self.assertRaises(ValueError, islice, range(10), 1, 10, 0)
self.assertRaises(ValueError, islice, range(10), 'a')
self.assertRaises(ValueError, islice, range(10), 'a', 1)
self.assertRaises(ValueError, islice, range(10), 1, 'a')
self.assertRaises(ValueError, islice, range(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, range(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
def test_tee(self):
n = 200
def irange(n):
for i in range(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
    def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
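    # Note (added commentary): the len() imported from test.test_iterlen above
    # appears to be a helper that falls back to the iterator's __length_hint__,
    # which is why len(repeat(None, 50)) succeeds while the unbounded
    # repeat(None) raises TypeError.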
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in the
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
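# Added note: doctest also collects the values of a module-level __test__
# mapping, which is how run_doctest() in test_main() below finds and runs the
# libreftest examples.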
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctest the examples in the library reference
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
| lgpl-3.0 |
jespino/urwintranet | urwintranet/ui/widgets/generic.py | 1 | 10342 | # -*- coding: utf-8 -*-
"""
urwintranet.ui.widgets.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
from . import mixins, utils
def box_solid_fill(char, height):
sf = urwid.SolidFill(char)
return urwid.BoxAdapter(sf, height=height)
def wrap_in_whitespace(widget, cls=urwid.Columns):
whitespace = urwid.SolidFill(" ")
return cls([whitespace, widget, whitespace])
def center(widget):
return wrap_in_whitespace(wrap_in_whitespace(widget), cls=urwid.Pile)
def banner():
bt = urwid.BigText("Intranet", font=urwid.font.HalfBlock7x7Font())
btwp = urwid.Padding(bt, "center", width="clip")
return urwid.AttrWrap(btwp, "green")
def pony():
bt = urwid.Text('''
.,,.
,;;*;;;;,
.-'``;-');;.
/' .-. /*;;
.' \d \;; .;;;,
/ o ` \; ,__. ,;*;;;*;,
\__, _.__,' \_.-') __)--.;;;;;*;;;;,
`""`;;;\ /-')_) __) `\' ';;;;;;
;*;;; -') `)_) |\ | ;;;;*;
;;;;| `---` O | | ;;*;;;
*;*;\| O / ;;;;;*
;;;;;/| .-------\ / ;*;;;;;
;;;*;/ \ | '. (`. ;;;*;;;
;;;;;'. ; | ) \ | ;;;;;;
,;*;;;;\/ |. / /` | ';;;*;
;;;;;;/ |/ / /__/ ';;;
'*jgs/ | / | ;*;
`""""` `""""` ;'
''')
return urwid.AttrWrap(bt, "green")
def button(text, align=None):
return PlainButton(text.upper(), align)
def editor(mask=None):
if mask is None:
return urwid.Edit()
else:
return urwid.Edit(mask=mask)
class Header(mixins.NonSelectableMixin, urwid.WidgetWrap):
def __init__(self, tabs):
text = urwid.Text("Intranet")
self.account_button = PlainButton("My account")
cols = urwid.Columns([
("weight", 0.9, text),
("weight", 0.1, urwid.AttrMap(self.account_button, "account-button")),
])
super().__init__(urwid.AttrMap(cols, "green-bg"))
class Notifier(mixins.NotifierMixin, mixins.NonSelectableMixin, urwid.Text):
pass
class PlainButton(mixins.PlainButtonMixin, urwid.Button):
ALIGN = "center"
def __init__(self, text, align=None):
super().__init__(text)
self._label.set_align_mode(self.ALIGN if align is None else align)
class SubmitButton(PlainButton):
def __init__(self, text, align=None):
super().__init__(text, align)
class CancelButton(PlainButton):
def __init__(self, text, align=None):
super().__init__(text, align)
class FooterNotifier(Notifier):
ALIGN = "left"
ERROR_PREFIX = "[ERROR]: "
ERROR_ATTR = "footer-error"
INFO_PREFIX = "[INFO]: "
INFO_ATTR = "footer-info"
class Footer(mixins.NonSelectableMixin, urwid.WidgetWrap):
def __init__(self, notifier):
assert isinstance(notifier, FooterNotifier)
cols = urwid.Columns([
("weight", 0.9, urwid.AttrMap(notifier, "footer")),
("weight", 0.1, urwid.AttrMap(PlainButton("? Help"), "help-button")),
])
super().__init__(cols)
class Grid(mixins.ViMotionMixin, mixins.EmacsMotionMixin, urwid.GridFlow):
pass
class Tabs(mixins.NonSelectableMixin, urwid.WidgetWrap):
def __init__(self, tabs, focus=0):
self.tab_list = urwid.MonitoredFocusList(tabs)
self.tab_list.focus = focus
self.tab_list.set_focus_changed_callback(self.rebuild_tabs)
cols = [urwid.AttrMap(self.tab(t), "active-tab" if i == self.tab_list.focus else "inactive-tab")
for i, t in enumerate(tabs)]
self.columns = urwid.Columns(cols)
super().__init__(self.columns)
def rebuild_tabs(self, new_focus):
for i, c in enumerate(self.columns.contents):
widget, _ = c
widget.set_attr_map({None: "active-tab" if i == new_focus else "inactive-tab"})
def tab(self, text):
return urwid.LineBox(urwid.Text(text + " "))
class HelpPopup(urwid.WidgetWrap):
# FIXME: Remove solid_fill and use the Fill decorator
def __init__(self, title="Help", content={}):
contents = [box_solid_fill(" ", 1)]
for name, actions in content:
contents += self._section(name, actions)
contents.append(box_solid_fill(" ", 1))
contents.append(self._buttons())
contents.append(box_solid_fill(" ", 1))
self.widget = urwid.Pile(contents)
super().__init__(urwid.AttrMap(urwid.LineBox(urwid.Padding(self.widget, right=2, left=2),
title), "popup"))
def _section(self, name, actions):
items = [urwid.Text(("popup-section-title", name))]
items.append(box_solid_fill(" ", 1))
for keys, description in actions:
colum_items = [(18, urwid.Padding(ListText(keys, align="center"), right=2))]
colum_items.append(urwid.Text(description))
items.append(urwid.Padding(urwid.Columns(colum_items), left=2))
return items
def _buttons(self):
self.close_button = PlainButton("Close")
colum_items = [("weight", 1, urwid.Text(""))]
colum_items.append((15, urwid.AttrMap(urwid.Padding(self.close_button, right=1, left=2),
"popup-cancel-button")))
return urwid.Columns(colum_items)
class ListCell(urwid.WidgetWrap):
def __init__(self, text):
text_widget = urwid.AttrMap(ListText(text), "default")
widget = urwid.AttrMap(urwid.LineBox(text_widget), "green")
super().__init__(widget)
class ButtonCell(urwid.WidgetWrap):
def __init__(self, button):
text_widget = urwid.AttrMap(button, "default", "focus-header")
widget = urwid.AttrMap(urwid.LineBox(text_widget), "green")
super().__init__(widget)
class ListText(mixins.IgnoreKeyPressMixin, urwid.Text):
def __init__(self, text, align="center"):
super().__init__(text, align=align)
class RowDivider(urwid.WidgetWrap):
def __init__(self, attr_map="default", div_char="-"):
widget = urwid.AttrMap(urwid.Divider(div_char), attr_map)
super().__init__(widget)
class SemaphorePercentText(ListText):
"""
    Render a number against max_value with a semaphore color:
    * red: value <= 20% of max_value
    * yellow: 20% of max_value < value < max_value
    * green: value == max_value
    If invert is True, red and green are swapped.
"""
def __init__(self, value, max_value=100.0, invert=False):
color = "yellow"
if value <= max_value * 0.2:
color = "red" if not invert else "green"
elif value == max_value:
color = "green" if not invert else "red"
text = [(color, str(value))]
super().__init__(text)
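# Illustrative usage sketch (added commentary, values are hypothetical): with
# the thresholds documented above,
#
#     SemaphorePercentText(3, max_value=20)    # 3 <= 20% of 20  -> red
#     SemaphorePercentText(10, max_value=20)   # in between      -> yellow
#     SemaphorePercentText(20, max_value=20)   # equals maximum  -> green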
class MenuItem(urwid.RadioButton):
"""
A RadioButton with a 'click' signal
"""
signals = urwid.RadioButton.signals + ["click", "quit"]
def __init__(self, group, label, value, state='first True', on_state_change=None, user_data=None):
self.value = value
        super().__init__(group, label, state=state, on_state_change=on_state_change, user_data=user_data)
def keypress(self, size, key):
command = urwid.command_map[key]
if command == "activate":
self._emit("click", True)
elif command == "menu":
self._emit("quit")
super(MenuItem, self).keypress(size, key)
return key
class ComboBoxMenu(urwid.WidgetWrap):
"""
A menu shown when parent is activated.
"""
signals = ["close"]
def __init__(self, items, style):
"""
Initialize items list
Item must be a list of dicts with "label" and "value" items.
"""
self.group = []
self.items = []
for i in items:
self.append(i)
self.walker = urwid.Pile(self.items)
super().__init__(urwid.AttrWrap(urwid.Filler(urwid.LineBox(self.walker)), "selectable", style))
def append(self, item):
"""
Append an item to the menu
"""
r = MenuItem(self.group, item["label"], item["value"])
self.items.append(r)
def get_item(self, index):
"""
        Return the label of the item at the given index.
"""
return self.items[index].get_label()
def get_selection(self):
"""
        Return the currently selected item, or None if nothing is selected.
"""
        for item in self.items:
if item.state is True:
return item
return None
class ComboBox(urwid.PopUpLauncher):
"""
A button launcher for the combobox menu
"""
_combobox_mark = u"↓"
signals = ["change"]
def __init__(self, items, default=-1, on_state_change=None, style="default"):
self.menu = ComboBoxMenu(items, style)
self.on_state_change = on_state_change
selected_item = utils.find(lambda x: x.value == default, self.menu.items) or self.menu.items[0]
selected_item.set_state(True)
self._button = DDButton(selected_item.get_label())
super().__init__(self._button)
urwid.connect_signal(self.original_widget, 'click', lambda b: self.open_pop_up())
for i in self.menu.items:
urwid.connect_signal(i, 'click', self.item_changed)
urwid.connect_signal(i, 'quit', self.quit_menu)
def create_pop_up(self):
"""
Create the pop up widget
"""
return self.menu
def get_pop_up_parameters(self):
"""
Configuration dictionary for the pop up
"""
return {'left': 0, 'top': 0, 'overlay_width': 32, 'overlay_height': len(self.menu.items) + 2}
def item_changed(self, item, state):
if state:
selection = item.get_label()
self._button.set_label(selection)
if self.on_state_change:
self.on_state_change(self, item, state)
self.close_pop_up()
self._emit("change", item, state)
def quit_menu(self, widget):
self.close_pop_up()
def get_selection(self):
return self.menu.get_selection()
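# Illustrative usage sketch (added commentary; labels and values are
# hypothetical): items are plain dicts, and the current pick is read back via
# get_selection().
#
#     states = [{"label": "Open", "value": 1}, {"label": "Closed", "value": 2}]
#     combo = ComboBox(states, default=2)
#     urwid.connect_signal(combo, "change",
#                          lambda widget, item, state: print(item.value))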
class DDButton(mixins.PlainButtonMixin, urwid.Button):
def set_label(self, s):
s = s + " " + ComboBox._combobox_mark
super(DDButton, self).set_label(s)
| apache-2.0 |
bdoner/SickRage | lib/guessit/transfo/guess_filetype.py | 29 | 11928 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import mimetypes
import os.path
import re
from guessit.guess import Guess
from guessit.patterns.extension import subtitle_exts, info_exts, video_exts
from guessit.transfo import TransformerException
from guessit.plugins.transformers import Transformer, get_transformer
from guessit.matcher import log_found_guess, found_guess
class GuessFiletype(Transformer):
def __init__(self):
Transformer.__init__(self, 200)
# List of well known movies and series, hardcoded because they cannot be
# guessed appropriately otherwise
MOVIES = ['OSS 117']
SERIES = ['Band of Brothers']
MOVIES = [m.lower() for m in MOVIES]
SERIES = [s.lower() for s in SERIES]
def guess_filetype(self, mtree, options=None):
options = options or {}
# put the filetype inside a dummy container to be able to have the
# following functions work correctly as closures
# this is a workaround for python 2 which doesn't have the
# 'nonlocal' keyword which we could use here in the upgrade_* functions
# (python 3 does have it)
filetype_container = [mtree.guess.get('type')]
other = {}
filename = mtree.string
def upgrade_episode():
if filetype_container[0] == 'subtitle':
filetype_container[0] = 'episodesubtitle'
elif filetype_container[0] == 'info':
filetype_container[0] = 'episodeinfo'
elif (not filetype_container[0] or
filetype_container[0] == 'video'):
filetype_container[0] = 'episode'
def upgrade_movie():
if filetype_container[0] == 'subtitle':
filetype_container[0] = 'moviesubtitle'
elif filetype_container[0] == 'info':
filetype_container[0] = 'movieinfo'
elif (not filetype_container[0] or
filetype_container[0] == 'video'):
filetype_container[0] = 'movie'
def upgrade_subtitle():
if filetype_container[0] == 'movie':
filetype_container[0] = 'moviesubtitle'
elif filetype_container[0] == 'episode':
filetype_container[0] = 'episodesubtitle'
elif not filetype_container[0]:
filetype_container[0] = 'subtitle'
def upgrade_info():
if filetype_container[0] == 'movie':
filetype_container[0] = 'movieinfo'
elif filetype_container[0] == 'episode':
filetype_container[0] = 'episodeinfo'
elif not filetype_container[0]:
filetype_container[0] = 'info'
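        # Added note: under Python 3 the single-element list above could be a
        # plain variable rewritten with `nonlocal`, e.g.
        #
        #     filetype = mtree.guess.get('type')
        #     def upgrade_episode():
        #         nonlocal filetype
        #         ...
        #
        # The list container is kept only for Python 2 compatibility.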
# look at the extension first
fileext = os.path.splitext(filename)[1][1:].lower()
if fileext in subtitle_exts:
upgrade_subtitle()
other = {'container': fileext}
elif fileext in info_exts:
upgrade_info()
other = {'container': fileext}
elif fileext in video_exts:
other = {'container': fileext}
else:
if fileext and not options.get('name_only'):
other = {'extension': fileext}
list(mtree.unidentified_leaves())[-1].guess = Guess(other)
# check whether we are in a 'Movies', 'Tv Shows', ... folder
folder_rexps = [(r'Movies?', upgrade_movie),
(r'Films?', upgrade_movie),
(r'Tv[ _-]?Shows?', upgrade_episode),
(r'Series?', upgrade_episode),
(r'Episodes?', upgrade_episode)]
for frexp, upgrade_func in folder_rexps:
frexp = re.compile(frexp, re.IGNORECASE)
for pathgroup in mtree.children:
if frexp.match(pathgroup.value):
upgrade_func()
return filetype_container[0], other
# check for a few specific cases which will unintentionally make the
# following heuristics confused (eg: OSS 117 will look like an episode,
# season 1, epnum 17, when it is in fact a movie)
fname = mtree.clean_string(filename).lower()
for m in self.MOVIES:
if m in fname:
self.log.debug('Found in exception list of movies -> type = movie')
upgrade_movie()
return filetype_container[0], other
for s in self.SERIES:
if s in fname:
self.log.debug('Found in exception list of series -> type = episode')
upgrade_episode()
return filetype_container[0], other
# if we have an episode_rexp (eg: s02e13), it is an episode
episode_transformer = get_transformer('guess_episodes_rexps')
if episode_transformer:
filename_parts = list(x.value for x in mtree.unidentified_leaves())
filename_parts.append(filename)
for filename_part in filename_parts:
guess = episode_transformer.guess_episodes_rexps(filename_part)
if guess:
self.log.debug('Found guess_episodes_rexps: %s -> type = episode', guess)
upgrade_episode()
return filetype_container[0], other
properties_transformer = get_transformer('guess_properties')
if properties_transformer:
# if we have certain properties characteristic of episodes, it is an ep
found = properties_transformer.container.find_properties(filename, mtree, options, 'episodeFormat')
guess = properties_transformer.container.as_guess(found, filename)
if guess:
self.log.debug('Found characteristic property of episodes: %s"', guess)
upgrade_episode()
return filetype_container[0], other
weak_episode_transformer = get_transformer('guess_weak_episodes_rexps')
if weak_episode_transformer:
found = weak_episode_transformer.container.find_properties(filename, mtree, options, 'episodeNumber')
guess = weak_episode_transformer.container.as_guess(found, filename)
if guess and (guess.raw('episodeNumber')[0] == '0' or guess['episodeNumber'] >= 10):
self.log.debug('Found characteristic property of episodes: %s"', guess)
upgrade_episode()
return filetype_container[0], other
found = properties_transformer.container.find_properties(filename, mtree, options, 'crc32')
guess = properties_transformer.container.as_guess(found, filename)
if guess:
found = weak_episode_transformer.container.find_properties(filename, mtree, options)
guess = weak_episode_transformer.container.as_guess(found, filename)
if guess:
self.log.debug('Found characteristic property of episodes: %s"', guess)
upgrade_episode()
return filetype_container[0], other
found = properties_transformer.container.find_properties(filename, mtree, options, 'format')
guess = properties_transformer.container.as_guess(found, filename)
if guess and guess['format'] in ('HDTV', 'WEBRip', 'WEB-DL', 'DVB'):
# Use weak episodes only if TV or WEB source
weak_episode_transformer = get_transformer('guess_weak_episodes_rexps')
if weak_episode_transformer:
guess = weak_episode_transformer.guess_weak_episodes_rexps(filename)
if guess:
self.log.debug('Found guess_weak_episodes_rexps: %s -> type = episode', guess)
upgrade_episode()
return filetype_container[0], other
website_transformer = get_transformer('guess_website')
if website_transformer:
found = website_transformer.container.find_properties(filename, mtree, options, 'website')
guess = website_transformer.container.as_guess(found, filename)
if guess:
for namepart in ('tv', 'serie', 'episode'):
if namepart in guess['website']:
# origin-specific type
self.log.debug('Found characteristic property of episodes: %s', guess)
upgrade_episode()
return filetype_container[0], other
if filetype_container[0] in ('subtitle', 'info') or (not filetype_container[0] and fileext in video_exts):
# if no episode info found, assume it's a movie
self.log.debug('Nothing characteristic found, assuming type = movie')
upgrade_movie()
if not filetype_container[0]:
self.log.debug('Nothing characteristic found, assuming type = unknown')
filetype_container[0] = 'unknown'
return filetype_container[0], other
def process(self, mtree, options=None):
"""guess the file type now (will be useful later)
"""
        options = options or {}
        filetype, other = self.guess_filetype(mtree, options)
mtree.guess.set('type', filetype, confidence=1.0)
log_found_guess(mtree.guess)
filetype_info = Guess(other, confidence=1.0)
# guess the mimetype of the filename
# TODO: handle other mimetypes not found on the default type_maps
# mimetypes.types_map['.srt']='text/subtitle'
mime, _ = mimetypes.guess_type(mtree.string, strict=False)
if mime is not None:
filetype_info.update({'mimetype': mime}, confidence=1.0)
# Retrieve the last node of category path (extension node)
node_ext = list(filter(lambda x: x.category == 'path', mtree.nodes()))[-1]
found_guess(node_ext, filetype_info)
if mtree.guess.get('type') in [None, 'unknown']:
if options.get('name_only'):
mtree.guess.set('type', 'movie', confidence=0.6)
else:
raise TransformerException(__name__, 'Unknown file type')
def second_pass_options(self, mtree, options=None):
if 'type' not in options or not options['type']:
if mtree.info.get('type') != 'episode':
# now look whether there are some specific hints for episode vs movie
# If we have a date and no year, this is a TV Show.
if 'date' in mtree.info and 'year' not in mtree.info:
return {'type': 'episode'}
if mtree.info.get('type') != 'movie':
# If we have a year, no season but raw episodeNumber is a number not starting with '0', this is a movie.
if 'year' in mtree.info and 'episodeNumber' in mtree.info and not 'season' in mtree.info:
try:
int(mtree.raw['episodeNumber'])
return {'type': 'movie'}
except ValueError:
pass
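# Added commentary (not upstream): second_pass_options() applies two
# tie-breakers after the first parse -- a date with no year suggests a TV
# episode, while a year plus a season-less episodeNumber whose raw text parses
# as a plain integer suggests the number really belonged to a movie title.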
| gpl-3.0 |
Changaco/oh-mainline | vendor/packages/scrapy/scrapy/core/downloader/__init__.py | 19 | 6133 | import random
import warnings
from time import time
from collections import deque
from functools import partial
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.signal import send_catch_log
from scrapy.utils.httpobj import urlparse_cached
from scrapy.resolver import dnscache
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy import signals
from scrapy import log
from .middleware import DownloaderMiddlewareManager
from .handlers import DownloadHandlers
class Slot(object):
"""Downloader slot"""
def __init__(self, concurrency, delay, settings):
self.concurrency = concurrency
self.delay = delay
self.randomize_delay = settings.getbool('RANDOMIZE_DOWNLOAD_DELAY')
self.active = set()
self.queue = deque()
self.transferring = set()
self.lastseen = 0
def free_transfer_slots(self):
return self.concurrency - len(self.transferring)
def download_delay(self):
if self.randomize_delay:
return random.uniform(0.5*self.delay, 1.5*self.delay)
return self.delay
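# Added note: with RANDOMIZE_DOWNLOAD_DELAY enabled, download_delay() spreads
# requests over [0.5 * delay, 1.5 * delay]; e.g. a configured delay of 2
# seconds yields an effective per-request delay between 1s and 3s.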
def _get_concurrency_delay(concurrency, spider, settings):
delay = settings.getfloat('DOWNLOAD_DELAY')
if hasattr(spider, 'DOWNLOAD_DELAY'):
warnings.warn("%s.DOWNLOAD_DELAY attribute is deprecated, use %s.download_delay instead" %
(type(spider).__name__, type(spider).__name__))
delay = spider.DOWNLOAD_DELAY
if hasattr(spider, 'download_delay'):
delay = spider.download_delay
# TODO: remove for Scrapy 0.15
c = settings.getint('CONCURRENT_REQUESTS_PER_SPIDER')
if c:
warnings.warn("CONCURRENT_REQUESTS_PER_SPIDER setting is deprecated, " \
"use CONCURRENT_REQUESTS_PER_DOMAIN instead", ScrapyDeprecationWarning)
concurrency = c
# ----------------------------
if hasattr(spider, 'max_concurrent_requests'):
concurrency = spider.max_concurrent_requests
if delay > 0:
concurrency = 1 # force concurrency=1 if download delay required
return concurrency, delay
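# Added note: the net effect above is that spider attributes win over project
# settings, and any non-zero delay forces concurrency to 1 for the slot; e.g.
# a spider with download_delay = 2 downloads strictly serially regardless of
# CONCURRENT_REQUESTS_PER_DOMAIN.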
class Downloader(object):
def __init__(self, crawler):
self.settings = crawler.settings
self.slots = {}
self.active = set()
self.handlers = DownloadHandlers()
self.total_concurrency = self.settings.getint('CONCURRENT_REQUESTS')
self.domain_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self.ip_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_IP')
self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
self.inactive_slots = {}
def fetch(self, request, spider):
key, slot = self._get_slot(request, spider)
self.active.add(request)
slot.active.add(request)
def _deactivate(response):
self.active.remove(request)
slot.active.remove(request)
if not slot.active: # remove empty slots
self.inactive_slots[key] = self.slots.pop(key)
return response
dlfunc = partial(self._enqueue_request, slot=slot)
dfd = self.middleware.download(dlfunc, request, spider)
return dfd.addBoth(_deactivate)
def needs_backout(self):
return len(self.active) >= self.total_concurrency
def _get_slot(self, request, spider):
key = urlparse_cached(request).hostname or ''
if self.ip_concurrency:
key = dnscache.get(key, key)
if key not in self.slots:
if key in self.inactive_slots:
self.slots[key] = self.inactive_slots.pop(key)
else:
if self.ip_concurrency:
concurrency = self.ip_concurrency
else:
concurrency = self.domain_concurrency
concurrency, delay = _get_concurrency_delay(concurrency, spider, self.settings)
self.slots[key] = Slot(concurrency, delay, self.settings)
return key, self.slots[key]
def _enqueue_request(self, request, spider, slot):
def _downloaded(response):
send_catch_log(signal=signals.response_downloaded, \
response=response, request=request, spider=spider)
return response
deferred = defer.Deferred().addCallback(_downloaded)
slot.queue.append((request, deferred))
self._process_queue(spider, slot)
return deferred
def _process_queue(self, spider, slot):
# Delay queue processing if a download_delay is configured
now = time()
delay = slot.download_delay()
if delay:
penalty = delay - now + slot.lastseen
if penalty > 0 and slot.free_transfer_slots():
d = defer.Deferred()
d.addCallback(self._process_queue, slot)
reactor.callLater(penalty, d.callback, spider)
return
slot.lastseen = now
# Process enqueued requests if there are free slots to transfer for this slot
while slot.queue and slot.free_transfer_slots() > 0:
request, deferred = slot.queue.popleft()
dfd = self._download(slot, request, spider)
dfd.chainDeferred(deferred)
def _download(self, slot, request, spider):
# The order is very important for the following deferreds. Do not change!
# 1. Create the download deferred
dfd = mustbe_deferred(self.handlers.download_request, request, spider)
# 2. After response arrives, remove the request from transferring
# state to free up the transferring slot so it can be used by the
# following requests (perhaps those which came from the downloader
# middleware itself)
slot.transferring.add(request)
def finish_transferring(_):
slot.transferring.remove(request)
self._process_queue(spider, slot)
return _
return dfd.addBoth(finish_transferring)
def is_idle(self):
return not self.slots
| agpl-3.0 |
ChristineLaMuse/mozillians | vendor-local/lib/python/kombu/entity.py | 10 | 19337 | """
kombu.entity
================
Exchange and Queue declarations.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {"transient": TRANSIENT_DELIVERY_MODE,
"persistent": PERSISTENT_DELIVERY_MODE}
__all__ = ["Exchange", "Queue"]
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. This article is recommended reading.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient. Which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent. Which means the message is
stored both in-memory, and on disk, and therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ""
type = "direct"
durable = True
auto_delete = False
delivery_mode = PERSISTENT_DELIVERY_MODE
attrs = (("name", None),
("type", None),
("arguments", None),
("durable", bool),
("auto_delete", bool),
("delivery_mode", lambda m: DELIVERY_MODES.get(m) or m))
def __init__(self, name="", type="", channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash("E|%s" % (self.name, ))
def declare(self, nowait=False):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_declare(exchange=self.name,
type=self.type,
durable=self.durable,
auto_delete=self.auto_delete,
arguments=self.arguments,
nowait=nowait)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None, properties=None,
headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
properties = {} if properties is None else properties
dm = delivery_mode or self.delivery_mode
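        # Added note: `dm` may be a numeric mode (1/2) or an alias string such
        # as "persistent"; only aliases are normalized through DELIVERY_MODES.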
properties["delivery_mode"] = \
DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return False
def __repr__(self):
return super(Exchange, self).__repr__("Exchange %s(%s)" % (self.name,
self.type))
@property
def can_cache_declaration(self):
return self.durable
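# A minimal usage sketch (not part of the original module; the exchange
# name and the already-open `channel` are hypothetical):
#
#     news = Exchange("news", type="topic", durable=True)
#     bound = news(channel)          # bind to a channel, as Queue does below
#     bound.declare()                # create the exchange on the broker
#     msg = bound.Message("hello", content_type="text/plain",
#                         content_encoding="utf-8")
#     bound.publish(msg, routing_key="usd.stock")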
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
.. attribute:: alias
Unused in Kombu, but applications can take advantage of this.
For example to give alternate names to queues with automatically
generated queue names.
"""
name = ""
exchange = Exchange("")
routing_key = ""
durable = True
exclusive = False
auto_delete = False
no_ack = False
attrs = (("name", None),
("exchange", None),
("routing_key", None),
("queue_arguments", None),
("binding_arguments", None),
("durable", bool),
("exclusive", bool),
("auto_delete", bool),
("no_ack", None),
("alias", None))
def __init__(self, name="", exchange=None, routing_key="", channel=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def __hash__(self):
return hash("Q|%s" % (self.name, ))
def when_bound(self):
if self.exchange:
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
if self.exchange:
self.exchange.declare(nowait)
self.queue_declare(nowait, passive=False)
# self.name should be set by queue_declare in the case that
# we're working with anonymous queues
if self.name:
self.queue_bind(nowait)
return self.name
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
ret = self.channel.queue_declare(queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
if not self.name:
self.name = ret[0]
return ret
def queue_bind(self, nowait=False):
"""Create the queue binding on the server."""
return self.channel.queue_bind(queue=self.name,
exchange=self.exchange.name,
routing_key=self.routing_key,
arguments=self.binding_arguments,
nowait=nowait)
def get(self, no_ack=None):
"""Poll the server for a new message.
Returns the message instance if a message was available,
or :const:`None` otherwise.
:keyword no_ack: If set, messages received do not have to
be acknowledged.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
"""
no_ack = self.no_ack if no_ack is None else no_ack
message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
if message is not None:
m2p = getattr(self.channel, "message_to_python", None)
if m2p:
message = m2p(message)
return message
def purge(self, nowait=False):
"""Remove all ready messages from the queue."""
return self.channel.queue_purge(queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None, no_ack=None,
nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
:keyword no_ack: If set, messages received do not have to
be acknowledged.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
if no_ack is None:
no_ack = self.no_ack
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
if it is empty. If it is not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return self.channel.queue_delete(queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def unbind(self):
"""Delete the binding on the server."""
return self.channel.queue_unbind(queue=self.name,
exchange=self.exchange.name,
routing_key=self.routing_key,
arguments=self.binding_arguments)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return False
def __repr__(self):
return super(Queue, self).__repr__(
"Queue %s -> %s -> %s" % (self.name,
self.exchange,
self.routing_key))
@property
def can_cache_declaration(self):
return self.durable
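# A minimal usage sketch (not part of the original module; names and the
# open `channel` are hypothetical):
#
#     news = Exchange("news", type="topic")
#     queue = Queue("usd_stock", news, routing_key="usd.stock")
#     bound = queue(channel)
#     bound.declare()                # declares exchange, queue and binding
#     message = bound.get(no_ack=True)
#     if message is not None:
#         print(message.body)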
| bsd-3-clause |
trondeau/gnuradio | gr-analog/python/analog/nbfm_rx.py | 23 | 3478 | #
# Copyright 2005,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr
from gnuradio import filter
from fm_emph import fm_deemph
try:
from gnuradio import analog
except ImportError:
import analog_swig as analog
class nbfm_rx(gr.hier_block2):
def __init__(self, audio_rate, quad_rate, tau=75e-6, max_dev=5e3):
"""
Narrow Band FM Receiver.
Takes a single complex baseband input stream and produces a single
float output stream of audio samples in the range [-1, +1].
Args:
audio_rate: sample rate of audio stream, >= 16k (integer)
quad_rate: sample rate of output stream (integer)
tau: preemphasis time constant (default 75e-6) (float)
max_dev: maximum deviation in Hz (default 5e3) (float)
quad_rate must be an integer multiple of audio_rate.
Exported sub-blocks (attributes):
squelch
quad_demod
deemph
audio_filter
"""
gr.hier_block2.__init__(self, "nbfm_rx",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
# FIXME audio_rate and quad_rate ought to be exact rationals
self._audio_rate = audio_rate = int(audio_rate)
self._quad_rate = quad_rate = int(quad_rate)
if quad_rate % audio_rate != 0:
raise ValueError, "quad_rate is not an integer multiple of audio_rate"
squelch_threshold = 20 # dB
#self.squelch = analog.simple_squelch_cc(squelch_threshold, 0.001)
# FM Demodulator input: complex; output: float
k = quad_rate/(2*math.pi*max_dev)
self.quad_demod = analog.quadrature_demod_cf(k)
# FM Deemphasis IIR filter
self.deemph = fm_deemph(quad_rate, tau=tau)
# compute FIR taps for audio filter
audio_decim = quad_rate // audio_rate
audio_taps = filter.firdes.low_pass(1.0, # gain
quad_rate, # sampling rate
2.7e3, # Audio LPF cutoff
0.5e3, # Transition band
filter.firdes.WIN_HAMMING) # filter type
print "len(audio_taps) =", len(audio_taps)
# Decimating audio filter
# input: float; output: float; taps: float
self.audio_filter = filter.fir_filter_fff(audio_decim, audio_taps)
self.connect(self, self.quad_demod, self.deemph, self.audio_filter, self)
def set_max_deviation(self, max_dev):
k = self._quad_rate/(2*math.pi*max_dev)
self.quad_demod.set_gain(k)
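# A minimal flowgraph sketch (not part of the original module; `src` is a
# hypothetical block producing complex baseband samples at quad_rate):
#
#     from gnuradio import gr, audio
#     tb = gr.top_block()
#     rx = nbfm_rx(audio_rate=16000, quad_rate=64000)  # 64k = 4 x 16k
#     snk = audio.sink(16000)
#     tb.connect(src, rx, snk)
#     tb.run()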
| gpl-3.0 |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/unidecode/x004.py | 249 | 4071 | data = (
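# Transliteration table for the Unicode block U+0400..U+04FF (Cyrillic);
# the tuple index is the low byte of the code point (see the 0x.. comments).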
'Ie', # 0x00
'Io', # 0x01
'Dj', # 0x02
'Gj', # 0x03
'Ie', # 0x04
'Dz', # 0x05
'I', # 0x06
'Yi', # 0x07
'J', # 0x08
'Lj', # 0x09
'Nj', # 0x0a
'Tsh', # 0x0b
'Kj', # 0x0c
'I', # 0x0d
'U', # 0x0e
'Dzh', # 0x0f
'A', # 0x10
'B', # 0x11
'V', # 0x12
'G', # 0x13
'D', # 0x14
'E', # 0x15
'Zh', # 0x16
'Z', # 0x17
'I', # 0x18
'I', # 0x19
'K', # 0x1a
'L', # 0x1b
'M', # 0x1c
'N', # 0x1d
'O', # 0x1e
'P', # 0x1f
'R', # 0x20
'S', # 0x21
'T', # 0x22
'U', # 0x23
'F', # 0x24
'Kh', # 0x25
'Ts', # 0x26
'Ch', # 0x27
'Sh', # 0x28
'Shch', # 0x29
'\'', # 0x2a
'Y', # 0x2b
'\'', # 0x2c
'E', # 0x2d
'Iu', # 0x2e
'Ia', # 0x2f
'a', # 0x30
'b', # 0x31
'v', # 0x32
'g', # 0x33
'd', # 0x34
'e', # 0x35
'zh', # 0x36
'z', # 0x37
'i', # 0x38
'i', # 0x39
'k', # 0x3a
'l', # 0x3b
'm', # 0x3c
'n', # 0x3d
'o', # 0x3e
'p', # 0x3f
'r', # 0x40
's', # 0x41
't', # 0x42
'u', # 0x43
'f', # 0x44
'kh', # 0x45
'ts', # 0x46
'ch', # 0x47
'sh', # 0x48
'shch', # 0x49
'\'', # 0x4a
'y', # 0x4b
'\'', # 0x4c
'e', # 0x4d
'iu', # 0x4e
'ia', # 0x4f
'ie', # 0x50
'io', # 0x51
'dj', # 0x52
'gj', # 0x53
'ie', # 0x54
'dz', # 0x55
'i', # 0x56
'yi', # 0x57
'j', # 0x58
'lj', # 0x59
'nj', # 0x5a
'tsh', # 0x5b
'kj', # 0x5c
'i', # 0x5d
'u', # 0x5e
'dzh', # 0x5f
'O', # 0x60
'o', # 0x61
'E', # 0x62
'e', # 0x63
'Ie', # 0x64
'ie', # 0x65
'E', # 0x66
'e', # 0x67
'Ie', # 0x68
'ie', # 0x69
'O', # 0x6a
'o', # 0x6b
'Io', # 0x6c
'io', # 0x6d
'Ks', # 0x6e
'ks', # 0x6f
'Ps', # 0x70
'ps', # 0x71
'F', # 0x72
'f', # 0x73
'Y', # 0x74
'y', # 0x75
'Y', # 0x76
'y', # 0x77
'u', # 0x78
'u', # 0x79
'O', # 0x7a
'o', # 0x7b
'O', # 0x7c
'o', # 0x7d
'Ot', # 0x7e
'ot', # 0x7f
'Q', # 0x80
'q', # 0x81
'*1000*', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'[?]', # 0x87
'*100.000*', # 0x88
'*1.000.000*', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'"', # 0x8c
'"', # 0x8d
'R\'', # 0x8e
'r\'', # 0x8f
'G\'', # 0x90
'g\'', # 0x91
'G\'', # 0x92
'g\'', # 0x93
'G\'', # 0x94
'g\'', # 0x95
'Zh\'', # 0x96
'zh\'', # 0x97
'Z\'', # 0x98
'z\'', # 0x99
'K\'', # 0x9a
'k\'', # 0x9b
'K\'', # 0x9c
'k\'', # 0x9d
'K\'', # 0x9e
'k\'', # 0x9f
'K\'', # 0xa0
'k\'', # 0xa1
'N\'', # 0xa2
'n\'', # 0xa3
'Ng', # 0xa4
'ng', # 0xa5
'P\'', # 0xa6
'p\'', # 0xa7
'Kh', # 0xa8
'kh', # 0xa9
'S\'', # 0xaa
's\'', # 0xab
'T\'', # 0xac
't\'', # 0xad
'U', # 0xae
'u', # 0xaf
'U\'', # 0xb0
'u\'', # 0xb1
'Kh\'', # 0xb2
'kh\'', # 0xb3
'Tts', # 0xb4
'tts', # 0xb5
'Ch\'', # 0xb6
'ch\'', # 0xb7
'Ch\'', # 0xb8
'ch\'', # 0xb9
'H', # 0xba
'h', # 0xbb
'Ch', # 0xbc
'ch', # 0xbd
'Ch\'', # 0xbe
'ch\'', # 0xbf
'`', # 0xc0
'Zh', # 0xc1
'zh', # 0xc2
'K\'', # 0xc3
'k\'', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'N\'', # 0xc7
'n\'', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'Ch', # 0xcb
'ch', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'a', # 0xd0
'a', # 0xd1
'A', # 0xd2
'a', # 0xd3
'Ae', # 0xd4
'ae', # 0xd5
'Ie', # 0xd6
'ie', # 0xd7
'@', # 0xd8
'@', # 0xd9
'@', # 0xda
'@', # 0xdb
'Zh', # 0xdc
'zh', # 0xdd
'Z', # 0xde
'z', # 0xdf
'Dz', # 0xe0
'dz', # 0xe1
'I', # 0xe2
'i', # 0xe3
'I', # 0xe4
'i', # 0xe5
'O', # 0xe6
'o', # 0xe7
'O', # 0xe8
'o', # 0xe9
'O', # 0xea
'o', # 0xeb
'E', # 0xec
'e', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'U', # 0xf2
'u', # 0xf3
'Ch', # 0xf4
'ch', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
factorlibre/OCB | openerp/report/misc.py | 458 | 1425 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pychart import *
colorline = [color.T(r=((r+3) % 11)/10.0,
g=((g+6) % 11)/10.0,
b=((b+9) % 11)/10.0)
for r in range(11) for g in range(11) for b in range(11)]
def choice_colors(n):
if n:
return colorline[0:-1:len(colorline)/n]
return []
if __name__=='__main__':
print choice_colors(10)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kazitanvirahsan/scrapy | tests/test_http_headers.py | 161 | 6348 | import unittest
import copy
from scrapy.http import Headers
class HeadersTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_basics(self):
h = Headers({'Content-Type': 'text/html', 'Content-Length': 1234})
assert h['Content-Type']
assert h['Content-Length']
self.assertRaises(KeyError, h.__getitem__, 'Accept')
self.assertEqual(h.get('Accept'), None)
self.assertEqual(h.getlist('Accept'), [])
self.assertEqual(h.get('Accept', '*/*'), b'*/*')
self.assertEqual(h.getlist('Accept', '*/*'), [b'*/*'])
self.assertEqual(h.getlist('Accept', ['text/html', 'images/jpeg']),
[b'text/html', b'images/jpeg'])
def test_single_value(self):
h = Headers()
h['Content-Type'] = 'text/html'
self.assertEqual(h['Content-Type'], b'text/html')
self.assertEqual(h.get('Content-Type'), b'text/html')
self.assertEqual(h.getlist('Content-Type'), [b'text/html'])
def test_multivalue(self):
h = Headers()
h['X-Forwarded-For'] = hlist = ['ip1', 'ip2']
self.assertEqual(h['X-Forwarded-For'], b'ip2')
self.assertEqual(h.get('X-Forwarded-For'), b'ip2')
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1', b'ip2'])
assert h.getlist('X-Forwarded-For') is not hlist
def test_encode_utf8(self):
h = Headers({u'key': u'\xa3'}, encoding='utf-8')
key, val = dict(h).popitem()
assert isinstance(key, bytes), key
assert isinstance(val[0], bytes), val[0]
self.assertEqual(val[0], b'\xc2\xa3')
def test_encode_latin1(self):
h = Headers({u'key': u'\xa3'}, encoding='latin1')
key, val = dict(h).popitem()
self.assertEqual(val[0], b'\xa3')
def test_encode_multiple(self):
h = Headers({u'key': [u'\xa3']}, encoding='utf-8')
key, val = dict(h).popitem()
self.assertEqual(val[0], b'\xc2\xa3')
def test_delete_and_contains(self):
h = Headers()
h['Content-Type'] = 'text/html'
assert 'Content-Type' in h
del h['Content-Type']
assert 'Content-Type' not in h
def test_setdefault(self):
h = Headers()
hlist = ['ip1', 'ip2']
olist = h.setdefault('X-Forwarded-For', hlist)
assert h.getlist('X-Forwarded-For') is not hlist
assert h.getlist('X-Forwarded-For') is olist
h = Headers()
olist = h.setdefault('X-Forwarded-For', 'ip1')
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1'])
assert h.getlist('X-Forwarded-For') is olist
def test_iterables(self):
idict = {'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']}
h = Headers(idict)
self.assertDictEqual(dict(h),
{b'Content-Type': [b'text/html'],
b'X-Forwarded-For': [b'ip1', b'ip2']})
self.assertSortedEqual(h.keys(),
[b'X-Forwarded-For', b'Content-Type'])
self.assertSortedEqual(h.items(),
[(b'X-Forwarded-For', [b'ip1', b'ip2']),
(b'Content-Type', [b'text/html'])])
self.assertSortedEqual(h.iteritems(),
[(b'X-Forwarded-For', [b'ip1', b'ip2']),
(b'Content-Type', [b'text/html'])])
self.assertSortedEqual(h.values(), [b'ip2', b'text/html'])
def test_update(self):
h = Headers()
h.update({'Content-Type': 'text/html',
'X-Forwarded-For': ['ip1', 'ip2']})
self.assertEqual(h.getlist('Content-Type'), [b'text/html'])
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1', b'ip2'])
def test_copy(self):
h1 = Headers({'header1': ['value1', 'value2']})
h2 = copy.copy(h1)
self.assertEqual(h1, h2)
self.assertEqual(h1.getlist('header1'), h2.getlist('header1'))
assert h1.getlist('header1') is not h2.getlist('header1')
assert isinstance(h2, Headers)
def test_appendlist(self):
h1 = Headers({'header1': 'value1'})
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), [b'value1', b'value3'])
h1 = Headers()
h1.appendlist('header1', 'value1')
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), [b'value1', b'value3'])
def test_setlist(self):
h1 = Headers({'header1': 'value1'})
self.assertEqual(h1.getlist('header1'), [b'value1'])
h1.setlist('header1', [b'value2', b'value3'])
self.assertEqual(h1.getlist('header1'), [b'value2', b'value3'])
def test_setlistdefault(self):
h1 = Headers({'header1': 'value1'})
h1.setlistdefault('header1', ['value2', 'value3'])
h1.setlistdefault('header2', ['value2', 'value3'])
self.assertEqual(h1.getlist('header1'), [b'value1'])
self.assertEqual(h1.getlist('header2'), [b'value2', b'value3'])
def test_none_value(self):
h1 = Headers()
h1['foo'] = 'bar'
h1['foo'] = None
h1.setdefault('foo', 'bar')
self.assertEqual(h1.get('foo'), None)
self.assertEqual(h1.getlist('foo'), [])
def test_int_value(self):
h1 = Headers({'hey': 5})
h1['foo'] = 1
h1.setdefault('bar', 2)
h1.setlist('buz', [1, 'dos', 3])
self.assertEqual(h1.getlist('foo'), [b'1'])
self.assertEqual(h1.getlist('bar'), [b'2'])
self.assertEqual(h1.getlist('buz'), [b'1', b'dos', b'3'])
self.assertEqual(h1.getlist('hey'), [b'5'])
def test_invalid_value(self):
self.assertRaisesRegexp(TypeError, 'Unsupported value type',
Headers, {'foo': object()})
self.assertRaisesRegexp(TypeError, 'Unsupported value type',
Headers().__setitem__, 'foo', object())
self.assertRaisesRegexp(TypeError, 'Unsupported value type',
Headers().setdefault, 'foo', object())
self.assertRaisesRegexp(TypeError, 'Unsupported value type',
Headers().setlist, 'foo', [object()])
| bsd-3-clause |
bssrdf/deeppy | deeppy/dataset/cifar10.py | 17 | 2196 | import os
import pickle
import numpy as np
from ..base import float_, int_
from .dataset import Dataset
_URLS = [
'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
]
_SHA1S = [
'874905e36347c8536514d0a26261acf3bff89bc7',
]
class CIFAR10(Dataset):
'''
The CIFAR-10 dataset [1]
http://www.cs.toronto.edu/~kriz/cifar.html
References:
[1]: Learning Multiple Layers of Features from Tiny Images, Alex
Krizhevsky, 2009.
'''
def __init__(self, data_root='datasets'):
self.name = 'cifar10'
self.n_classes = 10
self.n_test = 10000
self.n_train = 50000
self.img_shape = (3, 32, 32)
self.data_dir = os.path.join(data_root, self.name)
self._install()
self._data = self._load()
def data(self, flat=False, dp_dtypes=False):
x_train, y_train, x_test, y_test = self._data
if dp_dtypes:
x_train = x_train.astype(float_)
y_train = y_train.astype(int_)
x_test = x_test.astype(float_)
y_test = y_test.astype(int_)
if flat:
x_train = np.reshape(x_train, (x_train.shape[0], -1))
x_test = np.reshape(x_test, (x_test.shape[0], -1))
return x_train, y_train, x_test, y_test
def _install(self):
self._download(_URLS, _SHA1S)
self._unpack()
def _load(self):
dirpath = os.path.join(self.data_dir, 'cifar-10-batches-py')
filenames = ['data_batch_1', 'data_batch_2', 'data_batch_3',
'data_batch_4', 'data_batch_5', 'test_batch']
x = []
y = []
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as f:
dic = pickle.load(f)
x.append(dic['data'])
y.append(dic['labels'])
x_train = np.vstack(x[:5])
y_train = np.hstack(y[:5])
x_test = np.array(x[5])
y_test = np.array(y[5])
x_train = np.reshape(x_train, (self.n_train,) + self.img_shape)
x_test = np.reshape(x_test, (self.n_test,) + self.img_shape)
return x_train, y_train, x_test, y_test
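# A minimal usage sketch (not part of the original module; assumes the
# archive can be fetched into the default 'datasets' directory):
#
#     dataset = CIFAR10()
#     x_train, y_train, x_test, y_test = dataset.data(flat=True)
#     # x_train.shape == (50000, 3072) when flat; (50000, 3, 32, 32) otherwise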
| mit |
liuyisiyisi/django-cms | cms/migrations/0004_auto_20140924_1038.py | 65 | 1420 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20140926_2347'),
]
operations = [
migrations.AddField(
model_name='cmsplugin',
name='depth',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='cmsplugin',
name='numchild',
field=models.PositiveIntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='cmsplugin',
name='path',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='page',
name='depth',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='page',
name='numchild',
field=models.PositiveIntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='page',
name='path',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
| bsd-3-clause |
andyzsf/edx_data_research | reporting_scripts/navigation_tabs_data_date_completers.py | 1 | 2700 | '''
This module keeps track of the number of times each navigation tab was clicked/viewed
by students who completed the course, for each day during the course
Usage:
python navigation_tabs_data_date.py
'''
import csv
from datetime import datetime
from collections import defaultdict
import sys
from base_edx import EdXConnection
from generate_csv_report import CSV
connection = EdXConnection('tracking_atoc185x')
collection = connection.get_access_to_collection()
# Get all users who completed the course. If you do not have a CSV with the list
# of users who completed the course, you will have to extract it from the
# MongoDB database
with open('csv_files/McGillX_CHEM181x_1T2014_grade_report_2014-04-24-1030.csv', 'r') as csv_file:
reader = csv.reader(csv_file)
reader.next()
usernames = [row[2] for row in reader]
NAVIGATION_TABS = {'/courses/McGillX/ATOC185x/2T2014/info' : 'info', '/courses/McGillX/ATOC185x/2T2014/progress' : 'progress', '/courses/McGillX/ATOC185x/2T2014/109d5374b52040e2a8b737cf90c5618a/' : 'syllabus', '/courses/McGillX/ATOC185x/2T2014/441b2c519f5c464883e2ddceb26c5559/' : 'maps','/courses/McGillX/ATOC185x/2T2014/84f630e833eb4dbabe0a6c45c52bb443/' : 'scoreboard' , '/courses/McGillX/ATOC185x/2T2014/e75195cb39fa4e3890a613a1b3c04c7d/' : 'faq', 'courseware' : 'courseware', 'discussion': 'discussion', '/courses/McGillX/ATOC185x/2T2014/instructor' : 'instructor'}
cursor = collection['tracking_atoc185x'].find({'username' : {'$in' : usernames},'event_type' : { '$regex' : '^/courses/McGillX/ATOC185x/2T2014/(info$|progress$|instructor$|109d5374b52040e2a8b737cf90c5618a/$|441b2c519f5c464883e2ddceb26c5559/$|84f630e833eb4dbabe0a6c45c52bb443/$|e75195cb39fa4e3890a613a1b3c04c7d/$|courseware|discussion)'}})
tab_events_per_date = defaultdict(int)
for doc in cursor:
date = datetime.strptime(doc['time'].split('T')[0], "%Y-%m-%d").date()
if 'courseware' in doc['event_type']:
tab_events_per_date[(date,'courseware')] += 1
elif 'discussion' in doc['event_type']:
tab_events_per_date[(date, 'discussion')] += 1
else:
tab_events_per_date[(date, doc['event_type'])] += 1
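# tab_events_per_date maps (date, tab) keys to counts; 'tab' is either a
# short name ('courseware', 'discussion') or the full event_type URL.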
result = []
for date, tab in tab_events_per_date:
result.append([date,tab, tab_events_per_date[(date,tab)]])
output = CSV(result, ['Date','Tab ID','Number of Events'], output_file='number_of_tab_events_per_date_completers.csv')
output.generate_csv()
#with open('csv_files/number_of_tab_events_per_date_completers.csv', 'w') as csv_file:
# writer = csv.writer(csv_file)
# writer.writerow(['Date','Tab ID','Number of Events'])
# for date,tab in tab_events_per_date:
# writer.writerow([date,tab, tab_events_per_date[(date,tab)] ])
| mit |
Kilhog/odoo | addons/gamification/models/goal.py | 219 | 26522 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
import logging
import time
from datetime import date, datetime, timedelta
_logger = logging.getLogger(__name__)
class gamification_goal_definition(osv.Model):
"""Goal definition
A goal definition contains the way to evaluate an objective
Each module wanting to be able to set goals to the users needs to create
a new gamification_goal_definition
"""
_name = 'gamification.goal.definition'
_description = 'Gamification goal definition'
def _get_suffix(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for goal in self.browse(cr, uid, ids, context=context):
if goal.suffix and not goal.monetary:
res[goal.id] = goal.suffix
elif goal.monetary:
# use the current user's company currency
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if goal.suffix:
res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix)
else:
res[goal.id] = user.company_id.currency_id.symbol
else:
res[goal.id] = ""
return res
_columns = {
'name': fields.char('Goal Definition', required=True, translate=True),
'description': fields.text('Goal Description'),
'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."),
'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True),
'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"),
'computation_mode': fields.selection([
('manually', 'Recorded manually'),
('count', 'Automatic: number of records'),
('sum', 'Automatic: sum on a field'),
('python', 'Automatic: execute a specific Python code'),
],
string="Computation Mode",
help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.",
required=True),
'display_mode': fields.selection([
('progress', 'Progressive (using numerical values)'),
('boolean', 'Exclusive (done or not-done)'),
],
string="Displayed as", required=True),
'model_id': fields.many2one('ir.model',
string='Model',
help='The model object for the field to evaluate'),
'model_inherited_model_ids': fields.related('model_id', 'inherited_model_ids', type="many2many", obj="ir.model",
string="Inherited models", readonly="True"),
'field_id': fields.many2one('ir.model.fields',
string='Field to Sum',
help='The field containing the value to evaluate'),
'field_date_id': fields.many2one('ir.model.fields',
string='Date Field',
help='The date to use for the time period evaluated'),
'domain': fields.char("Filter Domain",
help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.",
required=True),
'batch_mode': fields.boolean('Batch Mode',
help="Evaluate the expression in batch instead of once for each user"),
'batch_distinctive_field': fields.many2one('ir.model.fields',
string="Distinctive field for batch user",
help="In batch mode, this indicates which field distinct one user form the other, e.g. user_id, partner_id..."),
'batch_user_expression': fields.char("Evaluated expression for batch mode",
help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."),
'compute_code': fields.text('Python Code',
help="Python code to be executed for each user. 'result' should contains the new current value. Evaluated user can be access through object.user_id."),
'condition': fields.selection([
('higher', 'The higher the better'),
('lower', 'The lower the better')
],
string='Goal Performance',
help='Defines whether the goal is reached when the current value goes above or below the target value',
required=True),
'action_id': fields.many2one('ir.actions.act_window', string="Action",
help="The action that will be called to update the goal value."),
'res_id_field': fields.char("ID Field of user",
help="The field name on the user profile (res.users) containing the value for res_id for action."),
}
_defaults = {
'condition': 'higher',
'computation_mode': 'manually',
'domain': "[]",
'monetary': False,
'display_mode': 'progress',
}
def number_following(self, cr, uid, model_name="mail.thread", context=None):
"""Return the number of 'model_name' objects the user is following
The model specified in 'model_name' must inherit from mail.thread
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)
def _check_domain_validity(self, cr, uid, ids, context=None):
# take admin as should always be present
superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context)
for definition in self.browse(cr, uid, ids, context=context):
if definition.computation_mode not in ('count', 'sum'):
continue
obj = self.pool[definition.model_id.model]
try:
domain = safe_eval(definition.domain, {'user': superuser})
# dummy search to make sure the domain is valid
obj.search(cr, uid, domain, context=context, count=True)
except (ValueError, SyntaxError), e:
msg = e.message or (e.msg + '\n' + e.text)
raise osv.except_osv(_('Error!'),_("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg)))
return True
def create(self, cr, uid, vals, context=None):
res_id = super(gamification_goal_definition, self).create(cr, uid, vals, context=context)
if vals.get('computation_mode') in ('count', 'sum'):
self._check_domain_validity(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context)
if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')):
self._check_domain_validity(cr, uid, ids, context=context)
return res
def on_change_model_id(self, cr, uid, ids, model_id, context=None):
"""Prefill field model_inherited_model_ids"""
if not model_id:
return {'value': {'model_inherited_model_ids': []}}
model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)
# format (6, 0, []) to construct the domain ('model_id', 'in', m and m[0] and m[0][2])
return {'value': {'model_inherited_model_ids': [(6, 0, [m.id for m in model.inherited_model_ids])]}}
class gamification_goal(osv.Model):
"""Goal instance for a user
An individual goal for a user on a specified time period"""
_name = 'gamification.goal'
_description = 'Gamification goal instance'
def _get_completion(self, cr, uid, ids, field_name, arg, context=None):
"""Return the percentage of completeness of the goal, between 0 and 100"""
res = dict.fromkeys(ids, 0.0)
for goal in self.browse(cr, uid, ids, context=context):
if goal.definition_condition == 'higher':
if goal.current >= goal.target_goal:
res[goal.id] = 100.0
else:
res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2)
elif goal.current < goal.target_goal:
# a goal 'lower than' has only two values possible: 0 or 100%
res[goal.id] = 100.0
else:
res[goal.id] = 0.0
return res
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}}
_columns = {
'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"),
'user_id': fields.many2one('res.users', string='User', required=True, auto_join=True, ondelete="cascade"),
'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"),
'challenge_id': fields.related('line_id', 'challenge_id',
string="Challenge",
type='many2one',
relation='gamification.challenge',
store=True, readonly=True,
help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."),
'start_date': fields.date('Start Date'),
'end_date': fields.date('End Date'), # no start and end = always active
'target_goal': fields.float('To Reach',
required=True,
track_visibility='always'), # no goal = global index
'current': fields.float('Current Value', required=True, track_visibility='always'),
'completeness': fields.function(_get_completion, type='float', string='Completeness'),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In progress'),
('reached', 'Reached'),
('failed', 'Failed'),
('canceled', 'Canceled'),
],
string='State',
required=True,
track_visibility='always'),
'to_update': fields.boolean('To update'),
'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."),
'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"),
'remind_update_delay': fields.integer('Remind delay',
help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."),
'last_update': fields.date('Last Update',
help="In case of manual goal, reminders are sent if the goal as not been updated for a while (defined in challenge). Ignored in case of non-manual goal or goal not linked to a challenge."),
'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True),
'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True),
'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True),
'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True),
}
_defaults = {
'current': 0,
'state': 'draft',
'start_date': fields.date.today,
}
_order = 'start_date desc, end_date desc, definition_id, id'
def _check_remind_delay(self, cr, uid, goal, context=None):
"""Verify if a goal has not been updated for some time and send a
reminder message if needed.
:return: data to write on the goal object
"""
if goal.remind_update_delay and goal.last_update:
delta_max = timedelta(days=goal.remind_update_delay)
last_update = datetime.strptime(goal.last_update, DF).date()
if date.today() - last_update > delta_max:
# generate a remind report
temp_obj = self.pool.get('email.template')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_goal_reminder', context)
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.goal', goal.id, context=context)
self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment')
return {'to_update': True}
return {}
def _get_write_values(self, cr, uid, goal, new_value, context=None):
"""Generate values to write after recomputation of a goal score"""
if new_value == goal.current:
# avoid useless write if the new value is the same as the old one
return {}
result = {goal.id: {'current': new_value}}
if (goal.definition_id.condition == 'higher' and new_value >= goal.target_goal) \
or (goal.definition_id.condition == 'lower' and new_value <= goal.target_goal):
# success, do no set closed as can still change
result[goal.id]['state'] = 'reached'
elif goal.end_date and fields.date.today() > goal.end_date:
# check goal failure
result[goal.id]['state'] = 'failed'
result[goal.id]['closed'] = True
return result
def update(self, cr, uid, ids, context=None):
"""Update the goals to recomputes values and change of states
If a manual goal is not updated for enough time, the user will be
reminded to do so (done only once, in 'inprogress' state).
If a goal reaches the target value, the status is set to reached
If the end date is passed (at least +1 day, time not considered) without
the target value being reached, the goal is set as failed."""
if context is None:
context = {}
commit = context.get('commit_gamification', False)
goals_by_definition = {}
for goal in self.browse(cr, uid, ids, context=context):
goals_by_definition.setdefault(goal.definition_id, []).append(goal)
for definition, goals in goals_by_definition.items():
goals_to_write = dict((goal.id, {}) for goal in goals)
if definition.computation_mode == 'manually':
for goal in goals:
goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context))
elif definition.computation_mode == 'python':
# TODO batch execution
for goal in goals:
# execute the chosen method
cxt = {
'self': self.pool.get('gamification.goal'),
'object': goal,
'pool': self.pool,
'cr': cr,
'context': dict(context), # copy context to prevent side-effects of eval
'uid': uid,
'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time
}
code = definition.compute_code.strip()
safe_eval(code, cxt, mode="exec", nocopy=True)
# the result of the evaluated code is put in the 'result' local variable, propagated to the context
result = cxt.get('result')
if result is not None and type(result) in (float, int, long):
goals_to_write.update(
self._get_write_values(cr, uid, goal, result, context=context)
)
else:
_logger.exception(_('Invalid return content from the evaluation of code for definition %s') % definition.name)
else: # count or sum
obj = self.pool.get(definition.model_id.model)
field_date_name = definition.field_date_id and definition.field_date_id.name or False
if definition.computation_mode == 'count' and definition.batch_mode:
# batch mode, trying to do as much as possible in one request
general_domain = safe_eval(definition.domain)
field_name = definition.batch_distinctive_field.name
subqueries = {}
for goal in goals:
start_date = field_date_name and goal.start_date or False
end_date = field_date_name and goal.end_date or False
subqueries.setdefault((start_date, end_date), {}).update({goal.id:safe_eval(definition.batch_user_expression, {'user': goal.user_id})})
# the global query should be split by time periods (especially for recurrent goals)
for (start_date, end_date), query_goals in subqueries.items():
subquery_domain = list(general_domain)
subquery_domain.append((field_name, 'in', list(set(query_goals.values()))))
if start_date:
subquery_domain.append((field_date_name, '>=', start_date))
if end_date:
subquery_domain.append((field_date_name, '<=', end_date))
if field_name == 'id':
# grouping on id does not work and is similar to search anyway
user_ids = obj.search(cr, uid, subquery_domain, context=context)
user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids]
else:
user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context)
# user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
for goal in [g for g in goals if g.id in query_goals.keys()]:
for user_value in user_values:
queried_value = field_name in user_value and user_value[field_name] or False
if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)):
queried_value = queried_value[0]
if queried_value == query_goals[goal.id]:
new_value = user_value.get(field_name+'_count', goal.current)
goals_to_write.update(
self._get_write_values(cr, uid, goal, new_value, context=context)
)
else:
for goal in goals:
# eval the domain with user replaced by goal user object
domain = safe_eval(definition.domain, {'user': goal.user_id})
# add temporal clause(s) to the domain if fields are filled on the goal
if goal.start_date and field_date_name:
domain.append((field_date_name, '>=', goal.start_date))
if goal.end_date and field_date_name:
domain.append((field_date_name, '<=', goal.end_date))
if definition.computation_mode == 'sum':
field_name = definition.field_id.name
# TODO for master: group on user field in batch mode
res = obj.read_group(cr, uid, domain, [field_name], [], context=context)
new_value = res and res[0][field_name] or 0.0
else: # computation mode = count
new_value = obj.search(cr, uid, domain, context=context, count=True)
goals_to_write.update(
self._get_write_values(cr, uid, goal, new_value, context=context)
)
for goal_id, value in goals_to_write.items():
if not value:
continue
self.write(cr, uid, [goal_id], value, context=context)
if commit:
cr.commit()
return True
def action_start(self, cr, uid, ids, context=None):
"""Mark a goal as started.
This should only be used when creating goals manually (in draft state)"""
self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
return self.update(cr, uid, ids, context=context)
def action_reach(self, cr, uid, ids, context=None):
"""Mark a goal as reached.
If the target goal condition is not met, the state will be reset to In
Progress at the next goal update until the end date."""
return self.write(cr, uid, ids, {'state': 'reached'}, context=context)
def action_fail(self, cr, uid, ids, context=None):
"""Set the state of the goal to failed.
A failed goal will be ignored in future checks."""
return self.write(cr, uid, ids, {'state': 'failed'}, context=context)
def action_cancel(self, cr, uid, ids, context=None):
"""Reset the completion after setting a goal as reached or failed.
This only changes the current state; if the date and/or target criteria
match the conditions for a change of state, it will be applied at the
next goal update."""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add a 'no_remind_goal' field to True"""
context = dict(context or {})
context['no_remind_goal'] = True
return super(gamification_goal, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
"""Overwrite the write method to update the last_update field to today
If the current value is changed and the report frequency is set to On
change, a report is generated
"""
if context is None:
context = {}
vals['last_update'] = fields.date.today()
result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context)
for goal in self.browse(cr, uid, ids, context=context):
if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals):
# avoid drag&drop in kanban view
raise osv.except_osv(_('Error!'), _('Can not modify the configuration of a started goal'))
if vals.get('current'):
if 'no_remind_goal' in context:
# new goals should not be reported
continue
if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange':
self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context)
return result
def get_action(self, cr, uid, goal_id, context=None):
"""Get the ir.action related to update the goal
In case of a manual goal, should return a wizard to update the value
:return: action description in a dictionary
"""
goal = self.browse(cr, uid, goal_id, context=context)
if goal.definition_id.action_id:
# open the action linked to the goal
action = goal.definition_id.action_id.read()[0]
if goal.definition_id.res_id_field:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})
# if one element to display, should see it in form mode if possible
action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
return action
if goal.computation_mode == 'manually':
# open a wizard window to update the value manually
action = {
'name': _("Update %s") % goal.definition_id.name,
'id': goal_id,
'type': 'ir.actions.act_window',
'views': [[False, 'form']],
'target': 'new',
'context': {'default_goal_id': goal_id, 'default_current': goal.current},
'res_model': 'gamification.goal.wizard'
}
return action
return False
| agpl-3.0 |
crateio/crate.web | crate/web/history/migrations/0003_convert_changelog_to_history.py | 1 | 10430 | # -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):
depends_on = (
("packages", "0019_auto__add_field_releasefile_hidden"),
)
def forwards(self, orm):
for cl in orm["packages.ChangeLog"].objects.all().select_related("package", "version"):
e = orm["history.Event"](created=cl.created, package=cl.package.name)
if cl.type == "new":
e.action = "package_create"
else:
e.action = "release_create"
e.version = cl.release.version
e.save()
def backwards(self, orm):
raise Exception("Cannot Go Backwards")
models = {
'history.event': {
'Meta': {'object_name': 'Event'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'data': ('jsonfield.fields.JSONField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'package': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'})
},
'packages.changelog': {
'Meta': {'object_name': 'ChangeLog'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Package']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Release']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
'packages.package': {
'Meta': {'object_name': 'Package'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'downloads_synced_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
'normalized_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.packageuri': {
'Meta': {'unique_together': "(['package', 'uri'],)", 'object_name': 'PackageURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_links'", 'to': "orm['packages.Package']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '400'})
},
'packages.readthedocspackageslug': {
'Meta': {'object_name': 'ReadTheDocsPackageSlug'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'readthedocs_slug'", 'unique': 'True', 'to': "orm['packages.Package']"}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.release': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Release'},
'author': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'author_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'releases'", 'blank': 'True', 'to': "orm['packages.TroveClassifier']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_uri': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['packages.Package']"}),
'platform': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'requires_python': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'packages.releasefile': {
'Meta': {'unique_together': "(('release', 'type', 'python_version', 'filename'),)", 'object_name': 'ReleaseFile'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'digest': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'downloads': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'python_version': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['packages.Release']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'packages.releaseobsolete': {
'Meta': {'object_name': 'ReleaseObsolete'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'obsoletes'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaseprovide': {
'Meta': {'object_name': 'ReleaseProvide'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provides'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaserequire': {
'Meta': {'object_name': 'ReleaseRequire'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requires'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaseuri': {
'Meta': {'object_name': 'ReleaseURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'uris'", 'to': "orm['packages.Release']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '500'})
},
'packages.troveclassifier': {
'Meta': {'object_name': 'TroveClassifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trove': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '350'})
}
}
complete_apps = ['packages', 'history']
symmetrical = True
| bsd-2-clause |
aronparsons/spacewalk | backend/common/rhnFlags.py | 4 | 1402 | # Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Small module that handles a global flags structure. The global dictionary
# used to hold the flags is initialized at module load.
#
__F = {}
def set(name, value=1):
"""
set value
"""
# pylint: disable=W0622,W0602
global __F
if not name:
return None
name = name.lower()
__F[name] = value
return None
def get(name):
"""
get value
"""
if not name:
return None
name = name.lower()
return __F.get(name)
def test(name):
"""
test value
"""
if not name:
return 0
name = name.lower()
    return name in __F and __F[name]
def reset():
"""
reset all
"""
__F.clear()
def all():
"""
return all flags in a dict
"""
# pylint: disable=W0622
return __F
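# Illustrative usage (a sketch; assumes this module is importable as rhnFlags):
#
#   import rhnFlags
#   rhnFlags.set("Debug-Level", 2)    # keys are lowercased before storage
#   rhnFlags.get("debug-level")       # -> 2
#   rhnFlags.test("DEBUG-LEVEL")      # -> 2 (truthy only when set and non-false)
#   rhnFlags.reset()                  # clears every flag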
| gpl-2.0 |
spring-week-topos/horizon-week | openstack_dashboard/test/integration_tests/tests/test_login.py | 25 | 1210 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages import loginpage
class TestLogin(helpers.BaseTestCase):
"""This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
def test_login(self):
login_pg = loginpage.LoginPage(self.driver, self.conf)
login_pg.go_to_login_page()
home_pg = login_pg.login()
if not home_pg.is_logged_in:
self.fail("Could not determine if logged in")
home_pg.log_out()
| apache-2.0 |
potash/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
pawaranand/phr_frappe | frappe/core/doctype/workflow/workflow.py | 32 | 2524 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class Workflow(Document):
def validate(self):
self.set_active()
self.create_custom_field_for_workflow_state()
self.update_default_workflow_status()
self.validate_docstatus()
def on_update(self):
frappe.clear_cache(doctype=self.document_type)
def create_custom_field_for_workflow_state(self):
frappe.clear_cache(doctype=self.document_type)
meta = frappe.get_meta(self.document_type)
if not meta.get_field(self.workflow_state_field):
# create custom field
frappe.get_doc({
"doctype":"Custom Field",
"dt": self.document_type,
"__islocal": 1,
"fieldname": self.workflow_state_field,
"label": self.workflow_state_field.replace("_", " ").title(),
"hidden": 1,
"fieldtype": "Link",
"options": "Workflow State",
}).save()
frappe.msgprint(_("Created Custom Field {0} in {1}").format(self.workflow_state_field,
self.document_type))
def update_default_workflow_status(self):
docstatus_map = {}
states = self.get("workflow_document_states")
for d in states:
if not d.doc_status in docstatus_map:
frappe.db.sql("""update `tab%s` set `%s` = %s where \
ifnull(`%s`, '')='' and docstatus=%s""" % (self.document_type, self.workflow_state_field,
'%s', self.workflow_state_field, "%s"), (d.state, d.doc_status))
docstatus_map[d.doc_status] = d.state
def validate_docstatus(self):
def get_state(state):
for s in self.workflow_document_states:
if s.state==state:
return s
frappe.throw(frappe._("{0} not a valid State").format(state))
for t in self.workflow_transitions:
state = get_state(t.state)
next_state = get_state(t.next_state)
if state.doc_status=="2":
frappe.throw(frappe._("Cannot change state of Cancelled Document. Transition row {0}").format(t.idx))
if state.doc_status=="1" and next_state.doc_status=="0":
frappe.throw(frappe._("Submitted Document cannot be converted back to draft. Transition row {0}").format(t.idx))
if state.doc_status=="0" and next_state.doc_status=="2":
frappe.throw(frappe._("Cannot cancel before submitting. See Transition {0}").format(t.idx))
def set_active(self):
if int(self.is_active or 0):
# clear all other
frappe.db.sql("""update tabWorkflow set is_active=0
where document_type=%s""",
self.document_type)
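# Illustrative sketch of a minimal Workflow document this controller validates
# (field names are taken from the code above; a real Workflow may need more):
#
#   frappe.get_doc({
#       "doctype": "Workflow",
#       "document_type": "ToDo",
#       "workflow_state_field": "workflow_state",
#       "is_active": 1,
#       "workflow_document_states": [
#           {"state": "Pending", "doc_status": "0"},
#           {"state": "Approved", "doc_status": "1"},
#       ],
#       "workflow_transitions": [
#           {"state": "Pending", "next_state": "Approved"},
#       ],
#   }).insert()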
| mit |
theguardian/headphones | lib/unidecode/x0d2.py | 253 | 4724 | data = (
'toels', # 0x00
'toelt', # 0x01
'toelp', # 0x02
'toelh', # 0x03
'toem', # 0x04
'toeb', # 0x05
'toebs', # 0x06
'toes', # 0x07
'toess', # 0x08
'toeng', # 0x09
'toej', # 0x0a
'toec', # 0x0b
'toek', # 0x0c
'toet', # 0x0d
'toep', # 0x0e
'toeh', # 0x0f
'tyo', # 0x10
'tyog', # 0x11
'tyogg', # 0x12
'tyogs', # 0x13
'tyon', # 0x14
'tyonj', # 0x15
'tyonh', # 0x16
'tyod', # 0x17
'tyol', # 0x18
'tyolg', # 0x19
'tyolm', # 0x1a
'tyolb', # 0x1b
'tyols', # 0x1c
'tyolt', # 0x1d
'tyolp', # 0x1e
'tyolh', # 0x1f
'tyom', # 0x20
'tyob', # 0x21
'tyobs', # 0x22
'tyos', # 0x23
'tyoss', # 0x24
'tyong', # 0x25
'tyoj', # 0x26
'tyoc', # 0x27
'tyok', # 0x28
'tyot', # 0x29
'tyop', # 0x2a
'tyoh', # 0x2b
'tu', # 0x2c
'tug', # 0x2d
'tugg', # 0x2e
'tugs', # 0x2f
'tun', # 0x30
'tunj', # 0x31
'tunh', # 0x32
'tud', # 0x33
'tul', # 0x34
'tulg', # 0x35
'tulm', # 0x36
'tulb', # 0x37
'tuls', # 0x38
'tult', # 0x39
'tulp', # 0x3a
'tulh', # 0x3b
'tum', # 0x3c
'tub', # 0x3d
'tubs', # 0x3e
'tus', # 0x3f
'tuss', # 0x40
'tung', # 0x41
'tuj', # 0x42
'tuc', # 0x43
'tuk', # 0x44
'tut', # 0x45
'tup', # 0x46
'tuh', # 0x47
'tweo', # 0x48
'tweog', # 0x49
'tweogg', # 0x4a
'tweogs', # 0x4b
'tweon', # 0x4c
'tweonj', # 0x4d
'tweonh', # 0x4e
'tweod', # 0x4f
'tweol', # 0x50
'tweolg', # 0x51
'tweolm', # 0x52
'tweolb', # 0x53
'tweols', # 0x54
'tweolt', # 0x55
'tweolp', # 0x56
'tweolh', # 0x57
'tweom', # 0x58
'tweob', # 0x59
'tweobs', # 0x5a
'tweos', # 0x5b
'tweoss', # 0x5c
'tweong', # 0x5d
'tweoj', # 0x5e
'tweoc', # 0x5f
'tweok', # 0x60
'tweot', # 0x61
'tweop', # 0x62
'tweoh', # 0x63
'twe', # 0x64
'tweg', # 0x65
'twegg', # 0x66
'twegs', # 0x67
'twen', # 0x68
'twenj', # 0x69
'twenh', # 0x6a
'twed', # 0x6b
'twel', # 0x6c
'twelg', # 0x6d
'twelm', # 0x6e
'twelb', # 0x6f
'twels', # 0x70
'twelt', # 0x71
'twelp', # 0x72
'twelh', # 0x73
'twem', # 0x74
'tweb', # 0x75
'twebs', # 0x76
'twes', # 0x77
'twess', # 0x78
'tweng', # 0x79
'twej', # 0x7a
'twec', # 0x7b
'twek', # 0x7c
'twet', # 0x7d
'twep', # 0x7e
'tweh', # 0x7f
'twi', # 0x80
'twig', # 0x81
'twigg', # 0x82
'twigs', # 0x83
'twin', # 0x84
'twinj', # 0x85
'twinh', # 0x86
'twid', # 0x87
'twil', # 0x88
'twilg', # 0x89
'twilm', # 0x8a
'twilb', # 0x8b
'twils', # 0x8c
'twilt', # 0x8d
'twilp', # 0x8e
'twilh', # 0x8f
'twim', # 0x90
'twib', # 0x91
'twibs', # 0x92
'twis', # 0x93
'twiss', # 0x94
'twing', # 0x95
'twij', # 0x96
'twic', # 0x97
'twik', # 0x98
'twit', # 0x99
'twip', # 0x9a
'twih', # 0x9b
'tyu', # 0x9c
'tyug', # 0x9d
'tyugg', # 0x9e
'tyugs', # 0x9f
'tyun', # 0xa0
'tyunj', # 0xa1
'tyunh', # 0xa2
'tyud', # 0xa3
'tyul', # 0xa4
'tyulg', # 0xa5
'tyulm', # 0xa6
'tyulb', # 0xa7
'tyuls', # 0xa8
'tyult', # 0xa9
'tyulp', # 0xaa
'tyulh', # 0xab
'tyum', # 0xac
'tyub', # 0xad
'tyubs', # 0xae
'tyus', # 0xaf
'tyuss', # 0xb0
'tyung', # 0xb1
'tyuj', # 0xb2
'tyuc', # 0xb3
'tyuk', # 0xb4
'tyut', # 0xb5
'tyup', # 0xb6
'tyuh', # 0xb7
'teu', # 0xb8
'teug', # 0xb9
'teugg', # 0xba
'teugs', # 0xbb
'teun', # 0xbc
'teunj', # 0xbd
'teunh', # 0xbe
'teud', # 0xbf
'teul', # 0xc0
'teulg', # 0xc1
'teulm', # 0xc2
'teulb', # 0xc3
'teuls', # 0xc4
'teult', # 0xc5
'teulp', # 0xc6
'teulh', # 0xc7
'teum', # 0xc8
'teub', # 0xc9
'teubs', # 0xca
'teus', # 0xcb
'teuss', # 0xcc
'teung', # 0xcd
'teuj', # 0xce
'teuc', # 0xcf
'teuk', # 0xd0
'teut', # 0xd1
'teup', # 0xd2
'teuh', # 0xd3
'tyi', # 0xd4
'tyig', # 0xd5
'tyigg', # 0xd6
'tyigs', # 0xd7
'tyin', # 0xd8
'tyinj', # 0xd9
'tyinh', # 0xda
'tyid', # 0xdb
'tyil', # 0xdc
'tyilg', # 0xdd
'tyilm', # 0xde
'tyilb', # 0xdf
'tyils', # 0xe0
'tyilt', # 0xe1
'tyilp', # 0xe2
'tyilh', # 0xe3
'tyim', # 0xe4
'tyib', # 0xe5
'tyibs', # 0xe6
'tyis', # 0xe7
'tyiss', # 0xe8
'tying', # 0xe9
'tyij', # 0xea
'tyic', # 0xeb
'tyik', # 0xec
'tyit', # 0xed
'tyip', # 0xee
'tyih', # 0xef
'ti', # 0xf0
'tig', # 0xf1
'tigg', # 0xf2
'tigs', # 0xf3
'tin', # 0xf4
'tinj', # 0xf5
'tinh', # 0xf6
'tid', # 0xf7
'til', # 0xf8
'tilg', # 0xf9
'tilm', # 0xfa
'tilb', # 0xfb
'tils', # 0xfc
'tilt', # 0xfd
'tilp', # 0xfe
'tilh', # 0xff
)
| gpl-3.0 |
pdellaert/ansible | lib/ansible/modules/network/fortimanager/fmgr_secprof_ssl_ssh.py | 39 | 39490 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_secprof_ssl_ssh
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manage SSL and SSH security profiles in FortiManager
description:
- Manage SSL and SSH security profiles in FortiManager via the FMG API
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
      - Sets one of four modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
whitelist:
description:
- Enable/disable exempting servers by FortiGuard whitelist.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
use_ssl_server:
description:
- Enable/disable the use of SSL server table for SSL offloading.
- choice | disable | Don't use SSL server configuration.
- choice | enable | Use SSL server configuration.
required: false
choices: ["disable", "enable"]
untrusted_caname:
description:
- Untrusted CA certificate used by SSL Inspection.
required: false
ssl_exemptions_log:
description:
- Enable/disable logging SSL exemptions.
- choice | disable | Disable logging SSL exemptions.
- choice | enable | Enable logging SSL exemptions.
required: false
choices: ["disable", "enable"]
ssl_anomalies_log:
description:
- Enable/disable logging SSL anomalies.
- choice | disable | Disable logging SSL anomalies.
- choice | enable | Enable logging SSL anomalies.
required: false
choices: ["disable", "enable"]
server_cert_mode:
description:
- Re-sign or replace the server's certificate.
- choice | re-sign | Multiple clients connecting to multiple servers.
- choice | replace | Protect an SSL server.
required: false
choices: ["re-sign", "replace"]
server_cert:
description:
- Certificate used by SSL Inspection to replace server certificate.
required: false
rpc_over_https:
description:
- Enable/disable inspection of RPC over HTTPS.
- choice | disable | Disable inspection of RPC over HTTPS.
- choice | enable | Enable inspection of RPC over HTTPS.
required: false
choices: ["disable", "enable"]
name:
description:
- Name.
required: false
mapi_over_https:
description:
- Enable/disable inspection of MAPI over HTTPS.
- choice | disable | Disable inspection of MAPI over HTTPS.
- choice | enable | Enable inspection of MAPI over HTTPS.
required: false
choices: ["disable", "enable"]
comment:
description:
- Optional comments.
required: false
caname:
description:
- CA certificate used by SSL Inspection.
required: false
ftps:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ftps_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
ftps_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ftps_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
ftps_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
ftps_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ftps_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
https:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
https_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
https_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
https_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
https_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | certificate-inspection | Inspect SSL handshake only.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "certificate-inspection", "deep-inspection"]
https_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
https_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
imaps:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
imaps_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
imaps_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
imaps_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
imaps_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
imaps_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
imaps_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
pop3s:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
pop3s_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
pop3s_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
pop3s_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
pop3s_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
pop3s_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
pop3s_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
smtps:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
smtps_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
smtps_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
smtps_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
smtps_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
smtps_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
smtps_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
ssh:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssh_inspect_all:
description:
- Level of SSL inspection.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
ssh_ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
required: false
ssh_ssh_algorithm:
description:
- Relative strength of encryption algorithms accepted during negotiation.
- choice | compatible | Allow a broader set of encryption algorithms for best compatibility.
- choice | high-encryption | Allow only AES-CTR, AES-GCM ciphers and high encryption algorithms.
required: false
choices: ["compatible", "high-encryption"]
ssh_ssh_policy_check:
description:
- Enable/disable SSH policy check.
- choice | disable | Disable SSH policy check.
- choice | enable | Enable SSH policy check.
required: false
choices: ["disable", "enable"]
ssh_ssh_tun_policy_check:
description:
- Enable/disable SSH tunnel policy check.
- choice | disable | Disable SSH tunnel policy check.
- choice | enable | Enable SSH tunnel policy check.
required: false
choices: ["disable", "enable"]
ssh_status:
description:
- Configure protocol inspection status.
- choice | disable | Disable.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "deep-inspection"]
ssh_unsupported_version:
description:
- Action based on SSH version being unsupported.
- choice | block | Block.
- choice | bypass | Bypass.
required: false
choices: ["block", "bypass"]
ssl:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssl_allow_invalid_server_cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
ssl_client_cert_request:
description:
- Action based on client certificate request failure.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_inspect_all:
description:
- Level of SSL inspection.
- choice | disable | Disable.
- choice | certificate-inspection | Inspect SSL handshake only.
- choice | deep-inspection | Full SSL inspection.
required: false
choices: ["disable", "certificate-inspection", "deep-inspection"]
ssl_unsupported_ssl:
description:
- Action based on the SSL encryption used being unsupported.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_untrusted_cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
- choice | allow | Allow the untrusted server certificate.
- choice | block | Block the connection when an untrusted server certificate is detected.
- choice | ignore | Always take the server certificate as trusted.
required: false
choices: ["allow", "block", "ignore"]
ssl_exempt:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssl_exempt_address:
description:
- IPv4 address object.
required: false
ssl_exempt_address6:
description:
- IPv6 address object.
required: false
ssl_exempt_fortiguard_category:
description:
- FortiGuard category ID.
required: false
ssl_exempt_regex:
description:
- Exempt servers by regular expression.
required: false
ssl_exempt_type:
description:
- Type of address object (IPv4 or IPv6) or FortiGuard category.
- choice | fortiguard-category | FortiGuard category.
- choice | address | Firewall IPv4 address.
- choice | address6 | Firewall IPv6 address.
- choice | wildcard-fqdn | Fully Qualified Domain Name with wildcard characters.
- choice | regex | Regular expression FQDN.
required: false
choices: ["fortiguard-category", "address", "address6", "wildcard-fqdn", "regex"]
ssl_exempt_wildcard_fqdn:
description:
- Exempt servers by wildcard FQDN.
required: false
ssl_server:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssl_server_ftps_client_cert_request:
description:
- Action based on client certificate request failure during the FTPS handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_server_https_client_cert_request:
description:
- Action based on client certificate request failure during the HTTPS handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_server_imaps_client_cert_request:
description:
- Action based on client certificate request failure during the IMAPS handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_server_ip:
description:
- IPv4 address of the SSL server.
required: false
ssl_server_pop3s_client_cert_request:
description:
- Action based on client certificate request failure during the POP3S handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_server_smtps_client_cert_request:
description:
- Action based on client certificate request failure during the SMTPS handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
ssl_server_ssl_other_client_cert_request:
description:
- Action based on client certificate request failure during an SSL protocol handshake.
- choice | bypass | Bypass.
- choice | inspect | Inspect.
- choice | block | Block.
required: false
choices: ["bypass", "inspect", "block"]
'''
EXAMPLES = '''
- name: DELETE Profile
fmgr_secprof_ssl_ssh:
name: Ansible_SSL_SSH_Profile
mode: delete
- name: CREATE Profile
fmgr_secprof_ssl_ssh:
name: Ansible_SSL_SSH_Profile
comment: "Created by Ansible Module TEST"
mode: set
mapi_over_https: enable
rpc_over_https: enable
server_cert_mode: replace
ssl_anomalies_log: enable
ssl_exemptions_log: enable
use_ssl_server: enable
whitelist: enable
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
mode = paramgram["mode"]
adom = paramgram["adom"]
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
# EVAL THE MODE PARAMETER FOR SET OR ADD
if mode in ['set', 'add', 'update']:
url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile'.format(adom=adom)
datagram = scrub_dict(prepare_dict(paramgram))
# EVAL THE MODE PARAMETER FOR DELETE
elif mode == "delete":
# SET THE CORRECT URL FOR DELETE
url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile/{name}'.format(adom=adom, name=paramgram["name"])
datagram = {}
response = fmgr.process_request(url, datagram, paramgram["mode"])
return response
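# For example, mode='set' with adom='root' sends the scrubbed paramgram to
# '/pm/config/adom/root/obj/firewall/ssl-ssh-profile', while mode='delete'
# appends the profile name to that URL and sends an empty datagram.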
#############
# END METHODS
#############
def main():
argument_spec = dict(
adom=dict(type="str", default="root"),
mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
whitelist=dict(required=False, type="str", choices=["disable", "enable"]),
use_ssl_server=dict(required=False, type="str", choices=["disable", "enable"]),
untrusted_caname=dict(required=False, type="str"),
ssl_exemptions_log=dict(required=False, type="str", choices=["disable", "enable"]),
ssl_anomalies_log=dict(required=False, type="str", choices=["disable", "enable"]),
server_cert_mode=dict(required=False, type="str", choices=["re-sign", "replace"]),
server_cert=dict(required=False, type="str"),
rpc_over_https=dict(required=False, type="str", choices=["disable", "enable"]),
name=dict(required=False, type="str"),
mapi_over_https=dict(required=False, type="str", choices=["disable", "enable"]),
comment=dict(required=False, type="str"),
caname=dict(required=False, type="str"),
ftps=dict(required=False, type="list"),
ftps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
ftps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ftps_ports=dict(required=False, type="str"),
ftps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
ftps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ftps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
https=dict(required=False, type="list"),
https_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
https_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
https_ports=dict(required=False, type="str"),
https_status=dict(required=False, type="str", choices=["disable", "certificate-inspection", "deep-inspection"]),
https_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
https_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
imaps=dict(required=False, type="list"),
imaps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
imaps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
imaps_ports=dict(required=False, type="str"),
imaps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
imaps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
imaps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
pop3s=dict(required=False, type="list"),
pop3s_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
pop3s_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
pop3s_ports=dict(required=False, type="str"),
pop3s_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
pop3s_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
pop3s_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
smtps=dict(required=False, type="list"),
smtps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
smtps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
smtps_ports=dict(required=False, type="str"),
smtps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
smtps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
smtps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
ssh=dict(required=False, type="list"),
ssh_inspect_all=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
ssh_ports=dict(required=False, type="str"),
ssh_ssh_algorithm=dict(required=False, type="str", choices=["compatible", "high-encryption"]),
ssh_ssh_policy_check=dict(required=False, type="str", choices=["disable", "enable"]),
ssh_ssh_tun_policy_check=dict(required=False, type="str", choices=["disable", "enable"]),
ssh_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
ssh_unsupported_version=dict(required=False, type="str", choices=["block", "bypass"]),
ssl=dict(required=False, type="list"),
ssl_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
ssl_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_inspect_all=dict(required=False, type="str", choices=["disable", "certificate-inspection",
"deep-inspection"]),
ssl_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
ssl_exempt=dict(required=False, type="list"),
ssl_exempt_address=dict(required=False, type="str"),
ssl_exempt_address6=dict(required=False, type="str"),
ssl_exempt_fortiguard_category=dict(required=False, type="str"),
ssl_exempt_regex=dict(required=False, type="str"),
ssl_exempt_type=dict(required=False, type="str", choices=["fortiguard-category", "address", "address6",
"wildcard-fqdn", "regex"]),
ssl_exempt_wildcard_fqdn=dict(required=False, type="str"),
ssl_server=dict(required=False, type="list"),
ssl_server_ftps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_server_https_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_server_imaps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_server_ip=dict(required=False, type="str"),
ssl_server_pop3s_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_server_smtps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
ssl_server_ssl_other_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect",
"block"]),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
# MODULE PARAMGRAM
paramgram = {
"mode": module.params["mode"],
"adom": module.params["adom"],
"whitelist": module.params["whitelist"],
"use-ssl-server": module.params["use_ssl_server"],
"untrusted-caname": module.params["untrusted_caname"],
"ssl-exemptions-log": module.params["ssl_exemptions_log"],
"ssl-anomalies-log": module.params["ssl_anomalies_log"],
"server-cert-mode": module.params["server_cert_mode"],
"server-cert": module.params["server_cert"],
"rpc-over-https": module.params["rpc_over_https"],
"name": module.params["name"],
"mapi-over-https": module.params["mapi_over_https"],
"comment": module.params["comment"],
"caname": module.params["caname"],
"ftps": {
"allow-invalid-server-cert": module.params["ftps_allow_invalid_server_cert"],
"client-cert-request": module.params["ftps_client_cert_request"],
"ports": module.params["ftps_ports"],
"status": module.params["ftps_status"],
"unsupported-ssl": module.params["ftps_unsupported_ssl"],
"untrusted-cert": module.params["ftps_untrusted_cert"],
},
"https": {
"allow-invalid-server-cert": module.params["https_allow_invalid_server_cert"],
"client-cert-request": module.params["https_client_cert_request"],
"ports": module.params["https_ports"],
"status": module.params["https_status"],
"unsupported-ssl": module.params["https_unsupported_ssl"],
"untrusted-cert": module.params["https_untrusted_cert"],
},
"imaps": {
"allow-invalid-server-cert": module.params["imaps_allow_invalid_server_cert"],
"client-cert-request": module.params["imaps_client_cert_request"],
"ports": module.params["imaps_ports"],
"status": module.params["imaps_status"],
"unsupported-ssl": module.params["imaps_unsupported_ssl"],
"untrusted-cert": module.params["imaps_untrusted_cert"],
},
"pop3s": {
"allow-invalid-server-cert": module.params["pop3s_allow_invalid_server_cert"],
"client-cert-request": module.params["pop3s_client_cert_request"],
"ports": module.params["pop3s_ports"],
"status": module.params["pop3s_status"],
"unsupported-ssl": module.params["pop3s_unsupported_ssl"],
"untrusted-cert": module.params["pop3s_untrusted_cert"],
},
"smtps": {
"allow-invalid-server-cert": module.params["smtps_allow_invalid_server_cert"],
"client-cert-request": module.params["smtps_client_cert_request"],
"ports": module.params["smtps_ports"],
"status": module.params["smtps_status"],
"unsupported-ssl": module.params["smtps_unsupported_ssl"],
"untrusted-cert": module.params["smtps_untrusted_cert"],
},
"ssh": {
"inspect-all": module.params["ssh_inspect_all"],
"ports": module.params["ssh_ports"],
"ssh-algorithm": module.params["ssh_ssh_algorithm"],
"ssh-policy-check": module.params["ssh_ssh_policy_check"],
"ssh-tun-policy-check": module.params["ssh_ssh_tun_policy_check"],
"status": module.params["ssh_status"],
"unsupported-version": module.params["ssh_unsupported_version"],
},
"ssl": {
"allow-invalid-server-cert": module.params["ssl_allow_invalid_server_cert"],
"client-cert-request": module.params["ssl_client_cert_request"],
"inspect-all": module.params["ssl_inspect_all"],
"unsupported-ssl": module.params["ssl_unsupported_ssl"],
"untrusted-cert": module.params["ssl_untrusted_cert"],
},
"ssl-exempt": {
"address": module.params["ssl_exempt_address"],
"address6": module.params["ssl_exempt_address6"],
"fortiguard-category": module.params["ssl_exempt_fortiguard_category"],
"regex": module.params["ssl_exempt_regex"],
"type": module.params["ssl_exempt_type"],
"wildcard-fqdn": module.params["ssl_exempt_wildcard_fqdn"],
},
"ssl-server": {
"ftps-client-cert-request": module.params["ssl_server_ftps_client_cert_request"],
"https-client-cert-request": module.params["ssl_server_https_client_cert_request"],
"imaps-client-cert-request": module.params["ssl_server_imaps_client_cert_request"],
"ip": module.params["ssl_server_ip"],
"pop3s-client-cert-request": module.params["ssl_server_pop3s_client_cert_request"],
"smtps-client-cert-request": module.params["ssl_server_smtps_client_cert_request"],
"ssl-other-client-cert-request": module.params["ssl_server_ssl_other_client_cert_request"],
}
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
list_overrides = ['ftps', 'https', 'imaps', 'pop3s', 'smtps', 'ssh', 'ssl', 'ssl-exempt', 'ssl-server']
paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
paramgram=paramgram, module=module)
results = DEFAULT_RESULT_OBJ
try:
results = fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram)
fmgr.govern_response(module=module, results=results,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 |
unaizalakain/django | django/contrib/sessions/middleware.py | 256 | 2658 | import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
def __init__(self):
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
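# Illustrative wiring (a sketch for the old-style middleware API this class
# implements; SESSION_ENGINE and the SESSION_COOKIE_* names match the settings
# consumed above):
#
#   MIDDLEWARE_CLASSES = [
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       ...
#   ]
#   SESSION_ENGINE = 'django.contrib.sessions.backends.db'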
| bsd-3-clause |
Nabs007/ansible | lib/ansible/module_utils/rax.py | 280 | 11974 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re

from uuid import UUID

# BOOLEANS, ANSIBLE_VERSION and the Python 2 builtin ``basestring`` used below
# are assumed to come from the embedding Ansible module context.
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
"""Prepend a key with rax_ and normalize the key name"""
    return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def rax_clb_node_to_dict(obj):
"""Function to convert a CLB Node object to a dict"""
if not obj:
return {}
node = obj.to_dict()
node['id'] = obj.id
node['weight'] = obj.weight
return node
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict
obj_type values:
standard
clb
server
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name"""
cs = rax_module.cloudservers
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
except(cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name"""
cbs = rax_module.cloud_blockstorage
try:
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception, e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
"""Find a Cloud Server by ID or name"""
cs = rax_module.cloudservers
try:
UUID(server)
server = cs.servers.get(server)
except ValueError:
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
if not servers:
module.fail_json(msg='No Server was matched by name, '
'try using the Server ID instead')
if len(servers) > 1:
module.fail_json(msg='Multiple servers matched by name, '
'try using the Server ID instead')
# We made it this far, grab the first and hopefully only server
# in the list
server = servers[0]
return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
"""Find a Cloud Load Balancer by ID or name"""
clb = rax_module.cloud_loadbalancers
try:
found = clb.get(loadbalancer)
except:
found = []
for lb in clb.list():
if loadbalancer == lb.name:
found.append(lb)
if not found:
module.fail_json(msg='No loadbalancer was matched')
if len(found) > 1:
module.fail_json(msg='Multiple loadbalancers matched')
# We made it this far, grab the first and hopefully only item
# in the list
found = found[0]
return found
def rax_argument_spec():
"""Return standard base dictionary used for the argument_spec
argument in AnsibleModule
"""
return dict(
api_key=dict(type='str', aliases=['password'], no_log=True),
auth_endpoint=dict(type='str'),
credentials=dict(type='str', aliases=['creds_file']),
env=dict(type='str'),
identity_type=dict(type='str', default='rackspace'),
region=dict(type='str'),
tenant_id=dict(type='str'),
tenant_name=dict(type='str'),
username=dict(type='str'),
verify_ssl=dict(choices=BOOLEANS, type='bool'),
)
def rax_required_together():
"""Return the default list used for the required_together argument to
AnsibleModule"""
return [['api_key', 'username']]
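# Hedged sketch of how a rax module typically composes these helpers; the
# extra 'name' option and the pyrax import are illustrative assumptions:
#
#   argument_spec = rax_argument_spec()
#   argument_spec.update(dict(name=dict(type='str')))
#   module = AnsibleModule(argument_spec=argument_spec,
#                          required_together=rax_required_together())
#   setup_rax_module(module, pyrax)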
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('verify_ssl')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
username = username or os.environ.get('RAX_USERNAME')
if not username:
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception, e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
| gpl-3.0 |
SUSE/azure-sdk-for-python | azure-keyvault/azure/keyvault/models/certificate_update_parameters.py | 4 | 1565 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateUpdateParameters(Model):
"""The certificate update parameters.
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: :class:`CertificatePolicy
<azure.keyvault.models.CertificatePolicy>`
:param certificate_attributes: The attributes of the certificate
(optional).
:type certificate_attributes: :class:`CertificateAttributes
<azure.keyvault.models.CertificateAttributes>`
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict
"""
_attribute_map = {
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, certificate_policy=None, certificate_attributes=None, tags=None):
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
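# Illustrative construction (tag values hypothetical; serialization is
# handled by the msrest-generated client):
#
#   update = CertificateUpdateParameters(tags={'team': 'infra'})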
| mit |
Trigition/MTG-DataScraper | scripts/variant_cards.py | 1 | 1256 | #!/usr/bin/env python
import pandas as pd
import argparse
parser = argparse.ArgumentParser(description="This python script generates a csv describing all the physical cards")
parser.add_argument('inputs', help='Input CSV files', nargs='+')
parser.add_argument('output', help='Optional output csv filename, defaults to "variants_join_table.csv"', nargs='?', default="variants_join_table.csv")
args = parser.parse_args()
def get_data_frames():
print "Grabbing csv files..."
dataframes = [pd.read_csv(x) for x in args.inputs]
if len(dataframes) > 1:
all_cards = dataframes[0].append(dataframes[1:], ignore_index=True)
else:
all_cards = dataframes[0]
return all_cards
all_cards = get_data_frames()
cards_seen = []
variants = []
for name, group in all_cards.groupby(['name']):
if len(group) > 1:
if name not in cards_seen:
for variant in group['gatherer_id']:
cur_variant = {}
cur_variant['name'] = name
cur_variant['variant'] = variant
variants.append(cur_variant)
cards_seen.append(name)
print "Writing file"
print pd.DataFrame(variants)
pd.DataFrame(variants).to_csv(args.output, index=False)
print "Done"
| mit |
Abschiedsstein/ANWP | draft.py | 1 | 1423 | import numpy as np
class Server(object):
    """Base class for the zones a player can act on."""
    def create(self):
        pass
    def install(self, card):
        pass

class RnD(Server):
    """R&D: the draw deck."""
    def draw(self):
        pass
    def reveal(self, n):
        pass
    def shuffle(self):
        pass
    def pick(self):
        pass

class Archive(Server):
    """Discard pile."""
    def add(self, face):
        pass
    def reveal(self, n):
        pass
    def pick(self):
        pass

class HQ(Server):
    pass

class Remote(Server):
    pass

class Hand(object):
    def add(self, card):
        pass
    def remove(self, card):
        pass
    def reveal(self):
        pass

class Event(object):
    """Events keep a list of reader callbacks to notify."""
    readersList = []
    @classmethod
    def registerAsEventReader(cls, triggerFunction):
        cls.readersList.append(triggerFunction)

class TimeEvent(Event):
    def __init__(self):
        super(TimeEvent, self).__init__()

class PlayerActionEvent(Event):
    pass

class EventReader(object):
    # Readers register their trigger with the Event machinery on creation.
    def __init__(self):
        Event.registerAsEventReader(self.triggerFunction)
    def triggerFunction(self):
        pass

class EventHandler(object):
    pass

class Rule(EventReader):
    """A rule targets a function and alters its behaviour."""
    target = None
    def checkConditions(self, *args, **kwargs):
        return False
    def effect(self):
        pass
    def neffect(self):
        pass

class Blocker(Rule):
    """A rule that vetoes the targeted call when its conditions hold."""
    pass

class Player(object):
    def __init__(self):
        self.turn = 0
        self.clicks = 0
        self.credits = 0

# Global registry of active rules consulted by subjectToRules.
rules = []

def subjectToRules(func):
    def rulesChecker(*args, **kwargs):
        relevantRules = [rule for rule in rules if rule.target is func]
        for rule in relevantRules:
            if rule.checkConditions(*args, **kwargs):
                if isinstance(rule, Blocker):
                    return
                rule.effect()
            else:
                rule.neffect()
        return func(*args, **kwargs)
    return rulesChecker

class Test(object):
    def __init__(self):
        pass
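# Illustrative use of the decorator above (the function and the rule are
# hypothetical):
#
#   @subjectToRules
#   def advanceTurn(player):
#       player.turn += 1
#
# A registered Blocker whose target is advanceTurn would veto the call; any
# other matching Rule fires effect() (or neffect() when its conditions fail)
# before advanceTurn runs.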
| gpl-2.0 |
jphilipsen05/zulip | zerver/views/push_notifications.py | 5 | 1932 | from __future__ import absolute_import
import requests
import json
from typing import Optional, Text
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.push_notifications import add_push_device_token, \
remove_push_device_token
from zerver.lib.request import has_request_variables, REQ, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.models import PushDeviceToken, UserProfile
def validate_token(token_str):
# type: (str) -> None
if token_str == '' or len(token_str) > 4096:
raise JsonableError(_('Empty or invalid length token'))
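# Example: validate_token('') and any token longer than 4096 characters both
# raise JsonableError('Empty or invalid length token').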
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ(),
appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
# type: (HttpRequest, UserProfile, str, str) -> HttpResponse
validate_token(token)
add_push_device_token(user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
return json_success()
@has_request_variables
def add_android_reg_id(request, user_profile, token=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
validate_token(token)
add_push_device_token(user_profile, token, PushDeviceToken.GCM)
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
validate_token(token)
remove_push_device_token(user_profile, token, PushDeviceToken.APNS)
return json_success()
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
validate_token(token)
remove_push_device_token(user_profile, token, PushDeviceToken.GCM)
return json_success()
| apache-2.0 |
ciminaghi/linux-3.3.8 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
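# Example: nsecs_str(nsecs(1, 500000000)) -> "    1.500000000"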
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
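# Worked example:
#   d = {}
#   add_stats(d, 'lat', 10)   # d['lat'] == (10, 10, 10, 1)
#   add_stats(d, 'lat', 20)   # d['lat'] == (10, 20, 15, 2)
# Note the third field is a smoothed running average, not a true mean.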
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
AnanyaKumar/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/setup.py | 213 | 1409 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pre_install():
"""
Do any setup required before the install hook.
"""
install_charmhelpers()
install_path()
def install_charmhelpers():
"""
Install the charmhelpers library, if not present.
"""
try:
import charmhelpers # noqa
except ImportError:
import subprocess
subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
subprocess.check_call(['pip', 'install', 'charmhelpers'])
def install_path():
"""
    Install the path.py library, if not present.
"""
try:
import path # noqa
except ImportError:
import subprocess
subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
subprocess.check_call(['pip', 'install', 'path.py'])
| apache-2.0 |
uclouvain/OSIS-Louvain | base/tests/views/test_my_osis.py | 1 | 6883 | #############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from attribution.tests.factories.attribution import AttributionFactory
from base.models.enums import academic_calendar_type
from base.tests.factories.academic_calendar import AcademicCalendarFactory
from base.tests.factories.academic_year import create_current_academic_year
from base.tests.factories.learning_container_year import LearningContainerYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFakerFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.factories.user import SuperUserFactory
from osis_common.models import message_history
LANGUAGE_CODE_FR = 'fr-be'
LANGUAGE_CODE_EN = 'en'
class MyOsisViewTestCase(TestCase):
fixtures = ['osis_common/fixtures/messages_tests.json']
@classmethod
def setUpTestData(cls):
cls.a_superuser = SuperUserFactory()
cls.person = PersonFactory(user=cls.a_superuser,
language=LANGUAGE_CODE_FR)
academic_year = create_current_academic_year()
cls.summary_course_submission_calendar = AcademicCalendarFactory(
academic_year=academic_year,
start_date=academic_year.start_date,
end_date=academic_year.end_date,
reference=academic_calendar_type.SUMMARY_COURSE_SUBMISSION)
cls.tutor = TutorFactory(person=cls.person)
        # FIXME: change LearningUnitYearFactory so it does not create multiple academic years
cls.learning_container_year = LearningContainerYearFactory(academic_year=academic_year)
cls.learning_unit_year = LearningUnitYearFakerFactory(academic_year=academic_year,
learning_container_year=cls.learning_container_year)
cls.attribution = AttributionFactory(learning_unit_year=cls.learning_unit_year, summary_responsible=True,
tutor=cls.tutor)
def setUp(self):
self.client.force_login(self.a_superuser)
@staticmethod
def get_message_history():
return message_history.MessageHistory.objects.all().first()
def test_my_osis_index(self):
from base.views.my_osis import my_osis_index
response = self.client.get(reverse(my_osis_index))
self.assertTemplateUsed(response, 'my_osis/home.html')
def test_my_messages_index(self):
from base.views.my_osis import my_messages_index
response = self.client.get(reverse(my_messages_index))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'my_osis/my_messages.html')
def test_get_messages_formset(self):
messages = message_history.MessageHistory.objects.all()
from base.views.my_osis import get_messages_formset
formset_factory_result = get_messages_formset(messages)
self.assertEqual(len(messages), len(formset_factory_result))
cpt = 0
for form in formset_factory_result:
message = messages[cpt]
self.assertEqual(message.subject, form['subject'].value())
self.assertEqual(message.id, form['id'].value())
cpt += 1
def test_profile(self):
from base.views.my_osis import profile
response = self.client.get(reverse(profile))
self.assertTemplateUsed(response, 'my_osis/profile.html')
with self.assertRaises(KeyError):
response.context['tab_attribution_on']
self.check_context_data(response.context)
def test_profile_attributions(self):
from base.views.my_osis import profile_attributions
response = self.client.get(reverse(profile_attributions))
self.assertTemplateUsed(response, 'my_osis/profile.html')
self.assertEqual(response.context['tab_attribution_on'], True)
self.check_context_data(response.context)
def test_read_message(self):
message = self.get_message_history()
from base.views.my_osis import read_message
response = self.client.get(reverse(read_message, args=[message.id]))
self.assertTemplateUsed(response, 'my_osis/my_message.html')
self.assertEqual(response.context['my_message'], message)
def test_get_data(self):
request = self.get_request()
from base.views.my_osis import _get_data
data = _get_data(request)
self.assertEqual(data['person'], self.person)
@override_settings(LANGUAGES=[('fr-be', 'French'), ('en', 'English'), ], LANGUAGE_CODE='fr-be')
def test_profile_lang(self):
data = {
"ui_language": LANGUAGE_CODE_EN
}
response = self.client.post(reverse('profile_lang'), data)
self.assertTemplateUsed(response, 'my_osis/profile.html')
self.assertEqual(response.context['person'].language, LANGUAGE_CODE_EN)
def get_request(self):
request_factory = RequestFactory()
request = request_factory.get(reverse('home'))
request.user = self.a_superuser
return request
def check_context_data(self, context):
self.assertEqual(context['person'], self.person)
self.assertCountEqual(context['addresses'], [])
self.assertEqual(context['tutor'], self.tutor)
self.assertCountEqual(context['attributions'], [self.attribution])
self.assertCountEqual(context['programs'], [])
self.assertTrue(context['summary_submission_opened'])
| agpl-3.0 |
ekiourk/ansible-modules-core | web_infrastructure/supervisorctl.py | 61 | 8777 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
DOCUMENTATION = '''
---
module: supervisorctl
short_description: Manage the state of a program or group of programs running via supervisord
description:
- Manage the state of a program or group of programs running via supervisord
version_added: "0.7"
options:
name:
description:
- The name of the supervisord program or group to manage.
- The name will be taken as group name when it ends with a colon I(:)
- Group support is only available in Ansible version 1.6 or later.
required: true
default: null
config:
description:
- The supervisor configuration file path
required: false
default: null
version_added: "1.3"
server_url:
description:
- URL on which supervisord server is listening
required: false
default: null
version_added: "1.3"
username:
description:
- username to use for authentication
required: false
default: null
version_added: "1.3"
password:
description:
- password to use for authentication
required: false
default: null
version_added: "1.3"
state:
description:
- The desired state of program/group.
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "absent" ]
supervisorctl_path:
description:
- path to supervisorctl executable
required: false
default: null
version_added: "1.4"
notes:
- When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
- When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
requirements: [ "supervisorctl" ]
author:
- "Matt Wright (@mattupstate)"
- "Aaron Wang (@inetfuture) <[email protected]>"
'''
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl: name=my_app state=started
# Manage the state of program group to be in 'started' state.
- supervisorctl: name='my_apps:' state=started
# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001
'''
def main():
arg_spec = dict(
name=dict(required=True),
config=dict(required=False),
server_url=dict(required=False),
username=dict(required=False),
password=dict(required=False),
supervisorctl_path=dict(required=False),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
is_group = False
if name.endswith(':'):
is_group = True
name = name.rstrip(':')
state = module.params['state']
config = module.params.get('config')
server_url = module.params.get('server_url')
username = module.params.get('username')
password = module.params.get('password')
supervisorctl_path = module.params.get('supervisorctl_path')
if supervisorctl_path:
supervisorctl_path = os.path.expanduser(supervisorctl_path)
if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path):
supervisorctl_args = [supervisorctl_path]
else:
module.fail_json(
msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
else:
supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
if config:
supervisorctl_args.extend(['-c', os.path.expanduser(config)])
if server_url:
supervisorctl_args.extend(['-s', server_url])
if username:
supervisorctl_args.extend(['-u', username])
if password:
supervisorctl_args.extend(['-p', password])
def run_supervisorctl(cmd, name=None, **kwargs):
args = list(supervisorctl_args) # copy the master args
args.append(cmd)
if name:
args.append(name)
return module.run_command(args, **kwargs)
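    # For illustration, with config/server_url/username/password all set, the
    # composed command resembles (paths and credentials hypothetical):
    #   supervisorctl -c /etc/supervisord.conf -s http://localhost:9001 \
    #       -u admin -p secret status my_app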
def get_matched_processes():
matched = []
rc, out, err = run_supervisorctl('status')
for line in out.splitlines():
# One status line may look like one of these two:
# process not in group:
# echo_date_lonely RUNNING pid 7680, uptime 13:22:18
# process in group:
# echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
fields = [field for field in line.split(' ') if field != '']
process_name = fields[0]
status = fields[1]
if is_group:
# If there is ':', this process must be in a group.
if ':' in process_name:
group = process_name.split(':')[0]
if group != name:
continue
else:
continue
else:
if process_name != name:
continue
matched.append((process_name, status))
return matched
def take_action_on_processes(processes, status_filter, action, expected_result):
to_take_action_on = []
for process_name, status in processes:
if status_filter(status):
to_take_action_on.append(process_name)
if len(to_take_action_on) == 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
for process_name in to_take_action_on:
rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
if '%s: %s' % (process_name, expected_result) not in out:
module.fail_json(msg=out)
module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
if state == 'restarted':
rc, out, err = run_supervisorctl('update', check_rc=True)
processes = get_matched_processes()
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: True, 'restart', 'started')
processes = get_matched_processes()
if state == 'absent':
if len(processes) == 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
rc, out, err = run_supervisorctl('remove', name)
if '%s: removed process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
else:
module.fail_json(msg=out, name=name, state=state)
if state == 'present':
if len(processes) > 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
rc, out, err = run_supervisorctl('add', name)
if '%s: added process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
else:
module.fail_json(msg=out, name=name, state=state)
if state == 'started':
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
if state == 'stopped':
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
moodboom/beets | test/testall.py | 25 | 1497 | #!/usr/bin/env python
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import re
import sys
from test._common import unittest
pkgpath = os.path.dirname(__file__) or '.'
sys.path.append(pkgpath)
os.chdir(pkgpath)
# Make sure we use local version of beetsplug and not system namespaced version
# for tests
try:
del sys.modules["beetsplug"]
except KeyError:
pass
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(pkgpath):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit |
pong3489/TEST_Mission | Lib/site-packages/numpy/oldnumeric/ma.py | 81 | 75739 | """MA: a facility for dealing with missing observations
MA is generally used as a numpy.array look-alike.
by Paul F. Dubois.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
Adapted for numpy_core 2005 by Travis Oliphant and
(mainly) Paul Dubois.
"""
import types, sys
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.core.numeric import newaxis, ndarray, inf
from numpy.core.fromnumeric import amax, amin
from numpy.core.numerictypes import bool_, typecodes
import numpy.core.numeric as numeric
import warnings
if sys.version_info[0] >= 3:
from functools import reduce
# Ufunc domain lookup for __array_wrap__
ufunc_domain = {}
# Ufunc fills lookup for __array__
ufunc_fills = {}
MaskType = bool_
nomask = MaskType(0)
divide_tolerance = 1.e-35
class MAError (Exception):
def __init__ (self, args=None):
"Create an exception"
# The .args attribute must be a tuple.
if not isinstance(args, tuple):
args = (args,)
self.args = args
def __str__(self):
"Calculate the string representation"
return str(self.args[0])
__repr__ = __str__
class _MaskedPrintOption:
"One instance of this class, masked_print_option, is created."
def __init__ (self, display):
"Create the masked print option object."
self.set_display(display)
self._enabled = 1
def display (self):
"Show what prints for masked values."
return self._display
def set_display (self, s):
"set_display(s) sets what prints for masked values."
self._display = s
def enabled (self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, flag=1):
"Set the enabling flag to flag."
self._enabled = flag
def __str__ (self):
return str(self._display)
__repr__ = __str__
#if you single index into a masked location you get this object.
masked_print_option = _MaskedPrintOption('--')
# Use single element arrays or scalars.
default_real_fill_value = 1.e20
default_complex_fill_value = 1.e20 + 0.0j
default_character_fill_value = '-'
default_integer_fill_value = 999999
default_object_fill_value = '?'
def default_fill_value (obj):
"Function to calculate default fill value for an object."
if isinstance(obj, types.FloatType):
return default_real_fill_value
elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
return default_integer_fill_value
elif isinstance(obj, types.StringType):
return default_character_fill_value
elif isinstance(obj, types.ComplexType):
return default_complex_fill_value
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
if x in typecodes['Float']:
return default_real_fill_value
if x in typecodes['Integer']:
return default_integer_fill_value
if x in typecodes['Complex']:
return default_complex_fill_value
if x in typecodes['Character']:
return default_character_fill_value
if x in typecodes['UnsignedInteger']:
return umath.absolute(default_integer_fill_value)
return default_object_fill_value
else:
return default_object_fill_value
def minimum_fill_value (obj):
"Function to calculate default fill value suitable for taking minima."
if isinstance(obj, types.FloatType):
return numeric.inf
elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
return sys.maxint
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
if x in typecodes['Float']:
return numeric.inf
if x in typecodes['Integer']:
return sys.maxint
if x in typecodes['UnsignedInteger']:
return sys.maxint
else:
raise TypeError, 'Unsuitable type for calculating minimum.'
def maximum_fill_value (obj):
"Function to calculate default fill value suitable for taking maxima."
if isinstance(obj, types.FloatType):
return -inf
elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
return -sys.maxint
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
if x in typecodes['Float']:
return -inf
if x in typecodes['Integer']:
return -sys.maxint
if x in typecodes['UnsignedInteger']:
return 0
else:
raise TypeError, 'Unsuitable type for calculating maximum.'
def set_fill_value (a, fill_value):
"Set fill value of a if it is a masked array."
if isMaskedArray(a):
a.set_fill_value (fill_value)
def getmask (a):
"""Mask of values in a; could be nomask.
Returns nomask if a is not a masked array.
To get an array for sure use getmaskarray."""
if isinstance(a, MaskedArray):
return a.raw_mask()
else:
return nomask
def getmaskarray (a):
"""Mask of values in a; an array of zeros if mask is nomask
or not a masked array, and is a byte-sized integer.
Do not try to add up entries, for example.
"""
m = getmask(a)
if m is nomask:
return make_mask_none(shape(a))
else:
return m
def is_mask (m):
"""Is m a legal mask? Does not check contents, only type.
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask (m, copy=0, flag=0):
"""make_mask(m, copy=0, flag=0)
return m as a mask, creating a copy if necessary or requested.
Can accept any sequence of integers or nomask. Does not check
that contents must be 0s and 1s.
if flag, return nomask if m contains no true elements.
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
if m.dtype.type is MaskType:
if copy:
result = numeric.array(m, dtype=MaskType, copy=copy)
else:
result = m
else:
result = m.astype(MaskType)
else:
result = filled(m, True).astype(MaskType)
if flag and not fromnumeric.sometrue(fromnumeric.ravel(result)):
return nomask
else:
return result
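# Example: make_mask([0, 1, 0]) -> array([False,  True, False]);
# make_mask([0, 0], flag=1) -> nomask, since no element is true.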
def make_mask_none (s):
"Return a mask of all zeros of shape s."
result = numeric.zeros(s, dtype=MaskType)
result.shape = s
return result
def mask_or (m1, m2):
"""Logical or of the mask candidates m1 and m2, treating nomask as false.
Result may equal m1 or m2 if the other is nomask.
"""
if m1 is nomask: return make_mask(m2)
if m2 is nomask: return make_mask(m1)
if m1 is m2 and is_mask(m1): return m1
return make_mask(umath.logical_or(m1, m2))
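# Example: mask_or([1, 0], [0, 1]) -> array([ True,  True]);
# mask_or(nomask, m) simply returns make_mask(m).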
def filled (a, value = None):
"""a as a contiguous numeric array with any masked areas replaced by value
if value is None or the special element "masked", get_fill_value(a)
is used instead.
If a is already a contiguous numeric array, a itself is returned.
filled(a) can be used to be sure that the result is numeric when
passing an object a to other software ignorant of MA, in particular to
numeric itself.
"""
if isinstance(a, MaskedArray):
return a.filled(value)
elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, types.DictType):
return numeric.array(a, 'O')
else:
return numeric.array(a)
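# Example (m is a hypothetical masked array over data [1, 2, 3] with mask
# [0, 1, 0]): filled(m, 99) -> array([1, 99, 3]); with value=None the
# array's own fill value is used instead.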
def get_fill_value (a):
"""
The fill value of a, if it has one; otherwise, the default fill value
for that type.
"""
if isMaskedArray(a):
result = a.fill_value()
else:
result = default_fill_value(a)
return result
def common_fill_value (a, b):
"The common fill_value of a and b, if there is one, or None"
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2: return t1
return None
# Domain functions return 1 where the argument(s) are not in the domain.
class domain_check_interval:
"domain_check_interval(a,b)(x) = true where x < a or y > b"
def __init__(self, y1, y2):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
self.y1 = y1
self.y2 = y2
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.y2),
umath.less(x, self.y1)
)
class domain_tan:
"domain_tan(eps) = true where abs(cos(x)) < eps)"
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__ (self, x):
"Execute the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class domain_greater:
"domain_greater(v)(x) = true where x <= v"
def __init__(self, critical_value):
"domain_greater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Execute the call behavior."
return umath.less_equal (x, self.critical_value)
class domain_greater_equal:
"domain_greater_equal(v)(x) = true where x < v"
def __init__(self, critical_value):
"domain_greater_equal(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Execute the call behavior."
return umath.less (x, self.critical_value)
class masked_unary_operation:
def __init__ (self, aufunc, fill=0, domain=None):
""" masked_unary_operation(aufunc, fill=0, domain=None)
aufunc(fill) must be defined
self(x) returns aufunc(x)
with masked values where domain(x) is true or getmask(x) is true.
"""
self.f = aufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(aufunc, "__doc__", str(aufunc))
self.__name__ = getattr(aufunc, "__name__", str(aufunc))
ufunc_domain[aufunc] = domain
ufunc_fills[aufunc] = fill,
def __call__ (self, a, *args, **kwargs):
"Execute the call behavior."
# numeric tries to return scalars rather than arrays when given scalars.
m = getmask(a)
d1 = filled(a, self.fill)
if self.domain is not None:
m = mask_or(m, self.domain(d1))
result = self.f(d1, *args, **kwargs)
return masked_array(result, m)
def __str__ (self):
return "Masked version of " + str(self.f)
class domain_safe_divide:
def __init__ (self, tolerance=divide_tolerance):
self.tolerance = tolerance
def __call__ (self, a, b):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class domained_binary_operation:
"""Binary operations that have a domain, like divide. These are complicated
so they are a separate class. They have no reduce, outer or accumulate.
"""
def __init__ (self, abfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = abfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(abfunc, "__doc__", str(abfunc))
self.__name__ = getattr(abfunc, "__name__", str(abfunc))
ufunc_domain[abfunc] = domain
ufunc_fills[abfunc] = fillx, filly
def __call__(self, a, b):
"Execute the call behavior."
ma = getmask(a)
mb = getmask(b)
d1 = filled(a, self.fillx)
d2 = filled(b, self.filly)
t = self.domain(d1, d2)
if fromnumeric.sometrue(t, None):
d2 = where(t, self.filly, d2)
mb = mask_or(mb, t)
m = mask_or(ma, mb)
result = self.f(d1, d2)
return masked_array(result, m)
def __str__ (self):
return "Masked version of " + str(self.f)
class masked_binary_operation:
def __init__ (self, abfunc, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = abfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(abfunc, "__doc__", str(abfunc))
ufunc_domain[abfunc] = None
ufunc_fills[abfunc] = fillx, filly
def __call__ (self, a, b, *args, **kwargs):
"Execute the call behavior."
m = mask_or(getmask(a), getmask(b))
d1 = filled(a, self.fillx)
d2 = filled(b, self.filly)
result = self.f(d1, d2, *args, **kwargs)
if isinstance(result, ndarray) \
and m.ndim != 0 \
and m.shape != result.shape:
m = mask_or(getmaskarray(a), getmaskarray(b))
return masked_array(result, m)
def reduce (self, target, axis=0, dtype=None):
"""Reduce target along the given axis with this function."""
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
t = self.f.reduce(t, axis)
else:
t = masked_array (t, m)
# XXX: "or t.dtype" below is a workaround for what appears
# XXX: to be a bug in reduce.
t = self.f.reduce(filled(t, self.filly), axis,
dtype=dtype or t.dtype)
m = umath.logical_and.reduce(m, axis)
if isinstance(t, ndarray):
return masked_array(t, m, get_fill_value(target))
elif m:
return masked
else:
return t
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
d = self.f.outer(filled(a, self.fillx), filled(b, self.filly))
return masked_array(d, m)
def accumulate (self, target, axis=0):
"""Accumulate target along axis after filling with y fill value."""
t = filled(target, self.filly)
return masked_array (self.f.accumulate (t, axis))
def __str__ (self):
return "Masked version of " + str(self.f)
sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0))
log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0))
log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0))
exp = masked_unary_operation(umath.exp)
conjugate = masked_unary_operation(umath.conjugate)
sin = masked_unary_operation(umath.sin)
cos = masked_unary_operation(umath.cos)
tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35))
arcsin = masked_unary_operation(umath.arcsin, 0.0, domain_check_interval(-1.0, 1.0))
arccos = masked_unary_operation(umath.arccos, 0.0, domain_check_interval(-1.0, 1.0))
arctan = masked_unary_operation(umath.arctan)
# Missing from numeric
arcsinh = masked_unary_operation(umath.arcsinh)
arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0))
arctanh = masked_unary_operation(umath.arctanh, 0.0, domain_check_interval(-1.0+1e-15, 1.0-1e-15))
sinh = masked_unary_operation(umath.sinh)
cosh = masked_unary_operation(umath.cosh)
tanh = masked_unary_operation(umath.tanh)
absolute = masked_unary_operation(umath.absolute)
fabs = masked_unary_operation(umath.fabs)
negative = masked_unary_operation(umath.negative)
def nonzero(a):
"""returns the indices of the elements of a which are not zero
and not masked
"""
return numeric.asarray(filled(a, 0).nonzero())
around = masked_unary_operation(fromnumeric.round_)
floor = masked_unary_operation(umath.floor)
ceil = masked_unary_operation(umath.ceil)
logical_not = masked_unary_operation(umath.logical_not)
add = masked_binary_operation(umath.add)
subtract = masked_binary_operation(umath.subtract)
subtract.reduce = None
multiply = masked_binary_operation(umath.multiply, 1, 1)
divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1)
true_divide = domained_binary_operation(umath.true_divide, domain_safe_divide(), 0, 1)
floor_divide = domained_binary_operation(umath.floor_divide, domain_safe_divide(), 0, 1)
remainder = domained_binary_operation(umath.remainder, domain_safe_divide(), 0, 1)
fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1)
hypot = masked_binary_operation(umath.hypot)
arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0)
arctan2.reduce = None
equal = masked_binary_operation(umath.equal)
equal.reduce = None
not_equal = masked_binary_operation(umath.not_equal)
not_equal.reduce = None
less_equal = masked_binary_operation(umath.less_equal)
less_equal.reduce = None
greater_equal = masked_binary_operation(umath.greater_equal)
greater_equal.reduce = None
less = masked_binary_operation(umath.less)
less.reduce = None
greater = masked_binary_operation(umath.greater)
greater.reduce = None
logical_and = masked_binary_operation(umath.logical_and)
alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce
logical_or = masked_binary_operation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = masked_binary_operation(umath.logical_xor)
bitwise_and = masked_binary_operation(umath.bitwise_and)
bitwise_or = masked_binary_operation(umath.bitwise_or)
bitwise_xor = masked_binary_operation(umath.bitwise_xor)
def rank (object):
return fromnumeric.rank(filled(object))
def shape (object):
return fromnumeric.shape(filled(object))
def size (object, axis=None):
return fromnumeric.size(filled(object), axis)
class MaskedArray (object):
"""Arrays with possibly masked values.
Masked values of 1 exclude the corresponding element from
any computation.
Construction:
x = array(data, dtype=None, copy=True, order=False,
mask = nomask, fill_value=None)
If copy=False, every effort is made not to copy the data:
If data is a MaskedArray, and argument mask=nomask,
then the candidate data is data.data and the
mask used is data.mask. If data is a numeric array,
it is used as the candidate raw data.
If dtype is not None and
is != data.dtype.char then a data copy is required.
Otherwise, the candidate is used.
If a data copy is required, raw data stored is the result of:
numeric.array(data, dtype=dtype.char, copy=copy)
If mask is nomask there are no masked values. Otherwise mask must
be convertible to an array of booleans with the same shape as x.
fill_value is used to fill in masked values when necessary,
such as when printing and in method/function filled().
The fill_value is not used for computation within this module.
"""
__array_priority__ = 10.1
def __init__(self, data, dtype=None, copy=True, order=False,
mask=nomask, fill_value=None):
"""array(data, dtype=None, copy=True, order=False, mask=nomask, fill_value=None)
If data already a numeric array, its dtype becomes the default value of dtype.
"""
if dtype is None:
tc = None
else:
tc = numeric.dtype(dtype)
need_data_copied = copy
if isinstance(data, MaskedArray):
c = data.data
if tc is None:
tc = c.dtype
elif tc != c.dtype:
need_data_copied = True
if mask is nomask:
mask = data.mask
elif mask is not nomask: #attempting to change the mask
need_data_copied = True
elif isinstance(data, ndarray):
c = data
if tc is None:
tc = c.dtype
elif tc != c.dtype:
need_data_copied = True
else:
need_data_copied = False #because I'll do it now
c = numeric.array(data, dtype=tc, copy=True, order=order)
tc = c.dtype
if need_data_copied:
if tc == c.dtype:
self._data = numeric.array(c, dtype=tc, copy=True, order=order)
else:
self._data = c.astype(tc)
else:
self._data = c
if mask is nomask:
self._mask = nomask
self._shared_mask = 0
else:
self._mask = make_mask (mask)
if self._mask is nomask:
self._shared_mask = 0
else:
self._shared_mask = (self._mask is mask)
nm = size(self._mask)
nd = size(self._data)
if nm != nd:
if nm == 1:
self._mask = fromnumeric.resize(self._mask, self._data.shape)
self._shared_mask = 0
elif nd == 1:
self._data = fromnumeric.resize(self._data, self._mask.shape)
self._data.shape = self._mask.shape
else:
raise MAError, "Mask and data not compatible."
elif nm == 1 and shape(self._mask) != shape(self._data):
self.unshare_mask()
self._mask.shape = self._data.shape
self.set_fill_value(fill_value)
def __array__ (self, t=None, context=None):
"Special hook for numeric. Converts to numeric if possible."
if self._mask is not nomask:
if fromnumeric.ravel(self._mask).any():
if context is None:
warnings.warn("Cannot automatically convert masked array to "\
"numeric because data\n is masked in one or "\
"more locations.");
return self._data
#raise MAError, \
# """Cannot automatically convert masked array to numeric because data
# is masked in one or more locations.
# """
else:
func, args, i = context
fills = ufunc_fills.get(func)
if fills is None:
raise MAError, "%s not known to ma" % func
return self.filled(fills[i])
else: # Mask is all false
# Optimize to avoid future invocations of this section.
self._mask = nomask
self._shared_mask = 0
if t:
return self._data.astype(t)
else:
return self._data
def __array_wrap__ (self, array, context=None):
"""Special hook for ufuncs.
Wraps the numpy array and sets the mask according to
context.
"""
if context is None:
return MaskedArray(array, copy=False, mask=nomask)
func, args = context[:2]
domain = ufunc_domain[func]
m = reduce(mask_or, [getmask(a) for a in args])
if domain is not None:
m = mask_or(m, domain(*[getattr(a, '_data', a)
for a in args]))
if m is not nomask:
try:
shape = array.shape
except AttributeError:
pass
else:
if m.shape != shape:
m = reduce(mask_or, [getmaskarray(a) for a in args])
return MaskedArray(array, copy=False, mask=m)
def _get_shape(self):
"Return the current shape."
return self._data.shape
def _set_shape (self, newshape):
"Set the array's shape."
self._data.shape = newshape
if self._mask is not nomask:
self._mask = self._mask.copy()
self._mask.shape = newshape
def _get_flat(self):
"""Calculate the flat value.
"""
if self._mask is nomask:
return masked_array(self._data.ravel(), mask=nomask,
fill_value = self.fill_value())
else:
return masked_array(self._data.ravel(),
mask=self._mask.ravel(),
fill_value = self.fill_value())
def _set_flat (self, value):
"x.flat = value"
y = self.ravel()
y[:] = value
def _get_real(self):
"Get the real part of a complex array."
if self._mask is nomask:
return masked_array(self._data.real, mask=nomask,
fill_value = self.fill_value())
else:
return masked_array(self._data.real, mask=self._mask,
fill_value = self.fill_value())
def _set_real (self, value):
"x.real = value"
y = self.real
y[...] = value
def _get_imaginary(self):
"Get the imaginary part of a complex array."
if self._mask is nomask:
return masked_array(self._data.imag, mask=nomask,
fill_value = self.fill_value())
else:
return masked_array(self._data.imag, mask=self._mask,
fill_value = self.fill_value())
def _set_imaginary (self, value):
"x.imaginary = value"
y = self.imaginary
y[...] = value
def __str__(self):
"""Calculate the str representation, using masked for fill if
it is enabled. Otherwise fill with fill value.
"""
if masked_print_option.enabled():
f = masked_print_option
# XXX: Without the following special case masked
# XXX: would print as "[--]", not "--". Can we avoid
# XXX: checks for masked by choosing a different value
# XXX: for the masked singleton? 2005-01-05 -- sasha
if self is masked:
return str(f)
m = self._mask
if m is not nomask and m.shape == () and m:
return str(f)
# convert to object array to make filled work
self = self.astype(object)
else:
f = self.fill_value()
res = self.filled(f)
return str(res)
def __repr__(self):
"""Calculate the repr representation, using masked for fill if
it is enabled. Otherwise fill with fill value.
"""
with_mask = """\
array(data =
%(data)s,
mask =
%(mask)s,
fill_value=%(fill)s)
"""
with_mask1 = """\
array(data = %(data)s,
mask = %(mask)s,
fill_value=%(fill)s)
"""
without_mask = """array(
%(data)s)"""
without_mask1 = """array(%(data)s)"""
n = len(self.shape)
if self._mask is nomask:
if n <= 1:
return without_mask1 % {'data':str(self.filled())}
return without_mask % {'data':str(self.filled())}
else:
if n <= 1:
                return with_mask1 % {
'data': str(self.filled()),
'mask': str(self._mask),
'fill': str(self.fill_value())
}
return with_mask % {
'data': str(self.filled()),
'mask': str(self._mask),
'fill': str(self.fill_value())
}
without_mask1 = """array(%(data)s)"""
if self._mask is nomask:
return without_mask % {'data':str(self.filled())}
else:
return with_mask % {
'data': str(self.filled()),
'mask': str(self._mask),
'fill': str(self.fill_value())
}
def __float__(self):
"Convert self to float."
self.unmask()
if self._mask is not nomask:
raise MAError, 'Cannot convert masked element to a Python float.'
return float(self.data.item())
def __int__(self):
"Convert self to int."
self.unmask()
if self._mask is not nomask:
raise MAError, 'Cannot convert masked element to a Python int.'
return int(self.data.item())
def __getitem__(self, i):
"Get item described by i. Not a copy as in previous versions."
self.unshare_mask()
m = self._mask
dout = self._data[i]
if m is nomask:
try:
if dout.size == 1:
return dout
else:
return masked_array(dout, fill_value=self._fill_value)
except AttributeError:
return dout
mi = m[i]
if mi.size == 1:
if mi:
return masked
else:
return dout
else:
return masked_array(dout, mi, fill_value=self._fill_value)
# --------
# setitem and setslice notes
# note that if value is masked, it means to mask those locations.
# setting a value changes the mask to match the value in those locations.
def __setitem__(self, index, value):
"Set item described by index. If value is masked, mask those locations."
d = self._data
if self is masked:
raise MAError, 'Cannot alter masked elements.'
if value is masked:
if self._mask is nomask:
self._mask = make_mask_none(d.shape)
self._shared_mask = False
else:
self.unshare_mask()
self._mask[index] = True
return
m = getmask(value)
value = filled(value).astype(d.dtype)
d[index] = value
if m is nomask:
if self._mask is not nomask:
self.unshare_mask()
self._mask[index] = False
else:
if self._mask is nomask:
self._mask = make_mask_none(d.shape)
self._shared_mask = True
else:
self.unshare_mask()
self._mask[index] = m
def __nonzero__(self):
"""returns true if any element is non-zero or masked
"""
# XXX: This changes bool conversion logic from MA.
# XXX: In MA bool(a) == len(a) != 0, but in numpy
# XXX: scalars do not have len
m = self._mask
d = self._data
return bool(m is not nomask and m.any()
or d is not nomask and d.any())
def __len__ (self):
"""Return length of first dimension. This is weird but Python's
slicing behavior depends on it."""
return len(self._data)
def __and__(self, other):
"Return bitwise_and"
return bitwise_and(self, other)
def __or__(self, other):
"Return bitwise_or"
return bitwise_or(self, other)
def __xor__(self, other):
"Return bitwise_xor"
return bitwise_xor(self, other)
__rand__ = __and__
__ror__ = __or__
__rxor__ = __xor__
def __abs__(self):
"Return absolute(self)"
return absolute(self)
def __neg__(self):
"Return negative(self)"
return negative(self)
def __pos__(self):
"Return array(self)"
return array(self)
def __add__(self, other):
"Return add(self, other)"
return add(self, other)
__radd__ = __add__
def __mod__ (self, other):
"Return remainder(self, other)"
return remainder(self, other)
def __rmod__ (self, other):
"Return remainder(other, self)"
return remainder(other, self)
def __lshift__ (self, n):
return left_shift(self, n)
def __rshift__ (self, n):
return right_shift(self, n)
def __sub__(self, other):
"Return subtract(self, other)"
return subtract(self, other)
def __rsub__(self, other):
"Return subtract(other, self)"
return subtract(other, self)
def __mul__(self, other):
"Return multiply(self, other)"
return multiply(self, other)
__rmul__ = __mul__
def __div__(self, other):
"Return divide(self, other)"
return divide(self, other)
def __rdiv__(self, other):
"Return divide(other, self)"
return divide(other, self)
def __truediv__(self, other):
"Return divide(self, other)"
return true_divide(self, other)
def __rtruediv__(self, other):
"Return divide(other, self)"
return true_divide(other, self)
def __floordiv__(self, other):
"Return divide(self, other)"
return floor_divide(self, other)
def __rfloordiv__(self, other):
"Return divide(other, self)"
return floor_divide(other, self)
def __pow__(self, other, third=None):
"Return power(self, other, third)"
return power(self, other, third)
def __sqrt__(self):
"Return sqrt(self)"
return sqrt(self)
def __iadd__(self, other):
"Add other to self in place."
t = self._data.dtype.char
f = filled(other, 0)
t1 = f.dtype.char
if t == t1:
pass
elif t in typecodes['Integer']:
if t1 in typecodes['Integer']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Float']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Complex']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
elif t1 in typecodes['Complex']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
else:
raise TypeError, 'Incorrect type for in-place operation.'
if self._mask is nomask:
self._data += f
m = getmask(other)
self._mask = m
self._shared_mask = m is not nomask
else:
result = add(self, masked_array(f, mask=getmask(other)))
self._data = result.data
self._mask = result.mask
self._shared_mask = 1
return self
def __imul__(self, other):
"Add other to self in place."
t = self._data.dtype.char
f = filled(other, 0)
t1 = f.dtype.char
if t == t1:
pass
elif t in typecodes['Integer']:
if t1 in typecodes['Integer']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Float']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Complex']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
elif t1 in typecodes['Complex']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
else:
raise TypeError, 'Incorrect type for in-place operation.'
if self._mask is nomask:
self._data *= f
m = getmask(other)
self._mask = m
self._shared_mask = m is not nomask
else:
result = multiply(self, masked_array(f, mask=getmask(other)))
self._data = result.data
self._mask = result.mask
self._shared_mask = 1
return self
def __isub__(self, other):
"Subtract other from self in place."
t = self._data.dtype.char
f = filled(other, 0)
t1 = f.dtype.char
if t == t1:
pass
elif t in typecodes['Integer']:
if t1 in typecodes['Integer']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Float']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Complex']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
elif t1 in typecodes['Complex']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
else:
raise TypeError, 'Incorrect type for in-place operation.'
if self._mask is nomask:
self._data -= f
m = getmask(other)
self._mask = m
self._shared_mask = m is not nomask
else:
result = subtract(self, masked_array(f, mask=getmask(other)))
self._data = result.data
self._mask = result.mask
self._shared_mask = 1
return self
def __idiv__(self, other):
"Divide self by other in place."
t = self._data.dtype.char
f = filled(other, 0)
t1 = f.dtype.char
if t == t1:
pass
elif t in typecodes['Integer']:
if t1 in typecodes['Integer']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Float']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
elif t in typecodes['Complex']:
if t1 in typecodes['Integer']:
f = f.astype(t)
elif t1 in typecodes['Float']:
f = f.astype(t)
elif t1 in typecodes['Complex']:
f = f.astype(t)
else:
raise TypeError, 'Incorrect type for in-place operation.'
else:
raise TypeError, 'Incorrect type for in-place operation.'
mo = getmask(other)
result = divide(self, masked_array(f, mask=mo))
self._data = result.data
dm = result.raw_mask()
if dm is not self._mask:
self._mask = dm
self._shared_mask = 1
return self
def __eq__(self, other):
return equal(self,other)
def __ne__(self, other):
return not_equal(self,other)
def __lt__(self, other):
return less(self,other)
def __le__(self, other):
return less_equal(self,other)
def __gt__(self, other):
return greater(self,other)
def __ge__(self, other):
return greater_equal(self,other)
def astype (self, tc):
"return self as array of given type."
d = self._data.astype(tc)
return array(d, mask=self._mask)
def byte_swapped(self):
"""Returns the raw data field, byte_swapped. Included for consistency
with numeric but doesn't make sense in this context.
"""
return self._data.byte_swapped()
def compressed (self):
"A 1-D array of all the non-masked data."
d = fromnumeric.ravel(self._data)
if self._mask is nomask:
return array(d)
else:
m = 1 - fromnumeric.ravel(self._mask)
c = fromnumeric.compress(m, d)
return array(c, copy=0)
def count (self, axis = None):
"Count of the non-masked elements in a, or along a certain axis."
m = self._mask
s = self._data.shape
ls = len(s)
if m is nomask:
if ls == 0:
return 1
if ls == 1:
return s[0]
if axis is None:
return reduce(lambda x, y:x*y, s)
else:
n = s[axis]
t = list(s)
del t[axis]
return ones(t) * n
if axis is None:
w = fromnumeric.ravel(m).astype(int)
n1 = size(w)
if n1 == 1:
n2 = w[0]
else:
n2 = umath.add.reduce(w)
return n1 - n2
else:
n1 = size(m, axis)
n2 = sum(m.astype(int), axis)
return n1 - n2
def dot (self, other):
"s.dot(other) = innerproduct(s, other)"
return innerproduct(self, other)
def fill_value(self):
"Get the current fill value."
return self._fill_value
def filled (self, fill_value=None):
"""A numeric array with masked values filled. If fill_value is None,
use self.fill_value().
If mask is nomask, copy data only if not contiguous.
Result is always a contiguous, numeric array.
# Is contiguous really necessary now?
"""
d = self._data
m = self._mask
if m is nomask:
if d.flags['CONTIGUOUS']:
return d
else:
return d.copy()
else:
if fill_value is None:
value = self._fill_value
else:
value = fill_value
if self is masked:
result = numeric.array(value)
else:
try:
result = numeric.array(d, dtype=d.dtype, copy=1)
result[m] = value
except (TypeError, AttributeError):
#ok, can't put that value in here
value = numeric.array(value, dtype=object)
d = d.astype(object)
result = fromnumeric.choose(m, (d, value))
return result
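    # Illustrative sketch (values indicative, not part of the original file):
    # masked slots take the fill value, unmasked data passes through.
    #     >>> a = array([1, 2, 3], mask=[0, 1, 0])
    #     >>> a.filled(-1)            # => array([ 1, -1,  3])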
def ids (self):
"""Return the ids of the data and mask areas"""
return (id(self._data), id(self._mask))
def iscontiguous (self):
"Is the data contiguous?"
return self._data.flags['CONTIGUOUS']
def itemsize(self):
"Item size of each data item."
return self._data.itemsize
def outer(self, other):
"s.outer(other) = outerproduct(s, other)"
return outerproduct(self, other)
def put (self, values):
"""Set the non-masked entries of self to filled(values).
No change to mask
"""
iota = numeric.arange(self.size)
d = self._data
if self._mask is nomask:
ind = iota
else:
ind = fromnumeric.compress(1 - self._mask, iota)
d[ind] = filled(values).astype(d.dtype)
def putmask (self, values):
"""Set the masked entries of self to filled(values).
Mask changed to nomask.
"""
d = self._data
if self._mask is not nomask:
d[self._mask] = filled(values).astype(d.dtype)
self._shared_mask = 0
self._mask = nomask
def ravel (self):
"""Return a 1-D view of self."""
if self._mask is nomask:
return masked_array(self._data.ravel())
else:
return masked_array(self._data.ravel(), self._mask.ravel())
def raw_data (self):
""" Obsolete; use data property instead.
The raw data; portions may be meaningless.
May be noncontiguous. Expert use only."""
return self._data
data = property(fget=raw_data,
doc="The data, but values at masked locations are meaningless.")
def raw_mask (self):
""" Obsolete; use mask property instead.
May be noncontiguous. Expert use only.
"""
return self._mask
mask = property(fget=raw_mask,
doc="The mask, may be nomask. Values where mask true are meaningless.")
def reshape (self, *s):
"""This array reshaped to shape s"""
d = self._data.reshape(*s)
if self._mask is nomask:
return masked_array(d)
else:
m = self._mask.reshape(*s)
return masked_array(d, m)
def set_fill_value (self, v=None):
"Set the fill value to v. Omit v to restore default."
if v is None:
v = default_fill_value (self.raw_data())
self._fill_value = v
def _get_ndim(self):
return self._data.ndim
ndim = property(_get_ndim, doc=numeric.ndarray.ndim.__doc__)
def _get_size (self):
return self._data.size
size = property(fget=_get_size, doc="Number of elements in the array.")
## CHECK THIS: signature of numeric.array.size?
def _get_dtype(self):
return self._data.dtype
dtype = property(fget=_get_dtype, doc="type of the array elements.")
def item(self, *args):
"Return Python scalar if possible"
if self._mask is not nomask:
m = self._mask.item(*args)
try:
if m[0]:
return masked
except IndexError:
return masked
return self._data.item(*args)
def itemset(self, *args):
"Set Python scalar into array"
item = args[-1]
args = args[:-1]
self[args] = item
def tolist(self, fill_value=None):
"Convert to list"
return self.filled(fill_value).tolist()
def tostring(self, fill_value=None):
"Convert to string"
return self.filled(fill_value).tostring()
def unmask (self):
"Replace the mask by nomask if possible."
if self._mask is nomask: return
m = make_mask(self._mask, flag=1)
if m is nomask:
self._mask = nomask
self._shared_mask = 0
def unshare_mask (self):
"If currently sharing mask, make a copy."
if self._shared_mask:
self._mask = make_mask (self._mask, copy=1, flag=0)
self._shared_mask = 0
def _get_ctypes(self):
return self._data.ctypes
def _get_T(self):
if (self.ndim < 2):
return self
return self.transpose()
shape = property(_get_shape, _set_shape,
doc = 'tuple giving the shape of the array')
flat = property(_get_flat, _set_flat,
doc = 'Access array in flat form.')
real = property(_get_real, _set_real,
doc = 'Access the real part of the array')
imaginary = property(_get_imaginary, _set_imaginary,
doc = 'Access the imaginary part of the array')
imag = imaginary
ctypes = property(_get_ctypes, None, doc="ctypes")
T = property(_get_T, None, doc="get transpose")
#end class MaskedArray
array = MaskedArray
def isMaskedArray (x):
"Is x a masked array, that is, an instance of MaskedArray?"
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
def allclose (a, b, fill_value=1, rtol=1.e-5, atol=1.e-8):
""" Returns true if all components of a and b are equal
subject to given tolerances.
If fill_value is 1, masked values considered equal.
If fill_value is 0, masked values considered unequal.
The relative error rtol should be positive and << 1.0
The absolute error atol comes into play for those elements
of b that are very small or zero; it says how small a must be also.
"""
m = mask_or(getmask(a), getmask(b))
d1 = filled(a)
d2 = filled(b)
x = filled(array(d1, copy=0, mask=m), fill_value).astype(float)
y = filled(array(d2, copy=0, mask=m), 1).astype(float)
d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y))
return fromnumeric.alltrue(fromnumeric.ravel(d))
def allequal (a, b, fill_value=1):
"""
True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = filled(a)
y = filled(b)
d = umath.equal(x, y)
return fromnumeric.alltrue(fromnumeric.ravel(d))
elif fill_value:
x = filled(a)
y = filled(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=0)
return fromnumeric.alltrue(fromnumeric.ravel(filled(dm, 1)))
else:
return 0
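# Illustrative sketch (return values indicative): with the default
# fill_value=1 masked entries compare as equal even when the underlying data
# differs; with fill_value=0 they compare as unequal.
#     >>> a = array([1.0, 2.0], mask=[0, 1])
#     >>> b = array([1.0, 9.0], mask=[0, 1])
#     >>> allequal(a, b)                  # => 1
#     >>> allequal(a, b, fill_value=0)    # => 0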
def masked_values (data, value, rtol=1.e-5, atol=1.e-8, copy=1):
"""
masked_values(data, value, rtol=1.e-5, atol=1.e-8)
Create a masked array; mask is nomask if possible.
If copy==0, and otherwise possible, result
may share data values with original array.
Let d = filled(data, value). Returns d
masked where abs(data-value)<= atol + rtol * abs(value)
if d is of a floating point type. Otherwise returns
masked_object(d, value, copy)
"""
abs = umath.absolute
d = filled(data, value)
if issubclass(d.dtype.type, numeric.floating):
m = umath.less_equal(abs(d-value), atol+rtol*abs(value))
m = make_mask(m, flag=1)
return array(d, mask = m, copy=copy,
fill_value=value)
else:
return masked_object(d, value, copy=copy)
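# Illustrative sketch: mask floating-point data lying within tolerance of a
# sentinel value (mask representation indicative).
#     >>> d = masked_values([1.0, -999.0, 3.0], -999.0)
#     >>> d.mask                          # => [0, 1, 0] (second slot masked)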
def masked_object (data, value, copy=1):
"Create array masked where exactly data equal to value"
d = filled(data, value)
dm = make_mask(umath.equal(d, value), flag=1)
return array(d, mask=dm, copy=copy, fill_value=value)
def arange(start, stop=None, step=1, dtype=None):
"""Just like range() except it returns a array whose type can be specified
by the keyword argument dtype.
"""
return array(numeric.arange(start, stop, step, dtype))
arrayrange = arange
def fromstring (s, t):
"Construct a masked array from a string. Result will have no mask."
return masked_array(numeric.fromstring(s, t))
def left_shift (a, n):
"Left shift n bits"
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, m)
def right_shift (a, n):
"Right shift n bits"
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, m)
def resize (a, new_shape):
"""resize(a, new_shape) returns a new array with the specified shape.
The original array's total size can be any size."""
m = getmask(a)
if m is not nomask:
m = fromnumeric.resize(m, new_shape)
result = array(fromnumeric.resize(filled(a), new_shape), mask=m)
result.set_fill_value(get_fill_value(a))
return result
def new_repeat(a, repeats, axis=None):
"""repeat elements of a repeats times along axis
repeats is a sequence of length a.shape[axis]
telling how many times to repeat each element.
"""
af = filled(a)
if isinstance(repeats, types.IntType):
if axis is None:
num = af.size
else:
num = af.shape[axis]
repeats = tuple([repeats]*num)
m = getmask(a)
if m is not nomask:
m = fromnumeric.repeat(m, repeats, axis)
d = fromnumeric.repeat(af, repeats, axis)
result = masked_array(d, m)
result.set_fill_value(get_fill_value(a))
return result
def identity(n):
"""identity(n) returns the identity matrix of shape n x n.
"""
return array(numeric.identity(n))
def indices (dimensions, dtype=None):
"""indices(dimensions,dtype=None) returns an array representing a grid
of indices with row-only, and column-only variation.
"""
return array(numeric.indices(dimensions, dtype))
def zeros (shape, dtype=float):
"""zeros(n, dtype=float) =
an array of all zeros of the given length or shape."""
return array(numeric.zeros(shape, dtype))
def ones (shape, dtype=float):
"""ones(n, dtype=float) =
an array of all ones of the given length or shape."""
return array(numeric.ones(shape, dtype))
def count (a, axis = None):
"Count of the non-masked elements in a, or along a certain axis."
a = masked_array(a)
return a.count(axis)
def power (a, b, third=None):
"a**b"
if third is not None:
raise MAError, "3-argument power not supported."
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
fa = filled(a, 1)
fb = filled(b, 1)
if fb.dtype.char in typecodes["Integer"]:
return masked_array(umath.power(fa, fb), m)
md = make_mask(umath.less(fa, 0), flag=1)
m = mask_or(m, md)
if m is nomask:
return masked_array(umath.power(fa, fb))
else:
fa = numeric.where(m, 1, fa)
return masked_array(umath.power(fa, fb), m)
def masked_array (a, mask=nomask, fill_value=None):
"""masked_array(a, mask=nomask) =
array(a, mask=mask, copy=0, fill_value=fill_value)
"""
return array(a, mask=mask, copy=0, fill_value=fill_value)
def sum (target, axis=None, dtype=None):
if axis is None:
target = ravel(target)
axis = 0
return add.reduce(target, axis, dtype)
def product (target, axis=None, dtype=None):
if axis is None:
target = ravel(target)
axis = 0
return multiply.reduce(target, axis, dtype)
def new_average (a, axis=None, weights=None, returned = 0):
"""average(a, axis=None, weights=None)
Computes average along indicated axis.
If axis is None, average over the entire array
Inputs can be integer or floating types; result is of type float.
If weights are given, result is sum(a*weights,axis=0)/(sum(weights,axis=0)*1.0)
    weights must have a's shape or be 1-d with length equal to the size
    of a in the given axis.
If returned, return a tuple: the result and the sum of the weights
or count of values. Results will have the same shape.
masked values in the weights will be set to 0.0
"""
a = masked_array(a)
mask = a.mask
ash = a.shape
if ash == ():
ash = (1,)
if axis is None:
if mask is nomask:
if weights is None:
n = add.reduce(a.raw_data().ravel())
d = reduce(lambda x, y: x * y, ash, 1.0)
else:
w = filled(weights, 0.0).ravel()
n = umath.add.reduce(a.raw_data().ravel() * w)
d = umath.add.reduce(w)
del w
else:
if weights is None:
n = add.reduce(a.ravel())
w = fromnumeric.choose(mask, (1.0, 0.0)).ravel()
d = umath.add.reduce(w)
del w
else:
w = array(filled(weights, 0.0), float, mask=mask).ravel()
n = add.reduce(a.ravel() * w)
d = add.reduce(w)
del w
else:
if mask is nomask:
if weights is None:
d = ash[axis] * 1.0
n = umath.add.reduce(a.raw_data(), axis)
else:
w = filled(weights, 0.0)
wsh = w.shape
if wsh == ():
wsh = (1,)
if wsh == ash:
w = numeric.array(w, float, copy=0)
n = add.reduce(a*w, axis)
d = add.reduce(w, axis)
del w
elif wsh == (ash[axis],):
r = [newaxis]*len(ash)
r[axis] = slice(None, None, 1)
w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)")
n = add.reduce(a*w, axis)
d = add.reduce(w, axis)
del w, r
else:
raise ValueError, 'average: weights wrong shape.'
else:
if weights is None:
n = add.reduce(a, axis)
w = numeric.choose(mask, (1.0, 0.0))
d = umath.add.reduce(w, axis)
del w
else:
w = filled(weights, 0.0)
wsh = w.shape
if wsh == ():
wsh = (1,)
if wsh == ash:
w = array(w, float, mask=mask, copy=0)
n = add.reduce(a*w, axis)
d = add.reduce(w, axis)
elif wsh == (ash[axis],):
r = [newaxis]*len(ash)
r[axis] = slice(None, None, 1)
w = eval ("w["+ repr(tuple(r)) + "] * masked_array(ones(ash, float), mask)")
n = add.reduce(a*w, axis)
d = add.reduce(w, axis)
else:
raise ValueError, 'average: weights wrong shape.'
del w
#print n, d, repr(mask), repr(weights)
if n is masked or d is masked: return masked
result = divide (n, d)
del n
if isinstance(result, MaskedArray):
result.unmask()
if returned:
if not isinstance(d, MaskedArray):
d = masked_array(d)
if not d.shape == result.shape:
d = ones(result.shape, float) * d
d.unmask()
if returned:
return result, d
else:
return result
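# Illustrative sketch of the weighted form documented above: masked entries
# contribute to neither the numerator nor the weight sum.
#     >>> a = array([1.0, 2.0, 4.0], mask=[0, 0, 1])
#     >>> new_average(a, weights=[1.0, 3.0, 5.0])
#     1.75                                # (1*1 + 2*3) / (1 + 3)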
def where (condition, x, y):
"""where(condition, x, y) is x where condition is nonzero, y otherwise.
condition must be convertible to an integer array.
Answer is always the shape of condition.
The type depends on x and y. It is integer if both x and y are
the value masked.
"""
fc = filled(not_equal(condition, 0), 0)
xv = filled(x)
xm = getmask(x)
yv = filled(y)
ym = getmask(y)
d = numeric.choose(fc, (yv, xv))
md = numeric.choose(fc, (ym, xm))
m = getmask(condition)
m = make_mask(mask_or(m, md), copy=0, flag=1)
return masked_array(d, m)
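# Illustrative sketch: the result takes x where the condition is nonzero and
# y elsewhere, and is masked wherever the selected source (or the condition)
# is masked (values indicative).
#     >>> x = array([10, 11, 12], mask=[0, 0, 1])
#     >>> y = array([20, 21, 22], mask=[0, 0, 0])
#     >>> where([1, 0, 1], x, y)          # => data [10, 21, --], last masked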
def choose (indices, t, out=None, mode='raise'):
"Returns array shaped like indices with elements chosen from t"
def fmask (x):
if x is masked: return 1
return filled(x)
def nmask (x):
if x is masked: return 1
m = getmask(x)
if m is nomask: return 0
return m
c = filled(indices, 0)
masks = [nmask(x) for x in t]
a = [fmask(x) for x in t]
d = numeric.choose(c, a)
m = numeric.choose(c, masks)
m = make_mask(mask_or(m, getmask(indices)), copy=0, flag=1)
return masked_array(d, m)
def masked_where(condition, x, copy=1):
"""Return x as an array masked where condition is true.
Also masked where x or condition masked.
"""
cm = filled(condition,1)
m = mask_or(getmask(x), cm)
return array(filled(x), copy=copy, mask=m)
def masked_greater(x, value, copy=1):
"masked_greater(x, value) = x masked where x > value"
return masked_where(greater(x, value), x, copy)
def masked_greater_equal(x, value, copy=1):
"masked_greater_equal(x, value) = x masked where x >= value"
return masked_where(greater_equal(x, value), x, copy)
def masked_less(x, value, copy=1):
"masked_less(x, value) = x masked where x < value"
return masked_where(less(x, value), x, copy)
def masked_less_equal(x, value, copy=1):
"masked_less_equal(x, value) = x masked where x <= value"
return masked_where(less_equal(x, value), x, copy)
def masked_not_equal(x, value, copy=1):
"masked_not_equal(x, value) = x masked where x != value"
d = filled(x, 0)
c = umath.not_equal(d, value)
m = mask_or(c, getmask(x))
return array(d, mask=m, copy=copy)
def masked_equal(x, value, copy=1):
"""masked_equal(x, value) = x masked where x == value
For floating point consider masked_values(x, value) instead.
"""
d = filled(x, 0)
c = umath.equal(d, value)
m = mask_or(c, getmask(x))
return array(d, mask=m, copy=copy)
def masked_inside(x, v1, v2, copy=1):
"""x with mask of all values of x that are inside [v1,v2]
v1 and v2 can be given in either order.
"""
if v2 < v1:
t = v2
v2 = v1
v1 = t
d = filled(x, 0)
c = umath.logical_and(umath.less_equal(d, v2), umath.greater_equal(d, v1))
m = mask_or(c, getmask(x))
return array(d, mask = m, copy=copy)
def masked_outside(x, v1, v2, copy=1):
"""x with mask of all values of x that are outside [v1,v2]
v1 and v2 can be given in either order.
"""
if v2 < v1:
t = v2
v2 = v1
v1 = t
d = filled(x, 0)
c = umath.logical_or(umath.less(d, v1), umath.greater(d, v2))
m = mask_or(c, getmask(x))
return array(d, mask = m, copy=copy)
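# Illustrative sketch: the two helpers are complementary band filters.
#     >>> masked_inside([1, 3, 5, 7], 3, 5)     # masks 3 and 5
#     >>> masked_outside([1, 3, 5, 7], 3, 5)    # masks 1 and 7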
def reshape (a, *newshape):
"Copy of a with a new shape."
m = getmask(a)
d = filled(a).reshape(*newshape)
if m is nomask:
return masked_array(d)
else:
return masked_array(d, mask=numeric.reshape(m, *newshape))
def ravel (a):
"a as one-dimensional, may share data and mask"
m = getmask(a)
d = fromnumeric.ravel(filled(a))
if m is nomask:
return masked_array(d)
else:
return masked_array(d, mask=numeric.ravel(m))
def concatenate (arrays, axis=0):
"Concatenate the arrays along the given axis"
d = []
for x in arrays:
d.append(filled(x))
d = numeric.concatenate(d, axis)
for x in arrays:
if getmask(x) is not nomask: break
else:
return masked_array(d)
dm = []
for x in arrays:
dm.append(getmaskarray(x))
dm = numeric.concatenate(dm, axis)
return masked_array(d, mask=dm)
def swapaxes (a, axis1, axis2):
m = getmask(a)
d = masked_array(a).data
if m is nomask:
return masked_array(data=numeric.swapaxes(d, axis1, axis2))
else:
return masked_array(data=numeric.swapaxes(d, axis1, axis2),
mask=numeric.swapaxes(m, axis1, axis2),)
def new_take (a, indices, axis=None, out=None, mode='raise'):
"returns selection of items from a."
m = getmask(a)
# d = masked_array(a).raw_data()
d = masked_array(a).data
if m is nomask:
return masked_array(numeric.take(d, indices, axis))
else:
return masked_array(numeric.take(d, indices, axis),
mask = numeric.take(m, indices, axis))
def transpose(a, axes=None):
"reorder dimensions per tuple axes"
m = getmask(a)
d = filled(a)
if m is nomask:
return masked_array(numeric.transpose(d, axes))
else:
return masked_array(numeric.transpose(d, axes),
mask = numeric.transpose(m, axes))
def put(a, indices, values, mode='raise'):
"""sets storage-indexed locations to corresponding values.
Values and indices are filled if necessary.
"""
d = a.raw_data()
ind = filled(indices)
v = filled(values)
numeric.put (d, ind, v)
m = getmask(a)
if m is not nomask:
a.unshare_mask()
numeric.put(a.raw_mask(), ind, 0)
def putmask(a, mask, values):
"putmask(a, mask, values) sets a where mask is true."
if mask is nomask:
return
numeric.putmask(a.raw_data(), mask, values)
m = getmask(a)
if m is nomask: return
a.unshare_mask()
numeric.putmask(a.raw_mask(), mask, 0)
def inner(a, b):
"""inner(a,b) returns the dot product of two arrays, which has
shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the
product of the elements from the last dimensions of a and b.
    Masked elements are replaced by zeros.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0: fa.shape = (1,)
if len(fb.shape) == 0: fb.shape = (1,)
return masked_array(numeric.inner(fa, fb))
innerproduct = inner
def outer(a, b):
"""outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))"""
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = numeric.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0)
return masked_array(d, m)
outerproduct = outer
def dot(a, b):
"""dot(a,b) returns matrix-multiplication between a and b. The product-sum
is over the last dimension of a and the second-to-last dimension of b.
Masked values are replaced by zeros. See also innerproduct.
"""
return innerproduct(filled(a, 0), numeric.swapaxes(filled(b, 0), -1, -2))
def compress(condition, x, dimension=-1, out=None):
"""Select those parts of x for which condition is true.
Masked values in condition are considered false.
"""
c = filled(condition, 0)
m = getmask(x)
if m is not nomask:
m = numeric.compress(c, m, dimension)
d = numeric.compress(c, filled(x), dimension)
return masked_array(d, m)
class _minimum_operation:
"Object to calculate minima"
def __init__ (self):
"""minimum(a, b) or minimum(a)
In one argument case returns the scalar minimum.
"""
pass
def __call__ (self, a, b=None):
"Execute the call behavior."
if b is None:
m = getmask(a)
if m is nomask:
d = amin(filled(a).ravel())
return d
ac = a.compressed()
if len(ac) == 0:
return masked
else:
return amin(ac.raw_data())
else:
return where(less(a, b), a, b)
def reduce (self, target, axis=0):
"""Reduce target along the given axis."""
m = getmask(target)
if m is nomask:
t = filled(target)
return masked_array (umath.minimum.reduce (t, axis))
else:
t = umath.minimum.reduce(filled(target, minimum_fill_value(target)), axis)
m = umath.logical_and.reduce(m, axis)
return masked_array(t, m, get_fill_value(target))
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
d = umath.minimum.outer(filled(a), filled(b))
return masked_array(d, m)
minimum = _minimum_operation ()
class _maximum_operation:
"Object to calculate maxima"
def __init__ (self):
"""maximum(a, b) or maximum(a)
In one argument case returns the scalar maximum.
"""
pass
def __call__ (self, a, b=None):
"Execute the call behavior."
if b is None:
m = getmask(a)
if m is nomask:
d = amax(filled(a).ravel())
return d
ac = a.compressed()
if len(ac) == 0:
return masked
else:
return amax(ac.raw_data())
else:
return where(greater(a, b), a, b)
def reduce (self, target, axis=0):
"""Reduce target along the given axis."""
m = getmask(target)
if m is nomask:
t = filled(target)
return masked_array (umath.maximum.reduce (t, axis))
else:
t = umath.maximum.reduce(filled(target, maximum_fill_value(target)), axis)
m = umath.logical_and.reduce(m, axis)
return masked_array(t, m, get_fill_value(target))
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
d = umath.maximum.outer(filled(a), filled(b))
return masked_array(d, m)
maximum = _maximum_operation ()
def sort (x, axis = -1, fill_value=None):
"""If x does not have a mask, return a masked array formed from the
result of numeric.sort(x, axis).
Otherwise, fill x with fill_value. Sort it.
Set a mask where the result is equal to fill_value.
Note that this may have unintended consequences if the data contains the
fill value at a non-masked site.
If fill_value is not given the default fill value for x's type will be
used.
"""
if fill_value is None:
fill_value = default_fill_value (x)
d = filled(x, fill_value)
s = fromnumeric.sort(d, axis)
if getmask(x) is nomask:
return masked_array(s)
return masked_values(s, fill_value, copy=0)
def diagonal(a, k = 0, axis1=0, axis2=1):
"""diagonal(a,k=0,axis1=0, axis2=1) = the k'th diagonal of a"""
d = fromnumeric.diagonal(filled(a), k, axis1, axis2)
m = getmask(a)
if m is nomask:
return masked_array(d, m)
else:
return masked_array(d, fromnumeric.diagonal(m, k, axis1, axis2))
def trace (a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""trace(a,offset=0, axis1=0, axis2=1) returns the sum along diagonals
    (defined by the last two dimensions) of the array.
"""
return diagonal(a, offset, axis1, axis2).sum(dtype=dtype)
def argsort (x, axis = -1, out=None, fill_value=None):
"""Treating masked values as if they have the value fill_value,
return sort indices for sorting along given axis.
if fill_value is None, use get_fill_value(x)
Returns a numpy array.
"""
d = filled(x, fill_value)
return fromnumeric.argsort(d, axis)
def argmin (x, axis = -1, out=None, fill_value=None):
"""Treating masked values as if they have the value fill_value,
return indices for minimum values along given axis.
if fill_value is None, use get_fill_value(x).
Returns a numpy array if x has more than one dimension.
Otherwise, returns a scalar index.
"""
d = filled(x, fill_value)
return fromnumeric.argmin(d, axis)
def argmax (x, axis = -1, out=None, fill_value=None):
"""Treating masked values as if they have the value fill_value,
return sort indices for maximum along given axis.
if fill_value is None, use -get_fill_value(x) if it exists.
Returns a numpy array if x has more than one dimension.
Otherwise, returns a scalar index.
"""
if fill_value is None:
fill_value = default_fill_value (x)
try:
fill_value = - fill_value
except:
pass
d = filled(x, fill_value)
return fromnumeric.argmax(d, axis)
def fromfunction (f, s):
"""apply f to s to create array as in umath."""
return masked_array(numeric.fromfunction(f, s))
def asarray(data, dtype=None):
"""asarray(data, dtype) = array(data, dtype, copy=0)
"""
if isinstance(data, MaskedArray) and \
(dtype is None or dtype == data.dtype):
return data
return array(data, dtype=dtype, copy=0)
# Add methods to support ndarray interface
# XXX: It is better to change the masked_*_operation adaptors
# XXX: to wrap ndarray methods directly to create ma.array methods.
from types import MethodType
def _m(f):
return MethodType(f, None, array)
def not_implemented(*args, **kwds):
raise NotImplementedError, "not yet implemented for numpy.ma arrays"
array.all = _m(alltrue)
array.any = _m(sometrue)
array.argmax = _m(argmax)
array.argmin = _m(argmin)
array.argsort = _m(argsort)
array.base = property(_m(not_implemented))
array.byteswap = _m(not_implemented)
def _choose(self, *args, **kwds):
return choose(self, args)
array.choose = _m(_choose)
del _choose
def _clip(self,a_min,a_max,out=None):
return MaskedArray(data = self.data.clip(asarray(a_min).data,
asarray(a_max).data),
mask = mask_or(self.mask,
mask_or(getmask(a_min),getmask(a_max))))
array.clip = _m(_clip)
def _compress(self, cond, axis=None, out=None):
return compress(cond, self, axis)
array.compress = _m(_compress)
del _compress
array.conj = array.conjugate = _m(conjugate)
array.copy = _m(not_implemented)
def _cumprod(self, axis=None, dtype=None, out=None):
m = self.mask
if m is not nomask:
m = umath.logical_or.accumulate(self.mask, axis)
return MaskedArray(data = self.filled(1).cumprod(axis, dtype), mask=m)
array.cumprod = _m(_cumprod)
def _cumsum(self, axis=None, dtype=None, out=None):
m = self.mask
if m is not nomask:
m = umath.logical_or.accumulate(self.mask, axis)
return MaskedArray(data=self.filled(0).cumsum(axis, dtype), mask=m)
array.cumsum = _m(_cumsum)
array.diagonal = _m(diagonal)
array.dump = _m(not_implemented)
array.dumps = _m(not_implemented)
array.fill = _m(not_implemented)
array.flags = property(_m(not_implemented))
array.flatten = _m(ravel)
array.getfield = _m(not_implemented)
def _max(a, axis=None, out=None):
if out is not None:
        raise TypeError("Output arrays unsupported for masked arrays")
if axis is None:
return maximum(a)
else:
return maximum.reduce(a, axis)
array.max = _m(_max)
del _max
def _min(a, axis=None, out=None):
if out is not None:
        raise TypeError("Output arrays unsupported for masked arrays")
if axis is None:
return minimum(a)
else:
return minimum.reduce(a, axis)
array.min = _m(_min)
del _min
array.mean = _m(new_average)
array.nbytes = property(_m(not_implemented))
array.newbyteorder = _m(not_implemented)
array.nonzero = _m(nonzero)
array.prod = _m(product)
def _ptp(a,axis=None,out=None):
return a.max(axis,out)-a.min(axis)
array.ptp = _m(_ptp)
array.repeat = _m(new_repeat)
array.resize = _m(resize)
array.searchsorted = _m(not_implemented)
array.setfield = _m(not_implemented)
array.setflags = _m(not_implemented)
array.sort = _m(not_implemented) # NB: ndarray.sort is inplace
def _squeeze(self):
try:
result = MaskedArray(data = self.data.squeeze(),
mask = self.mask.squeeze())
except AttributeError:
result = _wrapit(self, 'squeeze')
return result
array.squeeze = _m(_squeeze)
array.strides = property(_m(not_implemented))
array.sum = _m(sum)
def _swapaxes(self,axis1,axis2):
return MaskedArray(data = self.data.swapaxes(axis1, axis2),
mask = self.mask.swapaxes(axis1, axis2))
array.swapaxes = _m(_swapaxes)
array.take = _m(new_take)
array.tofile = _m(not_implemented)
array.trace = _m(trace)
array.transpose = _m(transpose)
def _var(self,axis=None,dtype=None, out=None):
if axis is None:
return numeric.asarray(self.compressed()).var()
a = self.swapaxes(axis,0)
a = a - a.mean(axis=0)
a *= a
a /= a.count(axis=0)
return a.swapaxes(0,axis).sum(axis)
def _std(self,axis=None, dtype=None, out=None):
return (self.var(axis,dtype))**0.5
array.var = _m(_var)
array.std = _m(_std)
array.view = _m(not_implemented)
array.round = _m(around)
del _m, MethodType, not_implemented
masked = MaskedArray(0, int, mask=1)
def repeat(a, repeats, axis=0):
return new_repeat(a, repeats, axis)
def average(a, axis=0, weights=None, returned=0):
return new_average(a, axis, weights, returned)
def take(a, indices, axis=0):
return new_take(a, indices, axis)
| gpl-3.0 |
almarklein/pywasm | wasmfun/util.py | 2 | 5596 | """
Utils for working with WASM and binary data.
"""
import os
import tempfile
import subprocess
from .components import Module
__all__ = ['inspect_bytes_at', 'hexdump', 'export_wasm_example',
'run_wasm_in_node', 'run_wasm_in_notebook']
def inspect_bytes_at(bb, offset):
""" Inspect bytes at the specified offset.
"""
start = max(0, offset - 16)
end = offset + 16
bytes2show = bb[start:end]
bytes2skip = bb[start:offset]
text_offset = len(repr(bytes2skip))
print(bytes2show)
print('|'.rjust(text_offset))
def hexdump(bb):
""" Do a hexdump of the given bytes.
"""
i = 0
line = 0
while i < len(bb):
ints = [hex(j)[2:].rjust(2, '0') for j in bb[i:i+16]]
print(str(line).rjust(8, '0'), *ints, sep=' ')
i += 16
line += 1
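# Minimal usage sketch (output spacing indicative):
#
#     >>> hexdump(b'\x00asm\x01\x00\x00\x00')
#     00000000 00 61 73 6d 01 00 00 00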
def export_wasm_example(filename, code, wasm):
""" Generate an html file for the given code and wasm module.
"""
if isinstance(wasm, Module):
wasm = wasm.to_bytes()
elif isinstance(wasm, bytes):
if not wasm.startswith(b'\x00asm'):
raise ValueError('export_wasm_example() given bytes do not look like a wasm module.')
else:
raise TypeError('export_wasm_example() expects a wasm module or bytes.')
wasm_text = str(list(wasm)) # [0, 1, 12, ...]
fname = os.path.basename(filename).rsplit('.', 1)[0]
# Read templates
src_filename_js = os.path.join(os.path.dirname(__file__), 'template.js')
src_filename_html = os.path.join(os.path.dirname(__file__), 'template.html')
with open(src_filename_js, 'rb') as f:
js = f.read().decode()
with open(src_filename_html, 'rb') as f:
html = f.read().decode()
# Produce HTML
js = js.replace('WASM_PLACEHOLDER', 'var wasm_data = new Uint8Array(' + wasm_text + ');')
html = html.replace('<title></title>', '<title>%s</title>' % fname)
html = html.replace('CODE_PLACEHOLDER', code)
html = html.replace('JS_PLACEHOLDER', js)
# Export HTML file
with open(filename, 'wb') as f:
f.write(html.encode())
print('Wrote example HTML to', filename)
_nb_output = 0
def run_wasm_in_notebook(wasm):
""" Load a WASM module in the Jupyter notebook.
"""
from IPython.display import display, HTML, Javascript
if isinstance(wasm, Module):
wasm = wasm.to_bytes()
elif isinstance(wasm, bytes):
if not wasm.startswith(b'\x00asm'):
raise ValueError('run_wasm_in_notebook() given bytes do not look like a wasm module.')
else:
raise TypeError('run_wasm_in_notebook() expects a wasm module or bytes.')
wasm_text = str(list(wasm)) # [0, 1, 12, ...]
# Read templates
src_filename_js = os.path.join(os.path.dirname(__file__), 'template.js')
with open(src_filename_js, 'rb') as f:
js = f.read().decode()
# Get id
global _nb_output
_nb_output += 1
id = 'wasm_output_%u' % _nb_output
# Produce JS
js = js.replace('wasm_output', id)
js = js.replace('WASM_PLACEHOLDER', 'var wasm_data = new Uint8Array(' + wasm_text + ');')
js = '(function() {\n%s;\ncompile_my_wasm();\n})();' % js
# Output in current cell
display(HTML("<div style='border: 2px solid blue;' id='%s'></div>" % id))
display(Javascript(js))
def run_wasm_in_node(wasm):
""" Load a WASM module in node.
Just make sure that your module has a main function.
"""
if isinstance(wasm, Module):
wasm = wasm.to_bytes()
elif isinstance(wasm, bytes):
if not wasm.startswith(b'\x00asm'):
raise ValueError('run_wasm_in_node() given bytes do not look like a wasm module.')
else:
raise TypeError('run_wasm_in_node() expects a wasm module or bytes.')
wasm_text = str(list(wasm)) # [0, 1, 12, ...]
# Read templates
src_filename_js = os.path.join(os.path.dirname(__file__), 'template.js')
with open(src_filename_js, 'rb') as f:
js = f.read().decode()
# Produce JS
js = js.replace('WASM_PLACEHOLDER', 'var wasm_data = new Uint8Array(' + wasm_text + ');')
js += '\nprint_ln("Hello from Nodejs!");\ncompile_my_wasm();\n'
# Write temporary file
filename = os.path.join(tempfile.gettempdir(), 'pyscript_%i.js' % os.getpid())
with open(filename, 'wb') as f:
f.write(js.encode())
# Execute JS in nodejs
try:
res = subprocess.check_output([get_node_exe(), '--use_strict', filename])
except Exception as err:
if hasattr(err, 'output'):
err = err.output.decode()
else:
err = str(err)
err = err[:200] + '...' if len(err) > 200 else err
raise Exception(err)
finally:
try:
os.remove(filename)
except Exception:
pass
print(res.decode().rstrip())
NODE_EXE = None
def get_node_exe():
""" Small utility that provides the node exe. The first time this
is called both 'nodejs' and 'node' are tried. To override the
    executable path, set the ``WASMFUN_NODE_EXE`` environment variable.
"""
# This makes things work on Ubuntu's nodejs as well as other node
# implementations, and allows users to set the node exe if necessary
global NODE_EXE
NODE_EXE = os.getenv('WASMFUN_NODE_EXE') or NODE_EXE
if NODE_EXE is None:
NODE_EXE = 'nodejs'
try:
subprocess.check_output([NODE_EXE, '-v'])
except Exception: # pragma: no cover
NODE_EXE = 'node'
return NODE_EXE
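# Usage sketch: point wasmfun at a specific Node.js binary before running a
# module (the path below is hypothetical):
#
#     import os
#     os.environ['WASMFUN_NODE_EXE'] = '/usr/local/bin/node'
#     run_wasm_in_node(my_module)   # my_module: a Module exporting main()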
| bsd-2-clause |
mjtamlyn/django | tests/sessions_tests/models.py | 85 | 1160 | """
This custom Session model adds an extra column to store an account ID. In
real-world applications, it gives you the option of querying the database for
all active sessions for a particular account.
"""
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.contrib.sessions.base_session import AbstractBaseSession
from django.db import models
class CustomSession(AbstractBaseSession):
"""
A session model with a column for an account ID.
"""
account_id = models.IntegerField(null=True, db_index=True)
@classmethod
def get_session_store_class(cls):
return SessionStore
class SessionStore(DBStore):
"""
A database session store, that handles updating the account ID column
inside the custom session model.
"""
@classmethod
def get_model_class(cls):
return CustomSession
def create_model_instance(self, data):
obj = super().create_model_instance(data)
try:
account_id = int(data.get('_auth_user_id'))
except (ValueError, TypeError):
account_id = None
obj.account_id = account_id
return obj
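# Illustrative sketch (not part of the original file): once this model is
# active via SESSION_ENGINE, the live sessions of one account could be
# queried as, e.g.:
#
#     from django.utils import timezone
#     CustomSession.objects.filter(account_id=42,
#                                  expire_date__gt=timezone.now())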
| bsd-3-clause |
leighpauls/k2cro4 | third_party/python_26/Lib/ctypes/test/test_as_parameter.py | 66 | 6600 | import unittest
from ctypes import *
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.failUnlessEqual(result, 139)
        self.failUnlessEqual(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.failUnlessEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.failUnlessEqual(type(result), POINTER(c_int))
self.failUnlessEqual(result.contents.value, 42)
        # This one works...
result = f(self.wrap(pointer(v)))
self.failUnlessEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.failUnlessEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.failUnlessEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.failUnlessEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.failUnless(isinstance(value, (int, long)))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.failUnlessEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.failUnlessEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.failUnlessEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.failUnlessEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.failUnlessEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def getParameter(self):
return self._param
_as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
rajashreer7/autotest-client-tests | linux-tools/python_urlgrabber/python_urlgrabber.py | 3 | 1617 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class python_urlgrabber(test.test):
"""
Autotest module for testing basic functionality
of python_urlgrabber
@author Kingsuk Deb, [email protected] ##
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
cwd = os.getcwd()
os.chdir("%s/python_urlgrabber" %(test_path))
os.system("patch -p0 < ibmbug81490-exclude-test_byterange.diff")
os.system("patch -p0 < ibmbug78855-fix-test_url-fix-pycurl_error.diff")
os.system("patch -p0 < ibmbug78855-print_to_stderr_in_interrupt_callback_tests.diff")
os.chdir(cwd)
ret_val = subprocess.call(test_path + '/python_urlgrabber' + '/python-urlgrabber.sh', shell=True)
if ret_val != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| gpl-2.0 |
pranavtendolkr/horizon | openstack_dashboard/dashboards/project/loadbalancers/tables.py | 16 | 13632 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletevip"
policy_rules = (("network", "delete_vip"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete VIP",
u"Delete VIPs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of VIP",
u"Scheduled deletion of VIPs",
count
)
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
policy_rules = (("network", "delete_pool"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Pool",
u"Delete Pools",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Pool",
u"Scheduled deletion of Pools",
count
)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
policy_rules = (("network", "delete_health_monitor"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Monitor",
u"Delete Monitors",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Monitor",
u"Scheduled deletion of Monitors",
count
)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
policy_rules = (("network", "delete_member"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Member",
u"Delete Members",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Member",
u"Scheduled deletion of Members",
count
)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
def get_vip_link(pool):
if pool.vip_id:
return reverse("horizon:project:loadbalancers:vipdetails",
args=(http.urlquote(pool.vip_id),))
else:
return None
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class UpdatePoolsRow(tables.Row):
ajax = True
def get_data(self, request, pool_id):
pool = api.lbaas.pool_get(request, pool_id)
try:
vip = api.lbaas.vip_get(request, pool.vip_id)
pool.vip_name = vip.name
except Exception:
pool.vip_name = pool.vip_id
try:
subnet = api.neutron.subnet_get(request, pool.subnet_id)
pool.subnet_name = subnet.cidr
except Exception:
pool.subnet_name = pool.subnet_id
return pool
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a Pool",
u"Active")),
("Down", pgettext_lazy("Current status of a Pool",
u"Down")),
("Error", pgettext_lazy("Current status of a Pool",
u"Error")),
("Created", pgettext_lazy("Current status of a Pool",
u"Created")),
("Pending_Create", pgettext_lazy("Current status of a Pool",
u"Pending Create")),
("Pending_Update", pgettext_lazy("Current status of a Pool",
u"Pending Update")),
("Pending_Delete", pgettext_lazy("Current status of a Pool",
u"Pending Delete")),
("Inactive", pgettext_lazy("Current status of a Pool",
u"Inactive")),
)
class PoolsTable(tables.DataTable):
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column('subnet_name', verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
vip_name = tables.Column('vip_name', verbose_name=_("VIP"),
link=get_vip_link)
class Meta(object):
name = "poolstable"
verbose_name = _("Pools")
status_columns = ["status"]
row_class = UpdatePoolsRow
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class UpdateMemberRow(tables.Row):
ajax = True
def get_data(self, request, member_id):
member = api.lbaas.member_get(request, member_id)
try:
pool = api.lbaas.pool_get(request, member.pool_id)
member.pool_name = pool.name
except Exception:
member.pool_name = member.pool_id
return member
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "memberstable"
verbose_name = _("Members")
status_columns = ["status"]
row_class = UpdateMemberRow
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
class Meta(object):
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
| apache-2.0 |
EvenStrangest/tensorflow | tensorflow/examples/skflow/iris_save_restore.py | 9 | 1677 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import learn
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
# Clean checkpoint folder if exists
try:
shutil.rmtree('/tmp/skflow_examples/iris_custom_model')
except OSError:
pass
# Save model, parameters and learned variables.
classifier.save('/tmp/skflow_examples/iris_custom_model')
classifier = None
## Restore everything
new_classifier = learn.TensorFlowEstimator.restore('/tmp/skflow_examples/iris_custom_model')
score = metrics.accuracy_score(y_test, new_classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
deuxpi/pytrainer | pytrainer/gui/windowextensions.py | 2 | 5311 | # -*- coding: iso-8859-1 -*-
#Copyright (C) Fiz Vazquez [email protected]
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from SimpleGladeApp import SimpleGladeApp
import gtk
import gobject
import os
class WindowExtensions(SimpleGladeApp):
def __init__(self, data_path = None, parent=None):
glade_path="glade/extensions.glade"
root = "extensions"
domain = None
self.parent = parent
SimpleGladeApp.__init__(self, data_path+glade_path, root, domain)
def new(self):
column_names=["id","name"]
self.create_treeview(self.extensionsTree,column_names)
def setList(self, list):
iterOne = False
store = gtk.ListStore(
gobject.TYPE_STRING,
gobject.TYPE_STRING
)
for i in list:
iter = store.append()
if not iterOne:
iterOne = iter
store.set (iter,
0, i[0],
1, i[1]
)
self.extensionsTree.set_model(store)
if iterOne:
self.extensionsTree.get_selection().select_iter(iterOne)
self.on_extensionsTree_clicked(None,None)
	def create_treeview(self,treeview,column_names):
		for column_index, column_name in enumerate(column_names):
			column = gtk.TreeViewColumn(column_name, gtk.CellRendererText(), text=column_index)
			# hide the first (id) column; enumerate already provides the index
			if column_index == 0:
				column.set_visible(False)
			treeview.append_column(column)
def on_extensionsTree_clicked(self,widget,widget2):
selected,iter = self.extensionsTree.get_selection().get_selected()
name,description,status,helpfile,type = self.parent.getExtensionInfo(selected.get_value(iter,0))
self.nameEntry.set_text(name)
self.descriptionEntry.set_text(description)
if status is None or int(status) == 0:
self.statusEntry.set_text(_("Disable"))
else:
self.statusEntry.set_text(_("Enable"))
def on_preferences_clicked(self,widget):
selected,iter = self.extensionsTree.get_selection().get_selected()
name,description,status,helpfile,type = self.parent.getExtensionInfo(selected.get_value(iter,0))
prefs = self.parent.getExtensionConfParams(selected.get_value(iter,0))
self.prefwindow = gtk.Window()
self.prefwindow.set_border_width(20)
self.prefwindow.set_title(_("%s settings" %name))
table = gtk.Table(1,2)
i=0
self.entryList = []
#print prefs
for key in prefs.keys():
#print key, prefs[key]
label = gtk.Label("<b>%s</b>"%key)
label.set_use_markup(True)
if key != "status":
entry = gtk.Entry()
if prefs[key] is None:
entry.set_text("")
else:
entry.set_text(prefs[key])
self.entryList.append(entry)
table.attach(entry,1,2,i,i+1)
else:
combobox = gtk.combo_box_new_text()
combobox.append_text("Disable")
combobox.append_text("Enable")
if prefs[key] is None:
combobox.set_active(0)
else:
combobox.set_active(int(prefs[key]))
table.attach(combobox,1,2,i,i+1)
self.entryList.append(combobox)
table.attach(label,0,1,i,i+1)
i+=1
button = gtk.Button(_("OK"))
button.connect("clicked", self.on_acceptSettings_clicked, None)
table.attach(button,0,2,i,i+1)
self.prefwindow.add(table)
self.prefwindow.show_all()
def on_help_clicked(self,widget):
selected,iter = self.extensionsTree.get_selection().get_selected()
name,description,status,helpfile,type = self.parent.getExtensionInfo(selected.get_value(iter,0))
file = os.open(helpfile,0)
text = os.read(file,2000)
os.close(file)
helpwindow = gtk.Window()
button = gtk.Button(_("OK"))
button.connect("clicked", self.on_accepthelp_clicked, helpwindow)
vbox = gtk.VBox()
buffer = gtk.TextBuffer()
buffer.set_text(text)
textview = gtk.TextView()
textview.set_buffer(buffer)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.add(textview)
vbox.pack_start(scrolledwindow, True)
vbox.pack_start(button, False)
helpwindow.add(vbox)
helpwindow.resize(550,300)
helpwindow.show_all()
def on_accepthelp_clicked(self,widget,window):
window.hide()
window = None
def on_acceptSettings_clicked(self, widget, widget2):
selected,iter = self.extensionsTree.get_selection().get_selected()
prefs = self.parent.getExtensionConfParams(selected.get_value(iter,0))
savedOptions = []
i = 0
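		# entryList holds a mix of gtk.Entry and gtk.ComboBox widgets (built in
		# on_preferences_clicked); gtk.ComboBox has no get_text(), so the
		# AttributeError branch below reads the combobox's active index instead.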
for key in prefs.keys():
try:
savedOptions.append((key,self.entryList[i].get_text()))
			except AttributeError:
combobox = self.entryList[i]
index = combobox.get_active()
savedOptions.append((key,"%s" %index))
i+=1
self.prefwindow.hide()
self.prefwindow = None
self.parent.setExtensionConfParams(selected.get_value(iter,0),savedOptions)
self.on_extensionsTree_clicked(None,None)
def on_accept_clicked(self,widget):
self.extensions.hide()
self.extensions = None
self.quit()
| gpl-2.0 |
eaplatanios/tensorflow | tensorflow/python/util/tf_contextlib_test.py | 141 | 3075 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_contextlib."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
@tf_contextlib.contextmanager
def test_yield_append_before_and_after_yield(x, before, after):
x.append(before)
yield
x.append(after)
@tf_contextlib.contextmanager
def test_yield_return_x_plus_1(x):
yield x + 1
@tf_contextlib.contextmanager
def test_params_and_defaults(a, b=2, c=True, d='hello'):
return [a, b, c, d]
class TfContextlibTest(test.TestCase):
def testRunsCodeBeforeYield(self):
x = []
with test_yield_append_before_and_after_yield(x, 'before', ''):
self.assertEqual('before', x[-1])
def testRunsCodeAfterYield(self):
x = []
with test_yield_append_before_and_after_yield(x, '', 'after'):
pass
self.assertEqual('after', x[-1])
def testNestedWith(self):
x = []
with test_yield_append_before_and_after_yield(x, 'before', 'after'):
with test_yield_append_before_and_after_yield(x, 'inner', 'outer'):
with test_yield_return_x_plus_1(1) as var:
x.append(var)
self.assertEqual(['before', 'inner', 2, 'outer', 'after'], x)
def testMultipleCallsOfSeparateInstances(self):
x = []
with test_yield_append_before_and_after_yield(x, 1, 2):
pass
with test_yield_append_before_and_after_yield(x, 3, 4):
pass
self.assertEqual([1, 2, 3, 4], x)
def testReturnsResultFromYield(self):
with test_yield_return_x_plus_1(3) as result:
self.assertEqual(4, result)
def testUnwrapContextManager(self):
decorators, target = tf_decorator.unwrap(test_params_and_defaults)
self.assertEqual(1, len(decorators))
self.assertTrue(isinstance(decorators[0], tf_decorator.TFDecorator))
self.assertEqual('contextmanager', decorators[0].decorator_name)
self.assertFalse(isinstance(target, tf_decorator.TFDecorator))
def testGetArgSpecReturnsWrappedArgSpec(self):
argspec = tf_inspect.getargspec(test_params_and_defaults)
self.assertEqual(['a', 'b', 'c', 'd'], argspec.args)
self.assertEqual((2, True, 'hello'), argspec.defaults)
if __name__ == '__main__':
test.main()
| apache-2.0 |
epiqc/ScaffCC | clang/utils/ABITest/ABITestGen.py | 35 | 30467 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from pprint import pprint
import random, atexit, time
from random import randrange
import re
from Enumeration import *
from TypeGen import *
####
class TypePrinter(object):
def __init__(self, output, outputHeader=None,
outputTests=None, outputDriver=None,
headerName=None, info=None):
self.output = output
self.outputHeader = outputHeader
self.outputTests = outputTests
self.outputDriver = outputDriver
self.writeBody = outputHeader or outputTests or outputDriver
self.types = {}
self.testValues = {}
self.testReturnValues = {}
self.layoutTests = []
self.declarations = set()
if info:
for f in (self.output,self.outputHeader,self.outputTests,self.outputDriver):
if f:
print(info, file=f)
if self.writeBody:
print('#include <stdio.h>\n', file=self.output)
if self.outputTests:
print('#include <stdio.h>', file=self.outputTests)
print('#include <string.h>', file=self.outputTests)
print('#include <assert.h>\n', file=self.outputTests)
if headerName:
for f in (self.output,self.outputTests,self.outputDriver):
if f is not None:
print('#include "%s"\n'%(headerName,), file=f)
if self.outputDriver:
print('#include <stdio.h>', file=self.outputDriver)
print('#include <stdlib.h>\n', file=self.outputDriver)
print('int main(int argc, char **argv) {', file=self.outputDriver)
print(' int index = -1;', file=self.outputDriver)
print(' if (argc > 1) index = atoi(argv[1]);', file=self.outputDriver)
def finish(self):
if self.layoutTests:
print('int main(int argc, char **argv) {', file=self.output)
print(' int index = -1;', file=self.output)
print(' if (argc > 1) index = atoi(argv[1]);', file=self.output)
for i,f in self.layoutTests:
print(' if (index == -1 || index == %d)' % i, file=self.output)
print(' %s();' % f, file=self.output)
print(' return 0;', file=self.output)
print('}', file=self.output)
if self.outputDriver:
print(' printf("DONE\\n");', file=self.outputDriver)
print(' return 0;', file=self.outputDriver)
print('}', file=self.outputDriver)
def addDeclaration(self, decl):
if decl in self.declarations:
return False
self.declarations.add(decl)
if self.outputHeader:
print(decl, file=self.outputHeader)
else:
print(decl, file=self.output)
if self.outputTests:
print(decl, file=self.outputTests)
return True
def getTypeName(self, T):
name = self.types.get(T)
if name is None:
# Reserve slot
self.types[T] = None
self.types[T] = name = T.getTypeName(self)
return name
def writeLayoutTest(self, i, ty):
tyName = self.getTypeName(ty)
tyNameClean = tyName.replace(' ','_').replace('*','star')
fnName = 'test_%s' % tyNameClean
print('void %s(void) {' % fnName, file=self.output)
self.printSizeOfType(' %s'%fnName, tyName, ty, self.output)
self.printAlignOfType(' %s'%fnName, tyName, ty, self.output)
self.printOffsetsOfType(' %s'%fnName, tyName, ty, self.output)
print('}', file=self.output)
print(file=self.output)
self.layoutTests.append((i,fnName))
def writeFunction(self, i, FT):
args = ', '.join(['%s arg%d'%(self.getTypeName(t),i) for i,t in enumerate(FT.argTypes)])
if not args:
args = 'void'
if FT.returnType is None:
retvalName = None
retvalTypeName = 'void'
else:
retvalTypeName = self.getTypeName(FT.returnType)
if self.writeBody or self.outputTests:
retvalName = self.getTestReturnValue(FT.returnType)
fnName = 'fn%d'%(FT.index,)
if self.outputHeader:
print('%s %s(%s);'%(retvalTypeName, fnName, args), file=self.outputHeader)
elif self.outputTests:
print('%s %s(%s);'%(retvalTypeName, fnName, args), file=self.outputTests)
print('%s %s(%s)'%(retvalTypeName, fnName, args), end=' ', file=self.output)
if self.writeBody:
print('{', file=self.output)
for i,t in enumerate(FT.argTypes):
self.printValueOfType(' %s'%fnName, 'arg%d'%i, t)
if retvalName is not None:
print(' return %s;'%(retvalName,), file=self.output)
print('}', file=self.output)
else:
print('{}', file=self.output)
print(file=self.output)
if self.outputDriver:
print(' if (index == -1 || index == %d) {' % i, file=self.outputDriver)
print(' extern void test_%s(void);' % fnName, file=self.outputDriver)
print(' test_%s();' % fnName, file=self.outputDriver)
print(' }', file=self.outputDriver)
if self.outputTests:
if self.outputHeader:
print('void test_%s(void);'%(fnName,), file=self.outputHeader)
if retvalName is None:
retvalTests = None
else:
retvalTests = self.getTestValuesArray(FT.returnType)
tests = [self.getTestValuesArray(ty) for ty in FT.argTypes]
print('void test_%s(void) {'%(fnName,), file=self.outputTests)
if retvalTests is not None:
print(' printf("%s: testing return.\\n");'%(fnName,), file=self.outputTests)
print(' for (int i=0; i<%d; ++i) {'%(retvalTests[1],), file=self.outputTests)
args = ', '.join(['%s[%d]'%(t,randrange(l)) for t,l in tests])
print(' %s RV;'%(retvalTypeName,), file=self.outputTests)
print(' %s = %s[i];'%(retvalName, retvalTests[0]), file=self.outputTests)
print(' RV = %s(%s);'%(fnName, args), file=self.outputTests)
self.printValueOfType(' %s_RV'%fnName, 'RV', FT.returnType, output=self.outputTests, indent=4)
self.checkTypeValues('RV', '%s[i]' % retvalTests[0], FT.returnType, output=self.outputTests, indent=4)
print(' }', file=self.outputTests)
if tests:
print(' printf("%s: testing arguments.\\n");'%(fnName,), file=self.outputTests)
for i,(array,length) in enumerate(tests):
for j in range(length):
args = ['%s[%d]'%(t,randrange(l)) for t,l in tests]
args[i] = '%s[%d]'%(array,j)
print(' %s(%s);'%(fnName, ', '.join(args),), file=self.outputTests)
print('}', file=self.outputTests)
def getTestReturnValue(self, type):
typeName = self.getTypeName(type)
info = self.testReturnValues.get(typeName)
if info is None:
name = '%s_retval'%(typeName.replace(' ','_').replace('*','star'),)
print('%s %s;'%(typeName,name), file=self.output)
if self.outputHeader:
print('extern %s %s;'%(typeName,name), file=self.outputHeader)
elif self.outputTests:
print('extern %s %s;'%(typeName,name), file=self.outputTests)
info = self.testReturnValues[typeName] = name
return info
def getTestValuesArray(self, type):
typeName = self.getTypeName(type)
info = self.testValues.get(typeName)
if info is None:
name = '%s_values'%(typeName.replace(' ','_').replace('*','star'),)
print('static %s %s[] = {'%(typeName,name), file=self.outputTests)
length = 0
for item in self.getTestValues(type):
print('\t%s,'%(item,), file=self.outputTests)
length += 1
print('};', file=self.outputTests)
info = self.testValues[typeName] = (name,length)
return info
def getTestValues(self, t):
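        # Illustrative examples (assumptions, not from the original source):
        # BuiltinType('int', 4) yields '(int) 0', '(int) -1', '(int) 1', while
        # BuiltinType('float', 4) yields '0.0f', '-1.0f', '1.0f'.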
if isinstance(t, BuiltinType):
if t.name=='float':
for i in ['0.0','-1.0','1.0']:
yield i+'f'
elif t.name=='double':
for i in ['0.0','-1.0','1.0']:
yield i
            elif t.name == 'void*':
yield '(void*) 0'
yield '(void*) -1'
else:
yield '(%s) 0'%(t.name,)
yield '(%s) -1'%(t.name,)
yield '(%s) 1'%(t.name,)
elif isinstance(t, EnumType):
for i in range(0, len(t.enumerators)):
yield 'enum%dval%d_%d' % (t.index, i, t.unique_id)
elif isinstance(t, RecordType):
nonPadding = [f for f in t.fields
if not f.isPaddingBitField()]
if not nonPadding:
yield '{ }'
return
# FIXME: Use designated initializers to access non-first
# fields of unions.
if t.isUnion:
for v in self.getTestValues(nonPadding[0]):
yield '{ %s }' % v
return
fieldValues = [list(v) for v in map(self.getTestValues, nonPadding)]
for i,values in enumerate(fieldValues):
for v in values:
elements = [random.choice(fv) for fv in fieldValues]
elements[i] = v
yield '{ %s }'%(', '.join(elements))
elif isinstance(t, ComplexType):
for t in self.getTestValues(t.elementType):
yield '%s + %s * 1i'%(t,t)
elif isinstance(t, ArrayType):
values = list(self.getTestValues(t.elementType))
if not values:
yield '{ }'
for i in range(t.numElements):
for v in values:
elements = [random.choice(values) for i in range(t.numElements)]
elements[i] = v
yield '{ %s }'%(', '.join(elements))
else:
            raise NotImplementedError('Cannot make test values of type: "%s"'%(t,))
def printSizeOfType(self, prefix, name, t, output=None, indent=2):
print('%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'%(indent, '', prefix, name, name), file=output)
def printAlignOfType(self, prefix, name, t, output=None, indent=2):
print('%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'%(indent, '', prefix, name, name), file=output)
def printOffsetsOfType(self, prefix, name, t, output=None, indent=2):
if isinstance(t, RecordType):
for i,f in enumerate(t.fields):
if f.isBitField():
continue
fname = 'field%d' % i
print('%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'%(indent, '', prefix, name, fname, name, fname), file=output)
def printValueOfType(self, prefix, name, t, output=None, indent=2):
if output is None:
output = self.output
if isinstance(t, BuiltinType):
value_expr = name
if t.name.split(' ')[-1] == '_Bool':
# Hack to work around PR5579.
value_expr = "%s ? 2 : 0" % name
if t.name.endswith('long long'):
code = 'lld'
elif t.name.endswith('long'):
code = 'ld'
elif t.name.split(' ')[-1] in ('_Bool','char','short',
'int','unsigned'):
code = 'd'
elif t.name in ('float','double'):
code = 'f'
elif t.name == 'long double':
code = 'Lf'
else:
code = 'p'
print('%*sprintf("%s: %s = %%%s\\n", %s);'%(
indent, '', prefix, name, code, value_expr), file=output)
elif isinstance(t, EnumType):
print('%*sprintf("%s: %s = %%d\\n", %s);'%(indent, '', prefix, name, name), file=output)
elif isinstance(t, RecordType):
if not t.fields:
print('%*sprintf("%s: %s (empty)\\n");'%(indent, '', prefix, name), file=output)
for i,f in enumerate(t.fields):
if f.isPaddingBitField():
continue
fname = '%s.field%d'%(name,i)
self.printValueOfType(prefix, fname, f, output=output, indent=indent)
elif isinstance(t, ComplexType):
self.printValueOfType(prefix, '(__real %s)'%name, t.elementType, output=output,indent=indent)
self.printValueOfType(prefix, '(__imag %s)'%name, t.elementType, output=output,indent=indent)
elif isinstance(t, ArrayType):
for i in range(t.numElements):
# Access in this fashion as a hackish way to portably
# access vectors.
if t.isVector:
self.printValueOfType(prefix, '((%s*) &%s)[%d]'%(t.elementType,name,i), t.elementType, output=output,indent=indent)
else:
self.printValueOfType(prefix, '%s[%d]'%(name,i), t.elementType, output=output,indent=indent)
else:
raise NotImplementedError('Cannot print value of type: "%s"'%(t,))
def checkTypeValues(self, nameLHS, nameRHS, t, output=None, indent=2):
prefix = 'foo'
if output is None:
output = self.output
if isinstance(t, BuiltinType):
print('%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS), file=output)
elif isinstance(t, EnumType):
print('%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS), file=output)
elif isinstance(t, RecordType):
for i,f in enumerate(t.fields):
if f.isPaddingBitField():
continue
self.checkTypeValues('%s.field%d'%(nameLHS,i), '%s.field%d'%(nameRHS,i),
f, output=output, indent=indent)
if t.isUnion:
break
elif isinstance(t, ComplexType):
self.checkTypeValues('(__real %s)'%nameLHS, '(__real %s)'%nameRHS, t.elementType, output=output,indent=indent)
self.checkTypeValues('(__imag %s)'%nameLHS, '(__imag %s)'%nameRHS, t.elementType, output=output,indent=indent)
elif isinstance(t, ArrayType):
for i in range(t.numElements):
# Access in this fashion as a hackish way to portably
# access vectors.
if t.isVector:
self.checkTypeValues('((%s*) &%s)[%d]'%(t.elementType,nameLHS,i),
'((%s*) &%s)[%d]'%(t.elementType,nameRHS,i),
t.elementType, output=output,indent=indent)
else:
self.checkTypeValues('%s[%d]'%(nameLHS,i), '%s[%d]'%(nameRHS,i),
t.elementType, output=output,indent=indent)
else:
raise NotImplementedError('Cannot print value of type: "%s"'%(t,))
import sys
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("%prog [options] {indices}")
parser.add_option("", "--mode", dest="mode",
help="autogeneration mode (random or linear) [default %default]",
type='choice', choices=('random','linear'), default='linear')
parser.add_option("", "--count", dest="count",
help="autogenerate COUNT functions according to MODE",
type=int, default=0)
parser.add_option("", "--min", dest="minIndex", metavar="N",
help="start autogeneration with the Nth function type [default %default]",
type=int, default=0)
parser.add_option("", "--max", dest="maxIndex", metavar="N",
help="maximum index for random autogeneration [default %default]",
type=int, default=10000000)
parser.add_option("", "--seed", dest="seed",
help="random number generator seed [default %default]",
type=int, default=1)
parser.add_option("", "--use-random-seed", dest="useRandomSeed",
help="use random value for initial random number generator seed",
action='store_true', default=False)
parser.add_option("", "--skip", dest="skipTests",
help="add a test index to skip",
type=int, action='append', default=[])
parser.add_option("-o", "--output", dest="output", metavar="FILE",
help="write output to FILE [default %default]",
type=str, default='-')
parser.add_option("-O", "--output-header", dest="outputHeader", metavar="FILE",
help="write header file for output to FILE [default %default]",
type=str, default=None)
parser.add_option("-T", "--output-tests", dest="outputTests", metavar="FILE",
help="write function tests to FILE [default %default]",
type=str, default=None)
parser.add_option("-D", "--output-driver", dest="outputDriver", metavar="FILE",
help="write test driver to FILE [default %default]",
type=str, default=None)
parser.add_option("", "--test-layout", dest="testLayout", metavar="FILE",
help="test structure layout",
action='store_true', default=False)
group = OptionGroup(parser, "Type Enumeration Options")
# Builtins - Ints
group.add_option("", "--no-char", dest="useChar",
help="do not generate char types",
action="store_false", default=True)
group.add_option("", "--no-short", dest="useShort",
help="do not generate short types",
action="store_false", default=True)
group.add_option("", "--no-int", dest="useInt",
help="do not generate int types",
action="store_false", default=True)
group.add_option("", "--no-long", dest="useLong",
help="do not generate long types",
action="store_false", default=True)
group.add_option("", "--no-long-long", dest="useLongLong",
help="do not generate long long types",
action="store_false", default=True)
group.add_option("", "--no-unsigned", dest="useUnsigned",
help="do not generate unsigned integer types",
action="store_false", default=True)
# Other builtins
group.add_option("", "--no-bool", dest="useBool",
help="do not generate bool types",
action="store_false", default=True)
group.add_option("", "--no-float", dest="useFloat",
help="do not generate float types",
action="store_false", default=True)
group.add_option("", "--no-double", dest="useDouble",
help="do not generate double types",
action="store_false", default=True)
group.add_option("", "--no-long-double", dest="useLongDouble",
help="do not generate long double types",
action="store_false", default=True)
group.add_option("", "--no-void-pointer", dest="useVoidPointer",
help="do not generate void* types",
action="store_false", default=True)
# Enumerations
group.add_option("", "--no-enums", dest="useEnum",
help="do not generate enum types",
action="store_false", default=True)
# Derived types
group.add_option("", "--no-array", dest="useArray",
help="do not generate record types",
action="store_false", default=True)
group.add_option("", "--no-complex", dest="useComplex",
help="do not generate complex types",
action="store_false", default=True)
group.add_option("", "--no-record", dest="useRecord",
help="do not generate record types",
action="store_false", default=True)
group.add_option("", "--no-union", dest="recordUseUnion",
help="do not generate union types",
action="store_false", default=True)
group.add_option("", "--no-vector", dest="useVector",
help="do not generate vector types",
action="store_false", default=True)
group.add_option("", "--no-bit-field", dest="useBitField",
help="do not generate bit-field record members",
action="store_false", default=True)
group.add_option("", "--no-builtins", dest="useBuiltins",
help="do not use any types",
action="store_false", default=True)
# Tuning
group.add_option("", "--no-function-return", dest="functionUseReturn",
help="do not generate return types for functions",
action="store_false", default=True)
group.add_option("", "--vector-types", dest="vectorTypes",
help="comma separated list of vector types (e.g., v2i32) [default %default]",
action="store", type=str, default='v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32', metavar="N")
group.add_option("", "--bit-fields", dest="bitFields",
help="comma separated list 'type:width' bit-field specifiers [default %default]",
action="store", type=str, default=(
"char:0,char:4,int:0,unsigned:1,int:1,int:4,int:13,int:24"))
group.add_option("", "--max-args", dest="functionMaxArgs",
help="maximum number of arguments per function [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-array", dest="arrayMaxSize",
help="maximum array size [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-record", dest="recordMaxSize",
help="maximum number of fields per record [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-record-depth", dest="recordMaxDepth",
help="maximum nested structure depth [default %default]",
action="store", type=int, default=None, metavar="N")
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if not opts.useRandomSeed:
random.seed(opts.seed)
# Construct type generator
builtins = []
if opts.useBuiltins:
ints = []
if opts.useChar: ints.append(('char',1))
if opts.useShort: ints.append(('short',2))
if opts.useInt: ints.append(('int',4))
# FIXME: Wrong size.
if opts.useLong: ints.append(('long',4))
if opts.useLongLong: ints.append(('long long',8))
if opts.useUnsigned:
ints = ([('unsigned %s'%i,s) for i,s in ints] +
[('signed %s'%i,s) for i,s in ints])
builtins.extend(ints)
if opts.useBool: builtins.append(('_Bool',1))
if opts.useFloat: builtins.append(('float',4))
if opts.useDouble: builtins.append(('double',8))
if opts.useLongDouble: builtins.append(('long double',16))
# FIXME: Wrong size.
if opts.useVoidPointer: builtins.append(('void*',4))
btg = FixedTypeGenerator([BuiltinType(n,s) for n,s in builtins])
bitfields = []
for specifier in opts.bitFields.split(','):
if not specifier.strip():
continue
name,width = specifier.strip().split(':', 1)
bitfields.append(BuiltinType(name,None,int(width)))
bftg = FixedTypeGenerator(bitfields)
charType = BuiltinType('char',1)
shortType = BuiltinType('short',2)
intType = BuiltinType('int',4)
longlongType = BuiltinType('long long',8)
floatType = BuiltinType('float',4)
doubleType = BuiltinType('double',8)
sbtg = FixedTypeGenerator([charType, intType, floatType, doubleType])
atg = AnyTypeGenerator()
artg = AnyTypeGenerator()
def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
atg.addGenerator(btg)
if useBitField and opts.useBitField:
atg.addGenerator(bftg)
if useRecord and opts.useRecord:
assert subgen
atg.addGenerator(RecordTypeGenerator(subfieldgen, opts.recordUseUnion,
opts.recordMaxSize))
if opts.useComplex:
# FIXME: Allow overriding builtins here
atg.addGenerator(ComplexTypeGenerator(sbtg))
if useArray and opts.useArray:
assert subgen
atg.addGenerator(ArrayTypeGenerator(subgen, opts.arrayMaxSize))
if opts.useVector:
vTypes = []
for i,t in enumerate(opts.vectorTypes.split(',')):
m = re.match('v([1-9][0-9]*)([if][1-9][0-9]*)', t.strip())
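            # e.g. (illustrative) 'v4i32' parses as count='4', kind='i32':
            # a vector of four 32-bit ints.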
if not m:
parser.error('Invalid vector type: %r' % t)
count,kind = m.groups()
count = int(count)
type = { 'i8' : charType,
'i16' : shortType,
'i32' : intType,
'i64' : longlongType,
'f32' : floatType,
'f64' : doubleType,
}.get(kind)
if not type:
parser.error('Invalid vector type: %r' % t)
vTypes.append(ArrayType(i, True, type, count * type.size))
atg.addGenerator(FixedTypeGenerator(vTypes))
if opts.useEnum:
atg.addGenerator(EnumTypeGenerator([None, '-1', '1', '1u'], 1, 4))
if opts.recordMaxDepth is None:
# Fully recursive, just avoid top-level arrays.
subFTG = AnyTypeGenerator()
subTG = AnyTypeGenerator()
atg = AnyTypeGenerator()
makeGenerator(subFTG, atg, atg, True, True, True)
makeGenerator(subTG, atg, subFTG, True, True, False)
makeGenerator(atg, subTG, subFTG, True, False, False)
else:
# Make a chain of type generators, each builds smaller
# structures.
base = AnyTypeGenerator()
fbase = AnyTypeGenerator()
makeGenerator(base, None, None, False, False, False)
makeGenerator(fbase, None, None, False, False, True)
for i in range(opts.recordMaxDepth):
n = AnyTypeGenerator()
fn = AnyTypeGenerator()
makeGenerator(n, base, fbase, True, True, False)
makeGenerator(fn, base, fbase, True, True, True)
base = n
fbase = fn
atg = AnyTypeGenerator()
makeGenerator(atg, base, fbase, True, False, False)
if opts.testLayout:
ftg = atg
else:
ftg = FunctionTypeGenerator(atg, opts.functionUseReturn, opts.functionMaxArgs)
# Override max,min,count if finite
if opts.maxIndex is None:
if ftg.cardinality is aleph0:
opts.maxIndex = 10000000
else:
opts.maxIndex = ftg.cardinality
opts.maxIndex = min(opts.maxIndex, ftg.cardinality)
opts.minIndex = max(0,min(opts.maxIndex-1, opts.minIndex))
if not opts.mode=='random':
opts.count = min(opts.count, opts.maxIndex-opts.minIndex)
if opts.output=='-':
output = sys.stdout
else:
output = open(opts.output,'w')
atexit.register(lambda: output.close())
outputHeader = None
if opts.outputHeader:
outputHeader = open(opts.outputHeader,'w')
atexit.register(lambda: outputHeader.close())
outputTests = None
if opts.outputTests:
outputTests = open(opts.outputTests,'w')
atexit.register(lambda: outputTests.close())
outputDriver = None
if opts.outputDriver:
outputDriver = open(opts.outputDriver,'w')
atexit.register(lambda: outputDriver.close())
info = ''
info += '// %s\n'%(' '.join(sys.argv),)
info += '// Generated: %s\n'%(time.strftime('%Y-%m-%d %H:%M'),)
info += '// Cardinality of function generator: %s\n'%(ftg.cardinality,)
info += '// Cardinality of type generator: %s\n'%(atg.cardinality,)
if opts.testLayout:
info += '\n#include <stdio.h>'
P = TypePrinter(output,
outputHeader=outputHeader,
outputTests=outputTests,
outputDriver=outputDriver,
headerName=opts.outputHeader,
info=info)
def write(N):
try:
FT = ftg.get(N)
except RuntimeError as e:
if e.args[0]=='maximum recursion depth exceeded':
print('WARNING: Skipped %d, recursion limit exceeded (bad arguments?)'%(N,), file=sys.stderr)
return
raise
if opts.testLayout:
P.writeLayoutTest(N, FT)
else:
P.writeFunction(N, FT)
if args:
[write(int(a)) for a in args]
skipTests = set(opts.skipTests)
for i in range(opts.count):
if opts.mode=='linear':
index = opts.minIndex + i
else:
index = opts.minIndex + int((opts.maxIndex-opts.minIndex) * random.random())
if index in skipTests:
continue
write(index)
P.finish()
if __name__=='__main__':
main()
| bsd-2-clause |
dr0pz0ne/sibble | lib/ansible/executor/task_executor.py | 3 | 31318 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import subprocess
import sys
import time
import traceback
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.executor.task_result import TaskResult
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.template import Templar
from ansible.utils.encrypt import key_for_hostname
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode, to_bytes
from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
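    # (Assumption based on Ansible defaults of this era: DEFAULT_SQUASH_ACTIONS
    # typically lists package modules such as apt, yum, dnf, pacman and zypper.)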
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, rslt_q):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._rslt_q = rslt_q
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with self._run_loop()
or self._execute(). After that, the returned results are parsed and
returned as a dict.
'''
display.debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results:
if 'changed' in item and item['changed']:
changed = True
if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
res = dict(results=item_results)
if changed:
res['changed'] = True
if failed:
res['failed'] = True
res['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
res = self._execute()
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res):
if isinstance(res, dict):
for k in res.keys():
res[k] = _clean_res(res[k])
elif isinstance(res, list):
for idx,item in enumerate(res):
res[idx] = _clean_res(item)
elif isinstance(res, UnsafeProxy):
return res._obj
return res
display.debug("dumping result to json")
res = _clean_res(res)
display.debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
except Exception as e:
return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_unicode(traceback.format_exc()), stdout='')
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
display.debug(u"error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
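        # Illustrative example (not from the original source): a task written as
        #   with_items: [httpd, mod_ssl]
        # has self._task.loop == 'items' and loop_args == ['httpd', 'mod_ssl'];
        # the 'items' lookup plugin simply returns that list.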
# save the play context variables to a temporary dictionary,
# so that we can modify the job vars without doing a full copy
# and later restore them to avoid modifying things too early
play_context_vars = dict()
self._play_context.update_vars(play_context_vars)
old_vars = dict()
for k in play_context_vars.keys():
if k in self._job_vars:
old_vars[k] = self._job_vars[k]
self._job_vars[k] = play_context_vars[k]
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
if self._task.loop == 'first_found':
# first_found loops are special. If the item is undefined
# then we want to fall through to the next value rather
# than failing.
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=True)
loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
else:
try:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
loop_terms = []
display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e))
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars, wantlist=True)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
# now we restore any old job variables that may have been modified,
# and delete them if they were in the play context vars but not in
# the old variables dictionary
for k in play_context_vars.keys():
if k in old_vars:
self._job_vars[k] = old_vars[k]
else:
del self._job_vars[k]
if items:
from ansible.vars.unsafe_proxy import UnsafeProxy
for idx, item in enumerate(items):
if item is not None and not isinstance(item, UnsafeProxy):
items[idx] = UnsafeProxy(item)
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
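        # Each per-item result dict produced below carries the item itself under
        # 'item' plus an '_ansible_item_result' marker before being queued.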
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
#task_vars = self._job_vars.copy()
task_vars = self._job_vars
items = self._squash_items(items, task_vars)
for item in items:
task_vars['item'] = item
try:
tmp_task = self._task.copy()
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
results.append(dict(failed=True, msg=to_unicode(e)))
continue
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
res['_ansible_item_result'] = True
self._rslt_q.put(TaskResult(self._host, self._task, res), block=False)
results.append(res)
return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
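        # Illustrative example (not from the original source): a task such as
        #   yum: name={{ item }} state=present
        #   with_items: [httpd, mod_ssl]
        # can be collapsed into a single module call with name=[httpd, mod_ssl].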
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
task_action = self._task.action
if templar._contains_vars(task_action):
task_action = templar.template(task_action, fail_on_undefined=False)
if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
if all(isinstance(o, string_types) for o in items):
final_items = []
name = None
for allowed in ['name', 'pkg', 'package']:
name = self._task.args.pop(allowed, None)
if name is not None:
break
# This gets the information to check whether the name field
# contains a template that we can squash for
template_no_item = template_with_item = None
if name:
if templar._contains_vars(name):
variables['item'] = '\0$'
template_no_item = templar.template(name, variables, cache=False)
variables['item'] = '\0@'
template_with_item = templar.template(name, variables, cache=False)
del variables['item']
# Check if the user is doing some operation that doesn't take
# name/pkg or the name/pkg field doesn't have any variables
# and thus the items can't be squashed
if template_no_item != template_with_item:
for item in items:
variables['item'] = item
if self._task.evaluate_conditional(templar, variables):
new_item = templar.template(name, cache=False)
final_items.append(new_item)
self._task.args['name'] = final_items
# Wrap this in a list so that the calling function loop
# executes exactly once
return [final_items]
else:
# Restore the name parameter
self._task.args['name'] = name
#elif:
# Right now we only optimize single entries. In the future we
# could optimize more types:
# * lists can be squashed together
# * dicts could squash entries that match in all cases except the
# name or pkg field.
return items
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
context_validation_error = None
try:
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
self._play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not self._play_context.remote_addr:
self._play_context.remote_addr = self._host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
self._play_context.update_vars(variables)
except AnsibleError as e:
# save the error, which we'll raise later if we don't end up
# skipping this task during the conditional evaluation step
context_validation_error = e
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
try:
if not self._task.evaluate_conditional(templar, variables):
display.debug("when evaluation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
except AnsibleError:
            # skip conditional exceptions in the case of includes, as the vars needed might not be available except in the included tasks or due to tags
if self._task.action != 'include':
raise
# if we ran into an error while setting up the PlayContext, raise it now
if context_validation_error is not None:
raise context_validation_error
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
include_variables = self._task.args.copy()
include_file = include_variables.pop('_raw_params', None)
if not include_file:
return dict(failed=True, msg="No include file was specified to the include")
include_file = templar.template(include_file)
return dict(include=include_file, include_variables=include_variables)
# Now we do final validation on the task, which sets all fields to their final values.
self._task.post_validate(templar=templar)
if '_variable_params' in self._task.args:
variable_params = self._task.args.pop('_variable_params')
if isinstance(variable_params, dict):
display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
variable_params.update(self._task.args)
self._task.args = variable_params
# get the connection and the handler for this execution
if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
self._connection = self._get_connection(variables=variables, templar=templar)
self._connection.set_host_overrides(host=self._host)
else:
# if connection is reused, its _play_context is no longer valid and needs
# to be replaced with the one templated above, in case other data changed
self._connection._play_context = self._play_context
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)
# Read some values from the task, so that we can modify them if need be
if self._task.until is not None:
retries = self._task.retries
if retries <= 0:
retries = 1
else:
retries = 1
delay = self._task.delay
if delay < 0:
delay = 1
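        # Illustrative usage (not from the original source): a task with
        #   until: result.stat.exists
        #   retries: 5
        #   delay: 10
        # runs the handler up to 5 times, sleeping 10 seconds between attempts.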
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
vars_copy = variables.copy()
display.debug("starting attempt loop")
result = None
for attempt in range(retries):
display.debug("running the handler")
try:
result = self._handler.run(task_vars=variables)
except AnsibleConnectionFailure as e:
return dict(unreachable=True, msg=to_unicode(e))
display.debug("handler run complete")
# preserve no log
result["_ansible_no_log"] = self._play_context.no_log
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = wrap_var(result.copy())
if self._task.async > 0:
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
try:
if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
return result
result = json.loads(result.get('stdout'))
except (TypeError, ValueError) as e:
return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))
if self._task.poll > 0:
result = self._poll_async_result(result=result, templar=templar)
# ensure no log is preserved
result["_ansible_no_log"] = self._play_context.no_log
# helper methods for use below in evaluating changed/failed_when
def _evaluate_changed_when_result(result):
if self._task.changed_when is not None and self._task.changed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.changed_when
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
def _evaluate_failed_when_result(result):
if self._task.failed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.failed_when
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
else:
failed_when_result = False
return failed_when_result
if 'ansible_facts' in result:
vars_copy.update(result['ansible_facts'])
# set the failed property if the result has a non-zero rc. This will be
# overridden below if the failed_when property is set
if result.get('rc', 0) != 0:
result['failed'] = True
# if we didn't skip this task, use the helpers to evaluate the changed/
# failed_when properties
if 'skipped' not in result:
_evaluate_changed_when_result(result)
_evaluate_failed_when_result(result)
if attempt < retries - 1:
cond = Conditional(loader=self._loader)
cond.when = self._task.until
if cond.evaluate_conditional(templar, vars_copy):
break
else:
# no conditional check, or it failed, so sleep for the specified time
result['attempts'] = attempt + 1
result['retries'] = retries
result['_ansible_retry'] = True
display.debug('Retrying task, attempt %d of %d' % (attempt + 1, retries))
self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
time.sleep(delay)
else:
if retries > 1:
# we ran out of attempts, so mark the result as failed
result['attempts'] = retries
result['failed'] = True
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
variables[self._task.register] = wrap_var(result)
if 'ansible_facts' in result:
variables.update(result['ansible_facts'])
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
# may be item-specific, ie. "notify: service {{item}}"
if self._task.notify is not None:
result['_ansible_notify'] = self._task.notify
# add the delegated vars to the result, so we can reference them
# on the results side without having to do any further templating
# FIXME: we only want a limited set of variables here, so this is currently
# hardcoded but should be possibly fixed if we want more or if
# there is another source of truth we can use
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
if len(delegated_vars) > 0:
result["_ansible_delegated_vars"] = dict()
for k in ('ansible_host', ):
result["_ansible_delegated_vars"][k] = delegated_vars.get(k)
# and return
display.debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result, templar):
'''
Polls for the specified JID to be complete
'''
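        # The module is re-polled every self._task.poll seconds until roughly
        # self._task.async seconds have elapsed or the job reports
        # finished/failed/skipped.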
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
        # Create a new pseudo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
normal_handler = self._shared_loader_obj.action_loader.get(
'normal',
task=async_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
time_left = self._task.async
while time_left > 0:
time.sleep(self._task.poll)
async_result = normal_handler.run()
if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
break
time_left -= self._task.poll
if int(async_result.get('finished', 0)) != 1:
return dict(failed=True, msg="async task did not complete within the requested time")
else:
return async_result
def _get_connection(self, variables, templar):
'''
Reads the connection property for the host, and returns the
correct connection object from the list of connection plugins
'''
if self._task.delegate_to is not None:
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
for i in variables.keys():
if i.startswith('ansible_') and i.endswith('_interpreter'):
del variables[i]
# now replace the interpreter values with those that may have come
# from the delegated-to host
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
if isinstance(delegated_vars, dict):
for i in delegated_vars:
if i.startswith("ansible_") and i.endswith("_interpreter"):
variables[i] = delegated_vars[i]
conn_type = self._play_context.connection
if conn_type == 'smart':
conn_type = 'ssh'
if sys.platform.startswith('darwin') and self._play_context.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
try:
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
err = to_unicode(err)
if u"Bad configuration option" in err or u"Usage:" in err:
conn_type = "paramiko"
except OSError:
conn_type = "paramiko"
connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
if self._play_context.accelerate:
# launch the accelerated daemon here
ssh_connection = connection
handler = self._shared_loader_obj.action_loader.get(
'normal',
task=self._task,
connection=ssh_connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
key = key_for_hostname(self._play_context.remote_addr)
accelerate_args = dict(
password=base64.b64encode(key.__str__()),
port=self._play_context.accelerate_port,
minutes=C.ACCELERATE_DAEMON_TIMEOUT,
ipv6=self._play_context.accelerate_ipv6,
debug=self._play_context.verbosity,
)
connection = self._shared_loader_obj.connection_loader.get('accelerate', self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
try:
connection._connect()
except AnsibleConnectionFailure:
display.debug('connection failed, fallback to accelerate')
res = handler._execute_module(module_name='accelerate', module_args=accelerate_args, task_vars=variables, delete_remote_tmp=False)
display.debug(res)
connection._connect()
return connection
def _get_action_handler(self, connection, templar):
'''
        Returns the correct action plugin to handle the requested task action
'''
if self._task.action in self._shared_loader_obj.action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
handler_name = self._task.action
elif self._task.async == 0:
handler_name = 'normal'
else:
handler_name = 'async'
handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler
| gpl-3.0 |
mruwnik/magnolia | src/magnolia/ui/canvas.py | 1 | 6571 | import math
from path import Path
from qtpy.QtWidgets import QOpenGLWidget
from qtpy.QtGui import QOpenGLShader, QOpenGLShaderProgram, QMatrix4x4, QOpenGLVersionProfile, QVector3D
from magnolia.ui.drawables import MeristemDisplay
class OGLCanvas(MeristemDisplay, QOpenGLWidget):
"""A class to handle displaying OpenGL things on the screen."""
PERSPECTIVE = (60, 0.1, 1280.0)
"""The perspective matrix settings (angle, nearZ, farZ)"""
def _load_program(self, vertex_shader, fragment_shader):
"""Load the given shader programs."""
self.program = QOpenGLShaderProgram(self)
if not self.program.addShaderFromSourceFile(QOpenGLShader.Vertex, vertex_shader):
raise ImportError('Could not compile %s' % vertex_shader)
if not self.program.addShaderFromSourceFile(QOpenGLShader.Fragment, fragment_shader):
raise ImportError('Could not compile %s' % fragment_shader)
if not self.program.link():
raise ImportError('Could not link the shader program')
def initializeGL(self):
version = QOpenGLVersionProfile()
ui_dir = Path(__file__).parent
self._load_program(ui_dir/'shaders/vshader.glsl', ui_dir/'shaders/fshader.glsl')
# FIXME: this should check which versions are available and then apply the appropriate one
version.setVersion(2, 0)
# version.setVersion(1, 0)
self.gl = self.context().versionFunctions(version)
self.gl.initializeOpenGLFunctions()
self.m_posAttr = self.program.attributeLocation('position')
self.m_colAttr = self.program.attributeLocation('colour')
self.m_normAttr = self.program.attributeLocation('normal')
self.mv_matrixUniform = self.program.uniformLocation('mv_matrix')
self.p_matrixUniform = self.program.uniformLocation('p_matrix')
self.norm_matrixUniform = self.program.uniformLocation('norm_matrix')
self.gl.glEnable(self.gl.GL_DEPTH_TEST)
self.gl.glEnable(self.gl.GL_CULL_FACE)
@property
def viewport_proportions(self):
"""The proportions of the view port."""
return self.width() / float(self.height())
@property
def p_matrix(self):
"""Get the perspective matrix."""
matrix = QMatrix4x4()
angle, near, far = self.PERSPECTIVE
matrix.perspective(angle, self.viewport_proportions, near, far)
return matrix
@property
def view_distance(self):
"""Get the distance from which things should be viewed."""
return -math.sqrt(self.width() * self.height())/50.0
@property
def camera_pos(self):
"""Return the camera's position."""
return QVector3D(0, self.objects.height / 2, self.view_distance - self.zoom)
@property
def camera_look_at(self):
"""A point at which the camera is pointed."""
return QVector3D(0, self.objects.height / 2, 0)
@property
def camera_normal(self):
"""The camera's up vector."""
return QVector3D(0, 1, 0)
@property
def v_matrix(self):
"""The view matrix in use."""
matrix = QMatrix4x4()
matrix.lookAt(self.camera_pos, self.camera_look_at, self.camera_normal)
return matrix
@property
def mv_matrix(self):
"""Return the current model-view matrix."""
matrix = self.v_matrix
matrix.rotate(self.viewing_angle[0], 0, 1, 0)
matrix.rotate(self.viewing_angle[1], 0, 0, 1)
return matrix
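# For orientation (a note added for clarity, not in the original file):
# the shaders ultimately apply p_matrix * mv_matrix * vertex, where
# mv_matrix is the lookAt() view matrix composed with the user-driven
# rotations about the Y and Z axes stored in viewing_angle.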
def loadAttrArray(self, attr, array):
"""Load the given array to the provided attribute."""
self.gl.glVertexAttribPointer(attr, 3, self.gl.GL_FLOAT, False, 0, array)
self.gl.glEnableVertexAttribArray(attr)
def paintGL(self):
"""Paint all objects."""
self.gl.glViewport(0, 0, self.width(), self.height())
self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | self.gl.GL_DEPTH_BUFFER_BIT)
self.program.bind()
self.program.setUniformValue(self.mv_matrixUniform, self.mv_matrix)
self.program.setUniformValue(self.norm_matrixUniform, self.mv_matrix.inverted()[0])
self.program.setUniformValue(self.p_matrixUniform, self.p_matrix)
if self.displayables:
self.loadAttrArray(self.m_posAttr, self.objects.vertices)
self.loadAttrArray(self.m_colAttr, self.objects.colours)
self.loadAttrArray(self.m_normAttr, self.objects.normals)
self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, self.objects.points_count)
if self._lines:
self.gl.glLineWidth(3)
for line in self._lines:
self.loadAttrArray(self.m_posAttr, line.vertices)
self.loadAttrArray(self.m_colAttr, line.colours)
self.gl.glDrawArrays(self.gl.GL_LINE_STRIP, 0, line.points_count)
self.program.release()
def rayPick(self, event):
"""Return a picking ray going from the camera through the mouse pointer."""
self.mouse_pos = event.pos()
x = (2.0 * event.x()) / self.width() - 1.0
y = 1.0 - (2.0 * event.y()) / self.height()
angle, nearZ, _ = self.PERSPECTIVE
rad = angle * math.pi / 180
vLength = math.tan(rad / 2) * nearZ
hLength = vLength * self.viewport_proportions
# get the camera position in world space
camera_pos = (self.v_matrix * self.camera_pos.toVector4D()).toVector3D()
view = (self.camera_look_at - camera_pos).normalized()
h = view.crossProduct(view, self.camera_normal).normalized()
v = view.crossProduct(h, view).normalized() * vLength
# get the point that was clicked on the XY-plane for Z equal to the closer clip plane
# The point is, of course, in model space
pos = camera_pos + view * nearZ + h * x * hLength + v * y
pos = (self.mv_matrix.inverted()[0] * pos.toVector4D()).toVector3D()
# work out where the camera is in model space
eye_pos = (self.mv_matrix.inverted()[0] * camera_pos.toVector4D()).toVector3D()
# Return the origin and direction of the picking ray
return eye_pos, (pos - eye_pos).normalized()
# Event handlers
def select(self, event):
"""Select the item that is under the cursor (if enabled)."""
if not self.can_select:
return
origin, direction = self.rayPick(event)
if self.objects.ray_pick_test(origin, direction) > 0:
self.objects.select()
# signal all and any slots that something new was selected
self._signal_selected()
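# Minimal sketch (an illustration added here, not part of the original
# module) of how a picking ray from rayPick() could be intersected with a
# bounding sphere; ray_pick_test() is assumed to do something similar.
#
#     def ray_hits_sphere(origin, direction, centre, radius):
#         """Distance along the ray to the sphere, or -1 on a miss."""
#         to_centre = centre - origin
#         proj = QVector3D.dotProduct(to_centre, direction)
#         closest_sq = to_centre.lengthSquared() - proj * proj
#         if closest_sq > radius * radius:
#             return -1
#         return proj - math.sqrt(radius * radius - closest_sq)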
| gpl-3.0 |
zuazo-forks/beaver | beaver/worker.py | 2 | 8162 | import errno
import os
import stat
import time
from beaver.utils import REOPEN_FILES, eglob
class Worker(object):
"""Looks for changes in all files of a directory.
This is useful for watching log file changes in real-time.
It also supports file rotation.
Example:
>>> def callback(filename, lines):
... print filename, lines
...
>>> l = Worker(beaver_config, file_config, create_consumer, callback, tail_lines=0)
>>> l.loop()
"""
def __init__(self, beaver_config, file_config, queue_consumer_function, callback, logger=None, tail_lines=0):
"""Arguments:
(BeaverConfig) @beaver_config:
object containing global configuration
(FileConfig) @file_config:
object containing file-related configuration
(callable) @queue_consumer_function:
factory that creates the queue consumer process
(Logger) @logger
object containing a python logger
(callable) @callback:
a function which is called every time a new line in a
file being watched is found;
this is called with "filename" and "lines" arguments.
(int) @tail_lines:
read last N lines from files being watched before starting
"""
self._beaver_config = beaver_config
self._callback = callback
self._create_queue_consumer = queue_consumer_function
self._file_config = file_config
self._file_map = {}
self._folder = self._beaver_config.get('path')
self._logger = logger
self._proc = None
self._update_time = None
if not callable(self._callback):
raise RuntimeError("Callback for worker is not callable")
self.update_files()
# The first time we run the script we move all file markers at EOF.
# In case of files created afterwards we don't do this.
for id, file in self._file_map.iteritems():
file.seek(os.path.getsize(file.name)) # EOF
if tail_lines:
lines = self.tail(file.name, tail_lines)
if lines:
self._callback(("callback", (file.name, lines)))
def __del__(self):
"""Closes all files"""
self.close()
def close(self):
"""Closes all currently open file pointers"""
for id, file in self._file_map.iteritems():
file.close()
self._file_map.clear()
def listdir(self):
"""List directory and filter files by extension.
You may want to override this to add extra logic or
globbing support.
"""
ls = os.listdir(self._folder)
return [x for x in ls if os.path.splitext(x)[1][1:] == "log"]
def loop(self, interval=0.1, async=False):
"""Start the loop.
If async is True make one loop then return.
"""
while 1:
t = time.time()
if not (self._proc and self._proc.is_alive()):
self._proc = self._create_queue_consumer()
if int(time.time()) - self._update_time > self._beaver_config.get('update_file_mapping_time'):
self.update_files()
for fid, file in list(self._file_map.iteritems()):
try:
self.readfile(fid, file)
except IOError, e:
if e.errno == errno.ESTALE:
self.unwatch(file, fid)
if async:
return
self._logger.debug("Iteration took {0}".format(time.time() - t))
time.sleep(interval)
def readfile(self, fid, file):
"""Read lines from a file and performs a callback against them"""
lines = file.readlines(4096)
while lines:
self._callback(("callback", (file.name, lines)))
lines = file.readlines(4096)
def update_files(self):
"""Ensures all files are properly loaded.
Detects new files, file removals, file rotation, and truncation.
On non-linux platforms, it will also manually reload the file for tailing.
Note that this hack is necessary because EOF is cached on BSD systems.
"""
self._update_time = int(time.time())
ls = []
files = []
if len(self._beaver_config.get('globs')) > 0:
for name in self._beaver_config.get('globs'):
globbed = [os.path.realpath(filename) for filename in eglob(name)]
files.extend(globbed)
self._file_config.addglob(name, globbed)
else:
for name in self.listdir():
files.append(os.path.realpath(os.path.join(self._folder, name)))
for absname in files:
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
# check existent files
for fid, file in list(self._file_map.iteritems()):
try:
st = os.stat(file.name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self.unwatch(file, fid)
else:
raise
else:
if fid != self.get_file_id(st):
self._logger.info("[{0}] - file rotated {1}".format(fid, file.name))
self.unwatch(file, fid)
self.watch(file.name)
elif file.tell() > st.st_size:
self._logger.info("[{0}] - file truncated {1}".format(fid, file.name))
self.unwatch(file, fid)
self.watch(file.name)
elif REOPEN_FILES:
self._logger.debug("[{0}] - file reloaded (non-linux) {1}".format(fid, file.name))
position = file.tell()
fname = file.name
file.close()
file = open(fname, "r")
file.seek(position)
self._file_map[fid] = file
# add new ones
for fid, fname in ls:
if fid not in self._file_map:
self.watch(fname)
def unwatch(self, file, fid):
"""file no longer exists; if it has been renamed
try to read it for the last time in case the
log rotator has written something in it.
"""
try:
self.readfile(fid, file)
except IOError:
# Silently ignore any IOErrors -- file is gone
pass
self._logger.info("[{0}] - un-watching logfile {1}".format(fid, file.name))
del self._file_map[fid]
def watch(self, fname):
"""Opens a file for log tailing"""
try:
file = open(fname, "r")
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
self._logger.info("[{0}] - watching logfile {1}".format(fid, fname))
self._file_map[fid] = file
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
@staticmethod
def tail(fname, window):
"""Read last N lines from file fname."""
try:
f = open(fname, 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return []
else:
raise
else:
BUFSIZ = 1024
f.seek(0, os.SEEK_END)
fsize = f.tell()
block = -1
data = ""
exit = False
while not exit:
step = (block * BUFSIZ)
if abs(step) >= fsize:
f.seek(0)
exit = True
else:
f.seek(step, os.SEEK_END)
data = f.read().strip()
if data.count('\n') >= window:
break
else:
block -= 1
return data.splitlines()[-window:]
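# Example behaviour of tail() (file names and contents are hypothetical):
#
#     >>> Worker.tail('/var/log/app.log', 2)
#     ['second to last line', 'last line']
#     >>> Worker.tail('/var/log/missing.log', 2)
#     []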
| mit |
sargas/scipy | scipy/linalg/tests/test_lapack.py | 4 | 3736 | #!/usr/bin/env python
#
# Created by: Pearu Peterson, September 2002
#
from __future__ import division, print_function, absolute_import
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_almost_equal, assert_, assert_raises
import numpy as np
from scipy.linalg import _flapack as flapack
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(TestCase):
def test_gebal(self):
a = [[1,2,3],[4,5,6],[7,8,9]]
a1 = [[1,0,0,3e-4],
[4,0,0,2e-3],
[7,1,0,0],
[0,1,0,0]]
for p in 'sdzc':
f = getattr(flapack,p+'gebal',None)
if f is None: continue
ba,lo,hi,pivscale,info = f(a)
assert_(not info,repr(info))
assert_array_almost_equal(ba,a)
assert_equal((lo,hi),(0,len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1)
assert_(not info,repr(info))
#print a1
#print ba,lo,hi,pivscale
def test_gehrd(self):
a = [[-149, -50,-154],
[ 537, 180, 546],
[ -27, -9, -25]]
for p in 'd':
f = getattr(flapack,p+'gehrd',None)
if f is None: continue
ht,tau,info = f(a)
assert_(not info,repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double precision implementations, real and
# complex, including most of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), scale * c1, decimal=4)
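# For reference (comment added for clarity): ?trsyl solves the Sylvester
# equation op(A)*X + isgn*X*op(B) = scale*C, where op(M) is M, M**T or
# M**H depending on trana/tranb -- exactly the identities asserted above.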
class TestLapack(TestCase):
def test_flapack(self):
if hasattr(flapack,'empty_module'):
#flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack,'empty_module'):
#clapack module is empty
pass
class TestRegression(TestCase):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
mwmuni/LIGGGHTS_GUI | OpenGL/raw/GL/_types.py | 9 | 5267 | """OpenGL-wide constant types (not OpenGL.GL-specific)
These are basically the fundamental data-types that OpenGL
uses (note, doesn't include the OpenGL-ES types!)
"""
import ctypes
from OpenGL.constant import Constant
from OpenGL._bytes import bytes,unicode,as_8_bit, long
from OpenGL._opaque import opaque_pointer_cls as _opaque_pointer_cls
sizeof = ctypes.sizeof
GL_FALSE = Constant( 'GL_FALSE', 0x0 )
GL_TRUE = Constant( 'GL_TRUE', 0x1 )
GL_BYTE = Constant( 'GL_BYTE', 0x1400 )
GL_UNSIGNED_BYTE = Constant( 'GL_UNSIGNED_BYTE', 0x1401 )
GL_SHORT = Constant( 'GL_SHORT', 0x1402 )
GL_UNSIGNED_SHORT = Constant( 'GL_UNSIGNED_SHORT', 0x1403 )
GL_INT = Constant( 'GL_INT', 0x1404 )
GL_UNSIGNED_INT = Constant( 'GL_UNSIGNED_INT', 0x1405 )
GL_UNSIGNED_INT64 = Constant( 'GL_UNSIGNED_INT64_AMD', 0x8BC2 )
GL_FLOAT = Constant( 'GL_FLOAT', 0x1406 )
GL_DOUBLE = Constant( 'GL_DOUBLE', 0x140a )
GL_CHAR = bytes
GL_HALF_NV = Constant( 'GL_HALF_NV', 0x1401 )
GL_VOID_P = object()
def _get_ctypes_version():
return [int(i) for i in ctypes.__version__.split('.')[:3]]
ctypes_version = _get_ctypes_version()
# Basic OpenGL data-types as ctypes declarations...
def _defineType( name, baseType, convertFunc = long ):
from OpenGL import _configflags
do_wrapping = (
_configflags.ALLOW_NUMPY_SCALARS or # explicitly require
(( # or we are using Python 2.5.x ctypes which doesn't support uint type numpy scalars
ctypes_version < [1,1,0]
and baseType in (ctypes.c_uint,ctypes.c_uint64,ctypes.c_ulong,ctypes.c_ushort)
) or
( # or we are using Python 2.5.x (x < 2) ctypes which doesn't support any numpy int scalars
ctypes_version < [1,0,2]
and baseType in (ctypes.c_int,ctypes.c_int64,ctypes.c_long,ctypes.c_short)
))
)
if do_wrapping:
original = baseType.from_param
if not getattr( original, 'from_param_numpy_scalar', False ):
def from_param( x, typeCode=None ):
try:
return original( x )
except TypeError as err:
try:
return original( convertFunc(x) )
except TypeError as err2:
raise err
from_param = staticmethod( from_param )
setattr( baseType, 'from_param', from_param )
baseType.from_param_numpy_scalar = True
return baseType
else:
return baseType
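# Illustrative use of the wrapper above (values are hypothetical): on an
# affected ctypes version, from_param first tries the raw value and only
# falls back to convertFunc on TypeError, so numpy scalars pass through.
#
#     >>> GLuint = _defineType('GLuint', ctypes.c_uint, long)
#     >>> GLuint.from_param(numpy.uint32(5))   # converted via long(5)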
GLvoid = None
GLboolean = _defineType( 'GLboolean', ctypes.c_ubyte, bool )
GLenum = _defineType( 'GLenum', ctypes.c_uint )
GLfloat = _defineType( 'GLfloat', ctypes.c_float, float )
GLfloat_2 = GLfloat * 2
GLfloat_3 = GLfloat * 3
GLfloat_4 = GLfloat * 4
GLdouble = _defineType( 'GLdouble', ctypes.c_double, float )
GLdouble_2 = GLdouble * 2
GLdouble_3 = GLdouble * 3
GLdouble_4 = GLdouble * 4
GLbyte = ctypes.c_byte
GLshort = _defineType( 'GLshort', ctypes.c_short, int )
GLint = _defineType( 'GLint', ctypes.c_int, int )
GLuint = _defineType( 'GLuint', ctypes.c_uint, long )
GLsizei = _defineType( 'GLsizei', ctypes.c_int, int )
GLubyte = ctypes.c_ubyte
GLubyte_3 = GLubyte * 3
GLushort = _defineType( 'GLushort', ctypes.c_ushort, int )
GLulong = _defineType( 'GLulong', ctypes.c_ulong, int )
GLhandleARB = _defineType( 'GLhandleARB', ctypes.c_uint, long )
GLhandle = _defineType( 'GLhandle', ctypes.c_uint, long )
GLchar = GLcharARB = ctypes.c_char
GLbitfield = _defineType( 'GLbitfield', ctypes.c_uint, long )
GLclampd = _defineType( 'GLclampd', ctypes.c_double, float )
GLclampf = _defineType( 'GLclampf', ctypes.c_float, float )
GLuint64 = GLuint64EXT = _defineType('GLuint64', ctypes.c_uint64, long )
GLint64 = GLint64EXT = _defineType('GLint64', ctypes.c_int64, long )
# ptrdiff_t, actually...
GLsizeiptrARB = GLsizeiptr = GLsizei
GLvdpauSurfaceNV = GLintptrARB = GLintptr = GLsizei
size_t = ctypes.c_size_t
void = None
GLhalfNV = GLhalfARB = ctypes.c_ushort
# GL.ARB.sync extension, GLsync is an opaque pointer to a struct
# in the extensions header, basically just a "token" that can be
# passed to the various operations...
GLsync = _opaque_pointer_cls( 'GLsync' )
GLvoidp = ctypes.c_void_p
ARRAY_TYPE_TO_CONSTANT = [
('GLclampd', GL_DOUBLE),
('GLclampf', GL_FLOAT),
('GLfloat', GL_FLOAT),
('GLdouble', GL_DOUBLE),
('GLbyte', GL_BYTE),
('GLshort', GL_SHORT),
('GLint', GL_INT),
('GLubyte', GL_UNSIGNED_BYTE),
('GLushort', GL_UNSIGNED_SHORT),
('GLuint', GL_UNSIGNED_INT),
('GLenum', GL_UNSIGNED_INT),
]
from OpenGL.platform import PLATFORM as _p
GLDEBUGPROCARB = GLDEBUGPROCKHR = GLDEBUGPROC = _p.DEFAULT_FUNCTION_TYPE(
void,
GLenum, # source,
GLenum, #type,
GLuint, # id
GLenum, # severity
GLsizei, # length
ctypes.c_char_p, # message
GLvoidp, # userParam
)
class _cl_context( ctypes.Structure ):
"""Placeholder/empty structure for _cl_context"""
class _cl_event( ctypes.Structure ):
"""Placeholder/empty structure for _cl_event"""
GLDEBUGPROCAMD = _p.DEFAULT_FUNCTION_TYPE(
void,
GLuint,# id,
GLenum,# category,
GLenum,# severity,
GLsizei,# length,
ctypes.c_char_p,# message,
GLvoidp,# userParam
)
GLeglImageOES = GLvoidp
c_int = ctypes.c_int
| gpl-3.0 |
Kongsea/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/k_feature_routing_function_op_test.py | 103 | 3383 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class KFeatureRoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
self.params = tensor_forest.ForestHParams(
num_features=2,
hybrid_tree_depth=2,
base_random_seed=10,
feature_bagging_fraction=1.0,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (
self.params.feature_bagging_fraction * self.params.num_features)
self.params.regression = False
def testParams(self):
self.assertEquals(self.params.num_nodes, 3)
self.assertEquals(self.params.num_features, 2)
self.assertEquals(self.params.num_features_per_node, 2)
def testRoutingFunction(self):
with self.test_session():
route_tensor = gen_training_ops.k_feature_routing_function(
self.input_data,
self.tree_weights,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
route_tensor_shape = route_tensor.get_shape()
self.assertEquals(len(route_tensor_shape), 2)
self.assertEquals(route_tensor_shape[0], 4)
self.assertEquals(route_tensor_shape[1], 3)
routes = route_tensor.eval()
print(routes)
# Point 1
# Node 1 is a decision node => probability = 1.0
self.assertAlmostEquals(1.0, routes[0, 0])
# Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
self.assertAlmostEquals(0.26894142, routes[0, 1])
# Probability right = 1 - 0.2689414 = 0.73105858
self.assertAlmostEquals(0.73105858, routes[0, 2])
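# Worked check of the expected values above (added for clarity; assumes
# the op applies a logistic routing function to w.x - threshold):
#   activation = 1.0*(-1.) + 0.0*0. - 0. = -1
#   P(left)  = 1 / (1 + exp(-activation)) = 1 / (1 + e) ~= 0.26894142
#   P(right) = 1 - P(left)                              ~= 0.73105858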
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
revmischa/boto | tests/integration/dynamodb2/test_cert_verification.py | 125 | 1564 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.dynamodb2
class DynamoDB2CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
dynamodb2 = True
regions = boto.dynamodb2.regions()
def sample_service_call(self, conn):
conn.list_tables()
| mit |
odejesush/tensorflow | tensorflow/contrib/slim/python/slim/nets/overfeat.py | 164 | 5562 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
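# Minimal usage sketch (illustrative only; shapes and values are
# placeholders, and this assumes a TF 1.x-style graph/session workflow):
#
#     import tensorflow as tf
#     inputs = tf.random_normal([1, 231, 231, 3])
#     with arg_scope(overfeat_arg_scope()):
#         logits, end_points = overfeat(inputs, num_classes=1000)
#     # spatial_squeeze=True (the default) gives logits of shape [1, 1000].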
| apache-2.0 |
nburn42/tensorflow | tensorflow/contrib/kfac/python/ops/linear_operator.py | 14 | 3662 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SmartMatrices definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.ops.linalg import linear_operator_util as lou
class LinearOperatorExtras(object): # pylint: disable=missing-docstring
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
with self._name_scope(name, values=[x]):
if isinstance(x, ops.IndexedSlices):
return self._matmul_sparse(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def matmul_right(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
with self._name_scope(name, values=[x]):
if isinstance(x, ops.IndexedSlices):
return self._matmul_right_sparse(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -1 if adjoint else -2
arg_dim = -2 if adjoint_arg else -1
self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim])
return self._matmul_right(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
class LinearOperatorFullMatrix(LinearOperatorExtras,
linalg.LinearOperatorFullMatrix):
# TODO(b/78117889) Remove this definition once core LinearOperator
# has _matmul_right.
def _matmul_right(self, x, adjoint=False, adjoint_arg=False):
return lou.matmul_with_broadcast(
x, self._matrix, adjoint_a=adjoint_arg, adjoint_b=adjoint)
def _matmul_sparse(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError
def _matmul_right_sparse(self, x, adjoint=False, adjoint_arg=False):
assert not adjoint and not adjoint_arg
return utils.matmul_sparse_dense(x, self._matrix)
class LinearOperatorDiag(LinearOperatorExtras, # pylint: disable=missing-docstring
linalg.LinearOperatorDiag):
def _matmul_right(self, x, adjoint=False, adjoint_arg=False):
diag_mat = math_ops.conj(self._diag) if adjoint else self._diag
x = linalg_impl.adjoint(x) if adjoint_arg else x
return diag_mat * x
def _matmul_sparse(self, x, adjoint=False, adjoint_arg=False):
diag_mat = math_ops.conj(self._diag) if adjoint else self._diag
assert not adjoint_arg
return utils.matmul_diag_sparse(diag_mat, x)
def _matmul_right_sparse(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError
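# For intuition (comment added for clarity): right-multiplication by a
# diagonal operator scales the columns of x, i.e. x @ diag(d) == x * d
# with d broadcast across rows, which is what _matmul_right computes
# without ever materialising the dense diagonal matrix.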
| apache-2.0 |
elsonrodriguez/madhatter | cobbler/cli.py | 1 | 22124 | """
Command line interface for cobbler.
Copyright 2006-2009, Red Hat, Inc
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import sys
import xmlrpclib
import traceback
import optparse
import exceptions
import time
import os
import utils
import module_loader
import item_distro
import item_profile
import item_system
import item_repo
import item_mgmtclass
import item_image
OBJECT_ACTIONS = {
"distro" : "add copy edit find list remove rename report".split(" "),
"profile" : "add copy dumpvars edit find getks list remove rename report".split(" "),
"system" : "add copy dumpvars edit find getks list remove rename report poweron poweroff reboot".split(" "),
"image" : "add copy edit find list remove rename report".split(" "),
"repo" : "add copy edit find list remove rename report".split(" "),
"mgmtclass" : "add copy edit find list remove rename report".split(" ")
}
OBJECT_TYPES = OBJECT_ACTIONS.keys()
DIRECT_ACTIONS = "aclsetup buildiso deploy import list report reposync sync validateks version".split()
####################################################
def report_items(remote, otype):
items = remote.get_items(otype)
for x in items:
report_item(remote,otype,item=x)
def report_item(remote,otype,item=None,name=None):
if item is None:
item = remote.get_item(otype, name)
if item == "~":
print "No %s found: %s" % (otype, name)
sys.exit(1)
if otype == "distro":
data = utils.printable_from_fields(item, item_distro.FIELDS)
elif otype == "profile":
data = utils.printable_from_fields(item, item_profile.FIELDS)
elif otype == "system":
data = utils.printable_from_fields(item, item_system.FIELDS)
elif otype == "repo":
data = utils.printable_from_fields(item, item_repo.FIELDS)
elif otype == "image":
data = utils.printable_from_fields(item, item_image.FIELDS)
elif otype == "mgmtclass":
data = utils.printable_from_fields(item, item_mgmtclass.FIELDS)
print data
def list_items(remote,otype):
items = remote.get_item_names(otype)
items.sort()
for x in items:
print " %s" % x
def n2s(data):
"""
Return spaces for None
"""
if data is None:
return ""
return data
def opt(options, k):
"""
Returns an option from an Optparse values instance
"""
try:
data = getattr(options, k)
except:
# FIXME: debug only
traceback.print_exc()
return ""
return n2s(data)
class BootCLI:
def __init__(self):
# Load server ip and ports from local config
self.url_cobbler_api = utils.local_get_cobbler_api_url()
self.url_cobbler_xmlrpc = utils.local_get_cobbler_xmlrpc_url()
# FIXME: allow specifying other endpoints, and user+pass
self.parser = optparse.OptionParser()
self.remote = xmlrpclib.Server(self.url_cobbler_api)
self.shared_secret = utils.get_shared_secret()
def start_task(self, name, options):
options = utils.strip_none(vars(options), omit_none=True)
fn = getattr(self.remote, "background_%s" % name)
return fn(options, self.token)
def get_object_type(self, args):
"""
If this is a CLI command about an object type, e.g. "cobbler distro add", return the type, like "distro"
"""
if len(args) < 2:
return None
elif args[1] in OBJECT_TYPES:
return args[1]
return None
def get_object_action(self, object_type, args):
"""
If this is a CLI command about an object type, e.g. "cobbler distro add", return the action, like "add"
"""
if object_type is None or len(args) < 3:
return None
if args[2] in OBJECT_ACTIONS[object_type]:
return args[2]
return None
def get_direct_action(self, object_type, args):
"""
If this is a general command, e.g. "cobbler hardlink", return the action, like "hardlink"
"""
if object_type is not None:
return None
elif len(args) < 2:
return None
elif args[1] == "--help":
return None
elif args[1] == "--version":
return "version"
else:
return args[1]
def check_setup(self):
"""
Detect permissions and service accessibility problems and provide
nicer error messages for them.
"""
s = xmlrpclib.Server(self.url_cobbler_xmlrpc)
try:
s.ping()
except:
print >> sys.stderr, "cobblerd does not appear to be running/accessible"
sys.exit(411)
s = xmlrpclib.Server(self.url_cobbler_api)
try:
s.ping()
except:
print >> sys.stderr, "httpd does not appear to be running and proxying cobbler"
sys.exit(411)
if not os.path.exists("/var/lib/cobbler/web.ss"):
print >> sys.stderr, "Missing login credentials file. Has cobblerd failed to start?"
sys.exit(411)
if not os.access("/var/lib/cobbler/web.ss", os.R_OK):
print >> sys.stderr, "User cannot run command line, need read access to /var/lib/cobbler/web.ss"
sys.exit(411)
def run(self, args):
"""
Process the command line and do what the user asks.
"""
self.token = self.remote.login("", self.shared_secret)
object_type = self.get_object_type(args)
object_action = self.get_object_action(object_type, args)
direct_action = self.get_direct_action(object_type, args)
try:
if object_type is not None:
if object_action is not None:
self.object_command(object_type, object_action)
else:
self.print_object_help(object_type)
elif direct_action is not None:
self.direct_command(direct_action)
else:
self.print_help()
except xmlrpclib.Fault, err:
if err.faultString.find("cobbler.cexceptions.CX") != -1:
print self.cleanup_fault_string(err.faultString)
else:
print "### ERROR ###"
print "Unexpected remote error, check the server side logs for further info"
print err.faultString
sys.exit(1)
def cleanup_fault_string(self,str):
"""
Make a remote exception nicely readable by humans so it's not evident that it is a remote
fault. Users should not have to understand tracebacks.
"""
if str.find(">:") != -1:
(first, rest) = str.split(">:",1)
if rest.startswith("\"") or rest.startswith("\'"):
rest = rest[1:]
if rest.endswith("\"") or rest.endswith("\'"):
rest = rest[:-1]
return rest
else:
return str
def get_fields(self, object_type):
"""
For a given name of an object type, return the FIELDS data structure.
"""
# FIXME: this should be in utils, or is it already?
if object_type == "distro":
return item_distro.FIELDS
elif object_type == "profile":
return item_profile.FIELDS
elif object_type == "system":
return item_system.FIELDS
elif object_type == "repo":
return item_repo.FIELDS
elif object_type == "image":
return item_image.FIELDS
elif object_type == "mgmtclass":
return item_mgmtclass.FIELDS
def object_command(self, object_type, object_action):
"""
Process object-based commands such as "distro add" or "profile rename"
"""
task_id = -1 # if assigned, we must tail the logfile
fields = self.get_fields(object_type)
if object_action in [ "add", "edit", "copy", "rename", "find" ]:
utils.add_options_from_fields(object_type, self.parser, fields, object_action)
elif object_action in [ "list" ]:
pass
else:
self.parser.add_option("--name", dest="name", help="name of object")
(options, args) = self.parser.parse_args()
if object_action in [ "add", "edit", "copy", "rename", "remove" ]:
if opt(options, "name") == "":
print "--name is required"
sys.exit(1)
self.remote.xapi_object_edit(object_type, options.name, object_action, utils.strip_none(vars(options), omit_none=True), self.token)
elif object_action == "getks":
if object_type == "profile":
data = self.remote.generate_kickstart(options.name,"")
elif object_type == "system":
data = self.remote.generate_kickstart("",options.name)
print data
elif object_action == "dumpvars":
if object_type == "profile":
data = self.remote.get_blended_data(options.name,"")
elif object_type == "system":
data = self.remote.get_blended_data("",options.name)
# FIXME: pretty-printing and sorting here
keys = data.keys()
keys.sort()
for x in keys:
print "%s : %s" % (x, data[x])
elif object_action in [ "poweron", "poweroff", "reboot" ]:
power={}
power["power"] = object_action.replace("power","")
power["systems"] = [options.name]
task_id = self.remote.background_power_system(power, self.token)
elif object_action == "report":
if options.name is not None:
report_item(self.remote,object_type,None,options.name)
else:
report_items(self.remote,object_type)
elif object_action == "list":
list_items(self.remote, object_type)
elif object_action == "find":
items = self.remote.find_items(object_type, utils.strip_none(vars(options), omit_none=True), "name", False)
for item in items:
print item
else:
raise exceptions.NotImplementedError()
# FIXME: add tail/polling code here
if task_id != -1:
self.print_task(task_id)
self.follow_task(task_id)
return True
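# Illustrative mapping from CLI invocations to the branches above
# (object names and options are examples only):
#
#   cobbler system edit --name=foo --profile=bar -> remote.xapi_object_edit(...)
#   cobbler profile getks --name=bar             -> remote.generate_kickstart(bar, "")
#   cobbler system poweron --name=foo            -> remote.background_power_system(...)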
# BOOKMARK
def direct_command(self, action_name):
"""
Process non-object based commands like "sync" and "hardlink"
"""
task_id = -1 # if assigned, we must tail the logfile
if action_name == "buildiso":
defaultiso = os.path.join(os.getcwd(), "generated.iso")
self.parser.add_option("--iso", dest="iso", default=defaultiso, help="(OPTIONAL) output ISO to this path")
self.parser.add_option("--profiles", dest="profiles", help="(OPTIONAL) use these profiles only")
self.parser.add_option("--systems", dest="systems", help="(OPTIONAL) use these systems only")
self.parser.add_option("--tempdir", dest="tempdir", help="(OPTIONAL) working directory")
self.parser.add_option("--distro", dest="distro", help="(OPTIONAL) used with --standalone to create a distro-based ISO including all associated profiles/systems")
self.parser.add_option("--standalone", dest="standalone", action="store_true", help="(OPTIONAL) creates a standalone ISO with all required distro files on it")
self.parser.add_option("--source", dest="source", help="(OPTIONAL) used with --standalone to specify a source for the distribution files")
self.parser.add_option("--exclude-dns", dest="exclude_dns", action="store_true", help="(OPTIONAL) prevents addition of name server addresses to the kernel boot options")
(options, args) = self.parser.parse_args()
task_id = self.start_task("buildiso",options)
elif action_name == "replicate":
self.parser.add_option("--master", dest="master", help="Cobbler server to replicate from.")
self.parser.add_option("--distros", dest="distro_patterns", help="patterns of distros to replicate")
self.parser.add_option("--profiles", dest="profile_patterns", help="patterns of profiles to replicate")
self.parser.add_option("--systems", dest="system_patterns", help="patterns of systems to replicate")
self.parser.add_option("--repos", dest="repo_patterns", help="patterns of repos to replicate")
self.parser.add_option("--image", dest="image_patterns", help="patterns of images to replicate")
self.parser.add_option("--omit-data", dest="omit_data", action="store_true", help="do not rsync data")
self.parser.add_option("--prune", dest="prune", action="store_true", help="remove objects (of all types) not found on the master")
(options, args) = self.parser.parse_args()
task_id = self.start_task("replicate",options)
elif action_name == "aclsetup":
self.parser.add_option("--adduser", dest="adduser", help="give acls to this user")
self.parser.add_option("--addgroup", dest="addgroup", help="give acls to this group")
self.parser.add_option("--removeuser", dest="removeuser", help="remove acls from this user")
self.parser.add_option("--removegroup", dest="removegroup", help="remove acls from this group")
(options, args) = self.parser.parse_args()
task_id = self.start_task("aclsetup",options)
elif action_name == "version":
version = self.remote.extended_version()
print "Cobbler %s" % version["version"]
print " source: %s, %s" % (version["gitstamp"], version["gitdate"])
print " build time: %s" % version["builddate"]
elif action_name == "hardlink":
(options, args) = self.parser.parse_args()
task_id = self.start_task("hardlink",options)
elif action_name == "reserialize":
(options, args) = self.parser.parse_args()
task_id = self.start_task("reserialize",options)
elif action_name == "status":
(options, args) = self.parser.parse_args()
print self.remote.get_status("text",self.token)
elif action_name == "validateks":
(options, args) = self.parser.parse_args()
task_id = self.start_task("validateks",options)
elif action_name == "get-loaders":
self.parser.add_option("--force", dest="force", action="store_true", help="overwrite any existing content in /var/lib/cobbler/loaders")
(options, args) = self.parser.parse_args()
task_id = self.start_task("dlcontent",options)
elif action_name == "import":
self.parser.add_option("--arch", dest="arch", help="OS architecture being imported")
self.parser.add_option("--breed", dest="breed", help="the breed being imported")
self.parser.add_option("--os-version", dest="os_version", help="the version being imported")
self.parser.add_option("--path", dest="path", help="local path or rsync location")
self.parser.add_option("--name", dest="name", help="name, ex 'RHEL-5'")
self.parser.add_option("--available-as", dest="available_as", help="tree is here, don't mirror")
self.parser.add_option("--kickstart", dest="kickstart_file", help="assign this kickstart file")
self.parser.add_option("--rsync-flags", dest="rsync_flags", help="pass additional flags to rsync")
(options, args) = self.parser.parse_args()
task_id = self.start_task("import",options)
elif action_name == "reposync":
self.parser.add_option("--only", dest="only", help="update only this repository name")
self.parser.add_option("--tries", dest="tries", help="try each repo this many times", default=1)
self.parser.add_option("--no-fail", dest="nofail", help="don't stop reposyncing if a failure occurs", action="store_true")
(options, args) = self.parser.parse_args()
task_id = self.start_task("reposync",options)
elif action_name == "aclsetup":
(options, args) = self.parser.parse_args()
# FIXME: missing options, add them here
task_id = self.start_task("aclsetup",options)
elif action_name == "check":
results = self.remote.check(self.token)
ct = 0
if len(results) > 0:
print "The following are potential configuration items that you may want to fix:\n"
for r in results:
ct = ct + 1
print "%s : %s" % (ct, r)
print "\nRestart cobblerd and then run 'cobbler sync' to apply changes."
else:
print "No configuration problems found. All systems go."
elif action_name == "sync":
self.parser.add_option("--verbose", dest="verbose", action="store_true", help="run sync with more output")
(options, args) = self.parser.parse_args()
task_id = self.start_task("sync",options)
elif action_name == "report":
(options, args) = self.parser.parse_args()
print "distros:\n=========="
report_items(self.remote,"distro")
print "\nprofiles:\n=========="
report_items(self.remote,"profile")
print "\nsystems:\n=========="
report_items(self.remote,"system")
print "\nrepos:\n=========="
report_items(self.remote,"repo")
print "\nimages:\n=========="
report_items(self.remote,"image")
print "\nmgmtclasses:\n=========="
report_items(self.remote,"mgmtclass")
elif action_name == "list":
# no tree view like 1.6? This is more efficient remotely
# for large configs and prevents xfering the whole config
# though we could consider that...
(options, args) = self.parser.parse_args()
print "distros:"
list_items(self.remote,"distro")
print "\nprofiles:"
list_items(self.remote,"profile")
print "\nsystems:"
list_items(self.remote,"system")
print "\nrepos:"
list_items(self.remote,"repo")
print "\nimages:"
list_items(self.remote,"image")
print "\nmgmtclasses:"
list_items(self.remote,"mgmtclass")
else:
print "No such command: %s" % action_name
sys.exit(1)
# FIXME: run here
# FIXME: add tail/polling code here
if task_id != -1:
self.print_task(task_id)
self.follow_task(task_id)
return True
def print_task(self, task_id):
print "task started: %s" % task_id
events = self.remote.get_events()
(etime, name, status, who_viewed) = events[task_id]
atime = time.asctime(time.localtime(etime))
print "task started (id=%s, time=%s)" % (name, atime)
def follow_task(self, task_id):
logfile = "/var/log/cobbler/tasks/%s.log" % task_id
# adapted from: http://code.activestate.com/recipes/157035/
file = open(logfile,'r')
#Find the size of the file and move to the end
#st_results = os.stat(filename)
#st_size = st_results[6]
#file.seek(st_size)
while 1:
where = file.tell()
line = file.readline()
if line.find("### TASK COMPLETE ###") != -1:
print "*** TASK COMPLETE ***"
sys.exit(0)
if line.find("### TASK FAILED ###") != -1:
print "!!! TASK FAILED !!!"
sys.exit(1)
if not line:
time.sleep(1)
file.seek(where)
else:
if line.find(" | "):
line = line.split(" | ")[-1]
print line, # already has newline
def print_object_help(self, object_type):
"""
Prints the subcommands for a given object, e.g. "cobbler distro --help"
"""
commands = OBJECT_ACTIONS[object_type]
commands.sort()
print "usage\n====="
for c in commands:
print "cobbler %s %s" % (object_type, c)
sys.exit(2)
def print_help(self):
"""
Prints general-top level help, e.g. "cobbler --help" or "cobbler" or "cobbler command-does-not-exist"
"""
print "usage\n====="
print "cobbler <distro|profile|system|repo|image|mgmtclass> ... "
print " [add|edit|copy|getks*|list|remove|rename|report] [options|--help]"
print "cobbler <%s> [options|--help]" % "|".join(DIRECT_ACTIONS)
sys.exit(2)
def main():
"""
CLI entry point
"""
cli = BootCLI()
cli.check_setup()
rc = cli.run(sys.argv)
if rc == True or rc is None:
sys.exit(0)
elif rc == False:
sys.exit(1)
return sys.exit(rc)
if __name__ == "__main__":
main()
| gpl-2.0 |
veger/ansible | lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py | 56 | 6294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, IBM Corp
# Author(s): Andreas Nafpliotis <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author:
- Andreas Nafpliotis (@nafpliot-ibm)
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed.
required: True
local_user_password:
description:
- The password to be set.
required: False
local_user_description:
description:
- Description for the user.
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add local user to ESXi
vmware_local_user_manager:
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
delegate_to: localhost
'''
RETURN = '''# '''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalUserManager(PyVmomi):
def __init__(self, module):
super(VMwareLocalUserManager, self).__init__(module)
self.local_user_name = self.module.params['local_user_name']
self.local_user_password = self.module.params['local_user_password']
self.local_user_description = self.module.params['local_user_description']
self.state = self.module.params['state']
if self.is_vcenter():
self.module.fail_json(msg="Failed to get local account manager settings "
"from ESXi server: %s" % self.module.params['hostname'],
details="It seems that %s is a vCenter server instead of an "
"ESXi server" % self.module.params['hostname'])
def process_state(self):
try:
local_account_manager_states = {
'absent': {
'present': self.state_remove_user,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_user,
'absent': self.state_create_user,
}
}
local_account_manager_states[self.state][self.check_local_user_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_user_manager_state(self):
user_account = self.find_user_account()
if not user_account:
return 'absent'
else:
return 'present'
def find_user_account(self):
searchStr = self.local_user_name
exactMatch = True
findUsers = True
findGroups = False
user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
return user_account
def create_account_spec(self):
account_spec = vim.host.LocalAccountManager.AccountSpecification()
account_spec.id = self.local_user_name
account_spec.password = self.local_user_password
account_spec.description = self.local_user_description
return account_spec
def state_create_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.CreateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_update_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.UpdateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_remove_user(self):
try:
self.content.accountManager.RemoveUser(self.local_user_name)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
local_user_password=dict(type='str', no_log=True),
local_user_description=dict(type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False)
vmware_local_user_manager = VMwareLocalUserManager(module)
vmware_local_user_manager.process_state()
if __name__ == '__main__':
main()
| gpl-3.0 |
iruga090/python-social-auth | social/backends/mailru.py | 83 | 1693 | """
Mail.ru OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/mailru.html
"""
from hashlib import md5
from social.p3 import unquote
from social.backends.oauth import BaseOAuth2
class MailruOAuth2(BaseOAuth2):
"""Mail.ru authentication backend"""
name = 'mailru-oauth2'
ID_KEY = 'uid'
AUTHORIZATION_URL = 'https://connect.mail.ru/oauth/authorize'
ACCESS_TOKEN_URL = 'https://connect.mail.ru/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [('refresh_token', 'refresh_token'),
('expires_in', 'expires')]
def get_user_details(self, response):
"""Return user details from Mail.ru request"""
fullname, first_name, last_name = self.get_user_names(
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {'username': unquote(response['nick']),
'email': unquote(response['email']),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Mail.ru REST API"""
key, secret = self.get_key_and_secret()
data = {'method': 'users.getInfo',
'session_key': access_token,
'app_id': key,
'secure': '1'}
param_list = sorted(list(item + '=' + data[item] for item in data))
data['sig'] = md5(
(''.join(param_list) + secret).encode('utf-8')
).hexdigest()
return self.get_json('http://www.appsmail.ru/platform/api',
params=data)[0]
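# Worked example of the signature scheme above (all values hypothetical):
#
#     data = {'app_id': '42', 'method': 'users.getInfo',
#             'secure': '1', 'session_key': 'abc'}
#     # the sorted 'key=value' pairs are concatenated, the secret appended:
#     #     'app_id=42method=users.getInfosecure=1session_key=abcSECRET'
#     # and the md5 hex digest of that string is sent as data['sig'].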
| bsd-3-clause |
janelia-pypi/kicad_netlist_reader | setup.py | 2 | 3679 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
from version import get_git_version
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='kicad_netlist_reader',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version=get_git_version(),
description='KiCad python module for interpreting generic netlists.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/janelia-pypi/kicad_netlist_reader',
# Author details
author='Jean-Pierre Charras',
author_email='[email protected]',
# Choose your license
license='GPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License (GPL)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='kicad netlist',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| gpl-2.0 |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/lib/sql_torment_testcase.py | 9 | 5153 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
from gppylib.commands.base import Command, CommandResult, REMOTE
from gppylib.commands.gp import GpLogFilter
import inspect, math, os, time
import logging, sys
from time import localtime, strftime
import tinctest
class PTormentSQL(Command):
"""This is a wrapper for running sql torment command."""
def __init__(self, sql_file = None, parallel=1, dbname = None, username = None, password = None,
PGOPTIONS = None, host = None, port = None, out_file = None, output_to_file=True):
if dbname == None:
dbname_option = "-d %s" % os.environ.get("PGDATABASE", os.environ["USER"])
else:
dbname_option = "-d %s" % (dbname)
if username == None:
username_option = ""
else:
username_option = "-U %s" % (username)
if PGOPTIONS == None:
PGOPTIONS = ""
else:
PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
if host == None:
hostname_option = ""
else:
hostname_option = "-h %s" % (host)
if port == None:
port_option = "-p %s" % os.environ.get("PGPORT", 5432)
else:
port_option = "-p %s" % (port)
assert os.path.exists(sql_file)
if out_file == None:
out_file = sql_file.replace('.sql', '.out')
if out_file[-2:] == '.t':
out_file = out_file[:-2]
cmd_str = '%s gptorment.pl -connect="%s %s %s %s" -parallel=%s -sqlfile %s' \
% (PGOPTIONS, dbname_option, username_option, hostname_option, port_option, parallel, sql_file)
if output_to_file:
cmd_str = "%s &> %s" % (cmd_str, out_file)
Command.__init__(self, 'run sql test', cmd_str)
@staticmethod
def run_sql_file(sql_file = None, parallel = 1, dbname = None, username = None, password = None,
PGOPTIONS = None, host = None, port = None, out_file = None, output_to_file=True):
cmd = PTormentSQL(sql_file, parallel, dbname, username, password, PGOPTIONS, host, port, out_file = out_file, output_to_file=output_to_file)
tinctest.logger.info("Running sql file - %s" %cmd)
cmd.run(validateAfter = False)
result = cmd.get_results()
tinctest.logger.info("Output - %s" %result)
if result.rc != 0:
return False
return True
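    # A usage sketch (hypothetical paths/values):
    #   PTormentSQL.run_sql_file('/tmp/case.sql', parallel=4, dbname='gptest')
    # shells out to roughly
    #   gptorment.pl -connect="-d gptest -p 5432" -parallel=4 \
    #       -sqlfile /tmp/case.sql &> /tmp/case.out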
@tinctest.skipLoading("Model class.Need not load.")
class SQLTormentTestCase(SQLTestCase):
"""
The SQLTormentTestCase is an extension of Tinc's SQLTestCase.
    During setup, it searches for a file named <testcasename>_torment_setup.sql.
    If found, it executes that file with gptorment.pl.
    This test case model makes it possible to ensure that multiple transactions are used
for setup. This is important for some append-only tests to ensure
that multiple segment files are used.
"""
torment_parallel = 2
def _run_setup_sql(self):
super(SQLTormentTestCase, self)._run_setup_sql()
test_case_setup_torment_sql_file = self.sql_file.replace('.sql', '_torment_setup.sql')
if os.path.exists(test_case_setup_torment_sql_file):
tinctest.logger.info("Running setup torment sql for test - %s" % test_case_setup_torment_sql_file)
self._run_torment_sql_file(test_case_setup_torment_sql_file)
def _run_torment_sql_file(self, sql_file):
result = True
self.test_artifacts.append(sql_file)
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql','.out'))
self.test_artifacts.append(out_file)
PTormentSQL.run_sql_file(sql_file, parallel=self.__class__.torment_parallel,
dbname = self.db_name, out_file = out_file)
return result
def get_ans_suffix(self):
return
def run_test(self):
sql_file = self.sql_file
ans_file = self.ans_file
def check_valid_suffix(suffix):
import re
if not re.match("[a-zA-Z0-9]+", suffix):
raise Exception("Invalid ans file suffix %s" % suffix)
# Modify the ans file based on the suffix
suffix = self.get_ans_suffix()
if suffix:
check_valid_suffix(suffix)
new_ans_file = ans_file[:-4] + "_" + suffix + ".ans"
if os.path.exists(new_ans_file):
self.ans_file = new_ans_file
return super(SQLTormentTestCase, self).run_test()
| apache-2.0 |
danielpalomino/gem5 | ext/ply/test/lex_optimize3.py | 164 | 1062 | # -----------------------------------------------------------------------------
# lex_optimize3.py
#
# Writes table in a subdirectory structure.
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lex.lex(optimize=1,lextab="lexdir.sub.calctab",outputdir="lexdir/sub")
lex.runmain(data="3+4")
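# A note on the generated artifacts (assuming ply's usual optimize-mode
# behaviour): the lexer tables are written to lexdir/sub/calctab.py, which is
# importable as lexdir.sub.calctab; the lexdir/sub directory tree must already
# exist before lex.lex() is called.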
| bsd-3-clause |
jay-tuckey/python-vmwaremirage | tests/test_main_functions.py | 1 | 4514 | from vmwaremirage import VmwareMirageClient
import config
import config_secure
import os
vm = VmwareMirageClient(server=config_secure.server,
username=config_secure.username,
password=os.environ['VMWARE_MIRAGE_PASSWORD'])
def test_reauth():
# Cofirm we are working
cvd = vm.get_cvd(config.cvd_1['id'])
assert cvd.Name == config.cvd_1['name']
# Logout
vm.client.service.Logout()
# And try again. It should automatically re-authenticate
cvd = vm.get_cvd(config.cvd_1['id'])
assert cvd.Name == config.cvd_1['name']
def test_get_cvds():
# Test the by id function
cvd = vm.get_cvd(config.cvd_1['id'])
assert cvd.Name == config.cvd_1['name']
# Test getting two cvds by id
cvds = vm.get_cvds(by='ID', value=[config.cvd_1['id'],config.cvd_2['id']], query_type='EQUALS')
assert len(cvds) == 2
cvds = vm.get_cvds(by='DEVICE_ID', value=[config.cvd_1['deviceid'],config.cvd_2['deviceid']], query_type='EQUALS')
assert len(cvds) == 2
cvds = vm.get_cvds(by='POLICY_ID', value=config.cvd_1['policyid'], query_type='EQUALS')
assert len(cvds) >= 1
cvds = vm.get_cvds(by='NAME', value=config.cvd_1['name'])
assert len(cvds) == 1
cvds = vm.get_cvds(by='USER_NAME', value=config.cvd_1['username'], query_type='CONTAINS')
assert len(cvds) >= 1
cvds = vm.get_cvds(by='POLICY_NAME', value=config.cvd_1['policyname'], query_type='ENDS_WITH')
assert len(cvds) >= 1
cvds = vm.get_cvds(by='CONNECTION_STATE', value=False, query_type='EQUALS')
assert len(cvds) >= 1
cvds = vm.get_cvds(by='CLIENT_STATUS', value='Idle', query_type='EQUALS')
assert len(cvds) >= 1
cvds = vm.get_cvds(by='PROGRESS', value=100, query_type='NOT_EQUALS')
assert len(cvds) >= 1
def test_get_collection_cvds():
cvds = vm.get_collection_cvds(config.collection['id'])
assert len(cvds) >= 1
def test_get_app_layers():
layers = vm.get_app_layers()
assert len(layers) >= 1
layer = vm.get_app_layers(by='ID', value=config.app_layer['id'], query_type='EQUALS')[0]
assert layer.Name == config.app_layer['name']
layers = vm.get_app_layers(by='NAME', value=config.app_layer['name'])
assert len(layers) >= 1
def test_get_base_layers():
layers = vm.get_base_layers()
assert len(layers) >= 1
layer = vm.get_base_layers(by='ID', value=config.base_layer['id'], query_type='EQUALS')[0]
assert layer.Name == config.base_layer['name']
layers = vm.get_base_layers(by='NAME', value=config.base_layer['name'])
assert len(layers) >= 1
def test_get_collections():
colls = vm.get_collections(by='ID', value=config.collection['id'], query_type='EQUALS')
assert len(colls) == 1
colls = vm.get_collections(by='NAME', value=config.collection['name'])
assert len(colls) >= 1
colls = vm.get_collections(by='DESCRIPTION', value=config.collection['description'], query_type='CONTAINS')
assert len(colls) >= 1
def test_get_pending_devices():
pends = vm.get_pending_devices(by='DEVICE_ID', value=config.pending['deviceid'], query_type='EQUALS')
assert len(pends) == 1
pends = vm.get_pending_devices(by='NAME', value=config.pending['name'])
assert len(pends) == 1
pends = vm.get_pending_devices(by='USER_NAME', value=config.pending['username'], query_type='CONTAINS')
assert len(pends) >= 1
pends = vm.get_pending_devices(by='CONNECTION_STATE', value=False, query_type='EQUALS')
assert len(pends) >= 1
pends = vm.get_pending_devices(by='MODEL_NAME', value=config.pending['model'], query_type='EQUALS')
assert len(pends) >= 1
pends = vm.get_pending_devices(by='VENDOR_NAME', value=config.pending['vendor'], query_type='EQUALS')
assert len(pends) >= 1
pends = vm.get_pending_devices(by='OS_VERSION', value=config.pending['os'], query_type='EQUALS')
assert len(pends) >= 1
def test_get_policies():
pols = vm.get_policies(by='ID', value=config.policy['id'], query_type='EQUALS')
assert len(pols) == 1
pols = vm.get_policies(by='NAME', value=config.policy['name'], query_type='EQUALS')
assert len(pols) == 1
def test_get_volumes():
vols = vm.get_volumes(by='ID', value=config.volume['id'], query_type='EQUALS')
assert len(vols) == 1
vols = vm.get_volumes(by='NAME', value=config.volume['name'], query_type='EQUALS')
assert len(vols) == 1
vols = vm.get_volumes(by='PATH', value=config.volume['path'], query_type='EQUALS')
| mit |
khinsen/pyh5md | examples/random_walk_1d.py | 2 | 1086 | # -*- coding: utf-8 -*-
# Copyright 2012-2013 Pierre de Buyl
# Copyright 2013 Felix Höfling
#
# This file is part of pyh5md
#
# pyh5md is free software and is licensed under the modified BSD license (see
# LICENSE file).
import numpy as np
import pyh5md
# Open a H5MD file
f = pyh5md.H5MD_File('walk_1d.h5', 'w', creator='pyh5md examples/random_walk_1d.py', creator_version='0', author='Pierre de Buyl')
# Add a trajectory group
part = f.particles_group('particles')
part.box(dimension=1, boundary=['none'])
# Create the trajectory data
r = np.zeros((30,1), dtype=np.int32)
# Add the trajectory position data element in the trajectory group
part_pos = part.trajectory('position', r.shape, r.dtype)
# Create an observable
obs_com = f.observable('center_of_mass', (), np.float64)
# Run a simulation
step=0
time=0.
for i in range(800):
step+=1
time+=.1
r += -1 + 2*np.random.random_integers(0,1,r.shape)
# Append the current position data to the H5MD file.
part_pos.append(r, step, time)
obs_com.append(r[:,0].mean(), step, time)
# Close the file
f.close()
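# The resulting walk_1d.h5 follows the H5MD layout; roughly (illustrative,
# exact group names depend on the pyh5md version):
#   /particles/particles/position/{value, step, time}   (30 walkers in 1D)
#   /observables/center_of_mass/{value, step, time}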
| bsd-3-clause |
doraemonext/wechat-platform | wechat_platform/system/keyword/tests.py | 1 | 4271 | # -*- coding: utf-8 -*-
from system.core.test import WechatTestCase
from system.official_account.models import OfficialAccount
from system.rule.models import Rule
from system.keyword.models import Keyword
class KeywordTest(WechatTestCase):
def test_add_keyword(self):
"""
        Test adding a keyword.
"""
official_account = OfficialAccount.manager.add(level=OfficialAccount.LEVEL_3, name='name', email='[email protected]', original='original', wechat='wechat')
rule = Rule.manager.add(official_account=official_account, name='rule test', reply_pattern=Rule.REPLY_PATTERN_ALL)
keyword = Keyword.manager.add(rule, keyword='keyword')
self.assertEqual(keyword.rule, rule)
self.assertEqual(keyword.keyword, 'keyword')
self.assertEqual(keyword.status, True)
self.assertEqual(keyword.type, Keyword.TYPE_FULL)
def test_keyword_search(self):
"""
        Test keyword search.
"""
official_account = OfficialAccount.manager.add(level=OfficialAccount.LEVEL_3, name='name', email='[email protected]', original='original', wechat='wechat')
rule_1 = Rule.manager.add(official_account=official_account, name='rule 1', reply_pattern=Rule.REPLY_PATTERN_RANDOM)
rule_2 = Rule.manager.add(official_account=official_account, name='rule 2', reply_pattern=Rule.REPLY_PATTERN_ALL)
rule_3 = Rule.manager.add(official_account=official_account, name='rule 3', reply_pattern=Rule.REPLY_PATTERN_FORWARD, status=False)
rule_4 = Rule.manager.add(official_account=official_account, name='rule 4', reply_pattern=Rule.REPLY_PATTERN_REVERSE, top=True)
        # Test "contains" matching
keyword_1 = Keyword.manager.add(rule=rule_1, keyword=u'你好', type=Keyword.TYPE_CONTAIN)
keyword_2 = Keyword.manager.add(rule=rule_1, keyword=u'你', type=Keyword.TYPE_CONTAIN)
keyword_3 = Keyword.manager.add(rule=rule_2, keyword=u'我们', type=Keyword.TYPE_CONTAIN)
keyword_4 = Keyword.manager.add(rule=rule_2, keyword=u'我们有', type=Keyword.TYPE_CONTAIN)
keyword_5 = Keyword.manager.add(rule=rule_3, keyword=u'deadline都快到了, 我竟然还在写测试 -_-# 真是少妇座的强迫症犯了……', status=False, type=Keyword.TYPE_CONTAIN)
keyword_6 = Keyword.manager.add(rule=rule_4, keyword=u'你', type=Keyword.TYPE_CONTAIN)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'你好嘛'), keyword_6)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'我们'), keyword_3)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'你好'), keyword_6)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'你'), keyword_6)
        # Test exact (full) matching
keyword_7 = Keyword.manager.add(rule=rule_1, keyword=u'完全匹配测试', type=Keyword.TYPE_FULL)
keyword_8 = Keyword.manager.add(rule=rule_2, keyword=u'完全', type=Keyword.TYPE_FULL)
keyword_9 = Keyword.manager.add(rule=rule_3, keyword=u'完全匹配', type=Keyword.TYPE_FULL)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'完全'), keyword_8)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'完全匹配测试'), keyword_7)
self.assertIsNone(Keyword.manager.search(official_account=official_account, keyword=u'完全匹配'))
        # Test regular-expression matching
keyword_10 = Keyword.manager.add(rule=rule_1, keyword=u'^今天', type=Keyword.TYPE_REGEX)
keyword_11 = Keyword.manager.add(rule=rule_2, keyword=u'^[^@]+@[^@]+\.[^@]{2,}$', type=Keyword.TYPE_REGEX)
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'今天天气真好'), keyword_10)
self.assertIsNone(Keyword.manager.search(official_account=official_account, keyword=u'天气预报说今天天气真好'))
self.assertIsNone(Keyword.manager.search(official_account=official_account, keyword=u'doraemonext@xx'))
self.assertEqual(Keyword.manager.search(official_account=official_account, keyword=u'[email protected]'), keyword_11) | bsd-2-clause |
fidlej/searchrss | src/formatter.py | 1 | 1389 |
import urllib
import web
import time
from src import config
def install():
web.template.Template.globals["len"] = len
web.template.Template.globals["int"] = int
web.template.Template.globals["urlEncode"] = urlEncode
web.template.Template.globals["formatDay"] = formatDay
web.template.Template.globals["formatDatetime"] = formatDatetime
def page(pageName, title, extraHead=None):
""" Returns template
to render given page inside shell.
"""
def body(*args, **kwargs):
body = getattr(RENDER, pageName)(*args, **kwargs)
return RENDER.shell(body, title, extraHead)
return body
def urlEncode(value):
if isinstance(value, unicode):
value = value.encode("utf-8")
return urllib.quote_plus(value)
def formatDay(timestamp=None):
#TODO: what to do with the localtime?
# The appengine machines use GMT-07 (California).
if timestamp is None:
timestamp = time.time()
return time.strftime("%Y-%m-%d", time.localtime(timestamp))
def formatDatetime(utcDt):
return utcDt.strftime("%Y-%m-%dT%H:%M:%SZ")
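# e.g. formatDatetime(datetime.datetime(2015, 3, 8, 21, 8, 16)) returns
# '2015-03-08T21:08:16Z' (the argument is assumed to already be in UTC).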
install()
RENDER = web.template.render("templates/", cache=not config.DEBUG)
if hasattr(RENDER, "mod"):
# GAE_Render ignores later changes to globals.
# We need to define the "render" var directly.
RENDER.mod.render = RENDER
else:
web.template.Template.globals["render"] = RENDER
| gpl-3.0 |
mims2707/bite-project | deps/gdata-python-client/src/gdata/tlslite/Session.py | 359 | 4733 | """Class representing a TLS session."""
from utils.compat import *
from mathtls import *
from constants import *
class Session:
"""
This class represents a TLS session.
TLS distinguishes between connections and sessions. A new
handshake creates both a connection and a session. Data is
transmitted over the connection.
The session contains a more permanent record of the handshake. The
session can be inspected to determine handshake results. The
session can also be used to create a new connection through
"session resumption". If the client and server both support this,
they can create a new connection based on an old session without
the overhead of a full handshake.
The session for a L{tlslite.TLSConnection.TLSConnection} can be
retrieved from the connection's 'session' attribute.
@type srpUsername: str
@ivar srpUsername: The client's SRP username (or None).
@type sharedKeyUsername: str
@ivar sharedKeyUsername: The client's shared-key username (or
None).
@type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@ivar clientCertChain: The client's certificate chain (or None).
@type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@ivar serverCertChain: The server's certificate chain (or None).
"""
def __init__(self):
self.masterSecret = createByteArraySequence([])
self.sessionID = createByteArraySequence([])
self.cipherSuite = 0
self.srpUsername = None
self.sharedKeyUsername = None
self.clientCertChain = None
self.serverCertChain = None
self.resumable = False
self.sharedKey = False
def _clone(self):
other = Session()
other.masterSecret = self.masterSecret
other.sessionID = self.sessionID
other.cipherSuite = self.cipherSuite
other.srpUsername = self.srpUsername
other.sharedKeyUsername = self.sharedKeyUsername
other.clientCertChain = self.clientCertChain
other.serverCertChain = self.serverCertChain
other.resumable = self.resumable
other.sharedKey = self.sharedKey
return other
def _calcMasterSecret(self, version, premasterSecret, clientRandom,
serverRandom):
if version == (3,0):
self.masterSecret = PRF_SSL(premasterSecret,
concatArrays(clientRandom, serverRandom), 48)
elif version in ((3,1), (3,2)):
self.masterSecret = PRF(premasterSecret, "master secret",
concatArrays(clientRandom, serverRandom), 48)
else:
raise AssertionError()
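        # In every supported version the master secret is 48 bytes, derived
        # from the premaster secret and clientRandom||serverRandom (SSLv3 via
        # PRF_SSL, TLS 1.0/1.1 via PRF with the "master secret" label).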
def valid(self):
"""If this session can be used for session resumption.
@rtype: bool
@return: If this session can be used for session resumption.
"""
return self.resumable or self.sharedKey
def _setResumable(self, boolean):
#Only let it be set if this isn't a shared key
if not self.sharedKey:
#Only let it be set to True if the sessionID is non-null
if (not boolean) or (boolean and self.sessionID):
self.resumable = boolean
def getCipherName(self):
"""Get the name of the cipher used with this connection.
@rtype: str
@return: The name of the cipher used with this connection.
Either 'aes128', 'aes256', 'rc4', or '3des'.
"""
if self.cipherSuite in CipherSuite.aes128Suites:
return "aes128"
elif self.cipherSuite in CipherSuite.aes256Suites:
return "aes256"
elif self.cipherSuite in CipherSuite.rc4Suites:
return "rc4"
elif self.cipherSuite in CipherSuite.tripleDESSuites:
return "3des"
else:
return None
def _createSharedKey(self, sharedKeyUsername, sharedKey):
if len(sharedKeyUsername)>16:
raise ValueError()
if len(sharedKey)>47:
raise ValueError()
self.sharedKeyUsername = sharedKeyUsername
self.sessionID = createByteArrayZeros(16)
for x in range(len(sharedKeyUsername)):
self.sessionID[x] = ord(sharedKeyUsername[x])
premasterSecret = createByteArrayZeros(48)
sharedKey = chr(len(sharedKey)) + sharedKey
for x in range(48):
premasterSecret[x] = ord(sharedKey[x % len(sharedKey)])
self.masterSecret = PRF(premasterSecret, "shared secret",
createByteArraySequence([]), 48)
self.sharedKey = True
return self
| apache-2.0 |
ismail-s/warehouse | warehouse/migrations/versions/5988e3e8d2e_add_primary_key_to_release_files.py | 7 | 1119 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add Primary Key to Release Files
Revision ID: 5988e3e8d2e
Revises: 128a0ead322
Create Date: 2015-03-08 21:08:16.285082
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = "5988e3e8d2e"
down_revision = "128a0ead322"
def upgrade():
op.add_column(
"release_files",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
)
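    # Roughly the DDL this emits (illustrative; gen_random_uuid() is provided
    # by the pgcrypto extension):
    #   ALTER TABLE release_files
    #       ADD COLUMN id UUID DEFAULT gen_random_uuid() NOT NULL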
def downgrade():
op.drop_column("release_files", "id")
| apache-2.0 |
ragupta-git/ImcSdk | imcsdk/mometa/error/Error.py | 1 | 3229 | """This module contains the general information for Error ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ErrorConsts:
pass
class Error(ManagedObject):
"""This is Error class."""
consts = ErrorConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("Error", "error", "", VersionMeta.Version151f, "OutputOnly", 0x1, [], [""], [], [], [None]),
"modular": MoMeta("Error", "error", "", VersionMeta.Version2013e, "OutputOnly", 0x1, [], [""], [], [], [None])
}
prop_meta = {
"classic": {
"cookie": MoPropertyMeta("cookie", "cookie", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_code": MoPropertyMeta("error_code", "errorCode", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_descr": MoPropertyMeta("error_descr", "errorDescr", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"invocation_result": MoPropertyMeta("invocation_result", "invocationResult", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"response": MoPropertyMeta("response", "response", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
},
"modular": {
"cookie": MoPropertyMeta("cookie", "cookie", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_code": MoPropertyMeta("error_code", "errorCode", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_descr": MoPropertyMeta("error_descr", "errorDescr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"invocation_result": MoPropertyMeta("invocation_result", "invocationResult", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"response": MoPropertyMeta("response", "response", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
},
}
prop_map = {
"classic": {
"cookie": "cookie",
"errorCode": "error_code",
"errorDescr": "error_descr",
"invocationResult": "invocation_result",
"response": "response",
},
"modular": {
"cookie": "cookie",
"errorCode": "error_code",
"errorDescr": "error_descr",
"invocationResult": "invocation_result",
"response": "response",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.cookie = None
self.error_code = None
self.error_descr = None
self.invocation_result = None
self.response = None
ManagedObject.__init__(self, "Error", parent_mo_or_dn, **kwargs)
| apache-2.0 |
dmitriy0611/django | tests/template_tests/templatetags/custom.py | 42 | 4823 | import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
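# Illustrative template usage (hypothetical values):
#   {% simple_unlimited_args_kwargs 37 42 56 eggs="boiled" %}
# renders "simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=boiled"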
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
| bsd-3-clause |
adobecs5/urp2015 | lib/python3.4/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
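# Example usage (illustrative):
#   interpret("python_version >= '2.6' and os.name == 'posix'")
# parses the marker through the AST whitelist, compiles it, and evaluates it
# against default_environment(), returning a bool.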
| apache-2.0 |
MRigal/django | django/contrib/postgres/lookups.py | 199 | 1175 | from django.db.models import Lookup, Transform
class PostgresSimpleLookup(Lookup):
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s %s %s' % (lhs, self.operator, rhs), params
class FunctionTransform(Transform):
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return "%s(%s)" % (self.function, lhs), params
class DataContains(PostgresSimpleLookup):
lookup_name = 'contains'
operator = '@>'
class ContainedBy(PostgresSimpleLookup):
lookup_name = 'contained_by'
operator = '<@'
class Overlap(PostgresSimpleLookup):
lookup_name = 'overlap'
operator = '&&'
class HasKey(PostgresSimpleLookup):
lookup_name = 'has_key'
operator = '?'
class HasKeys(PostgresSimpleLookup):
lookup_name = 'has_keys'
operator = '?&'
class HasAnyKeys(PostgresSimpleLookup):
lookup_name = 'has_any_keys'
operator = '?|'
class Unaccent(FunctionTransform):
bilateral = True
lookup_name = 'unaccent'
function = 'UNACCENT'
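# Illustrative SQL produced by these lookups (parameters shown as %s): an
# ArrayField filter tags__contains=['django'] goes through DataContains and
# compiles to roughly "tags" @> %s, while name__unaccent='Helene' is bilateral
# and wraps both sides: UNACCENT("name") = UNACCENT(%s).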
| bsd-3-clause |
gaumire/heroku-buildpack-python | vendor/pip-pop/pip/pep425tags.py | 249 | 4427 | """Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import
import re
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
return ''.join(map(str, sys.version_info[:2]))
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
major = sys.version_info[0]
# Support all previous minor Python versions.
for minor in range(sys.version_info[1], -1, -1):
versions.append(''.join(map(str, (major, minor))))
impl = get_abbr_impl()
abis = []
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError as e: # Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
soabi = None
if soabi and soabi.startswith('cpython-'):
abis[0:0] = ['cp' + soabi.split('-')[1]]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = get_platform()
if sys.platform == 'darwin':
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
actual_arches = [actual_arch]
if actual_arch in ('i386', 'ppc'):
actual_arches.append('fat')
if actual_arch in ('i386', 'x86_64'):
actual_arches.append('intel')
if actual_arch in ('i386', 'ppc', 'x86_64'):
actual_arches.append('fat3')
if actual_arch in ('ppc64', 'x86_64'):
actual_arches.append('fat64')
if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
actual_arches.append('universal')
tpl = '{0}_{1}_%i_%s'.format(name, major)
arches = []
for m in range(int(minor) + 1):
for a in actual_arches:
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# Has binaries, does not use the Python API:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
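# Illustrative output on a 64-bit Linux CPython 3.4 build: supported_tags
# starts with entries such as ('cp34', 'cp34m', 'linux_x86_64') and includes
# generic ones like ('py3', 'none', 'any'); exact ABI strings depend on the
# interpreter's SOABI.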
| mit |
40223209/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/compat.py | 603 | 3054 | """Python 2.x/3.x compatibility tools"""
import sys
__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_',
'unicode_', 'raw_input_', 'as_bytes', 'as_unicode']
def geterror ():
return sys.exc_info()[1]
try:
long_ = long
except NameError:
long_ = int
try:
xrange_ = xrange
except NameError:
xrange_ = range
def get_BytesIO():
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
return BytesIO
def get_StringIO():
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
return StringIO
def ord_(o):
try:
return ord(o)
except TypeError:
return o
try:
unichr_ = unichr
except NameError:
unichr_ = chr
try:
unicode_ = unicode
except NameError:
unicode_ = str
try:
bytes_ = bytes
except NameError:
bytes_ = str
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
if sys.platform == 'win32':
filesystem_errors = "replace"
elif sys.version_info >= (3, 0, 0):
filesystem_errors = "surrogateescape"
else:
filesystem_errors = "strict"
def filesystem_encode(u):
fsencoding = sys.getfilesystemencoding()
if (fsencoding.lower() == 'ascii') and sys.platform.startswith('linux'):
# Don't believe Linux systems claiming ASCII-only filesystems. In
# practice, arbitrary bytes are allowed, and most things expect UTF-8.
fsencoding = 'utf-8'
return u.encode(fsencoding, filesystem_errors)
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
# e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x
# as_bytes("a\x01\b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
try:
unicode
def as_bytes(string):
""" '<binary literal>' => '<binary literal>' """
return string
def as_unicode(rstring):
""" r'<Unicode literal>' => u'<Unicode literal>' """
return rstring.decode('unicode_escape', 'strict')
except NameError:
def as_bytes(string):
""" '<binary literal>' => b'<binary literal>' """
return string.encode('latin-1', 'strict')
def as_unicode(rstring):
""" r'<Unicode literal>' => '<Unicode literal>' """
return rstring.encode('ascii', 'strict').decode('unicode_escape',
                                                        'strict')
# Include a next compatible function for Python versions < 2.6
try:
next_ = next
except NameError:
def next_(i, *args):
try:
return i.next()
except StopIteration:
if args:
return args[0]
raise
# itertools.imap is missing in 3.x
try:
    from itertools import imap as imap_
except ImportError:
imap_ = map
| gpl-3.0 |
OpenHumans/open-humans | private_sharing/api_authentication.py | 2 | 3436 | import arrow
from django.contrib.auth import get_user_model
from oauth2_provider.models import AccessToken
from oauth2_provider.contrib.rest_framework import OAuth2Authentication
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from .models import DataRequestProject, OAuth2DataRequestProject
UserModel = get_user_model()
class MasterTokenAuthentication(BaseAuthentication):
"""
Master token based authentication.
"""
def authenticate(self, request):
request.oauth2_error = getattr(request, "oauth2_error", {})
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b"bearer":
return None
if len(auth) == 1:
msg = "Invalid token header. No credentials provided."
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = "Invalid token header. " "Token string should not contain spaces."
raise exceptions.AuthenticationFailed(msg)
try:
token = auth[1].decode()
except UnicodeError:
msg = (
"Invalid token header. "
"Token string should not contain invalid characters."
)
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(token)
@staticmethod
def authenticate_credentials(key):
try:
project = DataRequestProject.objects.get(master_access_token=key)
if (
not project.token_expiration_disabled
and project.token_expiration_date < arrow.utcnow().datetime
):
raise exceptions.AuthenticationFailed("Expired token.")
user = project.coordinator.user
except DataRequestProject.DoesNotExist:
project = None
user = None
if not project or not user:
raise exceptions.AuthenticationFailed("Invalid token.")
return (user, project)
def authenticate_header(self, request):
return 'Bearer realm="api"'
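# Illustrative request handled by MasterTokenAuthentication (hypothetical
# token value):
#   Authorization: Bearer 0123456789abcdef
# A matching, unexpired DataRequestProject master_access_token authenticates
# the request as (coordinator user, project).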
class CustomOAuth2Authentication(OAuth2Authentication):
"""
Custom OAuth2 auth based on `django-oauth-toolkit` version.
(1) this raises a better error for expired tokens
(2) this modifies the return of authenticate() to replace returned
(user, token) with (user, project), matching the behavior of
ProjectTokenAuthentication.
"""
def authenticate(self, request):
"""
Raises an exception for an expired token, or returns two-tuple of
(user, project) if authentication succeeds, or None otherwise.
"""
request.oauth2_error = getattr(request, "oauth2_error", {})
access_token = None
try:
auth = get_authorization_header(request).split()
token = auth[1].decode()
access_token = AccessToken.objects.get(token=token)
except Exception:
pass
if access_token and access_token.is_expired():
raise exceptions.AuthenticationFailed("Expired token.")
auth = super(CustomOAuth2Authentication, self).authenticate(request)
if auth:
project = OAuth2DataRequestProject.objects.get(
application=auth[1].application
)
return (auth[0], project)
return auth
| mit |
twotreeszf/AriaThunder | xunlei-lixian/lixian_cli.py | 14 | 1801 | #!/usr/bin/env python
from lixian_commands.util import *
import lixian_help
import sys
from lixian_commands.login import login
from lixian_commands.logout import logout
from lixian_commands.download import download_task
from lixian_commands.list import list_task
from lixian_commands.add import add_task
from lixian_commands.delete import delete_task
from lixian_commands.pause import pause_task
from lixian_commands.restart import restart_task
from lixian_commands.rename import rename_task
from lixian_commands.readd import readd_task
from lixian_commands.info import lixian_info
from lixian_commands.config import lx_config
from lixian_commands.help import lx_help
def execute_command(args=sys.argv[1:]):
import lixian_plugins # load plugins at import
if not args:
usage()
sys.exit(1)
command = args[0]
if command.startswith('-'):
if command in ('-h', '--help'):
usage(lixian_help.welcome_help)
elif command in ('-v', '--version'):
print '0.0.x'
else:
usage()
sys.exit(1)
sys.exit(0)
import lixian_alias
command = lixian_alias.to_alias(command)
commands = {'login': login,
'logout': logout,
'download': download_task,
'list': list_task,
'add': add_task,
'delete': delete_task,
'pause': pause_task,
'restart': restart_task,
'rename': rename_task,
'readd': readd_task,
'info': lixian_info,
'config': lx_config,
'help': lx_help}
import lixian_plugins.commands
commands.update(lixian_plugins.commands.commands)
if command not in commands:
usage()
sys.exit(1)
if '-h' in args or '--help' in args:
lx_help([command])
else:
commands[command](args[1:])
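# e.g. `lixian_cli.py list` dispatches to list_task([]), while
# `lixian_cli.py download --help` is routed to lx_help(['download']).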
if __name__ == '__main__':
execute_command()
| bsd-3-clause |
csm0042/guibyini | guibyini/_main.py | 1 | 2739 | __author__ = 'chris.maue'
#######################################################################################################################
# Import required libraries
#######################################################################################################################
import logging
import os
import gui_builder
#######################################################################################################################
# Determine project path and auto-set debug log file and gui configuration file names as appropriate
#######################################################################################################################
projectPath = os.path.split(__file__)
debugLogFile = os.path.normcase(os.path.join(projectPath[0], 'debug.log'))
guiIniFile = os.path.normcase(os.path.join(projectPath[0], 'gui_setup.ini'))
#######################################################################################################################
# Start program logger / debugger
#######################################################################################################################
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
filename=debugLogFile,
filemode='w')
logging.info('[Main] Program Logger Started')
logging.info('[Main] Logging to file: %s' % debugLogFile)
logging.info('[Main] Using GUI configuration file: %s' % guiIniFile)
#######################################################################################################################
# Define Data types
#######################################################################################################################
class application_IO(object):
def __init__(self):
self.input = [bool() for i in range(32)]
self.output = [bool() for i in range(32)]
#######################################################################################################################
# Define Data tags used for interlocking between application window and IO monitor threads
#######################################################################################################################
IoTable = application_IO()
IoTableCache = application_IO()
IoTableOS = application_IO()
#######################################################################################################################
# Start application window (runs in main thread)
#######################################################################################################################
gui_object = gui_builder.gui(guiIniFile, debugLogFile, IoTable)
gui_object.create_window() | gpl-2.0 |
Yen-Chung-En/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/etree/__init__.py | 1200 | 1604 | # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
| gpl-3.0 |
sysadmind/ansible-modules-extras | cloud/amazon/dynamodb_table.py | 48 | 14484 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: dynamodb_table
short_description: Create, update or delete AWS Dynamo DB tables.
version_added: "2.0"
description:
- Create or delete AWS Dynamo DB tables.
- Can update the provisioned throughput on existing tables.
- Returns the status of the specified table.
author: Alan Loi (@loia)
requirements:
- "boto >= 2.37.0"
options:
state:
description:
- Create or delete the table
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the table.
required: true
hash_key_name:
description:
- Name of the hash key.
- Required when C(state=present).
required: false
default: null
hash_key_type:
description:
- Type of the hash key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
range_key_name:
description:
- Name of the range key.
required: false
default: null
range_key_type:
description:
- Type of the range key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
read_capacity:
description:
- Read throughput capacity (units) to provision.
required: false
default: 1
write_capacity:
description:
- Write throughput capacity (units) to provision.
required: false
default: 1
indexes:
description:
      - List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes don't support updates and have no throughput settings.
- "required options: ['name', 'type', 'hash_key_name']"
- "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']"
- "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
required: false
default: []
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Create dynamo table with hash and range primary key
- dynamodb_table:
name: my-table
region: us-east-1
hash_key_name: id
hash_key_type: STRING
range_key_name: create_time
range_key_type: NUMBER
read_capacity: 2
write_capacity: 2
# Update capacity on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
read_capacity: 10
write_capacity: 10
# set index on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
indexes:
- name: NamedIndex
type: global_include
hash_key_name: id
range_key_name: create_time
includes:
- other_field
- other_field2
read_capacity: 10
write_capacity: 10
# Delete dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
state: absent
'''
RETURN = '''
table_status:
description: The current status of the table.
returned: success
type: string
sample: ACTIVE
'''
import traceback

try:
import boto
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
from boto.dynamodb2.types import STRING, NUMBER, BINARY
from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
from boto.dynamodb2.exceptions import ValidationException
HAS_BOTO = True
DYNAMO_TYPE_MAP = {
'STRING': STRING,
'NUMBER': NUMBER,
'BINARY': BINARY
}
except ImportError:
HAS_BOTO = False
DYNAMO_TYPE_DEFAULT = 'STRING'
INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
def create_or_update_dynamo_table(connection, module):
table_name = module.params.get('name')
hash_key_name = module.params.get('hash_key_name')
hash_key_type = module.params.get('hash_key_type')
range_key_name = module.params.get('range_key_name')
range_key_type = module.params.get('range_key_type')
read_capacity = module.params.get('read_capacity')
write_capacity = module.params.get('write_capacity')
all_indexes = module.params.get('indexes')
for index in all_indexes:
validate_index(index, module)
schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)
throughput = {
'read': read_capacity,
'write': write_capacity
}
indexes, global_indexes = get_indexes(all_indexes)
result = dict(
region=module.params.get('region'),
table_name=table_name,
hash_key_name=hash_key_name,
hash_key_type=hash_key_type,
range_key_name=range_key_name,
range_key_type=range_key_type,
read_capacity=read_capacity,
write_capacity=write_capacity,
indexes=all_indexes,
)
try:
table = Table(table_name, connection=connection)
if dynamo_table_exists(table):
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
else:
if not module.check_mode:
Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
result['changed'] = True
if not module.check_mode:
result['table_status'] = table.describe()['Table']['TableStatus']
except BotoServerError:
result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
module.fail_json(**result)
else:
module.exit_json(**result)
def delete_dynamo_table(connection, module):
table_name = module.params.get('name')
result = dict(
region=module.params.get('region'),
table_name=table_name,
)
try:
table = Table(table_name, connection=connection)
if dynamo_table_exists(table):
if not module.check_mode:
table.delete()
result['changed'] = True
else:
result['changed'] = False
except BotoServerError:
result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
module.fail_json(**result)
else:
module.exit_json(**result)
def dynamo_table_exists(table):
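    """Return True if the table exists, False if DynamoDB reports the
    resource as not found; any other describe() error is re-raised."""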
try:
table.describe()
return True
    except JSONResponseError as e:
if e.message and e.message.startswith('Requested resource not found'):
return False
else:
raise e
def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
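    """Bring an existing table's throughput and global secondary indexes in
    line with the requested configuration. Returns True if anything changed
    (or would have changed in check mode)."""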
table.describe() # populate table details
throughput_changed = False
global_indexes_changed = False
if has_throughput_changed(table, throughput):
if not check_mode:
throughput_changed = table.update(throughput=throughput)
else:
throughput_changed = True
removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
if removed_indexes:
if not check_mode:
for name, index in removed_indexes.iteritems():
global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
else:
global_indexes_changed = True
if added_indexes:
if not check_mode:
for name, index in added_indexes.iteritems():
global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
else:
global_indexes_changed = True
if index_throughput_changes:
if not check_mode:
# todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
try:
global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
            except ValidationException:
                pass
else:
global_indexes_changed = True
return throughput_changed or global_indexes_changed
def has_throughput_changed(table, new_throughput):
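    """Return True if the requested read or write capacity differs from the
    table's current throughput."""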
if not new_throughput:
return False
return new_throughput['read'] != table.throughput['read'] or \
new_throughput['write'] != table.throughput['write']
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
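    """Build the boto key schema for a table or index: a HashKey plus, when a
    range key name is given, a RangeKey. Unknown key types fall back to
    STRING. For example, ('id', 'STRING', 'create_time', 'NUMBER') yields
    [HashKey('id', STRING), RangeKey('create_time', NUMBER)]."""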
if range_key_name:
schema = [
HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])),
RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
]
else:
schema = [
HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
]
return schema
def get_changed_global_indexes(table, global_indexes):
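    """Compare the table's current global secondary indexes with the
    requested ones and return (removed_indexes, added_indexes,
    index_throughput_changes), each a dict keyed by index name."""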
table.describe()
table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
table_index_objects = dict((index.name, index) for index in table.global_indexes)
set_index_info = dict((index.name, index.schema()) for index in global_indexes)
set_index_objects = dict((index.name, index) for index in global_indexes)
removed_indexes = dict((name, index) for name, index in table_index_info.iteritems() if name not in set_index_info)
added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.iteritems() if name not in table_index_info)
# todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
# index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.iteritems() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
# todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.iteritems() if name not in added_indexes)
return removed_indexes, added_indexes, index_throughput_changes
def validate_index(index, module):
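    """Fail the module if an index definition contains an unknown option,
    misses a required option, or names an unsupported index type."""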
for key, val in index.iteritems():
if key not in INDEX_OPTIONS:
module.fail_json(msg='%s is not a valid option for an index' % key)
for required_option in INDEX_REQUIRED_OPTIONS:
if required_option not in index:
module.fail_json(msg='%s is a required option for an index' % required_option)
if index['type'] not in INDEX_TYPE_OPTIONS:
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
def get_indexes(all_indexes):
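    """Translate the module's index definitions into boto index objects,
    returning (local_indexes, global_indexes). Global index types carry
    their own throughput, defaulting to 1 read and 1 write unit."""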
indexes = []
global_indexes = []
for index in all_indexes:
name = index['name']
schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type'))
throughput = {
'read': index.get('read_capacity', 1),
'write': index.get('write_capacity', 1)
}
if index['type'] == 'all':
indexes.append(AllIndex(name, parts=schema))
elif index['type'] == 'global_all':
global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
elif index['type'] == 'global_include':
global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
elif index['type'] == 'global_keys_only':
global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
elif index['type'] == 'include':
indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
elif index['type'] == 'keys_only':
indexes.append(KeysOnlyIndex(name, parts=schema))
return indexes, global_indexes
def main():
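    """Module entry point: build the argument spec, connect to DynamoDB in
    the requested region and dispatch to the create/update or delete
    handler."""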
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
name=dict(required=True, type='str'),
hash_key_name=dict(required=True, type='str'),
hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
range_key_name=dict(type='str'),
range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
read_capacity=dict(default=1, type='int'),
write_capacity=dict(default=1, type='int'),
indexes=dict(default=[], type='list'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg='region must be specified')
try:
connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
state = module.params.get('state')
if state == 'present':
create_or_update_dynamo_table(connection, module)
elif state == 'absent':
delete_dynamo_table(connection, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cchurch/ansible | lib/ansible/modules/network/fortios/fortios_report_theme.py | 21 | 15174 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; logging can be enabled via the relevant
# setting in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_report_theme
short_description: Report themes configuration in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by
      allowing the user to set and modify the report feature and theme
      category. Examples include all parameters, and values need to be
      adjusted to data sources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
report_theme:
description:
- Report themes configuration
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
bullet-list-style:
description:
- Bullet list style.
column-count:
description:
- Report page column count.
choices:
- 1
- 2
- 3
default-html-style:
description:
- Default HTML report style.
default-pdf-style:
description:
- Default PDF report style.
graph-chart-style:
description:
- Graph chart style.
heading1-style:
description:
- Report heading style.
heading2-style:
description:
- Report heading style.
heading3-style:
description:
- Report heading style.
heading4-style:
description:
- Report heading style.
hline-style:
description:
- Horizontal line style.
image-style:
description:
- Image style.
name:
description:
- Report theme name.
required: true
normal-text-style:
description:
- Normal text style.
numbered-list-style:
description:
- Numbered list style.
page-footer-style:
description:
- Report page footer style.
page-header-style:
description:
- Report page header style.
page-orient:
description:
- Report page orientation.
choices:
- portrait
- landscape
page-style:
description:
- Report page style.
report-subtitle-style:
description:
- Report subtitle style.
report-title-style:
description:
- Report title style.
table-chart-caption-style:
description:
- Table chart caption style.
table-chart-even-row-style:
description:
- Table chart even row style.
table-chart-head-style:
description:
- Table chart head row style.
table-chart-odd-row-style:
description:
- Table chart odd row style.
table-chart-style:
description:
- Table chart style.
toc-heading1-style:
description:
- Table of contents heading style.
toc-heading2-style:
description:
- Table of contents heading style.
toc-heading3-style:
description:
- Table of contents heading style.
toc-heading4-style:
description:
- Table of contents heading style.
toc-title-style:
description:
- Table of contents title style.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Report themes configuration
fortios_report_theme:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
report_theme:
state: "present"
bullet-list-style: "<your_own_value>"
column-count: "1"
default-html-style: "<your_own_value>"
default-pdf-style: "<your_own_value>"
graph-chart-style: "<your_own_value>"
heading1-style: "<your_own_value>"
heading2-style: "<your_own_value>"
heading3-style: "<your_own_value>"
heading4-style: "<your_own_value>"
hline-style: "<your_own_value>"
image-style: "<your_own_value>"
name: "default_name_14"
normal-text-style: "<your_own_value>"
numbered-list-style: "<your_own_value>"
page-footer-style: "<your_own_value>"
page-header-style: "<your_own_value>"
page-orient: "portrait"
page-style: "<your_own_value>"
report-subtitle-style: "<your_own_value>"
report-title-style: "<your_own_value>"
table-chart-caption-style: "<your_own_value>"
table-chart-even-row-style: "<your_own_value>"
table-chart-head-style: "<your_own_value>"
table-chart-odd-row-style: "<your_own_value>"
table-chart-style: "<your_own_value>"
toc-heading1-style: "<your_own_value>"
toc-heading2-style: "<your_own_value>"
toc-heading3-style: "<your_own_value>"
toc-heading4-style: "<your_own_value>"
toc-title-style: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
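    """Log in to the FortiGate/FortiOS device named in the module
    parameters, switching HTTPS on or off beforehand."""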
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_report_theme_data(json):
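    """Return a dict holding only the report-theme options that were
    actually supplied (non-None) by the user."""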
option_list = ['bullet-list-style', 'column-count', 'default-html-style',
'default-pdf-style', 'graph-chart-style', 'heading1-style',
'heading2-style', 'heading3-style', 'heading4-style',
'hline-style', 'image-style', 'name',
'normal-text-style', 'numbered-list-style', 'page-footer-style',
'page-header-style', 'page-orient', 'page-style',
'report-subtitle-style', 'report-title-style', 'table-chart-caption-style',
'table-chart-even-row-style', 'table-chart-head-style', 'table-chart-odd-row-style',
'table-chart-style', 'toc-heading1-style', 'toc-heading2-style',
'toc-heading3-style', 'toc-heading4-style', 'toc-title-style']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
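    """Join list-valued attributes named in multilist_attrs into single
    space-separated strings, in place. The eval/exec pair walks a nested key
    path and silently skips absent paths; for this module multilist_attrs is
    empty, so the loop is a no-op."""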
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def report_theme(data, fos):
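    """Create or update the report theme when state is 'present', or delete
    it by name when state is 'absent'."""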
vdom = data['vdom']
report_theme_data = data['report_theme']
flattened_data = flatten_multilists_attributes(report_theme_data)
filtered_data = filter_report_theme_data(flattened_data)
if report_theme_data['state'] == "present":
return fos.set('report',
'theme',
data=filtered_data,
vdom=vdom)
elif report_theme_data['state'] == "absent":
return fos.delete('report',
'theme',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_report(data, fos):
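    """Log in, apply the report_theme configuration, log out and return
    (is_error, has_changed, response)."""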
login(data)
if data['report_theme']:
resp = report_theme(data, fos)
fos.logout()
    return resp['status'] != "success", resp['status'] == "success", resp
def main():
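    """Module entry point: validate the arguments, ensure fortiosapi is
    available and run the report configuration."""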
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"report_theme": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"bullet-list-style": {"required": False, "type": "str"},
"column-count": {"required": False, "type": "str",
"choices": ["1", "2", "3"]},
"default-html-style": {"required": False, "type": "str"},
"default-pdf-style": {"required": False, "type": "str"},
"graph-chart-style": {"required": False, "type": "str"},
"heading1-style": {"required": False, "type": "str"},
"heading2-style": {"required": False, "type": "str"},
"heading3-style": {"required": False, "type": "str"},
"heading4-style": {"required": False, "type": "str"},
"hline-style": {"required": False, "type": "str"},
"image-style": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"normal-text-style": {"required": False, "type": "str"},
"numbered-list-style": {"required": False, "type": "str"},
"page-footer-style": {"required": False, "type": "str"},
"page-header-style": {"required": False, "type": "str"},
"page-orient": {"required": False, "type": "str",
"choices": ["portrait", "landscape"]},
"page-style": {"required": False, "type": "str"},
"report-subtitle-style": {"required": False, "type": "str"},
"report-title-style": {"required": False, "type": "str"},
"table-chart-caption-style": {"required": False, "type": "str"},
"table-chart-even-row-style": {"required": False, "type": "str"},
"table-chart-head-style": {"required": False, "type": "str"},
"table-chart-odd-row-style": {"required": False, "type": "str"},
"table-chart-style": {"required": False, "type": "str"},
"toc-heading1-style": {"required": False, "type": "str"},
"toc-heading2-style": {"required": False, "type": "str"},
"toc-heading3-style": {"required": False, "type": "str"},
"toc-heading4-style": {"required": False, "type": "str"},
"toc-title-style": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_report(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |