code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M
HOST = '127.0.0.1'
PORT = 8080

from Tkinter import *
import tkColorChooser
import socket
import thread
import spots

################################################################################

def main():
    global hold, fill, draw, look
    hold = []
    fill = '#000000'
    connect()
    root = Tk()
    root.title('Paint 1.0')
    root.resizable(False, False)
    upper = LabelFrame(root, text='Your Canvas')
    lower = LabelFrame(root, text='Their Canvas')
    draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0)
    look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
    cursor = Button(upper, text='Cursor Color', command=change_cursor)
    canvas = Button(upper, text='Canvas Color', command=change_canvas)
    draw.bind('<Motion>', motion)
    draw.bind('<ButtonPress-1>', press)
    draw.bind('<ButtonRelease-1>', release)
    draw.bind('<Button-3>', delete)
    upper.grid(padx=5, pady=5)
    lower.grid(padx=5, pady=5)
    draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
    look.grid(padx=5, pady=5)
    cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
    canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
    root.mainloop()

################################################################################

def connect():
    try:
        start_client()
    except:
        start_server()
    thread.start_new_thread(processor, ())

def start_client():
    global QRI
    server = socket.socket()
    server.connect((HOST, PORT))
    QRI = spots.qri(server)

def start_server():
    global QRI
    server = socket.socket()
    server.bind(('', PORT))
    server.listen(1)
    QRI = spots.qri(server.accept()[0])

def processor():
    while True:
        ID, (func, args, kwargs) = QRI.query()
        getattr(look, func)(*args, **kwargs)

def call(func, *args, **kwargs):
    try:
        QRI.call((func, args, kwargs), 0.001)
    except:
        pass

################################################################################

def change_cursor():
    global fill
    color = tkColorChooser.askcolor(color=fill)[1]
    if color is not None:
        fill = color

def change_canvas():
    color = tkColorChooser.askcolor(color=draw['bg'])[1]
    if color is not None:
        draw.config(bg=color)
        call('config', bg=color)

################################################################################

def motion(event):
    if hold:
        hold.extend([event.x, event.y])
        event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
        call('create_line', hold[-4:], fill=fill, tag='TEMP')

def press(event):
    global hold
    hold = [event.x, event.y]

def release(event):
    global hold
    if len(hold) > 2:
        event.widget.delete('TEMP')
        event.widget.create_line(hold, fill=fill, smooth=True)
        call('delete', 'TEMP')
        call('create_line', hold, fill=fill, smooth=True)
    hold = []

def delete(event):
    event.widget.delete(ALL)
    call('delete', ALL)

################################################################################

if __name__ == '__main__':
    main()
| ActiveState/code | recipes/Python/511434_Paint_10/recipe-511434.py | Python | mit | 3,174 |
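The connect() helper in the recipe above tries the client role first and falls back to hosting when no peer is listening. Below is a minimal, self-contained Python 3 sketch of that same fallback pattern; the recipe's spots.qri message wrapper is not reproduced, and the echo loop is a hypothetical stand-in for its processor().

    import socket
    import threading

    HOST, PORT = "127.0.0.1", 8080

    def connect():
        # Try to join an existing peer; if nobody is listening, become the host.
        try:
            peer = socket.create_connection((HOST, PORT))
        except ConnectionRefusedError:
            server = socket.socket()
            server.bind(("", PORT))
            server.listen(1)
            peer, _addr = server.accept()
        threading.Thread(target=echo_loop, args=(peer,), daemon=True).start()
        return peer

    def echo_loop(peer):
        # Stand-in for the recipe's processor(): print whatever the peer sends.
        data = peer.recv(4096)
        while data:
            print("peer sent:", data)
            data = peer.recv(4096)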
#!/usr/bin/env python3
# Owner & Copyrights: Vance King Saxbe. A.
"""Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @[email protected]. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""
import _thread
import os
import sys
import time
import gc

from src import googlequotemachine
from src import yahooquotemachine
from src import bloombergquotemachine
from src import createtablesgooglefinance
from src import createtablesyahoofinance
from src import createtablesbloomberg
from time import localtime, strftime

start1 = []
sys.setrecursionlimit(1000000)
database = "data/"
markettime = {}

with open("conf/MarketTimings.conf") as fillees:
    mlist = fillees.read().splitlines()
for line in mlist:
    items = line.split(", ")
    key, values = items[0], items[1]
    markettime[key] = values

with open('conf/symbolname.conf') as fille:
    synamelist = fille.read().splitlines()

timetorun = 1800
cycle = 1
while True:
    with open('conf/urls.conf') as openedfile:
        fileaslist = openedfile.read().splitlines()
    a_lock = _thread.allocate_lock()
    thr = []
    with a_lock:
        print("locks placed and Market engine is running for the...", cycle)
        for lines in fileaslist:
            lisj = lines.split('", "')
            mtime = markettime[lisj[2].replace('"', '')]
            mktime = mtime.split("-")
            if mktime[1] < mktime[0]:
                righto = mktime[1].split(":")
                close = str(str(int(righto[0]) + 24) + ":" + righto[1])
            else:
                close = mktime[1]
            rightnow = strftime("%H:%M", localtime())
            if rightnow < strftime("04:00"):
                right = rightnow.split(":")
                rightnow = str(str(int(right[0]) + 24) + ":" + right[1])
            if close > rightnow > mktime[0]:
                print("Market ", lisj[2].replace('.db"', ''), " is starting at cycle ", cycle)
                if lisj[1] == 'g':
                    thr.append(_thread.start_new_thread(googlequotemachine.actionking, (a_lock, start1, lisj[0].replace('"', ''), database + lisj[2].replace('"', ''), 0, synamelist, 1, 0, timetorun)))
                elif lisj[1] == 'y':
                    thr.append(_thread.start_new_thread(yahooquotemachine.actionking, (a_lock, start1, lisj[0].replace('"', ''), database + lisj[2].replace('"', ''), 0, synamelist, 1, 0, timetorun)))
                else:
                    thr.append(_thread.start_new_thread(bloombergquotemachine.actionking, (a_lock, start1, lisj[0].replace('"', ''), database + lisj[2].replace('"', ''), 0,)))
                time.sleep(0.00001)
    print("locks placed and Market engine is running for the....", cycle, " time...with threads", thr)
    time.sleep(timetorun)
    gc.collect()
    print("locks released and Market engine is restarting for the...", cycle, " time...")
    cycle = cycle + 1
# email to provide support at [email protected], [email protected]. For donations please write to [email protected]
| VanceKingSaxbeA/MarketsEngine | engine.py | Python | mit | 3,899 |
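The open/close comparison in engine.py above relies on zero-padded "HH:MM" strings comparing correctly as text, pushing a close time (or an early-morning "now") past midnight by adding 24 to the hour. A small self-contained sketch of that wraparound check; the function and variable names are illustrative, not from the repository:

    def in_window(now, open_, close):
        # "HH:MM" strings compare lexically; a close time that sorts before
        # the open time means the session wraps past midnight.
        def bump(hhmm):
            hours, minutes = hhmm.split(":")
            return "{}:{}".format(int(hours) + 24, minutes)
        if close < open_:
            close = bump(close)
        if now < "04:00":  # early morning counts as the previous trading day
            now = bump(now)
        return open_ < now < close

    assert in_window("23:30", "22:00", "04:00")      # wraps past midnight
    assert in_window("02:00", "22:00", "04:00")
    assert not in_window("12:00", "22:00", "04:00")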
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from textwrap import dedent
import pytest
import pytablewriter
from ...._common import print_test_result
from ....data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_iter,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
from .._common import regexp_ansi_escape, strip_ansi_escape
normal_test_data_list = [
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
expected=dedent(
"""\
.. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 3, 4, 3
"""
),
),
Data(
table=None,
indent=0,
header=None,
value=value_matrix,
expected=dedent(
"""\
.. csv-table::
:widths: 1, 5, 5, 3, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=1,
header=headers,
value=value_matrix,
expected=""" .. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
""",
),
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix_with_none,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 5, 4, 6
1, , "a", 1.0,
, 2.2, , 2.2, 2.2
3, 3.3, "ccc", , "cccc"
, , , ,
"""
),
),
Data(
table="table name",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "i", "f", "c", "if", "ifc", "bool", "inf", "nan", "mix_num", "time"
:widths: 3, 4, 6, 4, 5, 6, 8, 5, 9, 27
1, 1.10, "aa", 1.0, 1, True, Infinity, NaN, 1, 2017-01-01T00:00:00
2, 2.20, "bbb", 2.2, 2.2, False, Infinity, NaN, Infinity, "2017-01-02 03:04:05+09:00"
3, 3.33, "cccc", -3.0, "ccc", True, Infinity, NaN, NaN, 2017-01-01T00:00:00
"""
),
),
]
table_writer_class = pytablewriter.RstCsvTableWriter
class Test_RstCsvTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_RstCsvTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_styles(self):
writer = table_writer_class()
writer.from_tabledata(vut_style_tabledata)
writer.column_styles = vut_styles
expected = dedent(
"""\
.. csv-table:: style test
:header: "none", "empty", "tiny", "small", "medium", "large", "null w/ bold", "L bold", "S italic", "L bold italic"
:widths: 6, 7, 6, 7, 8, 7, 14, 8, 10, 15
111, 111, 111, 111, "111", 111, , **111**, *111*, **111**
1234, 1234, 1234, 1234, "1,234", 1 234, , **1234**, *1234*, **1234**
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
assert str(writer) == ""
class Test_RstCsvTableWriter_write_table_iter:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
.. csv-table:: tablename
:header: "ha", "hb", "hc"
:widths: 5, 5, 5
1, 2, 3
11, 12, 13
1, 2, 3
11, 12, 13
101, 102, 103
1001, 1002, 1003
"""
),
]
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in null_test_data_list],
)
def test_normal_smoke(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table_iter()
| thombashi/pytablewriter | test/writer/text/rst/test_rst_csv_writer.py | Python | mit | 6,875 |
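The tests above exercise a small public surface: assign table_name, headers, and value_matrix, then call dumps(). A minimal usage sketch of that same API:

    import pytablewriter

    writer = pytablewriter.RstCsvTableWriter()
    writer.table_name = "example"
    writer.headers = ["a", "b"]
    writer.value_matrix = [[1, 123.1], [2, 2.2]]
    print(writer.dumps())  # emits a ".. csv-table:: example" directive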
#!/usr/bin/env python
"""
demos reading HiST camera parameters from XML file
"""
from histutils.hstxmlparse import xmlparam
from argparse import ArgumentParser

if __name__ == "__main__":
    p = ArgumentParser()
    p.add_argument("fn", help="xml filename to parse")
    p = p.parse_args()

    params = xmlparam(p.fn)
    print(params)
| scienceopen/histutils | XMLparamPrint.py | Python | mit | 340 |
#!/usr/bin/env python
# minimal.py --- Minimal example of using traits.
from traits.api import HasTraits, Float


class Person(HasTraits):
    weight = Float(150.0)
| marshallmcdonnell/interactive_plotting | Traits/manual/minimal.py | Python | mit | 167 |
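A usage sketch for the minimal example above: the trait gives the attribute a typed default, so a fresh instance already carries it, and Traits validates assignments (a non-float raising TraitError is standard Traits behaviour):

    person = Person()
    print(person.weight)   # -> 150.0, the Float default declared above
    person.weight = 160.5  # fine; person.weight = "heavy" would raise TraitError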
import struct
import uuid
from enum import IntEnum
from typing import List, Optional, Set

from .sid import SID


class ACEFlag(IntEnum):
    """ ACE type-specific control flags. """

    OBJECT_INHERIT = 0x01
    CONTAINER_INHERIT = 0x02
    NO_PROPAGATE_INHERIT = 0x04
    INHERIT_ONLY = 0x08
    INHERITED = 0x10
    SUCCESSFUL_ACCESS = 0x40
    FAILED_ACCESS = 0x80

    @property
    def short_name(self) -> str:
        """ The SDDL short name of the flag. """
        short_names = {
            "OBJECT_INHERIT": "OI",
            "CONTAINER_INHERIT": "CI",
            "NO_PROPAGATE_INHERIT": "NP",
            "INHERIT_ONLY": "IO",
            "INHERITED": "ID",
            "SUCCESSFUL_ACCESS": "SA",
            "FAILED_ACCESS": "FA",
        }
        return short_names[self.name]


class ACEType(IntEnum):
    """ Type of the ACE. """

    ACCESS_ALLOWED = 0
    ACCESS_DENIED = 1
    SYSTEM_AUDIT = 2
    SYSTEM_ALARM = 3
    ACCESS_ALLOWED_COMPOUND = 4
    ACCESS_ALLOWED_OBJECT = 5
    ACCESS_DENIED_OBJECT = 6
    SYSTEM_AUDIT_OBJECT = 7
    SYSTEM_ALARM_OBJECT = 8
    ACCESS_ALLOWED_CALLBACK = 9
    ACCESS_DENIED_CALLBACK = 10
    ACCESS_ALLOWED_CALLBACK_OBJECT = 11
    ACCESS_DENIED_CALLBACK_OBJECT = 12
    SYSTEM_AUDIT_CALLBACK = 13
    SYSTEM_ALARM_CALLBACK = 14
    SYSTEM_AUDIT_CALLBACK_OBJECT = 15
    SYSTEM_ALARM_CALLBACK_OBJECT = 16
    SYSTEM_MANDATORY_LABEL = 17
    SYSTEM_RESOURCE_ATTRIBUTE = 18
    SYSTEM_SCOPED_POLICY_ID = 19

    @property
    def short_name(self) -> str:
        """ The SDDL short name of the type. """
        short_names = {
            "ACCESS_ALLOWED": "A",
            "ACCESS_DENIED": "D",
            "SYSTEM_AUDIT": "AU",
            "SYSTEM_ALARM": "AL",
            "ACCESS_ALLOWED_COMPOUND": "",
            "ACCESS_ALLOWED_OBJECT": "OA",
            "ACCESS_DENIED_OBJECT": "OD",
            "SYSTEM_AUDIT_OBJECT": "OU",
            "SYSTEM_ALARM_OBJECT": "OL",
            "ACCESS_ALLOWED_CALLBACK": "XA",
            "ACCESS_DENIED_CALLBACK": "XD",
            "ACCESS_ALLOWED_CALLBACK_OBJECT": "ZA",
            "ACCESS_DENIED_CALLBACK_OBJECT": "ZD",
            "SYSTEM_AUDIT_CALLBACK": "XU",
            "SYSTEM_ALARM_CALLBACK": "XL",
            "SYSTEM_AUDIT_CALLBACK_OBJECT": "ZU",
            "SYSTEM_ALARM_CALLBACK_OBJECT": "ZL",
            "SYSTEM_MANDATORY_LABEL": "ML",
            "SYSTEM_RESOURCE_ATTRIBUTE": "RA",
            "SYSTEM_SCOPED_POLICY_ID": "SP",
        }
        return short_names[self.name]

    @property
    def is_object_type(self) -> bool:
        """ Flag for ACE types with objects. """
        return self in (
            ACEType.ACCESS_ALLOWED_OBJECT,
            ACEType.ACCESS_DENIED_OBJECT,
            ACEType.SYSTEM_AUDIT_OBJECT,
            ACEType.SYSTEM_ALARM_OBJECT,
            ACEType.ACCESS_ALLOWED_CALLBACK_OBJECT,
            ACEType.ACCESS_DENIED_CALLBACK_OBJECT,
            ACEType.SYSTEM_AUDIT_CALLBACK_OBJECT,
            ACEType.SYSTEM_ALARM_CALLBACK_OBJECT,
        )


class ACERight(IntEnum):
    """ The rights of the ACE. """

    GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000  # was 0x4000000; the standard generic-write mask bit
    GENERIC_EXECUTE = 0x20000000
    GENERIC_ALL = 0x10000000
    MAXIMUM_ALLOWED = 0x02000000
    ACCESS_SYSTEM_SECURITY = 0x01000000
    SYNCHRONIZE = 0x00100000
    WRITE_OWNER = 0x00080000
    WRITE_DACL = 0x00040000
    READ_CONTROL = 0x00020000
    DELETE = 0x00010000
    DS_CONTROL_ACCESS = 0x00000100
    DS_CREATE_CHILD = 0x00000001
    DS_DELETE_CHILD = 0x00000002
    ACTRL_DS_LIST = 0x00000004
    DS_SELF = 0x00000008
    DS_READ_PROP = 0x00000010
    DS_WRITE_PROP = 0x00000020
    DS_DELETE_TREE = 0x00000040
    DS_LIST_OBJECT = 0x00000080

    @property
    def short_name(self) -> str:
        """ The SDDL short name of the access right. """
        short_names = {
            "GENERIC_READ": "GR",
            "GENERIC_WRITE": "GW",
            "GENERIC_EXECUTE": "GX",
            "GENERIC_ALL": "GA",
            "MAXIMUM_ALLOWED": "MA",
            "ACCESS_SYSTEM_SECURITY": "AS",
            "SYNCHRONIZE": "SY",
            "WRITE_OWNER": "WO",
            "WRITE_DACL": "WD",
            "READ_CONTROL": "RC",
            "DELETE": "SD",
            "DS_CONTROL_ACCESS": "CR",
            "DS_CREATE_CHILD": "CC",
            "DS_DELETE_CHILD": "DC",
            "ACTRL_DS_LIST": "LC",
            "DS_SELF": "SW",
            "DS_READ_PROP": "RP",
            "DS_WRITE_PROP": "WP",
            "DS_DELETE_TREE": "DT",
            "DS_LIST_OBJECT": "LO",
        }
        return short_names[self.name]


class ACLRevision(IntEnum):
    """ The ACL revision. """

    ACL_REVISION = 0x02
    ACL_REVISION_DS = 0x04


class ACE:
    """
    A class for the access control entry, that encodes the user rights
    afforded to a principal.

    :param ACEType ace_type: the type of the ACE.
    :param Set[ACEFlag] flags: the set of flags for the ACE.
    :param int mask: the access mask to encode the user rights as an int.
    :param SID trustee_sid: the SID of the trustee.
    :param uuid.UUID|None object_type: a UUID that identifies a property
        set, property, extended right, or type of child object.
    :param uuid.UUID|None inherited_object_type: a UUID that identifies the
        type of child object that can inherit the ACE.
    :param bytes application_data: optional application data.
    """

    def __init__(
        self,
        ace_type: ACEType,
        flags: Set[ACEFlag],
        mask: int,
        trustee_sid: SID,
        object_type: Optional[uuid.UUID],
        inherited_object_type: Optional[uuid.UUID],
        application_data: bytes,
    ) -> None:
        self.__type = ace_type
        self.__flags = flags
        self.__mask = mask
        self.__object_type = object_type
        self.__inherited_object_type = inherited_object_type
        self.__trustee_sid = trustee_sid
        self.__application_data = application_data

    @classmethod
    def from_binary(cls, data: bytes) -> "ACE":
        """
        Create an ACE object from a binary blob.

        :param bytes data: a little-endian byte ordered byte input.
        :returns: A new ACE instance.
        :rtype: ACE
        :raises TypeError: when the parameter is not bytes.
        :raises ValueError: when the input cannot be parsed as an ACE
            object.
        """
        try:
            if not isinstance(data, bytes):
                raise TypeError("The `data` parameter must be bytes")
            object_type = None
            inherited_object_type = None
            application_data = None
            ace_type, flags, size, mask = struct.unpack("<BBHL", data[:8])
            pos = 8
            if ACEType(ace_type).is_object_type:
                obj_flag = struct.unpack("<I", data[8:12])[0]
                pos += 4
                if obj_flag & 0x00000001:
                    object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
                    pos += 16
                if obj_flag & 0x00000002:
                    inherited_object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
                    pos += 16
            trustee_sid = SID(bytes_le=data[pos:])
            pos += trustee_sid.size
            application_data = data[pos:size]
            this = cls(
                ACEType(ace_type),
                {flg for flg in ACEFlag if flags & flg},
                mask,
                trustee_sid,
                object_type,
                inherited_object_type,
                application_data,
            )
            return this
        except struct.error as err:
            raise ValueError(f"Not a valid binary ACE, {err}")

    def __str__(self):
        """ Return the SDDL string representation of the ACE object. """
        flags = "".join(
            flg.short_name for flg in sorted(self.flags, key=lambda f: f.value)
        )
        rights = "".join(
            rgt.short_name for rgt in sorted(self.rights, key=lambda r: r.value)
        )
        object_guid = self.object_type if self.object_type else ""
        inherit_object_guid = (
            self.inherited_object_type if self.inherited_object_type else ""
        )
        sid = (
            self.trustee_sid.sddl_alias
            if self.trustee_sid.sddl_alias
            else str(self.trustee_sid)
        )
        return f"({self.type.short_name};{flags};{rights};{object_guid};{inherit_object_guid};{sid})"

    def to_binary(self) -> bytes:
        """
        Convert ACE object to binary form with little-endian byte order.

        :returns: Bytes of the binary ACE instance
        :rtype: bytes
        """
        size = self.size
        data = bytearray(size)
        struct.pack_into(
            "<BBHL", data, 0, self.type.value, sum(self.flags), size, self.mask
        )
        pos = 8
        if self.type.is_object_type:
            obj_flag = 0x00000001 if self.object_type else 0
            obj_flag |= 0x00000002 if self.inherited_object_type else 0
            struct.pack_into("<L", data, pos, obj_flag)
            pos += 4
            if self.object_type:
                data[pos : pos + 16] = self.object_type.bytes_le
                pos += 16
            if self.inherited_object_type:
                data[pos : pos + 16] = self.inherited_object_type.bytes_le
                pos += 16
        data[pos : pos + self.trustee_sid.size] = self.trustee_sid.bytes_le
        pos += self.trustee_sid.size
        data[pos : pos + len(self.application_data)] = self.application_data
        return bytes(data)

    @property
    def type(self) -> ACEType:
        """ The type of the ACE. """
        return self.__type

    @property
    def flags(self) -> Set[ACEFlag]:
        """ The flags of the ACE. """
        return self.__flags

    @property
    def size(self) -> int:
        """ The binary size of ACE in bytes. """
        size = 8
        if self.type.is_object_type:
            size += 4
            if self.object_type:
                size += 16
            if self.inherited_object_type:
                size += 16
        size += self.trustee_sid.size
        size += len(self.application_data)
        return size

    @property
    def mask(self) -> int:
        """ The access mask. """
        return self.__mask

    @property
    def rights(self) -> Set[ACERight]:
        """ The set of ACERights based on the access mask. """
        return {rgt for rgt in ACERight if self.mask & rgt}

    @property
    def object_type(self) -> Optional[uuid.UUID]:
        """ The uuid of the object type. """
        return self.__object_type

    @property
    def inherited_object_type(self) -> Optional[uuid.UUID]:
        """ The uuid of the inherited object type. """
        return self.__inherited_object_type

    @property
    def trustee_sid(self) -> SID:
        """ The sid of the trustee. """
        return self.__trustee_sid

    @property
    def application_data(self) -> bytes:
        """ The possible application data. """
        return self.__application_data


class ACL:
    """
    The access control list (ACL) is used to specify a list of individual
    access control entries (ACEs). An ACL and an array of ACEs comprise a
    complete access control list.

    :param ACLRevision revision: the revision of the ACL.
    :param List[ACE] aces: list of :class:`ACE`.
    """

    def __init__(self, revision: ACLRevision, aces: List[ACE]) -> None:
        self.__revision = revision
        self.__aces = aces

    @classmethod
    def from_binary(cls, data: bytes) -> "ACL":
        """
        Create an ACL object from a binary blob.

        :param bytes data: a little-endian byte ordered byte input.
        :returns: A new ACL instance.
        :rtype: ACL
        :raises TypeError: when the parameter is not bytes.
        :raises ValueError: when the input cannot be parsed as an ACL
            object.
        """
        try:
            if not isinstance(data, bytes):
                raise TypeError("The `data` parameter must be bytes")
            # Unwanted values are the reserved sbz1, size and sbz2.
            rev, _, _, count, _ = struct.unpack("<BBHHH", data[:8])
            pos = 8
            aces = []
            for _ in range(count):
                ace = ACE.from_binary(data[pos:])
                aces.append(ace)
                pos += ace.size
            this = cls(ACLRevision(rev), aces)
            return this
        except struct.error as err:
            raise ValueError(f"Not a valid binary ACL, {err}")

    def to_binary(self) -> bytes:
        """
        Convert ACL object to binary form with little-endian byte order.

        :returns: Bytes of the binary ACL instance
        :rtype: bytes
        """
        size = self.size
        data = bytearray(8)
        struct.pack_into("<BBHHH", data, 0, self.revision, 0, size, len(self.aces), 0)
        for ace in self.aces:
            data.extend(ace.to_binary())
        return bytes(data)

    @property
    def revision(self) -> ACLRevision:
        """ The revision of ACL. """
        return self.__revision

    @property
    def size(self) -> int:
        """ The binary size in bytes. """
        return 8 + sum(ace.size for ace in self.aces)

    @property
    def aces(self) -> List[ACE]:
        """ The list of :class:`ACE` objects. """
        return self.__aces
| Noirello/PyLDAP | src/bonsai/active_directory/acl.py | Python | mit | 13,344 |
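The rights property above recovers the set bits of an access mask with a bitwise AND over the enum members. A small sketch of that decoding, using the ACERight members defined in acl.py:

    mask = 0x00020000 | 0x00010000  # READ_CONTROL | DELETE
    rights = {right for right in ACERight if mask & right}
    assert rights == {ACERight.READ_CONTROL, ACERight.DELETE}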
from django import forms

from djwed.wedding.models import *
from djwed.wedding.admin_actions import *
from django.contrib import admin


class RequireOneFormSet(forms.models.BaseInlineFormSet):
    """Require at least one form in the formset to be completed."""

    def clean(self):
        """Check that at least one form has been completed."""
        super(RequireOneFormSet, self).clean()
        if not self.is_valid():
            return
        for cleaned_data in self.cleaned_data:
            # form has data and we aren't deleting it.
            if cleaned_data and not cleaned_data.get('DELETE', False):
                # we can break out after the first complete form
                return
        raise forms.ValidationError("At least one %s is required." %
                                    (self.model._meta.verbose_name,))


class InviteeNotesInline(admin.TabularInline):
    model = InviteeNotes
    extra = 0
    verbose_name_plural = "invitee notes"


class RSVPInline(admin.TabularInline):
    model = RSVP
    extra = 2


class GuestInline(admin.StackedInline):
    model = Guest
    extra = 1


class FoodOptionInline(admin.StackedInline):
    model = FoodOption
    extra = 3


class CommentInline(admin.StackedInline):
    model = Comment
    extra = 0
    exclude = ('rsvp',)
    readonly_fields = ('text',)
    verbose_name_plural = "comments from invitees"


class GiftThankYouInline(admin.TabularInline):
    model = ThankYou
    extra = 0
    verbose_name = "Source"
    verbose_name_plural = "Sources"
    formset = RequireOneFormSet


class InviteeThankYouInline(admin.TabularInline):
    model = ThankYou
    extra = 0


class InviteeAdmin(admin.ModelAdmin):
    #fieldsets = [
    #    (None, {'fields': ['question']}),
    #    ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    #]
    inlines = [GuestInline, InviteeNotesInline, CommentInline, InviteeThankYouInline]
    list_display = ('full_name', 'tags', 'full_address', 'state', 'country')
    list_editable = ('tags',)
    list_filter = ['side', 'association', 'country', 'state']
    search_fields = ['full_name_override', 'invite_code', 'guest__first_name',
                     'guest__last_name', 'guest__nickname']
    actions = [
        export_as_csv_action("Export addresses as CSV",
                             fields=['full_name', 'full_address']),
    ]
    #date_hierarchy = 'pub_date'


class LongFoodChoiceField(forms.ModelChoiceField):
    #widget = forms.widgets.RadioSelect()
    def label_from_instance(self, obj):
        return obj.long_desc


class GuestAdmin(admin.ModelAdmin):
    inlines = [RSVPInline,]
    list_display = ('full_name', 'email', 'tags')
    list_filter = ['rsvp__status', 'role', 'invitee__side', 'invitee__association']
    search_fields = ['first_name', 'last_name']
    list_editable = ('email', 'tags')


class RSVPAdminForm(forms.ModelForm):
    class Meta:
        model = RSVP

    def clean(self, *args, **kwargs):
        sret = super(RSVPAdminForm, self).clean(*args, **kwargs)
        if self.cleaned_data['food_selection'] and self.cleaned_data['food_selection'].venue != self.cleaned_data['venue']:
            raise ValidationError('Food selected from another venue')
        if self.cleaned_data['venue'].site != u'MA' and self.cleaned_data['bus_selection']:
            raise ValidationError('Bus selection for a site with no bus')
        rsvp_filter = RSVP.objects.filter(venue=self.cleaned_data['venue'],
                                          guest=self.cleaned_data['guest'])
        if rsvp_filter.count() > 1 or (rsvp_filter.count() == 1
                                       and rsvp_filter.all()[0] != self.instance):
            raise ValidationError('Only one RSVP allowed per person')
        return sret


class RSVPAdmin(admin.ModelAdmin):
    #inlines = [GuestInline,]
    #food_selection = LongFoodChoiceField([], required=False, empty_label="--- Please choose from a dinner selection below ---")
    list_display = (
        'guest_site',
        'venue',
        'status',
        'food_selection',
        'bus_selection',
        'last_updated',
        'prelim',
        'guest_invitee',
        'last_update_source',
        'guest',
        'table_assign',
    )
    search_fields = [
        'guest__first_name',
        'guest__last_name',
        'guest__invitee__guest__last_name',
        'guest__invitee__invite_code',
    ]
    list_editable = (
        'status',
        'food_selection',
        'bus_selection',
        'prelim',
        'last_update_source',
        'table_assign',
    )
    form = RSVPAdminForm
    list_filter = ('venue', 'status', 'guest__invitee__side',
                   'guest__invitee__association', 'guest__invitee__country',
                   'guest__invitee__state',
                   )

    def guest_site(self, rsvp):
        return u"%s (%s)" % (rsvp.guest.full_name(), unicode(rsvp.venue.site))
    guest_site.short_description = "Guest (Site)"

    def guest_invitee(self, rsvp):
        return rsvp.guest.invitee
    guest_invitee.short_description = "Invitee"

    def guest_invitee_association(self, rsvp):
        return rsvp.guest.invitee.association
    guest_invitee_association.short_description = "Association"

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == "guest":
            kwargs["queryset"] = Guest.objects.all().order_by('last_name', 'first_name')
            return db_field.formfield(**kwargs)
        return super(RSVPAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)


class InviteeNotesAdmin(admin.ModelAdmin):
    search_fields = ['invitee__guest__first_name',
                     'invitee__guest__last_name', 'invitee__guest__nickname']
    list_display = ['invitee',
                    'likely_site',
                    'ma_likelihood',
                    'ca_likelihood',
                    'or_likelihood',
                    'savedate',
                    'batch',
                    'invitee_rsvp_count',
                    'adults',
                    'children',
                    'invitee_country',
                    ]
    list_editable = ['ma_likelihood',
                     'ca_likelihood',
                     'savedate',
                     'batch',
                     ]

    def invitee_rsvp_count(self, inote):
        counts = inote.invitee.rsvp_yes_counts()
        return ', '.join('%s: %d' % (venue, counts[venue])
                         for venue in sorted(counts.keys()))
    invitee_rsvp_count.short_description = "RSVP Yes"

    def invitee_country(self, inote):
        return str(inote.invitee.country)
    invitee_country.short_description = "Country"


class CommentAdmin(admin.ModelAdmin):
    list_filter = ['type']
    search_fields = ['invitee__guest__first_name', 'text',
                     'invitee__guest__last_name', 'invitee__guest__nickname']
    list_display = ['id', 'invitee', 'type', 'last_updated', 'text']


class VenueAdmin(admin.ModelAdmin):
    inlines = [FoodOptionInline,]


class PageSnippetAdmin(admin.ModelAdmin):
    list_display = ['key', 'title', 'last_updated']


class GiftAdmin(admin.ModelAdmin):
    search_fields = [
        'sources__guest__first_name',
        'sources__guest__nickname',
        'sources__guest__last_name',
        'notes',
        'description',
    ]
    list_filter = ['status', 'registry', 'assignment']
    list_display = ['source_names', 'received', 'description', 'notes',
                    'assignment', 'registry', 'status']
    list_editable = ('status', 'assignment')
    inlines = [GiftThankYouInline,]
    radio_fields = {
        'assignment': admin.HORIZONTAL,
        'registry': admin.VERTICAL,
        'status': admin.HORIZONTAL,
    }

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == "source" and request.META['REQUEST_METHOD'] != 'POST':
            kwargs["queryset"] = Invitee.objects.all().order_by('guest__last_name', 'guest__first_name')
            return db_field.formfield(**kwargs)
        return super(GiftAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def source_names(self, gift):
        return u"; ".join(unicode(inv) for inv in gift.sources.all())
    source_names.short_description = "Sources"


class ThankYouAdmin(admin.ModelAdmin):
    list_display = [
        'invitee',
        'status',
        'sent',
    ]
    list_editable = ['status', 'sent']
    list_filter = [
        'status',
        'sent',
        'gift__assignment',
        'gift__received',
        'invitee__side',
    ]
    search_fields = [
        'invitee__guest__first_name',
        'invitee__guest__last_name',
        'invitee__guest__nickname',
        'gift__description',
        'gift__notes',
    ]


class TableAdmin(admin.ModelAdmin):
    search_fields = ['rsvp__guest__first_name', 'name', 'number', 'notes',
                     'rsvp__guest__last_name', 'invitee__guest__nickname']
    list_display = ['number', 'name', 'venue', 'table_count', 'table_guests', 'notes', 'position']
    list_editable = ('name', 'notes')
    list_filter = ['venue',]

    def table_count(self, table):
        return str(table.rsvp_set.count())
    table_count.short_description = "# people"

    def table_guests(self, table):
        guests = []
        for r in table.rsvp_set.all():
            guests.append(unicode(r.guest))
        guests.sort()
        return u" , \n".join(guests)
    table_guests.short_description = "guests"


class RSVPOptionAdmin(admin.ModelAdmin):
    list_display = ['short_desc', 'likelihood', 'rsvp_count', 'long_desc']

    def rsvp_count(self, option):
        return str(option.rsvp_set.count())
    rsvp_count.short_description = "# people"


admin.site.register(Invitee, InviteeAdmin)
admin.site.register(InviteeNotes, InviteeNotesAdmin)
admin.site.register(Guest, GuestAdmin)
admin.site.register(Venue, VenueAdmin)
admin.site.register(PageSnippet, PageSnippetAdmin)
admin.site.register(RSVP, RSVPAdmin)
admin.site.register(RSVPOption, RSVPOptionAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Gift, GiftAdmin)
admin.site.register(ThankYou, ThankYouAdmin)
admin.site.register(Table, TableAdmin)
| garyp/djwed | wedding/admin.py | Python | mit | 10,509 |
import calendar
import collections
try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable
from time import strptime

from six import string_types
from lxml import etree
from itertools import chain


def remove_namespace(tree):
    """
    Strip namespace from parsed XML
    """
    for node in tree.iter():
        try:
            has_namespace = node.tag.startswith("{")
        except AttributeError:
            continue  # node.tag is not a string (node is a comment or similar)
        if has_namespace:
            node.tag = node.tag.split("}", 1)[1]


def read_xml(path, nxml=False):
    """
    Parse tree from given XML path
    """
    try:
        tree = etree.parse(path)
        if ".nxml" in path or nxml:
            remove_namespace(tree)  # strip namespace when reading an XML file
    except:
        try:
            tree = etree.fromstring(path)
        except Exception:
            print(
                "Error: it was not able to read a path, a file-like object, or a string as an XML"
            )
            raise
    return tree


def stringify_children(node):
    """
    Filters and removes possible Nones in texts and tails
    ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
    """
    parts = (
        [node.text]
        + list(chain(*([c.text, c.tail] for c in node.getchildren())))
        + [node.tail]
    )
    return "".join(filter(None, parts))


def stringify_affiliation(node):
    """
    Filters and removes possible Nones in texts and tails
    ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
    """
    parts = (
        [node.text]
        + list(
            chain(
                *(
                    [c.text if (c.tag != "label" and c.tag != "sup") else "", c.tail]
                    for c in node.getchildren()
                )
            )
        )
        + [node.tail]
    )
    return " ".join(filter(None, parts))


def stringify_affiliation_rec(node):
    """
    Flatten and join list to string
    ref: http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
    """
    parts = _recur_children(node)
    parts_flatten = list(_flatten(parts))
    return " ".join(parts_flatten).strip()


def _flatten(l):
    """
    Flatten list into one dimensional
    """
    for el in l:
        if isinstance(el, Iterable) and not isinstance(el, string_types):
            for sub in _flatten(el):
                yield sub
        else:
            yield el


def _recur_children(node):
    """
    Recursive through node to when it has multiple children
    """
    if len(node.getchildren()) == 0:
        parts = (
            ([node.text or ""] + [node.tail or ""])
            if (node.tag != "label" and node.tag != "sup")
            else ([node.tail or ""])
        )
        return parts
    else:
        parts = (
            [node.text or ""]
            + [_recur_children(c) for c in node.getchildren()]
            + [node.tail or ""]
        )
        return parts


def month_or_day_formater(month_or_day):
    """
    Parameters
    ----------
    month_or_day: str or int
        must be one of the following:
            (i) month: a three letter month abbreviation, e.g., 'Jan'.
            (ii) day: an integer.

    Returns
    -------
    numeric: str
        a month of the form 'MM' or a day of the form 'DD'.
        Note: returns None if:
            (a) the input could not be mapped to a known month abbreviation OR
            (b) the input was not an integer (i.e., a day).
    """
    if month_or_day.replace(".", "") in filter(None, calendar.month_abbr):
        to_format = strptime(month_or_day.replace(".", ""), "%b").tm_mon
    elif month_or_day.strip().isdigit() and "." not in str(month_or_day):
        to_format = int(month_or_day.strip())
    else:
        return None

    return ("0" if to_format < 10 else "") + str(to_format)


def pretty_print(node):
    """
    Pretty print a given lxml node
    """
    print(etree.tostring(node, pretty_print=True).decode("utf-8"))
| titipata/pubmed_parser | pubmed_parser/utils.py | Python | mit | 4,118 |
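A behaviour sketch for month_or_day_formater above, matching its docstring and the branches in the code:

    assert month_or_day_formater("Jan") == "01"   # month abbreviation -> "MM"
    assert month_or_day_formater("7") == "07"     # day -> zero-padded "DD"
    assert month_or_day_formater("foo") is None   # unparseable input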
#!/usr/bin/env python2
import time
import random


class Battle:
    def __init__(self, user1, user2):
        self.user1 = user1
        self.user2 = user2
        self.turn = user1
        self.notTurn = user2
        self.accepted = False
        self.finished = False
        self.auto = False
        self.turnCount = 1

    def fight(self, spell):
        attacker = self.turn.getActivePokemon()
        defender = self.notTurn.getActivePokemon()
        message = attacker.fight(spell, defender)
        if defender.life <= 0:
            message += defender.name + " n'a plus de points de vie. "
            if self.notTurn.hasAlivePokemon():
                message += self.notTurn.username + " doit invoquer un nouveau pokemon. "
            else:
                message += self.notTurn.username + " a perdu. " + self.turn.username + " a gagne. "
                message += attacker.name + " gagne " + str(attacker.calcGainedExp(defender)) + " points d'experience. "
                old = attacker.level
                attacker.gainExp(defender)
                if attacker.level != old:
                    message += attacker.name + " passe niveau " + str(attacker.level) + "!"
                self.finished = True
        self.turn, self.notTurn = self.notTurn, self.turn
        self.turnCount += 1
        return message

    def itemUsed(self):
        self.turn, self.notTurn = self.notTurn, self.turn

    def nextStep(self):
        if self.finished:
            self.user1.battle = None
            self.user2.battle = None
            return False
        elif self.auto and self.turnCount % 2 == 0:
            time.sleep(2)
            return self.fight(self.turn.getActivePokemon().spells[random.randint(0, len(self.turn.getActivePokemon().spells) - 1)].name)
| AlexMog/IRCPokemonBot | commands/classes/Battle.py | Python | mit | 1,766 |
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings

import misaka

from groups.models import Group

# Create your models here.
# POSTS MODELS.PY
from django.contrib.auth import get_user_model
User = get_user_model()


class Post(models.Model):
    user = models.ForeignKey(User, related_name='posts')
    created_at = models.DateTimeField(auto_now=True)
    message = models.TextField()
    message_html = models.TextField(editable=False)
    group = models.ForeignKey(Group, related_name='posts', null=True, blank=True)

    def __str__(self):
        return self.message

    def save(self, *args, **kwargs):
        self.message_html = misaka.html(self.message)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})

    class Meta:
        ordering = ['-created_at']
        unique_together = ['user', 'message']
| srijannnd/Login-and-Register-App-in-Django | simplesocial/posts/models.py | Python | mit | 984 |
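The save() override above pre-renders the Markdown message with misaka so templates can output message_html directly. A sketch of that rendering step in isolation:

    import misaka
    html = misaka.html("*hello*")  # -> "<p><em>hello</em></p>\n"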
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
import time
import datetime

conn = mysql.connector.connect(host="localhost", user="spike", password="valentine", database="drupal")
cann = mysql.connector.connect(host="localhost", user="spike", password="valentine", database="content_delivery_weather")
cursor = conn.cursor()
cursar = cann.cursor()

cursor.execute("""SELECT uid, mail FROM users""")
rows = cursor.fetchall()
for row in rows:
    if row[0] != 0:
        print('{0} : {1} '.format(row[0], row[1]))
        # print('UPDATE new_v4_users_probes_edit SET email = {0} WHERE uid = {1}'.format(row[1], row[0]))
        cursar.execute("""UPDATE new_v4_users_probes_edit SET email = %s WHERE userid = %s""", (row[1], row[0]))

cursar.execute("""SELECT probename, probeid FROM new_v4_sonde""")
rows = cursar.fetchall()
for row in rows:
    cursar.execute("""SHOW TABLES LIKE %s""", ("%" + row[0] + "%",))
    rowsbis = cursar.fetchall()
    for rowbis in rowsbis:
        result = rowbis[0].split("_")
        month = 1 + int(result[4])
        s = "01/" + str(month) + "/" + result[3]
        timestamp = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
        print('{0} : {1} year: {2} month: {3} timestamp: {4}'.format(row[0], rowbis[0], result[3], result[4], round(timestamp, 0)))
        cursar.execute("""SELECT firsttime FROM new_v4_sonde WHERE probeid = %s""", (row[1],))
        rowsbisbis = cursar.fetchall()
        for rowbisbis in rowsbisbis:
            if rowbisbis[0] is None:
                cursar.execute("""UPDATE new_v4_sonde SET firsttime = %s WHERE probeid = %s""", (timestamp, row[1]))
            print('firsttime: {0}'.format(rowbisbis[0],))

conn.close()
cann.close()
| gandalf-the-white/foundation | amaryl/scripts/initdatabase.py | Python | mit | 1,705 |
#!/usr/bin/env python
# pylint: disable=line-too-long
""" Start the containers """
import argparse

from lib.docker_compose_tools import set_up


def main():
    """ Start the containers """
    parser = argparse.ArgumentParser(description="Set up testing environment.")
    parser.add_argument("--pg", help="PostgreSQL version")
    parser.add_argument("--es", help="Elasticsearch version")
    args = parser.parse_args()

    pg_version = args.pg
    es_version = args.es

    print(
        "Starting environment with PostgreSQL {pg_version} with Elasticsearch {es_version}...".format(
            pg_version=pg_version, es_version=es_version
        )
    )

    set_up(pg_version, es_version)


if __name__ == "__main__":
    main()
| matthewfranglen/postgres-elasticsearch-fdw | tests/start.py | Python | mit | 738 |
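Invoked from a shell, the script above takes both versions as flags; the version numbers here are illustrative, not prescribed by the repository:

    python tests/start.py --pg 11 --es 7.10.0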
"""
Shopify Trois
---------------
Shopify API for Python 3
"""
from setuptools import setup
setup(
name='shopify-trois',
version='1.1-dev',
url='http://masom.github.io/shopify-trois',
license='MIT',
author='Martin Samson',
author_email='[email protected]',
maintainer='Martin Samson',
maintainer_email='[email protected]',
description='Shopify API for Python 3',
long_description=__doc__,
packages=[
'shopify_trois', 'shopify_trois.models', 'shopify_trois.engines',
'shopify_trois.engines.http'
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'requests>=1.2.3'
],
test_suite='nose.collector',
tests_require=[
'pytest', 'nose', 'mock'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| masom/shopify-trois | setup.py | Python | mit | 1,267 |
#!/usr/bin/env python

"""
RPG: Operation

These are any operations we want to carry out from our YAML files. Operations
are strings that are tied to Python code, to carry out things that aren't
possible to easily make YAML tags for directly.
"""

from rpg_log import Log
import rpg_combat


def HandleOperation(game, operation, data):
    """Handle the operation.

    Args:
      game: Game object
      operation: string, name of the operation to look up
      data: dict, data at the level the operation was specified in, which may
          contain information the operation needs to operate. Operation specific.
    """
    # Pay for a room's night sleep
    if operation == 'RoomSleepPay':
        Log('RoomSleepPay: You are rested!')

        # Max up the player's current health
        #NOTE(g): Uses a percentage based increase from the game data. If not
        #   present, assume full recovery.
        modifier = game.data['game'].get('sleep_regeneration_percent', 1.0)

        # Add the modified version of the full health
        game.player.health_current += game.player.attributes['health'] * modifier

        # Max out at full health
        if game.player.health_current > game.player.attributes['health']:
            game.player.health_current = game.player.attributes['health']

        # No longer fatigued (running and such)
        game.player.fatigued = False

    # Combat with the Player
    elif operation == 'CombatPlayer':
        if game.dialogue:
            # If a map is specified for the encounter, then fight
            map = data.get('map', None)

            # Set combat to be with the given actor
            game.combat = rpg_combat.Combat(game, [game.dialogue.actor], map=map)

            # Clear the dialogue. The time for talking is OVER!
            game.dialogue = None
        else:
            Log('Operation: CombatPlayer: Not initiated from Dialogue. Unknown actor.')

    # Close the Dialogue
    elif operation == 'CloseDialogue':
        game.dialogue = None
| ghowland/rpg | rpg_operation.py | Python | mit | 2,007 |
# -*- coding: utf-8 -*-
# @date 161103 - Export excel with get_work_order_report function
"""
Data exportor (Excel, CSV...)
"""
import io
import math
from datetime import datetime

from xlsxwriter.workbook import Workbook
import tablib

from utils.tools import get_product_size


def get_customers(customer_list=None, file_format='csv'):
    """Generate customer data file for download."""
    if customer_list is None:
        customer_list = []

    data = tablib.Dataset()
    data.headers = ('客戶代碼', '客戶名稱')
    for c in customer_list:
        data.append((c.c_code, c.c_name))

    if file_format == 'csv':
        return data.csv
    return data


def get_maintenance_log(log_list=None, file_format='csv'):
    """Generate maintenance log to csv file for download."""
    if log_list is None:
        log_list = []

    data = tablib.Dataset()
    data.headers = ('機台', '維修項目', '開始時間',
                    '員工', '結束時間', '員工',
                    '總計時間')
    for log in log_list:
        m_code = log['m_code'].replace('<br>', '\n')
        data.append((log['machine_id'], m_code, log['start_time'],
                     log['who_start'], log['end_time'], log['who_end'],
                     log['total_time'][0])
                    )

    if file_format == 'csv':
        return data.csv
    return data


def get_w_m_performance_report(file_format='xls'):
    """Generate excel file for download by worker and machine performance."""
    row_number = 11
    data = tablib.Dataset()
    data.append(['個人效率期間表 ({})'.format(
        datetime.now().strftime("%Y/%m/%d"))] + [''] * (row_number - 1))
    data.append(['工號', '姓名', '日期', '標準量', '效率標準量',
                 '實質生產量', '總稼動時間', '總停機時間', '稼動 %', '數量效率 %',
                 '平均效率 %'])

    if file_format == 'xls':
        return data.xls
    return data


def get_loss_rate_report(report_data, file_format='csv'):
    """Generate csv file for download by machine loss rate."""
    data = tablib.Dataset()
    data.headers = ('機台', '機型', '良品數', '不良品數', '損耗率(%)',
                    '損耗金額(RMB)', '損耗率排名')
    rank = 0
    old_loss_rate = None
    for r in sorted(report_data, key=lambda k: k['loss_rate'], reverse=True):
        if old_loss_rate != r['loss_rate']:
            rank += 1
            old_loss_rate = r['loss_rate']
        record = [r['machine_id'], r['machine_type'], r['count_qty'],
                  r['event_qty'], r['loss_rate'], r['total_loss_money'],
                  rank]
        data.append(record)

    if file_format == 'csv':
        return data.csv
    return data


def get_loss_rate_detail_report(report_data, file_format='csv'):
    """Generate csv file for download by machine loss rate detail."""
    data = tablib.Dataset()
    data.headers = ('日期', '良品數', '不良品數', '損耗率(%)',
                    '損耗金額(RMB)')
    for r in sorted(report_data, key=lambda k: k['record_date']):
        record = [r['record_date'], r['count_qty'], r['event_qty'],
                  r['loss_rate'], r['total_loss_money']]
        data.append(record)

    if file_format == 'csv':
        return data.csv
    return data


def get_uptime_report(report_data='', file_format='xls'):
    """Generate excel file for download by uptime information."""
    data = tablib.Dataset()
    data.append_separator('製造部各工程稼動率一覽表')
    data.append(['月份:10', '星期', '', '', '', '', '',
                 '目標', '', '', '', ''])
    data.append(['', '', '加締卷取(%)', '組立(%)', '老化(%)',
                 'CUTTING(%)', 'TAPPING(%)', '加締卷取',
                 '組立', '老化', 'CUTTING', 'TAPPING'])

    if file_format == 'xls':
        return data.xls
    return data


def get_work_order_report(report_data, file_format='csv'):
    """Generate csv file for download by work order."""
    # data = tablib.Dataset()
    # data.headers = ('製令編號', '料號', '客戶', '產品規格',
    #                 '投入數', '應繳庫數',
    #                 '加締捲取', '組立', '老化', '選別', '加工切角')
    # for r in sorted(report_data, key=lambda k: k['order_no']):
    #     try:
    #         intput_count = int(r['input_count'])
    #     except (TypeError, ValueError):
    #         intput_count = -1
    #     record = [r['order_no'], r['part_no'], r['customer'], r['product'],
    #               intput_count, math.floor(intput_count / 1.03),
    #               r['step1_status'], r['step2_status'], r['step3_status'],
    #               r['step4_status'], r['step5_status']]
    #     data.append(record)
    # if file_format == 'csv':
    #     return data.csv
    # return data
    output = io.BytesIO()
    if file_format == 'xls':
        workbook = Workbook(output, {'in_memory': True})
        worksheet = workbook.add_worksheet()
        # merge_format = workbook.add_format({
        #     'bold': 1,
        #     'border': 1,
        #     'align': 'center',
        #     'valign': 'vcenter'})
        worksheet.merge_range('A1:A3', '製令編號')
        worksheet.merge_range('B1:B3', '料號')
        worksheet.merge_range('C1:C3', '客戶')
        worksheet.merge_range('D1:D3', '產品規格')
        worksheet.merge_range('E1:E3', '投入數')
        worksheet.merge_range('F1:F3', '應繳庫數')
        worksheet.write('G1', '加締捲取')
        worksheet.write('H1', '組立')
        worksheet.write('I1', '老化')
        worksheet.write('J1', '選別')
        worksheet.write('K1', '加工切角')
        for col_name in ('G', 'H', 'I', 'J', 'K'):
            worksheet.write(col_name + '2', '機器')
            worksheet.write(col_name + '3', '良品數')
        row = 4
        for r in sorted(report_data, key=lambda k: k['order_no']):
            try:
                intput_count = int(r['input_count'])
            except (TypeError, ValueError):
                intput_count = -1
            worksheet.merge_range('A{}:A{}'.format(row, row + 2),
                                  r['order_no'])
            worksheet.merge_range('B{}:B{}'.format(row, row + 2), r['part_no'])
            worksheet.merge_range('C{}:C{}'.format(row, row + 2),
                                  r['customer'])
            worksheet.merge_range('D{}:D{}'.format(row, row + 2), r['product'])
            worksheet.merge_range('E{}:E{}'.format(row, row + 2), intput_count)
            worksheet.merge_range('F{}:F{}'.format(row, row + 2),
                                  math.floor(intput_count / 1.03))
            for process in range(1, 6):
                row_tag = chr(71 + process - 1)
                worksheet.write_string('{}{}'.format(row_tag, row),
                                       r['step{}_status'.format(process)])
                machine = r['step{}_machine'.format(process)]
                count = r['step{}_count'.format(process)]
                worksheet.write_string('{}{}'.format(row_tag, row + 1),
                                       machine if machine else '')
                worksheet.write_string('{}{}'.format(row_tag, row + 2),
                                       str(count) if count else '')
            row += 3
        workbook.close()
    output.seek(0)
    return output.read()


def get_order_report(report_data, file_format='csv'):
    """Generate csv file for download by machine loss rate detail."""
    data = tablib.Dataset()
    data.headers = ('製令編號', '客戶', '規格', '投入數', '需求數',
                    '加締捲曲', '組立', '老化', '選別', '加工切腳')
    for r in sorted(report_data, key=lambda k: k['order_no']):
        record = [r['order_no'], r['customer'], get_product_size(r['part_no']),
                  r['input_count'], r['require_count'],
                  r['step1_prod_qty'], r['step2_prod_qty'],
                  r['step3_prod_qty'], r['step4_prod_qty'],
                  r['step5_prod_qty']]
        data.append(record)

    if file_format == 'csv':
        return data.csv
    return data
| grtfou/data-analytics-web | website/utils/data_exportor.py | Python | mit | 8,235 |
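get_work_order_report() above builds the workbook entirely in memory and hands raw bytes back for the HTTP response. The core xlsxwriter pattern it uses, reduced to a sketch (the header labels here are illustrative):

    import io
    from xlsxwriter.workbook import Workbook

    output = io.BytesIO()
    workbook = Workbook(output, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    worksheet.merge_range('A1:A3', 'order no')  # one header cell spanning three rows
    worksheet.write('B1', 'qty')
    workbook.close()   # must be closed before the buffer is read
    output.seek(0)
    xlsx_bytes = output.read()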
{'level_mc': {'_txt': {'text': '6'},
              'currentLabel': 'up',
              'progress_mc': {'currentLabel': '_0'}}}
| ethankennerly/hotel-vs-gozilla | user/h4.news.py | Python | mit | 126 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class ProviderOperationsOperations(object):
    """ProviderOperationsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.devtestlabs.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ProviderOperationResult"]
        """Result of the request to list REST API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProviderOperationResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.devtestlabs.models.ProviderOperationResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProviderOperationResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-15"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ProviderOperationResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.DevTestLab/operations'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/devtestlabs/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/operations/_provider_operations_operations.py | Python | mit | 4,782 |
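The list() operation above returns an ItemPaged that follows next_link transparently, so callers simply iterate. A consumption sketch, assuming the package's usual client entry point (DevTestLabsClient) and azure-identity for credentials; the subscription id is a placeholder:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.devtestlabs import DevTestLabsClient

    client = DevTestLabsClient(DefaultAzureCredential(), "<subscription-id>")
    for operation in client.provider_operations.list():
        print(operation)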
import os

from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty
from kivy.uix.anchorlayout import AnchorLayout

from cobiv.modules.core.hud import Hud

Builder.load_file(os.path.abspath(os.path.join(os.path.dirname(__file__), 'progresshud.kv')))


class ProgressHud(Hud, AnchorLayout):
    value = NumericProperty(0)
    caption = StringProperty("")

    def __init__(self, **kwargs):
        super(ProgressHud, self).__init__(**kwargs)
| gokudomatic/cobiv | cobiv/modules/hud_components/progresshud/progresshud.py | Python | mit | 473 |
"""Non-application-specific convenience methods for GPkit"""
import numpy as np
def te_exp_minus1(posy, nterm):
"""Taylor expansion of e^{posy} - 1
Arguments
---------
posy : gpkit.Posynomial
Variable or expression to exponentiate
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of e^{posy} - 1, carried to nterm terms
"""
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= i
res += posy**i / factorial_denom
return res
def te_secant(var, nterm):
"""Taylor expansion of secant(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of secant(x), carried to nterm terms
"""
# The first 12 Euler Numbers
E2n = np.asarray([1.0,
5,
61,
1385,
50521,
2702765,
199360981,
19391512145,
2404879675441,
370371188237525,
69348874393137901,
15514534163557086905])
if nterm > 12:
n_extend = np.asarray(range(13, nterm+1))
E2n_add = (8 * np.sqrt(n_extend/np.pi)
* (4*n_extend/(np.pi * np.exp(1)))**(2*n_extend))
E2n = np.append(E2n, E2n_add)
res = 1
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res = res + var**(2*i) * E2n[i-1] / factorial_denom
return res
def te_tangent(var, nterm):
"""Taylor expansion of tangent(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of tangent(x), carried to nterm terms
"""
if nterm > 15:
raise NotImplementedError("Tangent expansion not implemented above"
" 15 terms")
# The first 15 Bernoulli Numbers
B2n = np.asarray([1/6,
-1/30,
1/42,
-1/30,
5/66,
-691/2730,
7/6,
-3617/510,
43867/798,
-174611/330,
854513/138,
-236364091/2730,
8553103/6,
-23749461029/870,
8615841276005/14322])
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res += ((-1)**(i-1) * 2**(2*i) * (2**(2*i) - 1) *
B2n[i-1] / factorial_denom * var**(2*i-1))
return res
| hoburg/gpkit | gpkit/tools/tools.py | Python | mit | 3,097 |
from django.contrib import admin

from modeltranslation.admin import TabbedTranslationAdmin

from .models import Person, Office, Tag


class PersonAdmin(TabbedTranslationAdmin):
    list_display = ('name', 'surname', 'security_level', 'gender')
    list_filter = ('security_level', 'tags', 'office', 'name', 'gender')

    actions = ['copy_100']

    def copy_100(self, request, queryset):
        for item in queryset.all():
            item.populate()
    copy_100.short_description = 'Copy 100 objects with random data'


class PersonStackedInline(admin.TabularInline):
    model = Person
    extra = 0


class OfficeAdmin(admin.ModelAdmin):
    inlines = (PersonStackedInline,)
    list_display = ('office', 'address')


class TagAdmin(admin.ModelAdmin):
    list_display = ('name',)


admin.site.register(Person, PersonAdmin)
admin.site.register(Office, OfficeAdmin)
admin.site.register(Tag, TagAdmin)
| mtrgroup/django-mtr-utils | tests/app/admin.py | Python | mit | 908 |
import atexit
from flask import Flask, jsonify, g, request, make_response
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_jwt_extended import JWTManager, set_access_cookies, jwt_required, unset_jwt_cookies
from apscheduler.schedulers.background import BackgroundScheduler
import logging
logging.basicConfig()
app = Flask(__name__)
app.config.from_pyfile('./config/config.py')
db = SQLAlchemy(app)
CORS(app, supports_credentials=True)
migrate = Migrate(app, db)
jwt = JWTManager(app)
from tasks.next_reservation_check import check_all_users
from DB.User import User
from endpoints import reservation, user
scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(check_all_users,'interval', hours=4)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
@app.route('/')
def index():
return "Hello, this is an API, Swagger documentation will follow here..."
@app.route('/token')
def get_auth_token():
if not request.authorization:
response = make_response(jsonify({'error':'Login required'}))
response.headers.set('WWW-Authenticate', 'Basic realm="patklaey.ch"')
return response, 401
if not verify_password(request.authorization.username, request.authorization.password):
response = jsonify({'error':'Invalid username or password'})
return response, 401
token = g.user.generate_auth_token()
response = jsonify({'token': token})
set_access_cookies(response, token)
return response, 200
def verify_password(username, password):
user = User.query.filter_by(username=username, active=True).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@app.route('/logout', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
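# --- Editor's sketch (not part of this API): how a client obtains a token. ---
# /token uses HTTP Basic auth and returns the JWT both in the JSON body and as
# an access cookie. Host, port and credentials below are placeholders.
def example_client_login(base_url='http://localhost:5000', username='user', password='secret'):
    import requests  # assumed to be available on the client side
    session = requests.Session()
    response = session.get(base_url + '/token', auth=(username, password))
    response.raise_for_status()
    # the token also arrives as a cookie on the session via set_access_cookies
    return session, response.json()['token']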
| patklaey/ZermattReservationAPI | main.py | Python | mit | 1,957 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.modelgenerators.generator Contains the abstract ModelGenerator class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import abstractmethod, ABCMeta
from collections import OrderedDict
# Import the relevant PTS classes and modules
from ....core.tools.logging import log
from ..component import FittingComponent
# NOTE (editor): initialize_animations() below also uses ScatterAnimation and
# DistributionAnimation, which this excerpt never imports; they come from the
# PTS animation utilities (exact module path not shown here), e.g. something
# like:
# from ....core.basics.animation import ScatterAnimation, DistributionAnimation
# -----------------------------------------------------------------
class ModelGenerator(FittingComponent):
"""
This class...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ModelGenerator, self).__init__()
# The dictionary with the parameter ranges
self.ranges = OrderedDict()
# The dictionary with the list of the model parameters
        self.parameters = OrderedDict()
        # The animation objects (constructed in initialize_animations)
        self.scatter_animation = None
        self.fuv_young_animation = None
        self.fuv_ionizing_animation = None
        self.dust_mass_animation = None
        # The distributions of previously probed parameter values
        # (expected by initialize_animations; populated in load_input)
        self.distributions = None
# -----------------------------------------------------------------
@property
def parameter_names(self):
"""
This function ...
:return:
"""
return self.ranges.keys()
# -----------------------------------------------------------------
@property
def nparameters(self):
"""
This function ...
:return:
"""
return len(self.ranges)
# -----------------------------------------------------------------
@property
def nmodels(self):
"""
This function ...
:return:
"""
return len(self.parameters[self.ranges.keys()[0]])
# -----------------------------------------------------------------
@property
def parameter_minima(self):
"""
This function ...
:return:
"""
minima = []
for name in self.ranges: minima.append(self.ranges[name].min)
# Return the minimal parameter values
return minima
# -----------------------------------------------------------------
@property
def parameter_maxima(self):
"""
This function ...
:return:
"""
maxima = []
for name in self.ranges: maxima.append(self.ranges[name].max)
# Return the maximal parameter values
return maxima
# -----------------------------------------------------------------
def add_parameter(self, name, par_range):
"""
This function ...
:param name:
:param par_range:
:return:
"""
self.ranges[name] = par_range
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the necessary input
self.load_input()
# 3. Initialize the animations
self.initialize_animations()
# 4. Generate the model parameters
self.generate()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup of the base class
super(ModelGenerator, self).setup()
# -----------------------------------------------------------------
    def load_input(self):
        """
        This function ...
        :return:
        """
        # NOTE (editor): initialize_animations() expects self.ranges and
        # self.distributions to be filled in; subclasses should load the
        # distributions of previously probed parameter values here.
        pass
# -----------------------------------------------------------------
def initialize_animations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Initializing the animations ...")
# Initialize the scatter animation
self.scatter_animation = ScatterAnimation(self.ranges["FUV young"], self.ranges["FUV ionizing"],
self.ranges["Dust mass"])
self.scatter_animation.x_label = "FUV luminosity of young stars"
self.scatter_animation.y_label = "FUV luminosity of ionizing stars"
self.scatter_animation.z_label = "Dust mass"
# Initialize the young FUV luminosity distribution animation
self.fuv_young_animation = DistributionAnimation(self.ranges["FUV young"][0], self.ranges["FUV young"][1],
"FUV luminosity of young stars", "New models")
self.fuv_young_animation.add_reference_distribution("Previous models", self.distributions["FUV young"])
# Initialize the ionizing FUV luminosity distribution animation
self.fuv_ionizing_animation = DistributionAnimation(self.ranges["FUV ionizing"][0],
self.ranges["FUV ionizing"][1],
"FUV luminosity of ionizing stars", "New models")
self.fuv_ionizing_animation.add_reference_distribution("Previous models", self.distributions["FUV ionizing"])
# Initialize the dust mass distribution animation
self.dust_mass_animation = DistributionAnimation(self.ranges["Dust mass"][0], self.ranges["Dust mass"][1],
"Dust mass", "New models")
self.dust_mass_animation.add_reference_distribution("Previous models", self.distributions["Dust mass"])
# -----------------------------------------------------------------
@abstractmethod
def generate(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def update_animations(self, young_luminosity, ionizing_luminosity, dust_mass):
"""
This function ...
:param young_luminosity:
:param ionizing_luminosity:
:param dust_mass:
:return:
"""
# Add the point (and thus a frame) to the animation of parameter points
self.scatter_animation.add_point(young_luminosity, ionizing_luminosity, dust_mass)
# Update the distribution animations
if self.nmodels > 1:
            # Add a frame to the animation of the distribution of the FUV luminosity of young stars
self.fuv_young_animation.add_value(young_luminosity)
# Add a frame to the animation of the distribution of the FUV luminosity of ionizing stars
self.fuv_ionizing_animation.add_value(ionizing_luminosity)
# Add a frame to the animation of the distribution of the dust mass
self.dust_mass_animation.add_value(dust_mass)
# -----------------------------------------------------------------
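# Editor's sketch (not part of PTS): a minimal concrete generator illustrating
# how the abstract interface above is meant to be filled in. It samples every
# parameter range uniformly; the .min/.max attributes follow the range objects
# already used by parameter_minima/parameter_maxima, and the class name and
# sample count are invented for this example.
class UniformModelGenerator(ModelGenerator):
    def __init__(self, nsamples=10):
        super(UniformModelGenerator, self).__init__()
        self.nsamples = nsamples
    def generate(self):
        import random
        for name in self.parameter_names:
            par_range = self.ranges[name]
            self.parameters[name] = [random.uniform(par_range.min, par_range.max) for _ in range(self.nsamples)]
# -----------------------------------------------------------------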
| Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/fitting/modelgenerators/generator.py | Python | mit | 7,222 |
from django.apps import AppConfig
class DonateAppConfig(AppConfig):
name = 'readthedocs.donate'
verbose_name = 'Donate'
def ready(self):
import readthedocs.donate.signals # noqa
| tddv/readthedocs.org | readthedocs/donate/apps.py | Python | mit | 202 |
from flask import Flask, Response, request
from twilio.util import TwilioCapability
app = Flask(__name__)
@app.route('/token', methods=['GET', 'POST'])
def get_capability_token():
"""Respond to incoming requests."""
# Find these values at twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
capability = TwilioCapability(account_sid, auth_token)
# Twilio Application Sid
application_sid = 'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
capability.allow_client_outgoing(application_sid)
    # request.values covers both query-string and form data; request.form
    # alone is empty on a plain GET request
    capability.allow_client_incoming(request.values["ClientName"])
token = capability.generate()
return Response(token, mimetype='application/jwt')
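# --- Editor's sketch (not part of the snippet): exercising the endpoint. ---
# ClientName travels as a request parameter; the URL and client name below are
# placeholders.
def example_fetch_token(client_name='alice'):
    import requests  # assumed to be available on the client side
    response = requests.get('http://localhost:5000/token', params={'ClientName': client_name})
    response.raise_for_status()
    return response.text  # the capability token (a JWT)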
if __name__ == "__main__":
app.run(debug=True)
| teoreteetik/api-snippets | client/capability-token-2way/capability-token.5.x.py | Python | mit | 763 |
#! /usr/bin/env python
import os.path
import os
import argparse
import pickle
from util import *
from collections import defaultdict
import base64
import logging
import sys
import json
import shelve
#from FeatureExtraction import Tf_Idf
from LangProc import docTerms
# TODO: remove these assumptions
# Indexer assumes that the collection fits in memory
class DBMIndex:
pass
class BaseIndexer():
def __init__(self):
self.invertedIndex = defaultdict(list)
self.forwardIndex = dict()
self.idToUrl = dict() #url is too long
self.docCount =0
class ShelveIndexer():
def __init__(self):
self.invertedIndex = None
self.forwardIndex = None
self.idToUrl = None #url is too long
self.urlToId =dict()
self.docCount =0
def addDocument(self,url,parsedText):
assert url.encode("utf8") not in self.urlToId
self.docCount += 1
currentId = self.docCount
self.urlToId[url.encode("utf8")] = currentId
self.idToUrl[str(currentId)] = url;
self.forwardIndex[str(currentId)] = parsedText
for position,term in enumerate(parsedText):
stem = term.stem.encode("utf8")
documents = self.invertedIndex[stem] if stem in self.invertedIndex else []
documents.append((position,currentId))
self.invertedIndex[stem] = documents
def startIndexer(self,indexDir):
self.invertedIndex = shelve.open(os.path.join(indexDir,"invertedIndex"),'c')
self.forwardIndex = shelve.open(os.path.join(indexDir,"forwardIndex"),'c')
self.idToUrl = shelve.open(os.path.join(indexDir,"idToUrl"),'c')
def finishIndexer(self):
self.invertedIndex.close()
self.forwardIndex.close()
self.idToUrl.close()
def loadIndexer(self,indexDir):
self.invertedIndex = shelve.open(os.path.join(indexDir,"invertedIndex"),'r')
self.forwardIndex = shelve.open(os.path.join(indexDir,"forwardIndex"),'r')
self.idToUrl = shelve.open(os.path.join(indexDir,"idToUrl"),'r')
def getDocumentOfQuery(self,query):
return self.invertedIndex.get(query.stem.encode("utf8"),[])
def getDocumentOfId(self,id):
return self.forwardIndex.get(str(id),[])
def getUrl(self,id): # here we load all data from files thus the type is string !
return self.idToUrl[str(id)]
class MemoryIndexer():
def __init__(self):
self.invertedIndex = defaultdict(list)
self.forwardIndex = dict()
self.idToUrl = dict() #url is too long
self.docCount =0
    # TODO: remove these assumptions
    # assumes that addDocument() is never called twice for a document
    # assumes that a document has a unique url
    # parsed text is a list of terms
def addDocument(self,url,parsedText):
self.docCount += 1
currentId = self.docCount
self.idToUrl[currentId] = url;
self.forwardIndex[currentId] = parsedText
for position,term in enumerate(parsedText):
self.invertedIndex[term].append((position,currentId))
    # dump to disk with pickle
def dumpToDisk(self,IndexDir):
def pickleDumpToFile(source,fileName):
file = open(os.path.join(IndexDir,fileName),"w")
pickle.dump(source,file)
pickleDumpToFile(self.idToUrl,"idToUrl")
pickleDumpToFile(self.invertedIndex,"inverted")
pickleDumpToFile(self.forwardIndex,"forward")
def loadFromDisk(self,indexDir):
def pickleLoadFromFile(fileName):
file = open(os.path.join(indexDir,fileName),"r")
return pickle.load(file)
self.invertedIndex=pickleLoadFromFile("inverted")
self.idToUrl=pickleLoadFromFile("idToUrl")
self.forwardIndex=pickleLoadFromFile("forward")
def getDocumentOfQuery(self,query):
return self.invertedIndex.get(query,[])
def getDocumentOfId(self,id):
return self.forwardIndex.get(id,[])
    def getUrl(self,id):
        # MemoryIndexer keys idToUrl by integer id (see addDocument), and
        # pickle preserves that, so look the id up directly; casting to
        # str here would raise KeyError
        return self.idToUrl[id]
class Searcher():
def __init__(self,indexDir,implemention=ShelveIndexer):
self.index = implemention()
self.index.loadIndexer(indexDir)
def findDocument_AND(self,queryStr):
documentIdList = defaultdict(lambda:0)
for term in queryStr:
for id in set([item[1] for item in self.index.getDocumentOfQuery(term)]):
documentIdList[id] += 1
return [docId for docId,cnt in documentIdList.iteritems() if cnt ==len(queryStr)]
    def getUrl(self,id):
        # delegate to the index so both ShelveIndexer (string keys) and
        # MemoryIndexer (integer keys) work
        return self.index.getUrl(id)
def getSnippets(self,queryStr,id):
currentWindow = [-1]*(len(queryStr))
keyLen = 0
minWindow = []
minSize = sys.maxint
bestIndenticaltermSize = 0
for pos,term in enumerate(self.index.getDocumentOfId(id)):
if term in queryStr:
currentWindow[queryStr.index(term)] = pos
if -1 not in currentWindow:
start = min(currentWindow)
end = pos
indenticaltermSize = len(set(self.index.getDocumentOfId(id)[start : end+1]))
if(minSize > end-start+1) or (indenticaltermSize > bestIndenticaltermSize and minSize+2 >= end-start+1):
minWindow = currentWindow[:]
minSize = end-start + 1
bestIndenticaltermSize = indenticaltermSize
docLength = len(self.index.getDocumentOfId(id))
snippetsStart = max(min(minWindow)-10,0)
snippetsEnd = min(docLength, max(minWindow)+1+10)
        # return a list of (word, is_query_term) tuples; terms that matched
        # the query are flagged True so the caller can highlight them
        return [(term.originalWord,term in queryStr) for term in self.index.getDocumentOfId(id)[snippetsStart:snippetsEnd]]
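# Editor's sketch (not part of the original module): tying the pieces
# together. The index directory and query text are placeholders; docTerms
# turns raw text into the term objects the index stores.
def demo_search(index_dir, query_text):
    searcher = Searcher(index_dir)
    query = docTerms(query_text)
    for doc_id in searcher.findDocument_AND(query):
        print searcher.getUrl(doc_id)
        print searcher.getSnippets(query, doc_id)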
'''
def createIndexDir(storedDocumentDir,indexDir):
indexer = MemoryIndexer()
indexCount = 0
for fileName in os.listdir(storedDocumentDir):
indexCount +=1
if indexCount % 100 ==0:
logging.info(u"Indexed {} documents".format(indexCount))
logging.info(u"Adding Document: {}".format(base64.b16decode(fileName)))
openFile = open(os.path.join(storedDocumentDir,fileName))
parsedText = docTerms(parseRedditPost(openFile.read()))
indexer.addDocument(base64.b16decode(fileName),parsedText)
indexer.dumpToDisk(indexDir)
'''
def createIndexDirApi(storedDocumentDir,indexDir,implemention=ShelveIndexer):
indexer = implemention()
indexer.startIndexer(indexDir)
indexCount = 0
for fileName in os.listdir(storedDocumentDir):
#logging.info(u"Adding Document: {}".format(base64.b16decode(fileName)))
openFile = open(os.path.join(storedDocumentDir,fileName))
try:
jsonFile = json.load(openFile)
parsedText = docTerms(jsonFile['text'])
indexer.addDocument(jsonFile['url'],parsedText)
indexCount +=1
if indexCount % 100 ==0:
logging.info(u"Indexed {} documents".format(indexCount))
except Exception as e:
logging.exception(e)
openFile.close()
indexer.finishIndexer()
def main():
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(description = "Index/r/learnprogramming")
parser.add_argument("--storedDocumentDir", dest = "storedDocumentDir", required= True)
parser.add_argument("--indexDir", dest = "indexDir", required = True)
args = parser.parse_args()
createIndexDirApi(args.storedDocumentDir,args.indexDir)
if __name__ == "__main__": # if invoke from command line
main() | zkmake520/SearchEngine | Indexer.py | Python | mit | 6,844 |
import mobula.layers as L
import numpy as np
def test_acc():
X = np.array([[0, 1, 2],
[1, 2, 0],
[0, 1, 2],
[1, 2, 0]])
Y = np.array([1, 0, 2, 1]).reshape((-1, 1))
# top-k
# 1 [False, False, True, True]
# 2 [True, True, True, True]
target = [np.array([False, False, True, True]), np.array([True, True, True, True])]
[data, label] = L.Data([X, Y])
for i in range(2):
l = L.Accuracy(data, label = label, top_k = 1 + i)
l.reshape()
l.forward()
assert l.Y == np.mean(target[i])
| wkcn/mobula | tests/test_layers/test_acc.py | Python | mit | 625 |
#!/usr/bin/env python
from distutils.core import setup
import strfrag
setup(
name='Strfrag',
version=strfrag.__version__,
description=('StringFragment type to represent parts of str objects; '
'used to avoid copying strings during processing'),
url="https://github.com/ludios/Strfrag",
author="Ivan Kozik",
author_email="[email protected]",
classifiers=[
'Programming Language :: Python :: 2',
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
],
py_modules=['strfrag', 'test_strfrag'],
)
| ludios/Strfrag | setup.py | Python | mit | 611 |
from __future__ import unicode_literals
from django.db import models
from django.core.paginator import Paginator, PageNotAnInteger
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
class EventPage(Page):
    # null=True accompanies blank=True here because date/time columns have no
    # empty-string representation to fall back on
    date = models.DateField("Event Date", blank=True, null=True)
    time = models.TimeField("Time", blank=True, null=True)
location = models.CharField(max_length=250, blank=True)
address = models.CharField(max_length=250, blank=True)
intro = models.CharField(max_length=250, blank=True)
body = RichTextField(blank=True)
main_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
search_fields = Page.search_fields + (
index.SearchField('intro'),
index.SearchField('body'),
)
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('time'),
ImageChooserPanel('main_image'),
FieldPanel('location'),
FieldPanel('address'),
FieldPanel('intro'),
FieldPanel('body', classname="full"),
]
class EventIndexPage(Page):
intro = RichTextField(blank=True)
def get_context(self, request):
context = super(EventIndexPage, self).get_context(request)
context['event_entries'] = EventPage.objects.child_of(self).live()
return context
content_panels = Page.content_panels + [
FieldPanel('intro'),
]
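# Editor's sketch (not part of the original app): the Paginator imports at the
# top suggest paginated listings were intended; a helper along these lines
# would page the event entries (the page size is an assumption, and
# out-of-range pages are left unhandled here).
def paginate_events(request, events, per_page=10):
    paginator = Paginator(events, per_page)
    page = request.GET.get('page')
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        # page was missing or not a number: fall back to the first page
        return paginator.page(1)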
| samuelleeuwenburg/Samplate | event/models.py | Python | mit | 1,665 |
import re
import time
class BaseCounters:
def __init__(self):
self.keyre = re.compile('\A[\w.]+\Z')
def ping(self, key):
self.validate_key(key)
self.do_ping(key, int(time.time()))
def hit(self, key, n=1):
self.validate_key(key)
self.do_hit(key, n)
    def validate_key(self, key):
        if not re.match(self.keyre, key):
            raise ValueError("Counters keys must only contain letters, numbers, the underscore (_) and fullstop (.), received \"%s\"" % key)
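# Editor's sketch (not part of the package): BaseCounters delegates storage to
# do_ping/do_hit, so a minimal in-memory backend looks like this.
class MemoryCounters(BaseCounters):
    def __init__(self):
        BaseCounters.__init__(self)
        self.pings = {}
        self.hits = {}
    def do_ping(self, key, timestamp):
        # remember the most recent ping time per key
        self.pings[key] = timestamp
    def do_hit(self, key, n):
        self.hits[key] = self.hits.get(key, 0) + n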
| francois/pycounters | counters/base_counters.py | Python | mit | 499 |
from django.db import connection
from laboratory.settings import TIME_ZONE
from utils.db import namedtuplefetchall
def get_history_dir(d_s, d_e, card_id, who_create_dir, services, is_serv, iss_pk, is_parent, for_slave_hosp):
with connection.cursor() as cursor:
cursor.execute(
"""WITH
t_iss AS (SELECT
directions_issledovaniya.id as iss_id,
directions_napravleniya.client_id,
directory_researches.title as res_title,
directory_researches.id as res_id,
directory_researches.code,
directory_researches.is_hospital,
directory_researches.is_slave_hospital,
directory_researches.is_treatment,
directory_researches.is_stom,
directory_researches.is_doc_refferal,
directory_researches.is_paraclinic,
directory_researches.is_form,
directory_researches.is_microbiology,
directory_researches.podrazdeleniye_id,
directions_napravleniya.parent_id,
directions_napravleniya.data_sozdaniya,
directions_napravleniya.doc_who_create_id,
directions_issledovaniya.napravleniye_id,
directions_napravleniya.cancel,
directions_issledovaniya.time_confirmation,
directions_issledovaniya.maybe_onco,
to_char(directions_issledovaniya.time_save AT TIME ZONE %(tz)s, 'DD.MM.YYYY-HH24:MI:SS') as ch_time_save,
directions_issledovaniya.study_instance_uid,
directions_napravleniya.parent_slave_hosp_id,
directory_researches.is_application,
directory_researches.is_expertise,
person_contract.id as person_contract_id,
person_contract.dir_list as contract_dirs
FROM directions_issledovaniya
LEFT JOIN directory_researches
ON directions_issledovaniya.research_id = directory_researches.Id
LEFT JOIN directions_napravleniya
ON directions_issledovaniya.napravleniye_id = directions_napravleniya.id
LEFT JOIN directions_personcontract person_contract on directions_napravleniya.num_contract = person_contract.num_contract
WHERE directions_napravleniya.data_sozdaniya AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
AND NOT directory_researches.is_expertise
AND
CASE
WHEN %(is_parent)s = TRUE AND %(for_slave_hosp)s = FALSE THEN
directions_napravleniya.parent_id = %(iss_pk)s
WHEN %(is_parent)s = TRUE AND %(for_slave_hosp)s = TRUE THEN
directions_napravleniya.parent_slave_hosp_id = %(iss_pk)s
when %(card_id)s > -1 THEN
directions_napravleniya.client_id = %(card_id)s
when %(who_create)s > -1 THEN
directions_napravleniya.doc_who_create_id = %(who_create)s
END),
t_tubes AS (SELECT tubesregistration_id, issledovaniya_id as tubes_iss_id
FROM directions_issledovaniya_tubes
WHERE issledovaniya_id IN (SELECT iss_id FROM t_iss)),
t_iss_tubes AS (SELECT * from t_iss
LEFT JOIN t_tubes
ON t_iss.iss_id = t_tubes.tubes_iss_id),
t_recive AS (SELECT time_recive, id as id_t_recive FROM directions_tubesregistration
WHERE directions_tubesregistration.id in (SELECT tubesregistration_id FROM t_tubes)),
t_podrazdeleniye AS (SELECT id AS podr_id, can_has_pacs, title AS podr_title FROM podrazdeleniya_podrazdeleniya)
SELECT
napravleniye_id,
cancel,
iss_id,
tubesregistration_id,
res_id,
res_title,
to_char(data_sozdaniya AT TIME ZONE %(tz)s, 'DD.MM.YY') as date_create,
time_confirmation,
to_char(time_recive AT TIME ZONE %(tz)s, 'DD.MM.YY HH24:MI:SS.US'),
ch_time_save,
podr_title,
is_hospital,
maybe_onco,
can_has_pacs,
is_slave_hospital,
is_treatment,
is_stom,
is_doc_refferal,
is_paraclinic,
is_microbiology,
parent_id,
study_instance_uid,
parent_slave_hosp_id,
is_form,
is_application,
is_expertise,
person_contract_id,
contract_dirs
FROM t_iss_tubes
LEFT JOIN t_recive
ON t_iss_tubes.tubesregistration_id = t_recive.id_t_recive
LEFT JOIN t_podrazdeleniye
ON t_iss_tubes.podrazdeleniye_id = t_podrazdeleniye.podr_id
WHERE
CASE
WHEN %(is_serv)s = TRUE THEN
res_id = ANY(ARRAY[%(services_p)s])
WHEN %(is_serv)s = FALSE THEN
EXISTS (SELECT res_id FROM t_iss)
END
ORDER BY napravleniye_id DESC""",
params={
'd_start': d_s,
'd_end': d_e,
'card_id': card_id,
'who_create': who_create_dir,
'services_p': services,
'is_serv': is_serv,
'tz': TIME_ZONE,
'iss_pk': iss_pk,
'is_parent': is_parent,
'for_slave_hosp': for_slave_hosp,
},
)
row = cursor.fetchall()
return row
def get_patient_contract(d_s, d_e, card_pk):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT
directions_napravleniya.num_contract,
directions_personcontract.id,
directions_personcontract.cancel,
directions_personcontract.create_at,
directions_personcontract.sum_contract,
to_char(directions_personcontract.create_at AT TIME ZONE %(tz)s, 'DD.MM.YY') as date_create,
directions_issledovaniya.napravleniye_id,
directions_issledovaniya.coast,
directions_issledovaniya.discount,
directory_researches.title,
directions_personcontract.dir_list
FROM directions_issledovaniya
LEFT JOIN directory_researches ON
directory_researches.id=directions_issledovaniya.research_id
LEFT JOIN directions_napravleniya ON
directions_napravleniya.id=directions_issledovaniya.napravleniye_id
LEFT JOIN directions_personcontract ON
directions_personcontract.num_contract=directions_napravleniya.num_contract
WHERE directions_issledovaniya.napravleniye_id::varchar in (
select regexp_split_to_table(directions_personcontract.dir_list, ',') from directions_personcontract
where directions_personcontract.patient_card_id=%(card_pk)s and directions_personcontract.create_at AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
)
order by directions_personcontract.create_at DESC
""",
params={
'd_start': d_s,
'd_end': d_e,
'tz': TIME_ZONE,
'card_pk': card_pk,
},
)
rows = namedtuplefetchall(cursor)
return rows
def get_lab_podr():
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT id FROM public.podrazdeleniya_podrazdeleniya
WHERE p_type=2
"""
)
row = cursor.fetchall()
return row
def get_confirm_direction(d_s, d_e, lab_podr, is_lab=False, is_paraclinic=False, is_doc_refferal=False):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT ON (napravleniye_id) napravleniye_id FROM public.directions_issledovaniya
WHERE time_confirmation AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
AND research_id IN (SELECT id FROM directory_researches WHERE CASE
WHEN %(is_lab)s = FALSE AND %(is_paraclinic)s = TRUE AND %(is_doc_refferal)s = FALSE THEN
is_paraclinic = TRUE
WHEN %(is_lab)s = FALSE AND %(is_paraclinic)s = FALSE AND %(is_doc_refferal)s = TRUE THEN
is_doc_refferal = TRUE
WHEN %(is_lab)s = FALSE AND %(is_paraclinic)s = TRUE AND %(is_doc_refferal)s = TRUE THEN
is_paraclinic = TRUE or is_doc_refferal = TRUE
WHEN %(is_lab)s = TRUE AND %(is_paraclinic)s = FALSE AND %(is_doc_refferal)s = FALSE THEN
podrazdeleniye_id = ANY(ARRAY[%(lab_podr)s])
WHEN %(is_lab)s = TRUE AND %(is_paraclinic)s = TRUE AND %(is_doc_refferal)s = FALSE THEN
is_paraclinic = TRUE and is_doc_refferal = FALSE or podrazdeleniye_id = ANY(ARRAY[%(lab_podr)s])
WHEN %(is_lab)s = TRUE AND %(is_paraclinic)s = FALSE AND %(is_doc_refferal)s = TRUE THEN
is_paraclinic = FALSE and is_doc_refferal = TRUE or podrazdeleniye_id = ANY(ARRAY[%(lab_podr)s])
WHEN %(is_lab)s = TRUE AND %(is_paraclinic)s = TRUE AND %(is_doc_refferal)s = TRUE THEN
is_paraclinic = TRUE or is_doc_refferal = TRUE or podrazdeleniye_id = ANY(ARRAY[%(lab_podr)s])
END
)
""",
params={'d_start': d_s, 'd_end': d_e, 'tz': TIME_ZONE, 'is_lab': is_lab, 'is_paraclinic': is_paraclinic, 'is_doc_refferal': is_doc_refferal, 'lab_podr': lab_podr},
)
row = cursor.fetchall()
return row
def filter_direction_department(list_dirs, podrazdeleniye_id):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT ON (id) id FROM public.directions_napravleniya
WHERE id = ANY(ARRAY[%(num_dirs)s])
AND doc_id IN (SELECT id from users_doctorprofile WHERE podrazdeleniye_id = %(podrazdeleniye_id)s)
""",
params={'num_dirs': list_dirs, 'podrazdeleniye_id': podrazdeleniye_id},
)
row = cursor.fetchall()
return row
def filter_direction_doctor(list_dirs, doc_id):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT ON (id) id FROM public.directions_napravleniya
WHERE id = ANY(ARRAY[%(num_dirs)s]) AND doc_id = %(doc_id)s
""",
params={'num_dirs': list_dirs, 'doc_id': doc_id},
)
row = cursor.fetchall()
return row
def get_confirm_direction_pathology(d_s, d_e):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT DISTINCT ON (napravleniye_id) napravleniye_id FROM public.directions_issledovaniya
WHERE time_confirmation AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
AND research_id IN (SELECT id FROM public.directory_researches where title ILIKE '%%профпатолог%%')
""",
params={'d_start': d_s, 'd_end': d_e, 'tz': TIME_ZONE},
)
row = cursor.fetchall()
return row
def get_confirm_direction_patient_year(d_s, d_e, lab_podr, card_pk1, is_lab=False, is_paraclinic=False, is_doc_refferal=False):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT
directions_napravleniya.id as direction,
directions_issledovaniya.time_confirmation,
to_char(directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s, 'DD.MM.YYYY') as ch_time_confirmation,
directions_issledovaniya.research_id,
directory_researches.title as research_title
FROM directions_napravleniya
INNER JOIN directions_issledovaniya ON (directions_napravleniya.id = directions_issledovaniya.napravleniye_id)
AND directions_issledovaniya.research_id IN
(SELECT directory_researches.id FROM directory_researches WHERE CASE
WHEN %(is_lab)s = TRUE THEN directory_researches.podrazdeleniye_id = ANY(ARRAY[%(lab_podr)s])
WHEN %(is_doc_refferal)s = TRUE THEN is_doc_refferal = TRUE
WHEN %(is_paraclinic)s = TRUE THEN is_paraclinic = TRUE
END
)
LEFT JOIN directory_researches ON
directions_issledovaniya.research_id=directory_researches.id
WHERE directions_issledovaniya.time_confirmation IS NOT NULL
            AND directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
AND NOT EXISTS (SELECT directions_issledovaniya.napravleniye_id FROM directions_issledovaniya
WHERE time_confirmation IS NULL AND directions_issledovaniya.napravleniye_id = directions_napravleniya.id)
AND client_id=%(card_pk)s
ORDER BY directions_issledovaniya.time_confirmation DESC, directions_napravleniya.id
""",
params={
'd_start': d_s,
'd_end': d_e,
'tz': TIME_ZONE,
'is_lab': is_lab,
'is_paraclinic': is_paraclinic,
'is_doc_refferal': is_doc_refferal,
'lab_podr': lab_podr,
'card_pk': card_pk1,
},
)
rows = namedtuplefetchall(cursor)
return rows
def direction_by_card(d_s, d_e, card_id):
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT
directions_issledovaniya.id as iss_id,
directions_issledovaniya.napravleniye_id,
directions_issledovaniya.time_confirmation,
to_char(directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s, 'DD.MM.YYYY') date_confirm,
to_char(directions_issledovaniya.time_save AT TIME ZONE %(tz)s, 'DD.MM.YYYY-HH24:MI:SS') as ch_time_save,
directions_issledovaniya.study_instance_uid,
directory_researches.title as research_title,
directory_researches.id as research_id,
directory_researches.is_hospital,
directory_researches.is_slave_hospital,
directory_researches.is_treatment,
directory_researches.is_stom,
directory_researches.is_doc_refferal,
directory_researches.is_paraclinic,
directory_researches.is_form,
directory_researches.is_microbiology,
directory_researches.is_application,
directory_researches.is_expertise,
directory_researches.podrazdeleniye_id,
directions_napravleniya.parent_slave_hosp_id,
directions_napravleniya.client_id,
directions_napravleniya.parent_id,
directions_napravleniya.data_sozdaniya,
to_char(data_sozdaniya AT TIME ZONE %(tz)s, 'DD.MM.YY') as date_create,
directions_napravleniya.cancel
FROM directions_issledovaniya
LEFT JOIN directory_researches
ON directions_issledovaniya.research_id = directory_researches.id
LEFT JOIN directions_napravleniya
ON directions_issledovaniya.napravleniye_id = directions_napravleniya.id
WHERE directions_napravleniya.data_sozdaniya AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
AND directions_napravleniya.client_id = %(card_id)s
AND NOT directory_researches.is_expertise AND NOT directory_researches.is_hospital AND NOT
directory_researches.is_slave_hospital AND NOT directory_researches.is_application
ORDER BY directions_issledovaniya.napravleniye_id DESC""",
params={
'd_start': d_s,
'd_end': d_e,
'card_id': card_id,
'tz': TIME_ZONE,
},
)
rows = namedtuplefetchall(cursor)
return rows
def get_type_confirm_direction(directions_tuple):
if not directions_tuple:
return []
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT
DISTINCT (directions_issledovaniya.napravleniye_id) as napravleniye_id,
directory_researches.podrazdeleniye_id,
directory_researches.is_stom,
directory_researches.is_doc_refferal,
directory_researches.is_paraclinic,
directory_researches.is_form,
directory_researches.is_microbiology,
directory_researches.is_application
FROM directions_issledovaniya
LEFT JOIN directory_researches
ON directions_issledovaniya.research_id = directory_researches.id
WHERE directions_issledovaniya.napravleniye_id in %(directions_tuple)s
ORDER BY directions_issledovaniya.napravleniye_id DESC""",
params={
'directions_tuple': directions_tuple,
},
)
rows = namedtuplefetchall(cursor)
return rows
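# Editor's usage sketch (not part of the module): these helpers execute raw
# SQL through Django's connection, so a configured project is assumed. The
# date range and card id below are illustrative placeholders.
def example_history_for_card(card_id):
    rows = get_history_dir('2021-01-01 00:00', '2021-01-31 23:59', card_id, -1, [-1], False, -1, False, False)
    # each row starts with napravleniye_id (see the SELECT list above)
    return [row[0] for row in rows]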
| moodpulse/l2 | api/directions/sql_func.py | Python | mit | 16,657 |
from django import forms
from .models import PassType, Registration
class SignupForm(forms.ModelForm):
pass_type = forms.ModelChoiceField(
queryset=PassType.objects.filter(active=True),
widget=forms.widgets.RadioSelect(),
)
class Meta:
model = Registration
fields = (
"first_name",
"last_name",
"email",
"residing_country",
"dance_role",
"pass_type",
"workshop_partner_name",
"workshop_partner_email",
"lunch",
)
widgets = {
"dance_role": forms.widgets.RadioSelect(),
"lunch": forms.widgets.RadioSelect(),
}
class Media:
css = {"all": ("css/forms.css",)}
email_repeat = forms.EmailField()
agree_to_terms = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["pass_type"].empty_label = None
self.fields["lunch"].empty_label = None
def clean_workshop_partner_email(self):
"""
Take care of uniqueness constraint ourselves
"""
email = self.cleaned_data.get("workshop_partner_email")
qs = Registration.objects.filter(workshop_partner_email=email).exists()
if email and qs:
raise forms.ValidationError("Workshop parter already taken.")
return email
def clean_agree_to_terms(self):
data = self.cleaned_data["agree_to_terms"]
if data is False:
raise forms.ValidationError("You must agree to the terms.")
return data
def clean(self):
cleaned_data = super().clean()
email = cleaned_data.get("email")
email_repeat = cleaned_data.get("email_repeat")
ws_partner_email = cleaned_data.get("workshop_partner_email")
if email != email_repeat:
raise forms.ValidationError("Ensure email verfication matches.")
if email and ws_partner_email and email == ws_partner_email:
raise forms.ValidationError("You can't partner with yourself.")
| smokeyfeet/smokeyfeet-registration | src/smokeyfeet/registration/forms.py | Python | mit | 2,149 |
import urllib.request as urllib
from bs4 import BeautifulSoup as bs
import unicodedata
import pandas as pd
import os
baseURL='http://snre.ifas.ufl.edu/academics/graduate/courses-syllabi-and-curriculum/'
classListFile='majorClassLists/SNREList.csv'
html_titles = ['Principles of Ecology Courses','Particular Perspectives and Systems Ecology Courses',
'Natural Science Courses','Social Sciences Courses',
'Sustainability Studies Courses','Research and Design Methods Courses']
short_names = ['Principles of Ecology', 'Particular Systems', 'Natural Science',
'Social Science', 'Sustainability', 'Research & Design']
catagories = pd.DataFrame({'html_title':html_titles,'subCatagory':short_names})
#Only run if this datafile doesn't exist
if os.path.exists(classListFile):
print('SNRE List exists. Delete it if you want to remake it: ', classListFile)
exit()
pageSoup=bs(urllib.urlopen(baseURL), 'lxml')
# deal with unicode
def convert_u(t):
return unicodedata.normalize('NFKD', t)
################################################
# functions defining different html sections for
# use with beautifulsoup
# Class rows are 'tr' elements with exactly 4 'td' elements
def is_class_listing(tag):
if tag.name=='tr':
return len(tag.find_all('td')) == 4
else:
return False
######################################################
# Primary scraping code
class_list = []
for catagory_section in pageSoup.find_all('table'):
html_title = convert_u(catagory_section.find('h3').text)
subCatagory = catagories['subCatagory'][catagories.html_title==html_title].tolist()[0]
for class_listing in catagory_section.find_all(is_class_listing):
prefix_and_number = convert_u(class_listing.find_all('td')[0].text)
title = convert_u(class_listing.find_all('td')[1].text).strip()
prefix = prefix_and_number.split(' ')[0].strip()
number = prefix_and_number.split(' ')[1].strip()
class_list.append({'coursePrefix':prefix,
'courseNum':number,
'title':title,
'subCategory':subCatagory})
class_list = pd.DataFrame(class_list)
############################
#Some classes have multiple sub-categories. Go through and make one row per
#class, combining the sub-categories.
#There are duplicate rows where the only difference is the subcategory. First find
#all unique rows.
class_list_temp=class_list[['coursePrefix','courseNum','title']].drop_duplicates()
#Initialize a subCategory for the unique rows
class_list_temp['subCategory']=''
#Go through row by row and pull the subCategories out, combining them where there are multiple
for index, row in class_list_temp.iterrows():
    #pull out the subcategories in a list
    subCats=class_list['subCategory'][class_list['title']==row['title']].drop_duplicates().tolist()
    #Clear any nan values that sneak in
    subCats=[x for x in subCats if str(x) != 'nan']
    #Combine them into a string and write them back to the temp dataframe.
    #Assigning to the iterrows() row would only modify a copy, so use .loc
    #on the dataframe itself.
    class_list_temp.loc[index,'subCategory']=','.join(subCats)
class_list = class_list_temp
class_list.to_csv(classListFile, index=False)
| sdtaylor/scheduleCrossRef | getSNREClassList.py | Python | mit | 3,204 |
# -*- coding: utf8 -*-
"""
The ``config`` module
=====================
Create a named logger and return it so users can log in different log files : one for each module.
"""
__author__ = 'Salas'
__copyright__ = 'Copyright 2014 LTL'
__credits__ = ['Salas']
__license__ = 'MIT'
__version__ = '0.2.0'
__maintainer__ = 'Salas'
__email__ = '[email protected]'
__status__ = 'Pre-Alpha'
import os
import os.path as path
import errno
import sys
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import utils.log
logger = utils.log.get_logger('config')
def get_config(config_file_path=None, config_prefix='bot'):
    """
    Return the config from a yaml file as a dictionary. Create the file if it does not exist, using the example file as a template.
    :param config_file_path: explicit path to the yaml config file; defaults to config/<prefix>.config.yaml
    :param config_prefix: prefix used to build the config and example file names
    :return: the configuration dictionary
    """
    if config_file_path is None:
        config_dir_path = path.abspath(path.join(os.sep, path.dirname(__file__), path.pardir, 'config'))
        config_file_path = path.join(config_dir_path, '{}.config.yaml'.format(config_prefix))
    else:
        # derive the directory from the given path so the bootstrap branch
        # below can still locate the example file
        config_dir_path = path.dirname(config_file_path)
    config_example_path = path.join(config_dir_path, '{}.example.yaml'.format(config_prefix))
try:
with open(config_file_path, 'rb') as config_stream:
config_dict = yaml.load(config_stream, Loader=Loader)
    except IOError:
        logger.info('Config file not found, creating it from the example template.')
try:
os.makedirs(config_dir_path)
except OSError as exc:
if exc.errno == errno.EEXIST and path.isdir(config_dir_path):
pass
else:
raise
with open(config_file_path, 'a'):
os.utime(config_file_path, None)
try:
with open(config_example_path, 'rb') as config_example_stream:
config_dict_example = yaml.load(config_example_stream, Loader=Loader)
# TODO : console based example file modification
with open(config_file_path, 'wb') as config_stream:
yaml.dump(config_dict_example, config_stream, Dumper=Dumper, encoding='utf-8')
except IOError:
logger.critical("No example file. Exiting.")
sys.exit(0)
try:
with open(config_file_path, 'rb') as config_stream:
config_dict = yaml.load(config_stream, Loader=Loader)
except IOError:
sys.exit(0)
else:
with open(config_file_path, 'rb') as config_stream:
config_dict = yaml.load(config_stream, Loader=Loader)
return config_dict
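# Editor's usage sketch (not part of the module): typical call sites. The
# prefix is a placeholder.
def example_load_configs():
    bot_config = get_config()  # loads (or bootstraps) config/bot.config.yaml
    irc_config = get_config(config_prefix='irc')  # a second config set in the same directory
    return bot_config, irc_config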
| salas106/irc-ltl-framework | utils/config.py | Python | mit | 2,675 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import warnings
import datetime
import frappe
import frappe.defaults
import frappe.async
import re
import frappe.model.meta
from frappe.utils import now, get_datetime, cstr, cast_fieldtype
from frappe import _
from frappe.model.utils.link_count import flush_local_link_count
from frappe.model.utils import STANDARD_FIELD_CONVERSION_MAP
from frappe.utils.background_jobs import execute_job, get_queue
from frappe import as_unicode
import six
# imports - compatibility imports
from six import (
integer_types,
string_types,
binary_type,
text_type,
iteritems
)
# imports - third-party imports
from markdown2 import UnicodeWithAttrs
from pymysql.times import TimeDelta
from pymysql.constants import ER, FIELD_TYPE
from pymysql.converters import conversions
import pymysql
# Helpers
def _cast_result(doctype, result):
batch = [ ]
try:
for field, value in result:
df = frappe.get_meta(doctype).get_field(field)
if df:
value = cast_fieldtype(df.fieldtype, value)
batch.append(tuple([field, value]))
except frappe.exceptions.DoesNotExistError:
return result
return tuple(batch)
class Database:
"""
	   Open a database connection with the given parameters; if use_default is True, use the
	   login details from `conf.py`. This is called by the request handler and is accessible using
	   the `db` global variable. The `sql` method is also global, to run queries
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0, local_infile = 0):
self.host = host or frappe.conf.db_host or 'localhost'
self.user = user or frappe.conf.db_name
self._conn = None
if ac_name:
self.user = self.get_db_login(ac_name) or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
# this param is to load CSV's with LOCAL keyword.
# it can be set in site_config as > bench set-config local_infile 1
# once the local-infile is set on MySql Server, the client needs to connect with this option
# Connections without this option leads to: 'The used command is not allowed with this MariaDB version' error
self.local_infile = local_infile or frappe.conf.local_infile
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""Connects to a database as set in `site_config.json`."""
warnings.filterwarnings('ignore', category=pymysql.Warning)
usessl = 0
if frappe.conf.db_ssl_ca and frappe.conf.db_ssl_cert and frappe.conf.db_ssl_key:
usessl = 1
self.ssl = {
'ca':frappe.conf.db_ssl_ca,
'cert':frappe.conf.db_ssl_cert,
'key':frappe.conf.db_ssl_key
}
conversions.update({
FIELD_TYPE.NEWDECIMAL: float,
FIELD_TYPE.DATETIME: get_datetime,
UnicodeWithAttrs: conversions[text_type]
})
if six.PY2:
conversions.update({
TimeDelta: conversions[binary_type]
})
if usessl:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, ssl=self.ssl, conv = conversions, local_infile = self.local_infile)
else:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, conv = conversions, local_infile = self.local_infile)
# MYSQL_OPTION_MULTI_STATEMENTS_OFF = 1
# # self._conn.set_server_option(MYSQL_OPTION_MULTI_STATEMENTS_OFF)
self._cursor = self._conn.cursor()
if self.user != 'root':
self.use(self.user)
frappe.local.rollback_observers = []
def use(self, db_name):
"""`USE` db_name."""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
"""Throw exception for dangerous queries: `ALTER`, `DROP`, `TRUNCATE` if not `Administrator`."""
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and frappe.session.user != 'Administrator':
frappe.throw(_("Not permitted"), frappe.PermissionError)
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""Execute a SQL query and fetch all rows.
:param query: SQL query.
:param values: List / dict of values to be escaped and substituted in the query.
:param as_dict: Return as a dictionary.
:param as_list: Always return as a list.
:param formatted: Format values like date etc.
:param debug: Print query and `EXPLAIN` in debug log.
:param ignore_ddl: Catch exception if table, column missing.
:param as_utf8: Encode values as UTF 8.
:param auto_commit: Commit after executing the query.
:param update: Update this dict to all rows (if returned `as_dict`).
Examples:
# return customer names as dicts
frappe.db.sql("select name from tabCustomer", as_dict=True)
# return names beginning with a
frappe.db.sql("select name from tabCustomer where name like %s", "a%")
# values as dict
frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
{"name": "a%", "owner":"[email protected]"})
"""
if not self._conn:
self.connect()
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
# MySQL-python==1.2.5 hack!
if not isinstance(values, (dict, tuple, list)):
values = (values,)
if debug:
try:
self.explain_query(query, values)
frappe.errprint(query % values)
except TypeError:
frappe.errprint([query, values])
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log("with values:")
frappe.log(values)
frappe.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
frappe.errprint(query)
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log(">>>>")
self._cursor.execute(query)
except Exception as e:
if ignore_ddl and e.args[0] in (ER.BAD_FIELD_ERROR, ER.NO_SUCH_TABLE,
ER.CANT_DROP_FIELD_OR_KEY):
pass
# NOTE: causes deadlock
# elif e.args[0]==2006:
# # mysql has gone away
# self.connect()
# return self.sql(query=query, values=values,
# as_dict=as_dict, as_list=as_list, formatted=formatted,
# debug=debug, ignore_ddl=ignore_ddl, as_utf8=as_utf8,
# auto_commit=auto_commit, update=update)
else:
raise
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def explain_query(self, query, values=None):
"""Print `EXPLAIN` in error log."""
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
"""Return data as list of single elements (first column).
Example:
# doctypes = ["DocType", "DocField", "User", ...]
doctypes = frappe.db.sql_list("select name from DocType")
"""
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=(), debug=False):
"""Commit and execute a query. DDL (Data Definition Language) queries that alter schema
autocommit in MariaDB."""
self.commit()
self.sql(query, debug=debug)
def check_transaction_status(self, query):
"""Raises exception if more than 20,000 `INSERT`, `UPDATE` queries are
executed in one transaction. This is to ensure that writes are always flushed otherwise this
could cause the system to hang."""
if self.transaction_writes and \
query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
raise Exception('This statement can cause implicit commit')
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ('update', 'insert', 'delete'):
self.transaction_writes += 1
if self.transaction_writes > 200000:
if self.auto_commit_on_many_writes:
frappe.db.commit()
else:
frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
"""Internal. Converts results to dict."""
result = self._cursor.fetchall()
ret = []
needs_formatting = self.needs_formatting(result, formatted)
for r in result:
row_dict = frappe._dict({})
for i in range(len(r)):
if needs_formatting:
val = self.convert_to_simple_type(r[i], formatted)
else:
val = r[i]
if as_utf8 and type(val) is text_type:
val = val.encode('utf-8')
row_dict[self._cursor.description[i][0]] = val
ret.append(row_dict)
return ret
def needs_formatting(self, result, formatted):
"""Returns true if the first row in the result has a Date, Datetime, Long Int."""
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
"""Returns result metadata."""
return self._cursor.description
	def convert_to_simple_type(self, v, formatted=0):
		"""Format date, time, longint values."""
		# NOTE: this early return deliberately bypasses the legacy formatting
		# logic below, so values are currently passed through unchanged
		return v
		from frappe.utils import formatdate, fmt_money
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
if isinstance(v, datetime.date):
v = text_type(v)
if formatted:
v = formatdate(v)
# time
elif isinstance(v, (datetime.timedelta, datetime.datetime)):
v = text_type(v)
# long
elif isinstance(v, integer_types):
v=int(v)
# convert to strings... (if formatted)
if formatted:
if isinstance(v, float):
v=fmt_money(v)
elif isinstance(v, int):
v = text_type(v)
return v
def convert_to_lists(self, res, formatted=0, as_utf8=0):
"""Convert tuple output to lists (internal)."""
nres = []
needs_formatting = self.needs_formatting(res, formatted)
for r in res:
nr = []
for c in r:
if needs_formatting:
val = self.convert_to_simple_type(c, formatted)
else:
val = c
if as_utf8 and type(val) is text_type:
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def convert_to_utf8(self, res, formatted=0):
"""Encode result as UTF-8."""
nres = []
for r in res:
nr = []
for c in r:
if type(c) is text_type:
c = c.encode('utf-8')
nr.append(self.convert_to_simple_type(c, formatted))
nres.append(nr)
return nres
def build_conditions(self, filters):
"""Convert filters sent as dict, lists to SQL conditions. filter's key
is passed by map function, build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
conditions = []
values = {}
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
_rhs = " %(" + key + ")s"
value = filters.get(key)
values[key] = value
if isinstance(value, (list, tuple)):
				# value is a tuple like ("!=", 0)
_operator = value[0]
values[key] = value[1]
if isinstance(value[1], (tuple, list)):
# value is a list in tuple ("in", ("A", "B"))
inner_list = []
for i, v in enumerate(value[1]):
inner_key = "{0}_{1}".format(key, i)
values[inner_key] = v
inner_list.append("%({0})s".format(inner_key))
_rhs = " ({0})".format(", ".join(inner_list))
del values[key]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
condition = "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + _rhs
else:
condition = "`" + key + "` " + _operator + _rhs
conditions.append(condition)
if isinstance(filters, int):
# docname is a number, convert to string
filters = str(filters)
if isinstance(filters, string_types):
filters = { "name": filters }
for f in filters:
_build_condition(f)
return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
"""Returns `get_value` with fieldname='*'"""
return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, cache=False):
"""Returns a document property or list of properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
frappe.db.get_value("Customer", {"name": ("like a%")})
# return last login of **User** `[email protected]`
frappe.db.get_value("User", "[email protected]", "last_login")
last_login, last_ip = frappe.db.get_value("User", "[email protected]",
["last_login", "last_ip"])
# returns default date_format
frappe.db.get_value("System Settings", None, "date_format")
"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
order_by, cache=cache)
return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, update=None, cache=False):
"""Returns multiple document properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
customers = frappe.db.get_values("Customer", {"name": ("like a%")})
# return last login of **User** `[email protected]`
user = frappe.db.get_values("User", "[email protected]", "*")[0]
"""
out = None
if cache and isinstance(filters, string_types) and \
(doctype, filters, fieldname) in self.value_cache:
return self.value_cache[(doctype, filters, fieldname)]
if not order_by: order_by = 'modified desc'
if isinstance(filters, list):
out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)
else:
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, string_types):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update)
except Exception as e:
if ignore and e.args[0] in (1146, 1054):
# table or column not found, return None
out = None
elif (not ignore) and e.args[0]==1146:
# table not found, look in singles
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
else:
raise
else:
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
if cache and isinstance(filters, string_types):
self.value_cache[(doctype, filters, fieldname)] = out
return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
"""Get values from `tabSingles` (Single DocTypes) (internal).
:param fields: List of fields,
:param filters: Filters (dict).
:param doctype: DocType name.
"""
# TODO
# if not frappe.model.meta.is_single(doctype):
# raise frappe.DoesNotExistError("DocType", doctype)
if fields=="*" or isinstance(filters, dict):
# check if single doc matches with filters
values = self.get_singles_dict(doctype)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
			if isinstance(fields, list):
				# list comprehension keeps this a real list on Python 3,
				# where map() would return an iterator
				return [[values.get(d) for d in fields]]
else:
r = self.sql("""select field, value
from tabSingles where field in (%s) and doctype=%s""" \
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
# r = _cast_result(doctype, r)
if as_dict:
if r:
r = frappe._dict(r)
if update:
r.update(update)
return [r]
else:
return []
else:
return r and [[i[1] for i in r]] or []
def get_singles_dict(self, doctype, debug = False):
"""Get Single DocType as dict.
:param doctype: DocType of the single object whose value is requested
Example:
			# Get column and value of the single doctype Accounts Settings
account_settings = frappe.db.get_singles_dict("Accounts Settings")
"""
result = self.sql("""
SELECT field, value
FROM `tabSingles`
WHERE doctype = %s
""", doctype)
# result = _cast_result(doctype, result)
dict_ = frappe._dict(result)
return dict_
def get_all(self, *args, **kwargs):
return frappe.get_all(*args, **kwargs)
def get_list(self, *args, **kwargs):
return frappe.get_list(*args, **kwargs)
	def get_single_value(self, doctype, fieldname, cache=False):
		"""Get property of Single DocType. Values are always cached locally
		(the `cache` argument is currently not consulted).
		:param doctype: DocType of the single object whose value is requested
:param fieldname: `fieldname` of the property whose value is requested
Example:
# Get the default value of the company from the Global Defaults doctype.
company = frappe.db.get_single_value('Global Defaults', 'default_company')
"""
value = self.value_cache.setdefault(doctype, {}).get(fieldname)
if value is not None:
return value
val = self.sql("""select value from
tabSingles where doctype=%s and field=%s""", (doctype, fieldname))
val = val[0][0] if val else None
if val=="0" or val=="1":
# check type
val = int(val)
self.value_cache[doctype][fieldname] = val
return val
def get_singles_value(self, *args, **kwargs):
"""Alias for get_single_value"""
return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f or " as " in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fl)
else:
fl = fields
if fields=="*":
as_dict = True
conditions, values = self.build_conditions(filters)
order_by = ("order by " + order_by) if order_by else ""
r = self.sql("select {0} from `tab{1}` {2} {3} {4}"
.format(fl, doctype, "where" if conditions else "", conditions, order_by), values,
as_dict=as_dict, debug=debug, update=update)
return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
names = list(filter(None, names))
if names:
return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
else:
return {}
def update(self, *args, **kwargs):
"""Update multiple values. Alias for `set_value`."""
return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val, modified=None, modified_by=None,
update_modified=True, debug=False):
"""Set a single value in the database, do not call the ORM triggers
but update the modified timestamp (unless specified not to).
**Warning:** this function will not call Document events and should be avoided in normal cases.
:param dt: DocType name.
:param dn: Document name.
:param field: Property / field name or dictionary of values to be updated
		:param val: Value to be updated.
:param modified: Use this as the `modified` timestamp.
:param modified_by: Set this user as `modified_by`.
:param update_modified: default True. Set as false, if you don't want to update the timestamp.
:param debug: Print the query in the developer / js console.
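		Example:
			# Illustrative sketch; assumes a "ToDo" doctype with a document "TODO0001"
			frappe.db.set_value("ToDo", "TODO0001", "status", "Closed")
			# several fields can be passed as a dict (the val argument is then unused)
			frappe.db.set_value("ToDo", "TODO0001", {"status": "Closed", "priority": "Low"}, None)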
"""
if not modified:
modified = now()
if not modified_by:
modified_by = frappe.session.user
to_update = {}
if update_modified:
to_update = {"modified": modified, "modified_by": modified_by}
if isinstance(field, dict):
to_update.update(field)
else:
to_update.update({field: val})
if dn and dt!=dn:
# with table
conditions, values = self.build_conditions(dn)
values.update(to_update)
set_values = []
for key in to_update:
set_values.append('`{0}`=%({0})s'.format(key))
self.sql("""update `tab{0}`
set {1} where {2}""".format(dt, ', '.join(set_values), conditions),
values, debug=debug)
else:
# for singles
keys = list(to_update)
self.sql('''
delete from tabSingles
where field in ({0}) and
doctype=%s'''.format(', '.join(['%s']*len(keys))),
list(keys) + [dt], debug=debug)
for key, value in iteritems(to_update):
self.sql('''insert into tabSingles(doctype, field, value) values (%s, %s, %s)''',
(dt, key, value), debug=debug)
if dt in self.value_cache:
del self.value_cache[dt]
def set(self, doc, field, val):
"""Set value in document. **Avoid**"""
doc.db_set(field, val)
def touch(self, doctype, docname):
"""Update the modified timestamp of this document."""
from frappe.utils import now
modified = now()
frappe.db.sql("""update `tab{doctype}` set `modified`=%s
where name=%s""".format(doctype=doctype), (modified, docname))
return modified
def set_temp(self, value):
"""Set a temperory value and return a key."""
key = frappe.generate_hash()
frappe.cache().hset("temp", key, value)
return key
def get_temp(self, key):
"""Return the temperory value and delete it."""
return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
"""Save a global key value. Global values will be automatically set if they match fieldname."""
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
"""Returns a global key value."""
return self.get_default(key, user)
def set_default(self, key, val, parent="__default", parenttype=None):
"""Sets a global / user default value."""
frappe.defaults.set_default(key, val, parent, parenttype)
def add_default(self, key, val, parent="__default", parenttype=None):
"""Append a default value for a key, there can be multiple default values for a particular key."""
frappe.defaults.add_default(key, val, parent, parenttype)
def get_default(self, key, parent="__default"):
"""Returns default value as a list if multiple or single"""
d = self.get_defaults(key, parent)
return isinstance(d, list) and d[0] or d
def get_defaults(self, key=None, parent="__default"):
"""Get all defaults"""
if key:
defaults = frappe.defaults.get_defaults(parent)
d = defaults.get(key, None)
			if not d and key != frappe.scrub(key):
d = defaults.get(frappe.scrub(key), None)
return d
else:
return frappe.defaults.get_defaults(parent)
def begin(self):
self.sql("start transaction")
def commit(self):
"""Commit current transaction. Calls SQL `COMMIT`."""
self.sql("commit")
frappe.local.rollback_observers = []
self.flush_realtime_log()
enqueue_jobs_after_commit()
flush_local_link_count()
def flush_realtime_log(self):
for args in frappe.local.realtime_log:
frappe.async.emit_via_redis(*args)
frappe.local.realtime_log = []
def rollback(self):
"""`ROLLBACK` current transaction."""
self.sql("rollback")
self.begin()
for obj in frappe.local.rollback_observers:
if hasattr(obj, "on_rollback"):
obj.on_rollback()
frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
"""Return true of field exists."""
return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (dt, fn))
def table_exists(self, doctype):
"""Returns True if table for given doctype exists."""
return ("tab" + doctype) in self.get_tables()
def get_tables(self):
return [d[0] for d in self.sql("show tables")]
def a_row_exists(self, doctype):
"""Returns True if atleast one row exists."""
return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None):
"""Returns true if document exists.
:param dt: DocType name.
:param dn: Document name or filter dict."""
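		# Example (illustrative):
		#   frappe.db.exists("User", "test@example.com")                  # by name
		#   frappe.db.exists({"doctype": "User", "first_name": "Test"})   # by filters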
if isinstance(dt, string_types):
if dt!="DocType" and dt==dn:
return True # single always exists (!)
try:
return self.get_value(dt, dn, "name")
except:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
					conditions.append('`%s` = "%s"' % (d, cstr(dt[d]).replace('"', '\\"')))
return self.sql('select name from `tab%s` where %s' % \
(dt['doctype'], " and ".join(conditions)))
except:
return None
def count(self, dt, filters=None, debug=False, cache=False):
"""Returns `COUNT(*)` for given DocType and filters."""
if cache and not filters:
cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
if cache_count is not None:
return cache_count
if filters:
conditions, filters = self.build_conditions(filters)
count = frappe.db.sql("""select count(*)
from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
return count
else:
count = frappe.db.sql("""select count(*)
from `tab%s`""" % (dt,))[0][0]
if cache:
frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)
return count
def get_creation_count(self, doctype, minutes):
"""Get count of records created in the last x minutes"""
from frappe.utils import now_datetime
from dateutil.relativedelta import relativedelta
return frappe.db.sql("""select count(name) from `tab{doctype}`
where creation >= %s""".format(doctype=doctype),
now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_db_table_columns(self, table):
"""Returns list of column names from given table."""
return [r[0] for r in self.sql("DESC `%s`" % table)]
def get_table_columns(self, doctype):
"""Returns list of column names from given doctype."""
return self.get_db_table_columns('tab' + doctype)
def has_column(self, doctype, column):
"""Returns True if column exists in database."""
return column in self.get_table_columns(doctype)
def get_column_type(self, doctype, column):
return frappe.db.sql('''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_name = 'tab{0}' AND COLUMN_NAME = "{1}"'''.format(doctype, column))[0][0]
def add_index(self, doctype, fields, index_name=None):
"""Creates an index with given fields if not already created.
Index name will be `fieldname1_fieldname2_index`"""
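		# Example (illustrative; assumes a "Note" doctype with these columns):
		#   frappe.db.add_index("Note", ["title", "modified(10)"])
		#   # creates index `title_modified_index`; the "(10)" length spec stays in
		#   # the column list but is stripped from the index name by the regex below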
if not index_name:
index_name = "_".join(fields) + "_index"
# remove index length if present e.g. (10) from index name
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
if not frappe.db.sql("""show index from `tab%s` where Key_name="%s" """ % (doctype, index_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add index `%s`(%s)""" % (doctype, index_name, ", ".join(fields)))
def add_unique(self, doctype, fields, constraint_name=None):
if isinstance(fields, string_types):
fields = [fields]
if not constraint_name:
constraint_name = "unique_" + "_".join(fields)
if not frappe.db.sql("""select CONSTRAINT_NAME from information_schema.TABLE_CONSTRAINTS
where table_name=%s and constraint_type='UNIQUE' and CONSTRAINT_NAME=%s""",
('tab' + doctype, constraint_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add unique `%s`(%s)""" % (doctype, constraint_name, ", ".join(fields)))
def get_system_setting(self, key):
def _load_system_settings():
return self.get_singles_dict("System Settings")
return frappe.cache().get_value("system_settings", _load_system_settings).get(key)
def close(self):
"""Close database connection."""
if self._conn:
# self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
def escape(self, s, percent=True):
"""Excape quotes and percent in given string."""
# pymysql expects unicode argument to escape_string with Python 3
s = as_unicode(pymysql.escape_string(as_unicode(s)), "utf-8").replace("`", "\\`")
# NOTE separating % escape, because % escape should only be done when using LIKE operator
# or when you use python format string to generate query that already has a %s
# for example: sql("select name from `tabUser` where name=%s and {0}".format(conditions), something)
# defaulting it to True, as this is the most frequent use case
# ideally we shouldn't have to use ESCAPE and strive to pass values via the values argument of sql
if percent:
s = s.replace("%", "%%")
return s
def get_descendants(self, doctype, name):
'''Return descendants of the current record'''
lft, rgt = self.get_value(doctype, name, ('lft', 'rgt'))
return self.sql_list('''select name from `tab{doctype}`
where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
def enqueue_jobs_after_commit():
if frappe.flags.enqueue_after_commit and len(frappe.flags.enqueue_after_commit) > 0:
for job in frappe.flags.enqueue_after_commit:
q = get_queue(job.get("queue"), async=job.get("async"))
q.enqueue_call(execute_job, timeout=job.get("timeout"),
kwargs=job.get("queue_args"))
frappe.flags.enqueue_after_commit = []
| manassolanki/frappe | frappe/database.py | Python | mit | 30,790 |
#
# Copyright (c) 2009-2012 Joshua Hughes <[email protected]>
#
import webbrowser
import qmk
class BrowseCommand(qmk.Command):
'''Open the supplied URL in the default web browser.'''
def __init__(self):
self._name = 'browse'
self._help = self.__doc__
@qmk.Command.actionRequiresArgument
def action(self, arg):
webbrowser.open_new_tab(arg)
def commands(): return [ BrowseCommand() ]
| kivhift/qmk | src/commands/browse.py | Python | mit | 426 |
import sys
sys.path.append("../../")
from unittest.mock import patch, MagicMock
MockRPi = MagicMock()
MockSpidev = MagicMock()
modules = {
"RPi": MockRPi,
"RPi.GPIO": MockRPi.GPIO,
"spidev": MockSpidev
}
patcher = patch.dict("sys.modules", modules)
patcher.start()
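# With the hardware-only modules stubbed out in sys.modules, the imports below
# succeed on machines that have neither RPi.GPIO nor spidev installed.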
from gfxlcd.driver.ssd1306.spi import SPI
from gfxlcd.driver.ssd1306.ssd1306 import SSD1306
class TestSSD1306(object):
def test_initialize(self):
SSD1306(128, 64, SPI())
| bkosciow/gfxlcd | gfxlcd/tests/test_ssd1306.py | Python | mit | 464 |
from functools import reduce
import numpy as np
# Qubit representation in a ket or bra state. Only the basis qubits |0> and
# |1> can be created directly; by applying the defined operations it is
# possible to obtain other qubits and qubit registers.
class Qubit:
KET = True
BRA = False
# Creates a new qubit |n> or <n| where n is one or zero
def __init__(self, n, state=KET):
if state != Qubit.KET and state != Qubit.BRA:
raise ValueError("State must be either KET or BRA")
self.vector = np.matrix([[1], [0]] if n == 0 else [[0], [1]])
self.state = state
    # Private helper method to create a new qubit or qubit register from a
# vector
def __new(self, vector, state=KET):
q = Qubit(1, state)
q.vector = vector
return q
    # Computes the conjugate transpose of a qubit, turning a ket into a bra
    # and vice versa
def conjugate(self):
return self.__new(np.transpose(np.conjugate(self.vector)),
not self.state)
# Tensor of the qubit with another one
def tensor(self, other):
return self.__new(np.kron(self.vector, other.vector), self.state)
# Applies the given gate
def apply_gate(self, gate):
if self.state != Qubit.KET:
raise ValueError("State must be a Ket")
return self.__new(gate.matrix * self.vector)
# Performs the tensor product of a given list of qubits to create a
# qubit register
def to_register(qubits):
return reduce(lambda acc, q: acc.tensor(q), qubits)
# Performs the inner product <self|other> of the qubit with another qubit
def inner(self, other):
        if self.state != Qubit.KET or other.state != Qubit.KET:
            raise ValueError("Both qubits must be kets")
return (self.conjugate().vector * other.vector)[0, 0]
# Performs the outer product |self><other| of the qubit with another qubit
def outer(self, other):
        if self.state != Qubit.KET or other.state != Qubit.KET:
            raise ValueError("Both qubits must be kets")
return self.vector * other.conjugate().vector
# Adds two qubits
def __add__(self, other):
return self.__operation(np.add, other)
# Subtracts two qubits
def __sub__(self, other):
return self.__operation(np.subtract, other)
# Negates a qubit
def __neg__(self):
return self.__new(-self.vector, self.state)
# Multiplies two qubits. If the argument is an int or a float, it performs
# a multiplication by a scalar. If it's another qubit, it performs the
# tensor product
def __mul__(self, other):
if isinstance(other, Qubit):
if self.state != Qubit.KET or other.state != Qubit.KET:
raise ValueError("Both qubits have to be kets")
return self.tensor(other)
elif isinstance(other, int) or isinstance(other, float):
return self.__new(other * self.vector, state = Qubit.KET)
else:
raise ValueError("* Qubit undefined for " + str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
# Private method that applies the given operation between the qubit and the
# other qubit
def __operation(self, operation, other):
if self.state != other.state:
raise ValueError("Both qubits must be on the same state")
return self.__new(operation(self.vector, other.vector), self.state)
# Vector representation of the qubit
def __repr__(self):
v = self.vector if self.state == Qubit.BRA else np.transpose(self.vector)
return repr(v)
q0 = Qubit(0)
q1 = Qubit(1)
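
# Example usage (illustrative): the Bell state (|00> + |11>)/sqrt(2)
#   bell = 2 ** -0.5 * (q0 * q0 + q1 * q1)
# basis states are orthogonal: q0.inner(q1) == 0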
| miguelfrde/pyquantum | pyquantum/qubit.py | Python | mit | 3,624 |
"""
Configuration parameters:
path.internal.ansi2html
"""
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# temporary having it here, but actually we have the same data
# in the adapter module
GITHUB_REPOSITORY = {
"late.nz" : 'chubin/late.nz',
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
def visualize(answer_data, request_options):
query = answer_data['query']
answers = answer_data['answers']
topics_list = answer_data['topics_list']
editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
repository_button = ''
if len(answers) == 1:
repository_button = _github_button(answers[0]['topic_type'])
result, found = frontend.ansi.visualize(answer_data, request_options)
return _render_html(query, result, editable, repository_button, topics_list, request_options), found
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(query, result, editable, repository_button, topics_list, request_options):
def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8')
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in topics_list)))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET">'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
        # the topic directory may start with an underscore that is omitted in the query
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
return result
| chubin/cheat.sh | lib/frontend/html.py | Python | mit | 4,663 |
#!/usr/bin/env python
import sys
import configparser
import os
import shutil
from PyQt5 import QtWidgets
from PyQt5 import QtWebKitWidgets
from PyQt5 import QtCore
# Read config file
home_dir = os.path.expanduser("~")
conf_path = os.path.join(home_dir, ".config/mrps/mrps.conf")
config = configparser.ConfigParser(delimiters=('=',))  # one-tuple: '=' is the only delimiter
config.read(conf_path)
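
# Illustrative sketch of what ~/.config/mrps/mrps.conf is expected to contain;
# the reveal.js path and HTML scaffolding below are assumptions, not defaults:
#
# [DEFAULT]
# revealjs_path = /usr/share/javascript/reveal.js
# html_top = <html><head><link rel="stylesheet" href="reveal.js/css/reveal.css"/></head>
#     <body><div class="reveal"><div class="slides"><section data-markdown><textarea data-template>
# html_bottom = </textarea></section></div></div>
#     <script src="reveal.js/js/reveal.js"></script><script>Reveal.initialize();</script></body></html>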
def clean_up():
os.remove(html_file_full)
shutil.rmtree(os.path.join(o_file_dir, "reveal.js"))
app = QtWidgets.QApplication(sys.argv)
app.aboutToQuit.connect(clean_up)
if len(sys.argv) == 2:
o_file_full = os.path.abspath(sys.argv[1])
else:
o_file_full = QtWidgets.QFileDialog.getOpenFileName()[0]
if o_file_full:
o_file_dir = os.path.dirname(o_file_full)
o_file_name = os.path.basename(os.path.normpath(o_file_full))
o_file_name_bare = os.path.splitext(o_file_name)[0]
html_file_full = os.path.join(o_file_dir, o_file_name_bare + ".html")
shutil.copytree(os.path.normpath(config['DEFAULT']['revealjs_path']), os.path.join(o_file_dir, "reveal.js"))
md_file = open(o_file_full, 'r')
md_content = md_file.read()
md_file.close()
f = open(html_file_full, 'w')
f.write(config['DEFAULT']['html_top'] + '\n\n' +
md_content + '\n\n' +
config['DEFAULT']['html_bottom'])
f.close()
web = QtWebKitWidgets.QWebView()
web.load(QtCore.QUrl('file://' + html_file_full))
web.show()
sys.exit(app.exec_())
else:
exit()
| cnodell/mrps | mrps.py | Python | mit | 1,477 |
#
# The Python Imaging Library
# $Id$
#
# simple postscript graphics interface
#
# History:
# 1996-04-20 fl Created
# 1999-01-10 fl Added gsave/grestore to image method
# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
#
# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import sys
from . import EpsImagePlugin
##
# Simple Postscript graphics interface.
class PSDraw:
"""
Sets up printing to the given file. If **fp** is omitted,
:py:attr:`sys.stdout` is assumed.
"""
def __init__(self, fp=None):
if not fp:
fp = sys.stdout
self.fp = fp
def _fp_write(self, to_write):
if self.fp == sys.stdout:
self.fp.write(to_write)
else:
self.fp.write(bytes(to_write, "UTF-8"))
def begin_document(self, id=None):
"""Set up printing of a document. (Write Postscript DSC header.)"""
# FIXME: incomplete
self._fp_write(
"%!PS-Adobe-3.0\n"
"save\n"
"/showpage { } def\n"
"%%EndComments\n"
"%%BeginDocument\n"
)
# self._fp_write(ERROR_PS) # debugging!
self._fp_write(EDROFF_PS)
self._fp_write(VDI_PS)
self._fp_write("%%EndProlog\n")
self.isofont = {}
def end_document(self):
"""Ends printing. (Write Postscript DSC footer.)"""
self._fp_write("%%EndDocument\nrestore showpage\n%%End\n")
if hasattr(self.fp, "flush"):
self.fp.flush()
def setfont(self, font, size):
"""
Selects which font to use.
:param font: A Postscript font name
:param size: Size in points.
"""
if font not in self.isofont:
# reencode font
self._fp_write("/PSDraw-{} ISOLatin1Encoding /{} E\n".format(font, font))
self.isofont[font] = 1
# rough
self._fp_write("/F0 %d /PSDraw-%s F\n" % (size, font))
def line(self, xy0, xy1):
"""
Draws a line between the two points. Coordinates are given in
Postscript point coordinates (72 points per inch, (0, 0) is the lower
left corner of the page).
"""
xy = xy0 + xy1
self._fp_write("%d %d %d %d Vl\n" % xy)
def rectangle(self, box):
"""
Draws a rectangle.
:param box: A 4-tuple of integers whose order and function is currently
undocumented.
Hint: the tuple is passed into this format string:
.. code-block:: python
%d %d M %d %d 0 Vr\n
"""
self._fp_write("%d %d M %d %d 0 Vr\n" % box)
def text(self, xy, text):
"""
Draws text at the given position. You must use
:py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
"""
text = "\\(".join(text.split("("))
text = "\\)".join(text.split(")"))
xy = xy + (text,)
self._fp_write("%d %d M (%s) S\n" % xy)
def image(self, box, im, dpi=None):
"""Draw a PIL image, centered in the given box."""
# default resolution depends on mode
if not dpi:
if im.mode == "1":
dpi = 200 # fax
else:
dpi = 100 # greyscale
# image size (on paper)
x = im.size[0] * 72 / dpi
y = im.size[1] * 72 / dpi
# max allowed size
xmax = float(box[2] - box[0])
ymax = float(box[3] - box[1])
if x > xmax:
y = y * xmax / x
x = xmax
if y > ymax:
x = x * ymax / y
y = ymax
dx = (xmax - x) / 2 + box[0]
dy = (ymax - y) / 2 + box[1]
self._fp_write("gsave\n{:f} {:f} translate\n".format(dx, dy))
if (x, y) != im.size:
# EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
sx = x / im.size[0]
sy = y / im.size[1]
self._fp_write("{:f} {:f} scale\n".format(sx, sy))
EpsImagePlugin._save(im, self.fp, None, 0)
self._fp_write("\ngrestore\n")
# --------------------------------------------------------------------
# Postscript driver
#
# EDROFF.PS -- Postscript driver for Edroff 2
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
EDROFF_PS = """\
/S { show } bind def
/P { moveto show } bind def
/M { moveto } bind def
/X { 0 rmoveto } bind def
/Y { 0 exch rmoveto } bind def
/E { findfont
dup maxlength dict begin
{
1 index /FID ne { def } { pop pop } ifelse
} forall
/Encoding exch def
dup /FontName exch def
currentdict end definefont pop
} bind def
/F { findfont exch scalefont dup setfont
[ exch /setfont cvx ] cvx bind def
} bind def
"""
#
# VDI.PS -- Postscript driver for VDI meta commands
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
VDI_PS = """\
/Vm { moveto } bind def
/Va { newpath arcn stroke } bind def
/Vl { moveto lineto stroke } bind def
/Vc { newpath 0 360 arc closepath } bind def
/Vr { exch dup 0 rlineto
exch dup neg 0 exch rlineto
exch neg 0 rlineto
0 exch rlineto
100 div setgray fill 0 setgray } bind def
/Tm matrix def
/Ve { Tm currentmatrix pop
translate scale newpath 0 0 .5 0 360 arc closepath
Tm setmatrix
} bind def
/Vf { currentgray exch setgray fill setgray } bind def
"""
#
# ERROR.PS -- Error handler
#
# History:
# 89-11-21 fl: created (pslist 1.10)
#
ERROR_PS = """\
/landscape false def
/errorBUF 200 string def
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
errordict begin /handleerror {
initmatrix /Courier findfont 10 scalefont setfont
newpath 72 720 moveto $error begin /newerror false def
(PostScript Error) show errorNL errorNL
(Error: ) show
/errorname load errorBUF cvs show errorNL errorNL
(Command: ) show
/command load dup type /stringtype ne { errorBUF cvs } if show
errorNL errorNL
(VMstatus: ) show
vmstatus errorBUF cvs show ( bytes available, ) show
errorBUF cvs show ( bytes used at level ) show
errorBUF cvs show errorNL errorNL
(Operand stack: ) show errorNL /ostack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall errorNL
(Execution stack: ) show errorNL /estack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall
end showpage
} def end
"""
| sserrot/champion_relationships | venv/Lib/site-packages/PIL/PSDraw.py | Python | mit | 6,735 |
"""Core XML support for Python.
This package contains four sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
etree -- The ElementTree XML library. This is a subset of the full
ElementTree XML release.
"""
__all__ = ["dom", "parsers", "sax", "etree"]
# When being checked-out without options, this has the form
# "<dollar>Revision: x.y </dollar>"
# When exported using -kv, it is "x.y".
__version__ = "$Revision: 41660 $".split()[-2:][0]
_MINIMUM_XMLPLUS_VERSION = (0, 8, 4)
import os
# only prefer _xmlplus if the environment variable PY_USE_XMLPLUS is defined
if 'PY_USE_XMLPLUS' in os.environ:
try:
import _xmlplus
except ImportError:
pass
else:
try:
v = _xmlplus.version_info
except AttributeError:
# _xmlplus is too old; ignore it
pass
else:
if v >= _MINIMUM_XMLPLUS_VERSION:
import sys
_xmlplus.__path__.extend(__path__)
sys.modules[__name__] = _xmlplus
else:
del v
| ekristen/mythboxee | xml/__init__.py | Python | mit | 1,360 |
import json
from google.appengine.ext import ndb
from models.account import Account
class Suggestion(ndb.Model):
"""
    Suggestions are generic containers for user-submitted data corrections to
    the site. They generally store a model, a key, and then a json blob of
    fields to append or amend in the model.
"""
MODELS = set(["event", "match", "media"])
REVIEW_ACCEPTED = 1
REVIEW_PENDING = 0
REVIEW_REJECTED = -1
review_state = ndb.IntegerProperty(default=0)
reviewed_at = ndb.DateTimeProperty()
reviewer = ndb.KeyProperty(kind=Account)
author = ndb.KeyProperty(kind=Account, required=True)
contents_json = ndb.StringProperty(indexed=False) # a json blob
target_key = ndb.StringProperty() # "2012cmp"
target_model = ndb.StringProperty(choices=MODELS, required=True) # "event"
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
self._contents = None
super(Suggestion, self).__init__(*args, **kw)
@property
def contents(self):
"""
Lazy load contents_json
"""
if self._contents is None:
self._contents = json.loads(self.contents_json)
return self._contents
@contents.setter
def contents(self, contents):
self._contents = contents
self.contents_json = json.dumps(self._contents)
@property
def youtube_video(self):
if "youtube_videos" in self.contents:
return self.contents["youtube_videos"][0]
@classmethod
def render_media_key_name(cls, year, target_model, target_key, foreign_type, foreign_key):
"""
Keys aren't required for this model. This is only necessary if checking
for duplicate suggestions is desired.
"""
return 'media_{}_{}_{}_{}_{}'.format(year, target_model, target_key, foreign_type, foreign_key)
@classmethod
def render_webcast_key_name(cls, event_key, webcast_dict):
"""
Keys aren't required for this model. This is only necessary if checking
for duplicate suggestions is desired.
"""
return 'webcast_{}_{}_{}_{}'.format(
event_key,
webcast_dict.get('type', None),
webcast_dict.get('channel', None),
webcast_dict.get('file', None))
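
# Illustrative sketch (`account_key` and the contents payload are assumptions):
#   key_name = Suggestion.render_webcast_key_name(
#       "2016necmp", {"type": "twitch", "channel": "tbagameday"})
#   suggestion = Suggestion(id=key_name, author=account_key,
#                           target_model="event", target_key="2016necmp")
#   suggestion.contents = {"webcasts": [{"type": "twitch", "channel": "tbagameday"}]}
#   suggestion.put()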
| synth3tk/the-blue-alliance | models/suggestion.py | Python | mit | 2,411 |
import argparse
import structlog
import logging
from pyramid.paster import get_app
from snovault.elasticsearch.create_mapping import run as run_create_mapping
from dcicutils.log_utils import set_logging
from dcicutils.deployment_utils import CreateMappingOnDeployManager
log = structlog.getLogger(__name__)
EPILOG = __doc__
# This order determines the order in which items will be mapped and added to the queue.
# Entries may use the item type (e.g. file_fastq) or the class name (e.g. FileFastq).
ITEM_INDEX_ORDER = [
'Award',
'Lab',
'AccessKey',
'User',
'Ontology',
'OntologyTerm',
'StaticSection',
'Document',
'Protocol',
'FileFormat',
'ExperimentType',
'Vendor',
'Organism',
'Gene',
'GenomicRegion',
'BioFeature',
'Target',
'Construct',
'Enzyme',
'Antibody',
'FileReference',
'IndividualChicken',
'IndividualFly',
'IndividualHuman',
'IndividualMouse',
'IndividualPrimate',
'IndividualZebrafish',
'Image',
'Modification',
'Biosource',
'BiosampleCellCulture',
'Biosample',
'Workflow',
'WorkflowMapping',
'PublicationTracking',
'Software',
'AnalysisStep',
'Badge',
'SopMap',
'SummaryStatistic',
'SummaryStatisticHiC',
'TrackingItem',
'TreatmentAgent',
'TreatmentRnai',
'ImagingPath',
'MicroscopeSettingA1',
'MicroscopeSettingA2',
'MicroscopeSettingD1',
'MicroscopeSettingD2',
'MicroscopeConfiguration',
'HiglassViewConfig',
'QualityMetricAtacseq',
'QualityMetricBamqc',
'QualityMetricBamcheck',
'QualityMetricChipseq',
'QualityMetricDedupqcRepliseq',
'QualityMetricFastqc',
'QualityMetricFlag',
'QualityMetricPairsqc',
'QualityMetricMargi',
'QualityMetricRnaseq',
'QualityMetricRnaseqMadqc',
'QualityMetricWorkflowrun',
'QualityMetricQclist',
'QualityMetricMcool',
'ExperimentAtacseq',
'ExperimentCaptureC',
'ExperimentChiapet',
'ExperimentDamid',
'ExperimentHiC',
'ExperimentMic',
'ExperimentRepliseq',
'ExperimentSeq',
'ExperimentTsaseq',
'ExperimentSet',
'ExperimentSetReplicate',
'Publication',
'FileCalibration',
'FileFastq',
'FileMicroscopy',
'FileProcessed',
'FileSet',
'FileSetCalibration',
'FileSetMicroscopeQc',
'FileVistrack',
'DataReleaseUpdate',
'WorkflowRun',
'WorkflowRunAwsem',
'WorkflowRunSbg',
'Page',
]
def get_my_env(app):
"""
Gets the env name of the currently running environment
:param app: handle to Pyramid app
:return: current env
"""
# Return value is presumably one of the above-declared environments
return app.registry.settings.get('env.name')
def _run_create_mapping(app, args):
"""
Runs create_mapping with deploy options and report errors. Allows args passed from argparse in main to override
the default deployment configuration.
:param app: pyramid application handle
:param args: args from argparse
:return: None
"""
try:
deploy_cfg = CreateMappingOnDeployManager.get_deploy_config(env=get_my_env(app), args=args, log=log,
client='create_mapping_on_deploy')
if not deploy_cfg['SKIP']:
log.info('Calling run_create_mapping for env %s.' % deploy_cfg['ENV_NAME'])
run_create_mapping(app=app,
check_first=(not deploy_cfg['WIPE_ES']),
purge_queue=args.clear_queue, # this option does not vary, so no need to override
item_order=ITEM_INDEX_ORDER,
strict=deploy_cfg['STRICT'])
else:
log.info('NOT calling run_create_mapping for env %s.' % deploy_cfg['ENV_NAME'])
exit(0)
except Exception as e:
log.error("Exception encountered while gathering deployment information or running create_mapping")
log.error("%s: %s" % (e.__class__.__name__, e))
exit(1)
def main():
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Create Elasticsearch mapping on deployment", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--clear-queue', help="Specify to clear the SQS queue", action='store_true', default=False)
CreateMappingOnDeployManager.add_argparse_arguments(parser)
args = parser.parse_args()
app = get_app(args.config_uri, args.app_name)
# Loading app will have configured from config file. Reconfigure here:
set_logging(in_prod=app.registry.settings.get('production'), log_name=__name__, level=logging.DEBUG)
# set_logging(app.registry.settings.get('elasticsearch.server'),
# app.registry.settings.get('production'),
# level=logging.DEBUG)
_run_create_mapping(app, args)
exit(0)
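
# Example invocation (illustrative; the config path and app name are assumptions):
#   python create_mapping_on_deploy.py production.ini --app-name app --clear-queue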
if __name__ == '__main__':
main()
| hms-dbmi/fourfront | src/encoded/commands/create_mapping_on_deploy.py | Python | mit | 5,218 |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
import datetime
from participantCollection import ParticipantCollection
# Edit Me!
participantFileNames = ['../stayclean-2014-november/participants.txt',
'../stayclean-2014-december/participants.txt',
'../stayclean-2015-january/participants.txt',
'../stayclean-2015-february/participants.txt',
'../stayclean-2015-march/participants.txt',
'../stayclean-2015-april/participants.txt',
'../stayclean-2015-may/participants.txt',
'../stayclean-2015-june/participants.txt',
'../stayclean-2015-july/participants.txt',
'../stayclean-2015-august/participants.txt',
'../stayclean-2015-september/participants.txt',
'../stayclean-2015-october/participants.txt',
'../stayclean-2015-november/participants.txt',
'../stayclean-2015-december/participants.txt',
'../stayclean-2016-january/participants.txt',
'../stayclean-2016-february/participants.txt',
'../stayclean-2016-march/participants.txt',
'../stayclean-2016-april/participants.txt',
'../stayclean-2016-may/participants.txt',
'../stayclean-2016-june/participants.txt',
'../stayclean-2016-july/participants.txt',
'../stayclean-2016-august/participants.txt',
'../stayclean-2016-september/participants.txt',
'../stayclean-2016-october/participants.txt',
'../stayclean-2016-november/participants.txt',
'../stayclean-2016-december/participants.txt',
'../stayclean-2017-january/participants.txt',
'../stayclean-2017-february/participants.txt',
'../stayclean-2017-march/participants.txt',
'../stayclean-2017-april/participants.txt',
'../stayclean-2017-may/participants.txt',
'../stayclean-2017-june/participants.txt',
'../stayclean-2017-july/participants.txt',
'../stayclean-2017-august/participants.txt',
'../stayclean-2017-september/participants.txt',
'../stayclean-2017-october/participants.txt',
'../stayclean-2017-november/participants.txt',
'../stayclean-2017-december/participants.txt',
'../stayclean-2018-january/participants.txt',
'../stayclean-2018-february/participants.txt',
'../stayclean-2018-march/participants.txt',
'../stayclean-2018-april/participants.txt',
'../stayclean-2018-may/participants.txt',
'../stayclean-2018-june/participants.txt',
'../stayclean-2018-july/participants.txt',
'../stayclean-2018-august/participants.txt',
'../stayclean-2018-september/participants.txt',
'../stayclean-2018-october/participants.txt',
'../stayclean-2018-november/participants.txt',
'../stayclean-2018-december/participants.txt',
'../stayclean-2019-january/participants.txt',
'../stayclean-2019-february/participants.txt',
'../stayclean-2019-march/participants.txt',
'../stayclean-2019-april/participants.txt',
'../stayclean-2019-may/participants.txt',
'../stayclean-2019-june/participants.txt',
'../stayclean-2019-july/participants.txt',
'../stayclean-2019-august/participants.txt',
'../stayclean-2019-september/participants.txt',
'./participants.txt']
sortedRelapseDates = []
for participantFileName in participantFileNames:
participants = ParticipantCollection(fileNameString=participantFileName)
sortedRelapseDates = sortedRelapseDates + participants.allRelapseDates()
sortedRelapseDates.sort()
earliestReportDate = sortedRelapseDates[0]
latestReportDate = sortedRelapseDates[-1]
reportDates = []
numberOfRelapsesPerDate = []
reportDatesAndNumberOfRelapses = {}
dayOfWeekIndexesAndNumberOfInstances = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
reportDate = earliestReportDate
while reportDate <= latestReportDate:
reportDatesAndNumberOfRelapses[reportDate] = 0
# dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] = dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] + 1
dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] += 1
reportDate += datetime.timedelta(days=1)
for relapseDate in sortedRelapseDates:
# reportDatesAndNumberOfRelapses[relapseDate] = reportDatesAndNumberOfRelapses[relapseDate] + 1
reportDatesAndNumberOfRelapses[relapseDate] += 1
dayOfWeekIndexesAndTotalNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for participantFileName in participantFileNames:
participants = ParticipantCollection(fileNameString=participantFileName)
# print participants.relapseDayOfWeekIndexesAndParticipants()
for index, parts in participants.relapseDayOfWeekIndexesAndParticipants().iteritems():
# dayOfWeekIndexesAndTotalNumberOfRelapses[index] = dayOfWeekIndexesAndTotalNumberOfRelapses[index] + len(parts)
dayOfWeekIndexesAndTotalNumberOfRelapses[index] += len(parts)
dayOfWeekIndexesAndAverageNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for index, instances in dayOfWeekIndexesAndNumberOfInstances.iteritems():
# dayOfWeekIndexesAndAverageNumberOfRelapses[index] = int(round(float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)))
dayOfWeekIndexesAndAverageNumberOfRelapses[index] = float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)
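# e.g. if a given weekday occurred 250 times across all tracked months and saw
# 100 relapses in total, its average is 100 / 250 = 0.4 relapses per occurrence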
spreadsheetTitle = "StayClean monthly challenge relapse data"
# spreadsheetTitle = "Test spreadsheet"
json_key = json.load(open('../google-oauth-credentials.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
gc = gspread.authorize(credentials)
spreadSheet = None
try:
spreadSheet = gc.open(spreadsheetTitle)
except gspread.exceptions.SpreadsheetNotFound:
print "No spreadsheet with title " + spreadsheetTitle
exit(1)
workSheet = spreadSheet.get_worksheet(0)
columnACells = workSheet.range("A2:A" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnBCells = workSheet.range("B2:B" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnCCells = workSheet.range("C2:C8")
columnDCells = workSheet.range("D2:D8")
reportDate = earliestReportDate
rowIndex = 0
while reportDate <= latestReportDate:
columnACells[rowIndex].value = str(reportDate)
columnBCells[rowIndex].value = str(reportDatesAndNumberOfRelapses[reportDate])
rowIndex += 1
reportDate += datetime.timedelta(days=1)
for weekdayIndex in range(0, 7):
weekdayName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][weekdayIndex]
# spreadsheetClient.UpdateCell(weekdayIndex + 2,3,weekdayName,spreadsheetId)
# spreadsheetClient.UpdateCell(weekdayIndex + 2,4,str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex]),spreadsheetId)
columnCCells[weekdayIndex].value = weekdayName
columnDCells[weekdayIndex].value = str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex])
allCells = columnACells + columnBCells + columnCCells + columnDCells
workSheet.update_cells(allCells)
exit(0)
| foobarbazblarg/stayclean | stayclean-2019-october/update-google-chart.py | Python | mit | 8,223 |
"""
WSGI config for stormtrooper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stormtrooper.settings")
application = get_wsgi_application()
| CompileInc/stormtrooper | stormtrooper/stormtrooper/wsgi.py | Python | mit | 401 |
""" Unit tests for ``wheezy.templates.engine.Engine``.
"""
import unittest
class EngineTestCase(unittest.TestCase):
""" Test the ``Engine``.
"""
def setUp(self):
from wheezy.template.engine import Engine
from wheezy.template.loader import DictLoader
self.engine = Engine(
loader=DictLoader(templates={}),
extensions=[])
def test_template_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.get_template('x'))
def test_import_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.import_name('x'))
def test_remove_unknown_name(self):
""" Invalidate name that is not known to engine.
"""
self.engine.remove('x')
def test_remove_name(self):
""" Invalidate name that is known to engine.
"""
self.engine.templates['x'] = 'x'
self.engine.renders['x'] = 'x'
self.engine.modules['x'] = 'x'
self.engine.remove('x')
| ezotrank/wheezy.template | src/wheezy/template/tests/test_engine.py | Python | mit | 1,081 |
from collections import defaultdict
import re
import json
import os
import Pubnub as PB
PUB_KEY = os.environ["PUB_KEY"]
SUB_KEY = os.environ["SUB_KEY"]
SEC_KEY = os.environ["SEC_KEY"]
CHANNEL_NAME = os.environ["CHANNEL_NAME"]
def frequency_count(text):
"determine count of each letter"
count = defaultdict(int)
for char in text:
count[char] += 1
return count
def callback(message, channel):
"print message, channel, and frequency count to STDOUT"
print("python recevied:" + str({
"channel": channel,
"message": message,
"frequency count":dict(frequency_count(message)),
}))
def error(message):
print({
"error" : message
})
if __name__ == "__main__":
PB.Pubnub(
publish_key = PUB_KEY,
subscribe_key = SUB_KEY,
secret_key = SEC_KEY,
cipher_key = '',
ssl_on = False,
).subscribe(
channels=CHANNEL_NAME,
callback=callback,
error=error
)
| gregory-nisbet/pubnub-api-example | subscriber.py | Python | mit | 994 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import random
import time
from pyquery import PyQuery as pq
from mongodb import db, conn
from requests.exceptions import ConnectionError
from chem_log import log
# urls = [
# 'http://www.sigmaaldrich.com/china-mainland/zh/analytical-chromatography/analytical-chromatography-catalog.html',
# 'http://www.sigmaaldrich.com/china-mainland/chemistry-product.html',
# 'http://www.sigmaaldrich.com/china-mainland/zh/materials-science/material-science-products.html?TablePage=9540636'
# ]
base_url = 'http://www.sigmaaldrich.com'
chromatography_db_collection = {
0: db.sigma_chromatography_urls_0,
1: db.sigma_chromatography_urls_1,
2: db.sigma_chromatography_urls_2,
3: db.sigma_chromatography_urls_3,
4: db.sigma_chromatography_urls_4,
5: db.sigma_chromatography_urls_5,
6: db.sigma_chromatography_urls_6,
7: db.sigma_chromatography_urls_7,
8: db.sigma_chromatography_urls_8,
9: db.sigma_chromatography_urls_9
}
def get_chromatography_base_urls():
"""
    Analytical chromatography: collect the base category URLs
:return:
"""
url = 'http://www.sigmaaldrich.com/china-mainland/zh/analytical-chromatography/analytical-chromatography-catalog.html'
res = get_res(url)
if res:
p = pq(res.content)
section = p('div.text.parbase.section').eq(0)
tables = pq(section).find('table.normal')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td a').attr('href')
d = {'url': base_url+href}
db.sigma_chromatography_urls.update(d, d, upsert=True)
def get_chromatography_urls():
"""
    Starting from the base URLs, crawl level by level. If a page is not a final
    product listing page, save its links into the collection for that level;
    otherwise save the concrete product URLs.
:return:
"""
# for i in range(10):
for i in range(4, 10):
if i == 0:
base_urls = db.sigma_chromatography_urls.find(timeout=False)
else:
base_urls = chromatography_db_collection[i-1].find(timeout=False)
print base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
                            # save into MongoDB
chromatography_db_collection[i].update({'url': item}, {'url': item}, upsert=True)
else:
                        # this is a product listing page; save the URLs into the product URL collection
chromatography_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def extract_li(p):
"""
    Extract the product-category URLs from a page that is not a final product listing page
    :param p: pyquery object
:return:
"""
url_list = []
uls = p('div.opcContainer table#opcmaintable table ul.opcsectionlist')
for ul in uls:
lis = pq(ul).find('li')
for li in lis:
url_list.append(base_url + pq(li)('a').attr('href'))
return url_list
def chromatography_extract_product_url(i, p):
"""
    Get the product URLs from a product listing page
    :param p: pyquery object
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
d = {'url': base_url+href}
db.sigma_chromatography_product_urls.update(d, d, upsert=True)
def get_product_detail():
"""
    Fetch the product details
:return: None
"""
# urls = db.sigma_chromatography_product_urls.find(timeout=False)
urls = [{'url': 'http://www.sigmaaldrich.com/catalog/product/aldrich/452238?lang=zh®ion=CN'}]
print 2323232
for url in urls:
res = get_res(url['url'])
print 124
if res:
p = pq(res.text)
print p
            pro_list = get_pro_list(p)  # get the concrete product specifications
def get_pro_list(p):
url = 'http://www.sigmaaldrich.com/catalog/PricingAvailability.do?productNumber=452238&brandKey=ALDRICH&divId=pricingContainerMessage'
container_message = p('div#pricingContainer div#pricingContainerMessage div.product-discontinued li.longMessageContainer').text()
pro_trs = list(p('div#pricingContainer div#pricingContainerMessage table').find('tr'))[1:]
print container_message
print pro_trs
chemistry_db_collection = {
0: db.sigma_chemistry_urls_0,
1: db.sigma_chemistry_urls_1,
2: db.sigma_chemistry_urls_2,
3: db.sigma_chemistry_urls_3,
4: db.sigma_chemistry_urls_4,
5: db.sigma_chemistry_urls_5,
6: db.sigma_chemistry_urls_6,
7: db.sigma_chemistry_urls_7,
8: db.sigma_chemistry_urls_8,
9: db.sigma_chemistry_urls_9
}
def get_chemistry_base_urls():
"""
    Chemistry:
    fetch URLs level by level down to the final product pages, collecting the URL of every product detail page
:return:
"""
url = 'http://www.sigmaaldrich.com/china-mainland/chemistry-product.html'
res = get_res(url)
if res:
p = pq(res.content)
# print res.content
section = p('#duoamidcol div.sides div.parsys.mainpar div.parbase.section').eq(1)
trs = pq(section)('table').find('tr')
for t in trs:
td_0 = pq(t)('td').eq(0)
td_1 = pq(t)('td').eq(2)
td_2 = pq(t)('td').eq(4)
for td in [td_0, td_1, td_2]:
lis = pq(td)('ul').find('li')
for li in lis:
href = pq(li)('a').attr('href')
if href:
d = {'url': base_url + href}
db.sigma_chemistry_urls.update(d, d, upsert=True)
more = pq(td)('div.one a').attr('href')
if more:
d_1 = {'url': base_url + more}
db.sigma_chemistry_urls.update(d_1, d_1, upsert=True)
def get_chemistry_urls():
"""
    Starting from the base URLs, crawl level by level. If a page is not a final
    product listing page, save its links into the collection for that level;
    otherwise save the concrete product URLs.
:return:
"""
# for i in range(10):
for i in range(5, 10):
if i == 0:
base_urls = db.sigma_chemistry_urls.find(timeout=False)
else:
base_urls = chemistry_db_collection[i-1].find(timeout=False)
print base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
                            # save into MongoDB
d = {'url': item}
chemistry_db_collection[i].update(d, d, upsert=True)
else:
                        # this is a product listing page; save the URLs into the product URL collection
chemistry_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def chemistry_extract_product_url(i, p):
"""
    Get the product URLs from a product listing page
    :param p: pyquery object
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
db.sigma_chemistry_product_urls.update({'url': base_url+href}, {'url': base_url+href}, upsert=True)
materials_db_collection = {
0: db.sigma_materials_urls_0,
1: db.sigma_materials_urls_1,
2: db.sigma_materials_urls_2,
3: db.sigma_materials_urls_3,
4: db.sigma_materials_urls_4,
5: db.sigma_materials_urls_5,
6: db.sigma_materials_urls_6,
7: db.sigma_materials_urls_7,
8: db.sigma_materials_urls_8,
9: db.sigma_materials_urls_9
}
def get_materials_urls():
"""
    Starting from the base URLs, crawl level by level. If a page is not a final
    product listing page, save its links into the collection for that level;
    otherwise save the concrete product URLs.
:return:
"""
# for i in range(10):
for i in range(6, 10):
if i == 0:
base_urls = [{
'url': 'http://www.sigmaaldrich.com/china-mainland/zh/materials-science/material-science-products.html?TablePage=9540636'
}]
else:
base_urls = materials_db_collection[i-1].find(timeout=False)
        # base_urls is a plain list when i == 0 and a pymongo cursor otherwise;
        # list.count() requires an argument, so handle the two cases separately
        print len(base_urls) if isinstance(base_urls, list) else base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
                            # save into MongoDB
materials_db_collection[i].update({'url': item}, {'url': item}, upsert=True)
else:
                        # this is a product listing page; save the URLs into the product URL collection
materials_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def materials_extract_product_url(i, p):
"""
    Get the product URLs from a product listing page
    :param p: pyquery object
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
d = {'url': base_url+href}
db.sigma_materials_product_urls.update(d, d, upsert=True)
def get_res(url):
"""
    Fetch the URL with requests and return the response, or None on failure
:param url:
:return:
"""
try:
requests.adapters.DEFAULT_RETRIES = 5
res = requests.get(url)
time.sleep(random.randint(0, 3))
if res.status_code == 200:
return res
return None
except Exception, e:
time.sleep(20)
log.debug(str(e) + ' error')
return None
| mutoulbj/chem_spider | chem_spider/sigma_urls.py | Python | mit | 10,348 |
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
# pylint: disable=line-too-long
from azure.cli.command_modules.resource._validators import (validate_resource_type,
validate_parent,
_resolve_api_version as resolve_api_version)
class TestApiCheck(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_resolve_api_provider_backup(self):
""" Verifies provider is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/test')
self.assertEqual(resolve_api_version(self._get_mock_client(), resource_type), "2016-01-01")
def test_resolve_api_provider_with_parent_backup(self):
""" Verifies provider (with parent) is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/bar')
parent = validate_parent('foo/testfoo123')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type, parent),
"1999-01-01"
)
def test_resolve_api_all_previews(self):
""" Verifies most recent preview version returned only if there are no non-preview versions. """
resource_type = validate_resource_type('Mock/preview')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type),
"2005-01-01-preview"
)
def _get_mock_client(self):
client = MagicMock()
provider = MagicMock()
provider.resource_types = [
self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
self._get_mock_resource_type('foo/bar', ['1999-01-01-preview', '1999-01-01']),
self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview'])
]
client.providers.get.return_value = provider
return client
def _get_mock_resource_type(self, name, api_versions): #pylint: disable=no-self-use
rt = MagicMock()
rt.resource_type = name
rt.api_versions = api_versions
return rt
if __name__ == '__main__':
unittest.main()
| BurtBiel/azure-cli | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_api_check.py | Python | mit | 2,871 |
from grslra import testdata
from grslra.grslra_batch import grslra_batch, slra_by_factorization
from grslra.structures import Hankel
from grslra.scaling import Scaling
import numpy as np
import time
# The goal of this experiment is to identify an LTI system from a noisy outlier-contaminated and subsampled observation of its impulse response
PROFILE = 0
if PROFILE:
import cProfile
N = 80
m = 20
k = 5
sigma=0.05
outlier_rate = 0.05
outlier_amplitude = 1
rate_Omega=0.5
N_f = 20
scaling = Scaling(centering=True)
p = 0.1
x, x_0, U, Y = testdata.testdata_lti_outliers(N + N_f, m, k, rho=outlier_rate, amplitude=outlier_amplitude, sigma=sigma)
# determine scaling factor
scaling.scale_reference(x)
mu = (1-p) * (3 * sigma / scaling.factor) ** 2
# draw sampling set
card_Omega = np.int(np.round(rate_Omega * N))
Omega = np.random.choice(N, card_Omega, replace=False)
# create binary support vectors for Omega and Omega_not
entries = np.zeros((N + N_f, ))
entries[Omega] = 1
entries_not = np.ones_like(entries) - entries
# set unobserved entries in x to zero
x *= entries
x_Omega = x[Omega]
n = N + N_f - m + 1
hankel = Hankel(m, n)
grslra_params = {"PRINT": None, "VERBOSE": 1}
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_grslra, U, Y = grslra_batch(x_Omega, hankel, k, p, mu, params=grslra_params, Omega=Omega, x_0=x_0, scaling=scaling)
t_grslra = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("grslra.bin")
print "error GRSLRA: ", np.linalg.norm(l_grslra - x_0) / np.linalg.norm(x_0)
print "time GRSLRA: ", t_grslra
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_slrabyF = slra_by_factorization(x_Omega, m, k, PRINT=0, x_0=x_0, Omega=Omega, N=N + N_f)
t_slrabyf = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("slrabyf.bin")
print "error SLRA by F: ", np.linalg.norm(l_slrabyF - x_0) / np.linalg.norm(x_0)
print "time SLRA by F: ", t_slrabyf
np.savez('result_sysid_lti.npz', x_Omega=x_Omega, Omega=Omega, x_0=x_0, t_grslra=t_grslra, l_grslra=l_grslra, t_slrabyf=t_slrabyf, l_slrabyF=l_slrabyF) | clemenshage/grslra | experiments/6_grslra/system_identification_lti/system_identification.py | Python | mit | 2,175 |
"""segy.py - read and write SEG-Y files
From command line:
python segy.py <path-to-segy-file>
"""
from collections import OrderedDict
from pprint import pprint
import numpy as np
from sacker import Sacker
# SEG-Y spec: http://www.tritonimaginginc.com/site/content/public/downloads/FileFormatInfo/seg_y_rev1.pdf
SAMPLE_FORMATS = {
'f': 5, # 4-byte, IEEE floating-point
'i': 2, # 4-byte, two's complement integer
'h': 3, # 2-byte, two's complement integer
'b': 8, # 1-byte, two's complement integer
}
SEGY_HEADER = Sacker('>', '''
I job_id # Job identification number
i line_num # Line number
i reel_num # Reel number
h n_traces_per_ensemble # Number of data traces per ensemble
h n_auxtraces_per_ensemble # Number of auxilary traces per ensemble
h sample_interval # Sample interval (us)
h orig_sample_interval # Sample interval of original field recording
h n_trace_samples # Number of samples per data trace
h orig_n_trace_samples # Number of samples per data trace for original
# field recording
h sample_format # Data sample format code
h ensemble_fold # Expected number of data traces per
# trace ensemble (e.g. the CMP fold)
h trace_sorting_code
h vertical_sum_code
h sweep_freq_at_start # (Hz)
h sweep_freq_at_end # (Hz)
h sweep_length # (ms)
h sweep_type_code
h sweep_channel_trace_number
h start_taper_length # (ms)
h end_taper_length # (ms)
h taper_type
h correlated_traces
h binary_gain_recovered
h amplitude_recovery_method
h measurement_system # (1: meters, 2: feet)
h impulse_signal_polarity
h vibratory_polarity_code
240x
h segy_rev
h fixed_length_trace_flag
h n_extended_headers
94x''', length = 400)
TRACE_HEADER = Sacker('>', '''
i trace_seq_in_line # Trace sequence number within line - Numbers
# continue to increase if the same line
# continues across multiple SEG Y files
i trace_seq_in_file # Trace sequence number within SEG Y file.
# Each file starts with trace sequence one.
i orig_field_record_num
i trace_num_in_orig_record
i energy_source_point_number
i ensemble_num # i.e. CDP, CMP, CRP, etc
i trace_num_in_ensemble # Each ensemble starts with trace 1
h trace_id_code
h n_of_vertically_summed_traces # yielding this trace
h n_of_horizontally_summed_traces # yielding this trace
h data_use # (1 - production, 2 - test)
i source_reciever_dist
i reciever_elevation
i surface_elevation_at_source
i source_depth_below_surface # (a positive number)
i datum_elevation_at_reciever
i datum_elevation_at_source
i water_depth_at_source
i water_depth_at_reciever
h elevations_scaler # (1, 10, 100, 1000, 10000)
h coordinates_scaler # (1, 10, 100, 1000, 10000)
i source_coord_x
i source_coord_y
i reciever_coord_x
i reciever_coord_y
h coordinate_units # (1: length, 2: secs of arc, 3: decimal degrees,
# 4: degrees, minutes, seconds)
h weathering_velocity # (m/s or ft/s)
h subweathering_velocity # (m/s or ft/s)
h uphole_time_at_source # (ms)
h uphole_time_at_reciever # (ms)
h static_correction_at_source # (ms)
h static_correction_at_reciever # (ms)
h total_static # (ms)
h lag_time_A # (ms)
h lag_time_B # (ms)
h delay_recording_time # (ms)
h mute_time_start # (ms)
h mute_time_end # (ms)
h n_samples # Number of samples in this trace
h sample_interval # (us)
h field_instruments_gain_type # (1: fixed, 2: binary, 3: float)
h instrument_gain_const # (dB)
h instrument_early_gain # (dB)
h correlated # (1: no, 2: yes)
h sweep_freq_at_start # (Hz)
h sweep_freq_at_end # (Hz)
h sweep_length # (ms)
h sweep_type_code
h start_taper_length # (ms)
h end_taper_length # (ms)
h taper_type
h alias_filter_freq # (Hz)
h alias_filter_slope # (dB/octave)
h notch_filter_freq # (Hz)
h notch_filter_slope # (dB/octave)
h low_cut_filter_freq # (Hz)
h high_cut_filter_freq # (Hz)
h low_cut_filter_slope # (dB/octave)
h high_cut_filter_slope # (dB/octave)
h year
h day_of_year
h hour
h minute
h second
h time_basis_code # (1: local, 2: GMT, 3: Other, 4: UTC)
h trace_weighting_factor
h geophone_group_num_of_roll_switch
h geophone_group_num_of_first_trace
h geophone_group_num_of_last_trace
h gap_size # (total number of groups dropped)
h over_travel # associated with taper (1: down, 2: up)
60x''', length = 240)
TEXT_LEN = 3200
def decode_text(s):
text = s.decode('ibm037')
return '\n'.join(text[i:i+80] for i in range(0, len(text), 80))
def encode_text(s):
t = ''.join(line.ljust(80,' ')
for line in s.split('\n')).ljust(TEXT_LEN,' ')
return t.encode('ibm037')
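# Round-trip sketch: the 3200-byte text header is 40 "card images" of 80
# EBCDIC characters ('ibm037' is a standard-library alias of cp037):
#     encoded = encode_text('C 1 CLIENT\nC 2 LINE')
#     assert len(encoded) == TEXT_LEN
#     assert decode_text(encoded).startswith('C 1 CLIENT')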
def write_SEGY(outfile, file_header, text, traces):
with open(outfile, 'wb') as out:
out.write(encode_text(text))
out.write(SEGY_HEADER.wrap(file_header))
for header, data in traces:
out.write(TRACE_HEADER.wrap(header))
out.write(np.getbuffer(data.byteswap()))
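# Usage sketch for write_SEGY (assumes the header dicts carry every field
# named in the Sacker specs above, e.g. as produced by SEGY_HEADER.unwrap and
# TRACE_HEADER.unwrap, and that each trace's samples are a NumPy int array):
#     write_SEGY('out.sgy', file_header, text, [(trace_header, samples)])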
def read_SEGY(infile):
file_data = memoryview(open(infile, 'rb').read())
print decode_text(file_data[:TEXT_LEN].tobytes())
data = file_data[TEXT_LEN:]
header_len, header = SEGY_HEADER.unwrap(data, data_factory = OrderedDict)
pprint([(k, v) for k, v in header.items() if v != 0])
i = 0
data = data[header_len:]
while data:
trace_len, trace = TRACE_HEADER.unwrap(data, data_factory = OrderedDict)
print 'TRACE', i, '[%d]' % trace['trace_num_in_orig_record'],
pprint([(k, v) for k, v in trace.items() if v != 0])
print np.frombuffer(data[trace_len:trace_len + trace['n_samples']*2].tobytes(), np.int16).byteswap()
data = data[trace_len + trace['n_samples'] * 2:]
i += 1
if i > 10:
break
def main(infile):
read_SEGY(infile)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
sys.exit('Error: wrong arguments\n' + __doc__.rstrip())
main(*sys.argv[1:])
| shamrin/pyxtf | segy.py | Python | mit | 6,783 |
import pytoolkit as tk
module = tk.applications.darknet53
def test_model():
model = module.create(input_shape=(256, 256, 3), weights=None)
assert tuple(module.get_1_over_1(model).shape[1:3]) == (256, 256)
assert tuple(module.get_1_over_2(model).shape[1:3]) == (128, 128)
assert tuple(module.get_1_over_4(model).shape[1:3]) == (64, 64)
assert tuple(module.get_1_over_8(model).shape[1:3]) == (32, 32)
assert tuple(module.get_1_over_16(model).shape[1:3]) == (16, 16)
assert tuple(module.get_1_over_32(model).shape[1:3]) == (8, 8)
def test_save_load(tmpdir):
model = module.create(input_shape=(256, 256, 3), weights=None)
tk.models.save(model, str(tmpdir / "model.h5"))
tk.models.load(str(tmpdir / "model.h5"))
| ak110/pytoolkit | pytoolkit/applications/darknet53_test.py | Python | mit | 752 |
from oauth2_provider.settings import oauth2_settings
from oauthlib.common import generate_token
from django.http import JsonResponse
from oauth2_provider.models import AccessToken, Application, RefreshToken
from django.utils.timezone import now, timedelta
def get_token_json(access_token):
"""
Takes an AccessToken instance as an argument
and returns a JsonResponse instance from that
AccessToken
"""
token = {
'access_token': access_token.token,
'expires_in': oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
'token_type': 'Bearer',
'refresh_token': access_token.refresh_token.token,
'scope': access_token.scope
}
return JsonResponse(token)
def get_access_token(user):
"""
Takes a user instance and return an access_token as a JsonResponse
instance.
"""
# our oauth2 app
app = Application.objects.get(name="omics")
# We delete the old access_token and refresh_token
try:
old_access_token = AccessToken.objects.get(
user=user, application=app)
old_refresh_token = RefreshToken.objects.get(
user=user, access_token=old_access_token
)
    except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):
pass
else:
old_access_token.delete()
old_refresh_token.delete()
# we generate an access token
token = generate_token()
# we generate a refresh token
refresh_token = generate_token()
expires = now() + timedelta(seconds=oauth2_settings.
ACCESS_TOKEN_EXPIRE_SECONDS)
scope = "read write"
# we create the access token
access_token = AccessToken.objects.\
create(user=user,
application=app,
expires=expires,
token=token,
scope=scope)
# we create the refresh token
RefreshToken.objects.\
create(user=user,
application=app,
token=refresh_token,
access_token=access_token)
# we call get_token_json and returns the access token as json
return get_token_json(access_token)
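# Usage sketch (hypothetical Django view; assumes request.user is
# authenticated and the "omics" Application exists):
#     def token_view(request):
#         return get_access_token(request.user)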
| light940929/omics_api | bio/tools.py | Python | mit | 2,096 |
import Queue
import atexit
import logging
import threading
import traceback
class WorkerPool(object):
""" Pool of worker threads; grows as necessary. """
_lock = threading.Lock()
_pool = None # Singleton.
def __init__(self):
self._idle = [] # Queues of idle workers.
self._workers = {} # Maps queue to worker.
atexit.register(self.cleanup)
@staticmethod
def get_instance():
""" Return singleton instance. """
with WorkerPool._lock:
if WorkerPool._pool is None:
WorkerPool._pool = WorkerPool()
return WorkerPool._pool
@staticmethod
def cleanup():
""" Cleanup resources (worker threads). """
WorkerPool.get_instance()._cleanup()
def _cleanup(self):
""" Cleanup resources (worker threads). """
with self._lock:
for queue in self._workers:
queue.put((None, None, None, None))
self._workers[queue].join(1)
if self._workers[queue].is_alive():
logging.debug('WorkerPool: worker join timed-out.')
try:
self._idle.remove(queue)
except ValueError:
pass # Never released due to some other issue...
self._idle = []
self._workers = {}
@staticmethod
def get(one_shot=False):
"""
        Get a worker queue from the pool. Work requests are 4-tuples:
        ``(callable, args_tuple, kwargs_dict, reply_queue)``
        Work replies are 4-tuples:
        ``(request_queue, retval, exc, traceback)``
one_shot: bool
If True, the worker will self-release after processing one request.
"""
return WorkerPool.get_instance()._get(one_shot)
def _get(self, one_shot):
""" Get a worker queue from the pool. """
with self._lock:
try:
return self._idle.pop()
except IndexError:
queue = Queue.Queue()
worker = threading.Thread(target=self._service_loop,
args=(queue, one_shot))
worker.daemon = True
worker.start()
self._workers[queue] = worker
return queue
@staticmethod
def release(queue):
"""
Release a worker queue back to the pool.
queue: Queue
Worker queue previously obtained from :meth:`get`.
"""
return WorkerPool.get_instance()._release(queue)
def _release(self, queue):
""" Release a worker queue back to the pool. """
with self._lock:
self._idle.append(queue)
def _service_loop(self, request_q, one_shot):
""" Get (callable, args, kwargs) from request_q and queue result. """
while True:
            func, args, kwargs, reply_q = request_q.get()
            if func is None:
                request_q.task_done()
                return  # Shutdown.
            exc = None
            trace = None
            retval = None
            try:
                retval = func(*args, **kwargs)
except Exception as exc:
# Sometimes we have issues at shutdown.
try:
trace = traceback.format_exc()
except Exception: # pragma no cover
return
request_q.task_done()
if reply_q is not None:
reply_q.put((request_q, retval, exc, trace))
if one_shot:
self._release(request_q)
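# Minimal usage sketch (Python 2, matching the Queue import above):
#     reply_q = Queue.Queue()
#     work_q = WorkerPool.get()
#     work_q.put((pow, (2, 10), {}, reply_q))
#     _, retval, exc, trace = reply_q.get()  # retval == 1024 on success
#     WorkerPool.release(work_q)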
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/wrkpool.py | Python | mit | 3,630 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
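# Usage: python bitrpc.py <command>  (e.g. `python bitrpc.py getinfo`);
# the script prompts interactively for any arguments the command needs.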
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a StacyCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a StacyCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
        to = raw_input("To: ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| stacycoin/stacycoin | contrib/bitrpc/bitrpc.py | Python | mit | 7,840 |
# -*- coding: utf-8 -*-
# 2005/12/06
# Version 0.2.4
# pathutils.py
# Functions useful for working with files and paths.
# http://www.voidspace.org.uk/python/recipebook.shtml#utils
# Copyright Michael Foord 2004
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# For information about bugfixes, updates and support, please join the Pythonutils mailing list.
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# E-mail [email protected]
from __future__ import generators
"""
This module contains convenience functions for working with files and paths.
"""
__version__ = '0.2.4'
import os
import sys
import time
__all__ = (
'readlines',
'writelines',
'readbinary',
'writebinary',
'readfile',
'writefile',
'tslash',
'relpath',
'splitall',
'walkfiles',
'walkdirs',
'walkemptydirs',
'formatbytes',
'fullcopy',
'import_path',
'onerror',
'get_main_dir',
'main_is_frozen',
'Lock',
'LockError',
'LockFile',
'__version__',
)
######################################
# Functions to read and write files in text and binary mode.
def readlines(filename):
"""Passed a filename, it reads it, and returns a list of lines. (Read in text mode)"""
filehandle = open(filename, 'r')
outfile = filehandle.readlines()
filehandle.close()
return outfile
def writelines(filename, infile, newline=False):
"""
Given a filename and a list of lines it writes the file. (In text mode)
If ``newline`` is ``True`` (default is ``False``) it adds a newline to each
line.
"""
filehandle = open(filename, 'w')
if newline:
infile = [line + '\n' for line in infile]
filehandle.writelines(infile)
filehandle.close()
def readbinary(filename):
"""Given a filename, read a file in binary mode. It returns a single string."""
filehandle = open(filename, 'rb')
thisfile = filehandle.read()
filehandle.close()
return thisfile
def writebinary(filename, infile):
"""Given a filename and a string, write the file in binary mode. """
filehandle = open(filename, 'wb')
filehandle.write(infile)
filehandle.close()
def readfile(filename):
"""Given a filename, read a file in text mode. It returns a single string."""
filehandle = open(filename, 'r')
outfile = filehandle.read()
filehandle.close()
return outfile
def writefile(filename, infile):
"""Given a filename and a string, write the file in text mode."""
filehandle = open(filename, 'w')
filehandle.write(infile)
filehandle.close()
####################################################################
# Some functions for dealing with paths
def tslash(apath):
"""
Add a trailing slash (``/``) to a path if it lacks one.
It doesn't use ``os.sep`` because you end up in trouble on windoze, when you
want separators for URLs.
"""
if apath and apath != '.' and not apath.endswith('/') and not apath.endswith('\\'):
return apath + '/'
else:
return apath
def relpath(origin, dest):
"""
Return the relative path between origin and dest.
If it's not possible return dest.
If they are identical return ``os.curdir``
Adapted from `path.py <http://www.jorendorff.com/articles/python/path/>`_ by Jason Orendorff.
"""
origin = os.path.abspath(origin).replace('\\', '/')
dest = os.path.abspath(dest).replace('\\', '/')
#
orig_list = splitall(os.path.normcase(origin))
# Don't normcase dest! We want to preserve the case.
dest_list = splitall(dest)
#
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
#
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
#
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return os.curdir
else:
return os.path.join(*segments).replace('\\', '/')
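# Example (POSIX-style absolute paths):
#     relpath('/a/b/c', '/a/d')  ->  '../../d'
#     relpath('/a/b', '/a/b')    ->  '.' (os.curdir)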
def splitall(loc):
"""
Return a list of the path components in loc. (Used by relpath_).
The first item in the list will be either ``os.curdir``, ``os.pardir``, empty,
    or the root directory of loc (for example, ``/`` or ``C:\\``).
The other items in the list will be strings.
Adapted from *path.py* by Jason Orendorff.
"""
parts = []
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
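# Example (POSIX):
#     splitall('/usr/local/bin')  ->  ['/', 'usr', 'local', 'bin']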
#######################################################################
# a pre 2.3 walkfiles function - adapted from the path module by Jason Orendorff
join = os.path.join
isdir = os.path.isdir
isfile = os.path.isfile
def walkfiles(thisdir):
"""
walkfiles(D) -> iterator over files in D, recursively. Yields full file paths.
Adapted from path.py by Jason Orendorff.
"""
for child in os.listdir(thisdir):
thischild = join(thisdir, child)
if isfile(thischild):
yield thischild
elif isdir(thischild):
for f in walkfiles(thischild):
yield f
def walkdirs(thisdir):
"""
Walk through all the subdirectories in a tree. Recursively yields directory
names (full paths).
"""
for child in os.listdir(thisdir):
thischild = join(thisdir, child)
if isfile(thischild):
continue
elif isdir(thischild):
for f in walkdirs(thischild):
yield f
yield thischild
def walkemptydirs(thisdir):
"""
Recursively yield names of *empty* directories.
These are the only paths omitted when using ``walkfiles``.
"""
if not os.listdir(thisdir):
# if the directory is empty.. then yield it
yield thisdir
for child in os.listdir(thisdir):
thischild = join(thisdir, child)
if isdir(thischild):
for emptydir in walkemptydirs(thischild):
yield emptydir
###############################################################
# formatbytes takes a filesize (as returned by os.getsize() )
# and formats it for display in one of two ways !!
def formatbytes(sizeint, configdict=None, **configs):
"""
Given a file size as an integer, return a nicely formatted string that
    represents the size. Has various options to control its output.
You can pass in a dictionary of arguments or keyword arguments. Keyword
arguments override the dictionary and there are sensible defaults for options
you don't set.
Options and defaults are as follows :
* ``forcekb = False`` - If set this forces the output to be in terms
of kilobytes and bytes only.
* ``largestonly = True`` - If set, instead of outputting
``1 Mbytes, 307 Kbytes, 478 bytes`` it outputs using only the largest
denominator - e.g. ``1.3 Mbytes`` or ``17.2 Kbytes``
* ``kiloname = 'Kbytes'`` - The string to use for kilobytes
* ``meganame = 'Mbytes'`` - The string to use for Megabytes
* ``bytename = 'bytes'`` - The string to use for bytes
* ``nospace = True`` - If set it outputs ``1Mbytes, 307Kbytes``,
notice there is no space.
Example outputs : ::
19Mbytes, 75Kbytes, 255bytes
2Kbytes, 0bytes
23.8Mbytes
.. note::
It currently uses the plural form even for singular.
"""
defaultconfigs = { 'forcekb' : False,
'largestonly' : True,
'kiloname' : 'Kbytes',
'meganame' : 'Mbytes',
'bytename' : 'bytes',
'nospace' : True}
if configdict is None:
configdict = {}
for entry in configs:
# keyword parameters override the dictionary passed in
configdict[entry] = configs[entry]
#
for keyword in defaultconfigs:
if not configdict.has_key(keyword):
configdict[keyword] = defaultconfigs[keyword]
#
if configdict['nospace']:
space = ''
else:
space = ' '
#
mb, kb, rb = bytedivider(sizeint)
if configdict['largestonly']:
if mb and not configdict['forcekb']:
return stringround(mb, kb)+ space + configdict['meganame']
elif kb or configdict['forcekb']:
if mb and configdict['forcekb']:
kb += 1024*mb
return stringround(kb, rb) + space+ configdict['kiloname']
else:
return str(rb) + space + configdict['bytename']
else:
outstr = ''
if mb and not configdict['forcekb']:
outstr = str(mb) + space + configdict['meganame'] +', '
if kb or configdict['forcekb'] or mb:
if configdict['forcekb']:
kb += 1024*mb
outstr += str(kb) + space + configdict['kiloname'] +', '
return outstr + str(rb) + space + configdict['bytename']
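# Worked examples (2500000 bytes = 2 MB + 393 KB + 416 B):
#     formatbytes(2500000)                               -> '2.4Mbytes'
#     formatbytes(2500000, forcekb=True, nospace=False)  -> '2441.4 Kbytes'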
def stringround(main, rest):
"""
Given a file size in either (mb, kb) or (kb, bytes) - round it
appropriately.
"""
# divide an int by a float... get a float
value = main + rest/1024.0
return str(round(value, 1))
def bytedivider(nbytes):
"""
Given an integer (probably a long integer returned by os.getsize() )
it returns a tuple of (megabytes, kilobytes, bytes).
This can be more easily converted into a formatted string to display the
size of the file.
"""
mb, remainder = divmod(nbytes, 1048576)
kb, rb = divmod(remainder, 1024)
return (mb, kb, rb)
########################################
def fullcopy(src, dst):
"""
Copy file from src to dst.
If the dst directory doesn't exist, we will attempt to create it using makedirs.
"""
import shutil
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
shutil.copy(src, dst)
#######################################
def import_path(fullpath, strict=True):
"""
Import a file from the full path. Allows you to import from anywhere,
something ``__import__`` does not do.
If strict is ``True`` (the default), raise an ``ImportError`` if the module
is found in the "wrong" directory.
Taken from firedrop2_ by `Hans Nowak`_
.. _firedrop2: http://www.voidspace.org.uk/python/firedrop2/
.. _Hans Nowak: http://zephyrfalcon.org
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.insert(0, path)
try:
module = __import__(filename)
except ImportError:
del sys.path[0]
raise
del sys.path[0]
#
if strict:
path = os.path.split(module.__file__)[0]
# FIXME: doesn't *startswith* allow room for errors ?
if not fullpath.startswith(path):
raise ImportError("Module '%s' found, but not in '%s'" % (
filename, fullpath))
#
return module
##############################################################################
# These functions get us our directory name
# Even if py2exe or another freeze tool has been used
def main_is_frozen():
"""Return ``True`` if we're running from a frozen program."""
import imp
return (
# new py2exe
hasattr(sys, "frozen") or
# tools/freeze
imp.is_frozen("__main__"))
def get_main_dir():
"""Return the script directory - whether we're frozen or not."""
if main_is_frozen():
return os.path.abspath(os.path.dirname(sys.executable))
return os.path.abspath(os.path.dirname(sys.argv[0]))
##############################
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
##########################################################
# A set of objects providing simple, cross-platform file locking
class LockError(IOError):
"""The generic error for locking - it is a subclass of ``IOError``."""
class Lock(object):
"""A simple file lock, compatible with windows and Unixes."""
def __init__(self, filename, timeout=5, step=0.1):
"""
Create a ``Lock`` object on file ``filename``
``timeout`` is the time in seconds to wait before timing out, when
attempting to acquire the lock.
``step`` is the number of seconds to wait in between each attempt to
acquire the lock.
"""
self.timeout = timeout
self.step = step
self.filename = filename
self.locked = False
def lock(self, force=True):
"""
Lock the file for access by creating a directory of the same name (plus
a trailing underscore).
The file is only locked if you use this class to acquire the lock
before accessing.
If ``force`` is ``True`` (the default), then on timeout we forcibly
acquire the lock.
If ``force`` is ``False``, then on timeout a ``LockError`` is raised.
"""
if self.locked:
raise LockError('%s is already locked' % self.filename)
t = 0
while t < self.timeout:
t += self.step
try:
os.mkdir(self._mungedname())
except os.error as err:
time.sleep(self.step)
else:
self.locked = True
return
if force:
self.locked = True
else:
raise LockError('Failed to acquire lock on %s' % self.filename)
def unlock(self, ignore=True):
"""
Release the lock.
If ``ignore`` is ``True`` and removing the lock directory fails, then
    the error is suppressed. (This may happen if the lock was acquired
via a timeout.)
"""
if not self.locked:
raise LockError('%s is not locked' % self.filename)
self.locked = False
try:
os.rmdir(self._mungedname())
except os.error as err:
if not ignore:
raise LockError('unlocking appeared to fail - %s' %
self.filename)
def _mungedname(self):
"""
Override this in a subclass if you want to change the way ``Lock``
creates the directory name.
"""
return self.filename + '_'
def __del__(self):
"""Auto unlock when object is deleted."""
if self.locked:
self.unlock()
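# Minimal usage sketch:
#     lock = Lock('data.txt')
#     lock.lock()
#     try:
#         pass  # work with data.txt while other Lock users are excluded
#     finally:
#         lock.unlock()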
class LockFile(Lock):
"""
A file like object with an exclusive lock, whilst it is open.
The lock is provided by the ``Lock`` class, which creates a directory
with the same name as the file (plus a trailing underscore), to indicate
that the file is locked.
This is simple and cross platform, with some limitations :
* Unusual process termination could result in the directory
being left.
* The process acquiring the lock must have permission to create a
directory in the same location as the file.
* It only locks the file against other processes that attempt to
acquire a lock using ``LockFile`` or ``Lock``.
"""
def __init__(self, filename, mode='r', bufsize=-1, timeout=5, step=0.1,
force=True):
"""
Create a file like object that is locked (using the ``Lock`` class)
until it is closed.
The file is only locked against another process that attempts to
acquire a lock using ``Lock`` (or ``LockFile``).
The lock is released automatically when the file is closed.
The filename, mode and bufsize arguments have the same meaning as for
the built in function ``open``.
The timeout and step arguments have the same meaning as for a ``Lock``
object.
The force argument has the same meaning as for the ``Lock.lock`` method.
A ``LockFile`` object has all the normal ``file`` methods and
attributes.
"""
Lock.__init__(self, filename, timeout, step)
        # may raise an error if force is ``False``
self.lock(force)
# may also raise an error
self._file = open(filename, mode, bufsize)
def close(self, ignore=True):
"""
close the file and release the lock.
ignore has the same meaning as for ``Lock.unlock``
"""
self._file.close()
self.unlock(ignore)
def __getattr__(self, name):
"""delegate appropriate method/attribute calls to the file."""
if name not in self.__dict__:
return getattr(self._file, name)
else:
            return self.__dict__[name]
def __setattr__(self, name, value):
"""Only allow attribute setting that don't clash with the file."""
if not '_file' in self.__dict__:
Lock.__setattr__(self, name, value)
elif hasattr(self._file, name):
return setattr(self._file, name, value)
else:
Lock.__setattr__(self, name, value)
def __del__(self):
"""Auto unlock (and close file) when object is deleted."""
if self.locked:
self.unlock()
self._file.close()
"""
Changelog
=========
2005/12/06 Version 0.2.4
-----------------------------
Fixed bug in ``onerror``. (Missing stat import)
2005/11/26 Version 0.2.3
-----------------------------
Added ``Lock``, ``LockError``, and ``LockFile``
Added ``__version__``
2005/11/13 Version 0.2.2
-----------------------------
Added the py2exe support functions.
Added ``onerror``.
2005/08/28 Version 0.2.1
-----------------------------
* Added ``import_path``
* Added ``__all__``
* Code cleanup
2005/06/01 Version 0.2.0
-----------------------------
Added ``walkdirs`` generator.
2005/03/11 Version 0.1.1
-----------------------------
Added rounding to ``formatbytes`` and improved ``bytedivider`` with ``divmod``.
Now explicit keyword parameters override the ``configdict`` in ``formatbytes``.
2005/02/18 Version 0.1.0
-----------------------------
The first numbered version.
"""
| amir-zeldes/rstWeb | modules/pathutils.py | Python | mit | 19,405 |
alpha = "abcdefghijklmnopqrstuvwxyz"
for n in range(0, 26, 1):
print alpha[0:n+1]
for n in range(26, 1, -1):
print alpha[0:n-1]
"""
alpha = "a"
m = ord(alpha)
n = 0
while n < m:
print chr(m + 1) in range(65, 122)
m += 1
for i in range(ord('a'), 123, 1):
print chr(i[0:m+1])
while m < 123:
print chr(m[0:])
""" | Valka7a/python-playground | projects/training/alpha-triangle.py | Python | mit | 327 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 Mael Valais <[email protected]>
#
# Distributed under terms of the MIT license.
#
# To run glpsol and generate graphe.dot in one go:
# python parseur_noeuds.py --dat noeuds.txt > data.dat && \
# glpsol -m ctp.mod -d data.dat -y resultats_solveur.txt && \
# python parseur_noeuds.py --numeros --dot noeuds.txt resultats_solveur.txt > graphe.dot
#
#
# To view graphe.dot:
# - On MacOS, open graphe.dot with the graphviz GUI
# - Otherwise, run `dot -Tpng -s72 graphe.dot -o graphe.png`
# NOTE: -s72 sets the position/inch ratio. In this script the ratio
# is set to 72 points per inch.
#
import re # for re.findall()
import sys # for command-line args
import math # for sqrt
def float_or_int(value):
try:
int(value)
return int(value)
except ValueError:
return float(value)
def is_float_or_int(value):
try:
float(value)
return True
except ValueError:
return False
# Parses a line of the form:
# `id_point x_coord y_coord qty_demand...`
# Ex: `1 8 9 90`
# NOTE: collects numeric tokens, stopping at an inline `#` comment
# NOTE: returns an empty list on an unexpected (non-numeric) token
def parser_ligne(ligne):
element_string = [i for i in re.findall("(\d+[.\d+]*|\\S)", ligne) if i]
    # \D -> the delimiter is "anything" except digits
elements_entiers = []
for element in element_string:
if is_float_or_int(element):
elements_entiers += [float_or_int(element)]
elif element == "#":
return elements_entiers
else:
return []
return elements_entiers
# Parses the node-definition file in CTP/CTP+CCVRP format.
# Non-blank data lines, in order (matching the checks in the code below):
# - line 1: truck capacity
#   Format: `capacite`
# - line 2: number of vehicles
#   Format: `nombre_vehicules`
# - line 3: coverage radius of reachable nodes
#   Format: `valeur_rayon`
# - line 4: number of the first reachable node
#   Format: `numero_sommet`, e.g. `5` -> nodes 1-4 to cover, 5+ reachable
# - line 5: the depot
#   Format: `0 x y`, e.g. `0 4 5`
# - lines > 5: node definitions
#   Format: `num_sommet x y qte` (to cover) or `num_sommet x y` (reachable)
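# Example input file (hypothetical values; inline `#` comments are allowed
# because parser_ligne stops at the first `#`):
#     100          # truck capacity
#     2            # number of vehicles
#     50           # coverage radius
#     5            # nodes 1-4 must be covered, nodes >= 5 are reachable
#     0 4 5        # depot
#     1 8 9 90     # node to cover: num x y qty
#     5 10 2       # reachable node: num x y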
def definir_noeuds_depuis_fichier_noeuds(nom_fichier):
fichier = open(nom_fichier, 'r')
numero_ligne = 1
numero_ligne_avec_donnee = 1
noeuds = []
noeud_depot = []
noeuds_atteignables = []
noeuds_a_couvrir = []
rayon_couverture = 0
nb_vehicules = 0
capacite = 0
for ligne in fichier:
ligne_entiers = parser_ligne(ligne)
if numero_ligne_avec_donnee == 1 and ligne_entiers:
if len(ligne_entiers) != 1:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne définit la capacité des camions"
print >> sys.stderr, "Format: `capacité`"
sys.exit(1)
capacite = ligne_entiers[0]
elif numero_ligne_avec_donnee == 2 and ligne_entiers:
if len(ligne_entiers) != 1:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne correspond au nombre de véhicules/camions"
print >> sys.stderr, "Format: `nombre_vehicules`"
sys.exit(1)
nb_vehicules = ligne_entiers[0]
elif numero_ligne_avec_donnee == 3 and ligne_entiers:
if len(ligne_entiers) != 1:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne définit le rayon de couverture des noeuds atteignables"
print >> sys.stderr, "Format: `valeur_rayon`"
sys.exit(1)
rayon_couverture = ligne_entiers[0]
elif numero_ligne_avec_donnee == 4 and ligne_entiers:
if len(ligne_entiers) != 1:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne correspond au num_sommet du premier sommet 'non-atteignable'"
print >> sys.stderr, "Tous les num_sommet < ce numéro seront des sommets à couvrir,"
print >> sys.stderr, "tous les num_sommet >= ce numéro seront des sommets atteignables"
print >> sys.stderr, "Format: `numero_sommet`"
print >> sys.stderr, "Exemple: `5` pour 1->4 à couvrir, 5->fin atteignables"
sys.exit(1)
debut_noeuds_atteignables = ligne_entiers[0]
elif numero_ligne_avec_donnee == 5 and ligne_entiers:
if len(ligne_entiers) != 3:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne définit la position du dépôt"
print >> sys.stderr, "Format: `0 x y`"
sys.exit(1)
noeud_depot = ligne_entiers
elif numero_ligne_avec_donnee > 5 and ligne_entiers:
if len(ligne_entiers) > 0 and ligne_entiers[0] < debut_noeuds_atteignables:
if len(ligne_entiers) != 4:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne correspond à une définition de noeud à couvrir (non atteignable)"
print >> sys.stderr, "car vous avez défini debut_noeuds_atteignables=%d"%debut_noeuds_atteignables
print >> sys.stderr, "Format: `num_sommet x y qté`"
sys.exit(1)
noeuds_a_couvrir += [ligne_entiers]
else:
if len(ligne_entiers) != 3:
print >> sys.stderr, "definir_noeuds_depuis_fichier_noeuds(): erreur ligne %d" % numero_ligne
print >> sys.stderr, "Cette ligne correspond à une définition de noeud atteignable (couvrants)"
print >> sys.stderr, "car vous avez défini debut_noeuds_atteignables=%d"%debut_noeuds_atteignables
print >> sys.stderr, "Format: `num_sommet x y`"
sys.exit(1)
noeuds_atteignables += [ligne_entiers]
numero_ligne += 1
if ligne_entiers:
numero_ligne_avec_donnee += 1
return [rayon_couverture, nb_vehicules, capacite, noeud_depot, noeuds_a_couvrir, noeuds_atteignables]
# From the CTP solver results:
# `id_route id1 id2 [...]`
def definir_chemins_depuis_resultat_glpsol(nom_fichier):
fichier = open(nom_fichier, 'r')
routes = [] # [num_camion, sommet1, sommet2]
numero_ligne_avec_donnee = 1
numero_ligne = 1
for ligne in fichier:
ligne_entiers = parser_ligne(ligne)
        if ligne_entiers: # check there is at least one element
if len(ligne_entiers) < 3:
print >> sys.stderr, "definir_chemins_depuis_resultat_glpsol(): erreur ligne %d" % numero_ligne
sys.exit(1)
routes = routes + [ligne_entiers[:3]]
numero_ligne += 1
if ligne_entiers:
numero_ligne_avec_donnee += 1
return routes
def tracer_dot(rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,noeuds_atteignables,routes,\
avec_numeros,avec_demande):
    # Compute the arrival-depot node that must be added
noeud_depot_arr = [i for i in noeud_depot]
noeud_depot_arr[0] = noeuds_atteignables[len(noeuds_atteignables)-1][0] + 1
    # Why points_per_inch?
    # - `pos="3,4"!` values are in points
    # - `width="0.1"` and `height="0.1"` are in inches
    # 72 is the number of "points" per "inch" in .dot
    # 1 inch * ratio_inch_point = points
points_per_inch = 72
normalisation = 300
    # We want the pos=x,y values to land in the range [0, max]
x_max = max([p[1] for p in [noeud_depot]+noeuds_atteignables+noeuds_a_couvrir])
x_min = min([p[1] for p in [noeud_depot]+noeuds_atteignables+noeuds_a_couvrir])
y_max = max([p[2] for p in [noeud_depot]+noeuds_atteignables+noeuds_a_couvrir])
y_min = min([p[2] for p in [noeud_depot]+noeuds_atteignables+noeuds_a_couvrir])
pos_max = min([x_max,y_max])
pos_min = min([x_min,y_min])
def normalise(point) :
return (float(point)-float(pos_min))/(float(pos_max)-float(pos_min)) * normalisation
def point_to_inch(point) : return normalise(point)/float(points_per_inch)
sommets_atteignables_vus = []
noeuds_atteignables_et_depot = [noeud_depot] + noeuds_atteignables
couleurs_aretes = ['red','darkorchid','forestgreen','cyan4','orange','cadetblue']
print \
'graph RoutesCTP \n'\
'{ \n'\
'\t layout=neato; \n'\
'\t edge [dir=None splines=line] \n'\
'\t node [fontsize=10] \n'\
'\t rankdir = LR;'
    # Handle the depot node
print '\t%d [label="" xlabel="Dépôt" shape=square fixedsize=true\
style=filled width=%.2f color=black pos="%.2f,%.2f!"]; ' \
% (noeud_depot[0],\
point_to_inch(0.2),\
normalise(noeud_depot[1]),normalise(noeud_depot[2]))
    # Handle each arc selected by the solver
for chemin in routes:
sommets = chemin[1:2+1]
num_route = chemin[0]
        # Handle the edge
if sommets[0] not in sommets_atteignables_vus:
sommets_atteignables_vus += [sommets[0]]
if sommets[1] not in sommets_atteignables_vus:
sommets_atteignables_vus += [sommets[1]]
if sommets[0] != sommets[1] and sommets[1] != noeud_depot_arr[0]:
couleur = couleurs_aretes[(num_route-1)%len(couleurs_aretes)]
if sommets[0] == noeud_depot[0] and avec_numeros:
print '\t%d -- %d [color=%s label=<<font color=\'%s\'>%s</font>>];' \
% (sommets[0],sommets[1],\
couleur,couleur,num_route)
else:
print '\t%d -- %d [color=%s]; '\
% (sommets[0],sommets[1],couleur)
    # Handle the reachable nodes
for [sommet,x,y] in noeuds_atteignables:
if sommet in sommets_atteignables_vus:
print '\t%d [xlabel="%s" pos="%.2f,%.2f!" label="" shape=circle color=black style=filled width=%.2f]; ' \
% (sommet,str(sommet) if avec_numeros else "",\
normalise(x),normalise(y),\
point_to_inch(0.15))
print '\trayon_%d [pos="%.2f,%.2f!" shape=circle fixedsize=true width=%.2f label=""]; '\
% (sommet,normalise(x),normalise(y),\
point_to_inch(rayon*2))
else:
print '\t%d [xlabel="%s" pos="%.2f,%.2f!" label="" shape=circle color=gray50 style=filled width=%.2f]; ' \
% (sommet,str(sommet) if avec_numeros else "", \
normalise(x),normalise(y),\
point_to_inch(0.15))
    # Handle the nodes to cover
for [sommet,x,y,qte] in noeuds_a_couvrir:
xlabel=""
xlabel+=("<font color=\'blue\'>"+"("+str(qte)+")" +"</font> ") if avec_demande else ""
xlabel+=("<font color=\'black\'>"+str(sommet)+"</font>") if avec_numeros else ""
print '\t%d [label="" xlabel=<%s> pos="%.2f,%.2f!" color="blue" style=filled shape=triangle fixedsize=true width=%.2f height=%.2f]; ' \
% (sommet, xlabel, normalise(x),normalise(y),\
point_to_inch(0.1),point_to_inch(0.2))
print("}")
# Artificially add an (n+1)-th point that duplicates what was done for
# node 0, the depot. This ensures the travel-time parameter c[k,i,j]
# (in the CCVRP) has values for all i and j between 0 and n+1.
def produire_data_solveur(rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,noeuds_atteignables):
noeud_depot_arr = [i for i in noeud_depot]
noeud_depot_arr[0] = noeuds_atteignables[len(noeuds_atteignables)-1][0] + 1
print "data;"
print "# Nombre de sommets à couvrir (I)"
print "param n := %d;"% len(noeuds_a_couvrir)
print "# Nombre de sommets atteignables (J)"
print "param m := %d;"% len(noeuds_atteignables)
print "# Nombre de véhicules (L)"
print "param l := %d;"% nb_vehicules
print "# Capacité d\'un véhicule"
print "param Q := %d;"% capacite
print "set I :="
for [num,x,y,_] in noeuds_a_couvrir:
print "%d" % num
print ";"
print "set J :="
for [num,x,y] in noeuds_atteignables:
print "%d" % num
print ";"
print "# Rayon de couverture d\'un point atteignable (J)"
print "param cmax := %.2f;" % rayon
print "param : d :="
for [num,x,y,qte] in noeuds_a_couvrir:
print("%d %d" % (num,qte))
print(";")
print("param : E : c := ")
for p1 in noeuds_a_couvrir + noeuds_atteignables + [noeud_depot] + [noeud_depot_arr]:
for p2 in noeuds_a_couvrir + noeuds_atteignables + [noeud_depot] + [noeud_depot_arr]:
if p1 != p2:
dist = math.sqrt(pow(p1[1]-p2[1],2)+pow(p1[2]-p2[2],2))
print("%d %d %.2f" % (p1[0],p2[0],dist))
print(";")
print 'end;'
import argparse
parser = argparse.ArgumentParser(description='Parser pour .dat et .dot')
exclusive = parser.add_mutually_exclusive_group(required=True)
exclusive.add_argument('--dot', nargs=2, \
required=False,\
help='Commande permettant de produire un .dot',\
metavar=('fichier_noeuds', 'fichier_resultat_solveur')\
)
exclusive.add_argument('--dat', nargs=1, \
required=False,\
help='Commande permettant de produire un .dat',\
metavar='fichier_noeuds'\
)
parser.add_argument('--numeros',action='store_true',\
help="Pour la commande --dot, afficher les numéros des noeuds")
parser.add_argument('--demandes',action='store_true',\
help="Pour la commande --dot, afficher les demandes des noeuds à couvrir")
args = parser.parse_args()
if args.dot != None:
[rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,noeuds_atteignables] =\
definir_noeuds_depuis_fichier_noeuds(args.dot[0])
routes = definir_chemins_depuis_resultat_glpsol(args.dot[1])
tracer_dot(rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,\
noeuds_atteignables,routes,args.numeros,args.demandes)
if args.dat != None:
[rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,noeuds_atteignables] = \
definir_noeuds_depuis_fichier_noeuds(args.dat[0])
produire_data_solveur(rayon,nb_vehicules,capacite,noeud_depot,noeuds_a_couvrir,\
noeuds_atteignables)
| aheba/ctp | parseur_noeuds.py | Python | mit | 14,972 |
import lms_code.lib.rep2 as rep2
from lms_code.analysis.run_bem import bemify, boundary_conditions,\
assemble, constrain, solve, evaluate_surface_disp
from lms_code.analysis.simplified_bem import create_surface_mesh, \
set_params
from codim1.core import simple_line_mesh, combine_meshes, ray_mesh
def create_fault_mesh(d):
top_fault_vert = [0, -1e9]
top = d['intersection_pt']
joint = [4.20012e5 + 1.6, -2.006e4 - 5]
bottom = [3.09134e5 + 1.1, -2.3376e4 - 3]
detach = simple_line_mesh(d['fault_elements'], bottom, joint)
d['fault_mesh'] = detach
if __name__ == "__main__":
d = dict()
set_params(d)
create_fault_mesh(d)
create_surface_mesh(d)
bemify(d)
boundary_conditions(d)
assemble(d)
# constrain(d)
solve(d)
evaluate_surface_disp(d)
rep2.save("bem_just_detach", d)
| tbenthompson/LMS_public | lms_code/analysis/just_detach_bem.py | Python | mit | 846 |
from django.contrib.auth.models import User
from game.models import Point, Team
def addPoint(blame_username, fixer_username):
blame = User.objects.get(username=blame_username)
fixer = User.objects.get(username=fixer_username)
point = Point()
point.blame = blame
point.fixer = fixer
point.save()
return point
def getPointsForTeam(team_id):
points = Point.objects.filter(team__id__exact = team_id)
players = User.objects.filter(team__id__exact = team_id)
rtn = {
'total': len(points),
'players': {}
}
# build basic data structure
for player in players:
rtn['players'][player.username] = {
'total_fixes': 0,
'total_breaks': 0,
'player': player,
'fixes': {}
}
for other_player in players:
rtn['players'][player.username]['fixes'][other_player.username] = 0
# loop over points adding to the above data structure
for point in points:
rtn['players'][point.fixer.username]['total_fixes'] += 1
rtn['players'][point.blame.username]['total_breaks'] += 1
rtn['players'][point.fixer.username]['fixes'][point.blame.username] += 1
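    # Illustrative shape of the returned structure:
    # {'total': <number of points>,
    #  'players': {'<username>': {'total_fixes': int, 'total_breaks': int,
    #                             'player': <User>, 'fixes': {'<other>': int}}}}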
return rtn | MitMaro/The-Blame-Game | game/service.py | Python | mit | 1,086 |
from django.utils import translation
from django.conf import settings
from froide.celery import app as celery_app
from froide.foirequest.models import FoiRequest
from .models import FoiRequestFollower
from .utils import run_batch_update
@celery_app.task
def update_followers(request_id, update_message, template=None):
translation.activate(settings.LANGUAGE_CODE)
try:
foirequest = FoiRequest.objects.get(id=request_id)
except FoiRequest.DoesNotExist:
return
followers = FoiRequestFollower.objects.filter(request=foirequest, confirmed=True)
for follower in followers:
FoiRequestFollower.objects.send_update(
follower.user or follower.email,
[
{
"request": foirequest,
"unfollow_link": follower.get_unfollow_link(),
"events": [update_message],
}
],
batch=False,
)
@celery_app.task
def batch_update():
return run_batch_update()
| fin/froide | froide/foirequestfollower/tasks.py | Python | mit | 1,029 |
import numpy as np
import matplotlib.pyplot as plt
import inspect # Used for storing the input
class AquiferData:
def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll,
ltype, topboundary, phreatictop, kzoverkh=None, model3d=False):
'''kzoverkh and model3d only need to be specified when model
is model3d'''
self.model = model
self.kaq = np.atleast_1d(kaq).astype('d')
self.z = np.atleast_1d(z).astype('d')
self.naq = len(self.kaq)
self.nlayers = len(self.z) - 1
self.Haq = np.atleast_1d(Haq).astype('d')
self.Hll = np.atleast_1d(Hll).astype('d')
self.T = self.kaq * self.Haq
self.Tcol = self.T.reshape(self.naq, 1)
self.c = np.atleast_1d(c).astype('d')
self.c[self.c > 1e100] = 1e100
self.Saq = np.atleast_1d(Saq).astype('d')
self.Sll = np.atleast_1d(Sll).astype('d')
self.Sll[self.Sll < 1e-20] = 1e-20 # Cannot be zero
self.poraq = np.atleast_1d(poraq).astype('d')
self.porll = np.atleast_1d(porll).astype('d')
self.ltype = np.atleast_1d(ltype)
self.zaqtop = self.z[:-1][self.ltype == 'a']
self.zaqbot = self.z[1:][self.ltype == 'a']
self.layernumber = np.zeros(self.nlayers, dtype='int')
self.layernumber[self.ltype == 'a'] = np.arange(self.naq)
self.layernumber[self.ltype == 'l'] = np.arange(self.nlayers - self.naq)
if self.ltype[0] == 'a':
self.layernumber[self.ltype == 'l'] += 1 # first leaky layer below first aquifer layer
self.topboundary = topboundary[:3]
self.phreatictop = phreatictop
self.kzoverkh = kzoverkh
if self.kzoverkh is not None:
self.kzoverkh = np.atleast_1d(self.kzoverkh).astype('d')
if len(self.kzoverkh) == 1:
self.kzoverkh = self.kzoverkh * np.ones(self.naq)
self.model3d = model3d
if self.model3d:
assert self.kzoverkh is not None, \
"model3d specified without kzoverkh"
#self.D = self.T / self.Saq
self.area = 1e200 # Smaller than default of ml.aq so that inhom is found
def __repr__(self):
return 'Inhom T: ' + str(self.T)
def initialize(self):
'''
eigval[naq, npval]: Array with eigenvalues
lab[naq, npval]: Array with lambda values
lab2[naq, nint, npint]: Array with lambda values reorganized per
interval
eigvec[naq, naq, npval]: Array with eigenvector matrices
coef[naq ,naq, npval]: Array with coefficients;
coef[ilayers, :, np] are the coefficients if the element is in
ilayers belonging to Laplace parameter number np
'''
# Recompute T for when kaq is changed
self.T = self.kaq * self.Haq
self.Tcol = self.T.reshape(self.naq, 1)
# Compute Saq and Sll
self.Scoefaq = self.Saq * self.Haq
self.Scoefll = self.Sll * self.Hll
if (self.topboundary == 'con') and self.phreatictop:
self.Scoefaq[0] = self.Scoefaq[0] / self.Haq[0]
elif (self.topboundary == 'lea') and self.phreatictop:
self.Scoefll[0] = self.Scoefll[0] / self.Hll[0]
self.D = self.T / self.Scoefaq
# Compute c if model3d for when kaq is changed
if self.model3d:
self.c[1:] = \
0.5 * self.Haq[:-1] / (self.kzoverkh[:-1] * self.kaq[:-1]) + \
0.5 * self.Haq[1:] / (self.kzoverkh[1:] * self.kaq[1:])
#
self.eigval = np.zeros((self.naq, self.model.npval), 'D')
self.lab = np.zeros((self.naq, self.model.npval), 'D')
self.eigvec = np.zeros((self.naq, self.naq, self.model.npval), 'D')
self.coef = np.zeros((self.naq, self.naq, self.model.npval), 'D')
b = np.diag(np.ones(self.naq))
for i in range(self.model.npval):
w, v = self.compute_lab_eigvec(self.model.p[i])
# Eigenvectors are columns of v
self.eigval[:, i] = w; self.eigvec[:, :, i] = v
self.coef[:, :, i] = np.linalg.solve(v, b).T
self.lab = 1.0 / np.sqrt(self.eigval)
self.lab2 = self.lab.copy()
self.lab2.shape = (self.naq, self.model.nint, self.model.npint)
self.lababs = np.abs(self.lab2[:, :, 0]) # used to check distances
self.eigvec2 = self.eigvec.copy()
self.eigvec2.shape = (self.naq, self.naq,
self.model.nint, self.model.npint)
def compute_lab_eigvec(self, p, returnA = False, B = None):
sqrtpSc = np.sqrt( p * self.Scoefll * self.c )
a, b = np.zeros_like(sqrtpSc), np.zeros_like(sqrtpSc)
small = np.abs(sqrtpSc) < 200
a[small] = sqrtpSc[small] / np.tanh(sqrtpSc[small])
b[small] = sqrtpSc[small] / np.sinh(sqrtpSc[small])
a[~small] = sqrtpSc[~small] / ((1.0 - np.exp(-2.0*sqrtpSc[~small])) /
(1.0 + np.exp(-2.0*sqrtpSc[~small])))
b[~small] = sqrtpSc[~small] * 2.0 * np.exp(-sqrtpSc[~small]) / \
(1.0 - np.exp(-2.0*sqrtpSc[~small]))
if (self.topboundary[:3] == 'sem') or (self.topboundary[:3] == 'lea'):
dzero = sqrtpSc[0] * np.tanh(sqrtpSc[0])
d0 = p / self.D
if B is not None:
d0 = d0 * B # B is vector of load efficiency paramters
d0[:-1] += a[1:] / (self.c[1:] * self.T[:-1])
d0[1:] += a[1:] / (self.c[1:] * self.T[1:])
if self.topboundary[:3] == 'lea':
d0[0] += dzero / ( self.c[0] * self.T[0] )
elif self.topboundary[:3] == 'sem':
d0[0] += a[0] / ( self.c[0] * self.T[0] )
dm1 = -b[1:] / (self.c[1:] * self.T[:-1])
dp1 = -b[1:] / (self.c[1:] * self.T[1:])
A = np.diag(dm1,-1) + np.diag(d0,0) + np.diag(dp1,1)
if returnA: return A
w, v = np.linalg.eig(A)
# sorting moved here
index = np.argsort(abs(w))[::-1]
w = w[index]
v = v[:, index]
return w, v
def head_to_potential(self, h, layers):
return h * self.Tcol[layers]
def potential_to_head(self, pot, layers):
return pot / self.Tcol[layers]
def isInside(self,x,y):
print('Must overload AquiferData.isInside method')
return True
def inWhichLayer(self, z):
'''Returns -9999 if above top of system,
+9999 if below bottom of system,
negative for in leaky layer.
leaky layer -n is on top of aquifer n'''
if z > self.zt[0]:
return -9999
for i in range(self.naq-1):
if z >= self.zb[i]:
return i
if z > self.zt[i+1]:
return -i-1
if z >= self.zb[self.naq-1]:
return self.naq - 1
return +9999
def findlayer(self, z):
'''
Returns layer-number, layer-type and model-layer-number'''
if z > self.z[0]:
modellayer = -1
ltype = 'above'
layernumber = None
elif z < self.z[-1]:
modellayer = len(self.layernumber)
ltype = 'below'
layernumber = None
else:
modellayer = np.argwhere((z <= self.z[:-1]) &
(z >= self.z[1:]))[0, 0]
layernumber = self.layernumber[modellayer]
ltype = self.ltype[modellayer]
return layernumber, ltype, modellayer
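        # Worked example: with z = [0, -10, -12, -20] and
        # ltype = ['a', 'l', 'a'] (so layernumber = [0, 1, 1]):
        #     findlayer(-11) -> (1, 'l', 1)  # inside the leaky layer
        #     findlayer(-15) -> (1, 'a', 2)  # inside the second aquifer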
class Aquifer(AquiferData):
def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll,
ltype, topboundary, phreatictop, kzoverkh=None, model3d=False):
AquiferData.__init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll,
poraq, porll, ltype, topboundary, phreatictop, kzoverkh, model3d)
self.inhomlist = []
self.area = 1e300 # Needed to find smallest inhomogeneity
def __repr__(self):
return 'Background Aquifer T: ' + str(self.T)
def initialize(self):
AquiferData.initialize(self)
for inhom in self.inhomlist:
inhom.initialize()
def find_aquifer_data(self, x, y):
rv = self
for aq in self.inhomlist:
if aq.isInside(x, y):
if aq.area < rv.area:
rv = aq
return rv | mbakker7/ttim | ttim/aquifer.py | Python | mit | 8,387 |
from django.test import TestCase
from morelia.decorators import tags
from smarttest.decorators import no_db_testcase
from tasks.factories import TaskFactory, UserFactory
@no_db_testcase
@tags(['unit'])
class TaskGetAbsoluteUrlTest(TestCase):
''' :py:meth:`tasks.models.Task.get_absolute_url` '''
def test_should_return_task_absolute_url(self):
# Arrange
owner = UserFactory.build(pk=1)
task = TaskFactory.build(owner=owner, author=owner)
# Act
url = task.get_absolute_url()
# Assert
self.assertEqual(url, '/%s/' % owner.username)
| dryobates/testing_django | todo/tasks/tests/test_models.py | Python | mit | 601 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines an action for moving the workspace to the user's home directory.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import expanduser
from enthought.traits.api import Bool, Instance
from enthought.pyface.api import ImageResource
from enthought.pyface.action.api import Action
from enthought.envisage.ui.workbench.workbench_window import WorkbenchWindow
from puddle.resource.resource_view import RESOURCE_VIEW
from common import IMAGE_LOCATION
#------------------------------------------------------------------------------
# "HomeAction" class:
#------------------------------------------------------------------------------
class HomeAction(Action):
""" An action for moving the workspace to the user's home directory.
"""
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
# A longer description of the action:
description = "Move workspace to the user's home directory"
# The action"s name (displayed on menus/tool bar tools etc):
name = "&Home"
# A short description of the action used for tooltip text etc:
tooltip = "Open home directory"
# Keyboard accelerator:
accelerator = "Alt+Home"
# The action's image (displayed on tool bar tools etc):
image = ImageResource("home_folder", search_path=[IMAGE_LOCATION])
#--------------------------------------------------------------------------
# "UpAction" interface:
#--------------------------------------------------------------------------
window = Instance(WorkbenchWindow)
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
def perform(self, event):
""" Perform the action.
"""
# Note that we always offer the service via its name, but look it up
# via the actual protocol.
from puddle.resource.i_workspace import IWorkspace
workspace = self.window.application.get_service(IWorkspace)
workspace.path = expanduser("~")
view = self.window.get_view_by_id(RESOURCE_VIEW)
if view is not None:
view.tree_viewer.refresh(workspace)
# EOF -------------------------------------------------------------------------
| rwl/puddle | puddle/resource/action/home_action.py | Python | mit | 3,827 |
"""
Flow's list of built-in types available in 0.44.0 (Apr 13, 2017).
Related to
https://flow.org/en/docs/types/
and
http://www.saltycrane.com/blog/2016/06/flow-type-cheat-sheet/
"""
def print_type_format(trigger, content=None, description=None):
"""Format output for autocompletion for a given trigger text."""
return ("%s\t%s" % (trigger, description), "%s" % (content or trigger))
builtintypes = [
# Built-in types
print_type_format("any", description="Flow built-in type"),
print_type_format("boolean", description="Flow built-in type"),
print_type_format("null", description="Flow built-in type"),
print_type_format("number", description="Flow built-in type"),
print_type_format("mixed", description="Flow built-in type"),
print_type_format("string", description="Flow built-in type"),
print_type_format("void", description="Flow built-in type"),
print_type_format("Class", "Class<${1}>", "Flow built-in type"),
# Built-in 'private' types
print_type_format(
"Abstract", "\$Abstract<${1}>", "Flow built-in private type"
),
print_type_format(
"Diff", "\$Diff<${1}, ${2}>", "Flow built-in private type"
),
print_type_format("Exact", "\$Exact<${1}>", "Flow built-in private type"),
print_type_format("Keys", "\$Keys<${1}>", "Flow built-in private type"),
print_type_format(
"ObjMap", "\$ObjMap<${1}, ${2}>", "Flow built-in private type"
),
print_type_format(
"PropertyType",
"\$PropertyType<${1}, ${2}>",
"Flow built-in private type",
),
print_type_format(
"Subtype", "\$Subtype<${1}, ${2}>", "Flow built-in private type"
),
print_type_format(
"Supertype", "\$Supertype<${1}, ${2}>", "Flow built-in private type"
),
# Core types
print_type_format("Array", "Array<${1}>", "Flow core type"),
print_type_format("ArrayBuffer", description="Flow core type"),
print_type_format(
"AsyncIterable", "AsyncIterable<${1}>", "Flow core type"
),
print_type_format(
"AsyncIterator", "AsyncIterator<${1}>", "Flow core type"
),
print_type_format("Boolean", description="Flow core type"),
print_type_format("CallSite", description="Flow core type"),
print_type_format("DataView", description="Flow core type"),
print_type_format("Date", description="Flow core type"),
print_type_format("Error", description="Flow core type"),
print_type_format("EvalError", description="Flow core type"),
print_type_format("Float32Array", description="Flow core type"),
print_type_format("Float64Array", description="Flow core type"),
print_type_format("Function", description="Flow core type"),
print_type_format("global", description="Flow core type"),
print_type_format("Infinity", description="Flow core type"),
print_type_format("Int16Array", description="Flow core type"),
print_type_format("Int32Array", description="Flow core type"),
print_type_format("Int8Array", description="Flow core type"),
print_type_format("Iterable", "Iterable<${1}>", "Flow core type"),
print_type_format("Iterator", "Iterator<${1}>", "Flow core type"),
print_type_format(
"IteratorResult", "IteratorResult<${1}, ${2}>", "Flow core type"
),
print_type_format("JSON", description="Flow core type"),
print_type_format("Map", "Map<${1}, ${2}>", "Flow core type"),
print_type_format("NaN", description="Flow core type"),
print_type_format("Number", description="Flow core type"),
print_type_format("Object", description="Flow core type"),
print_type_format("Promise", "Promise<${1}>", "Flow core type"),
print_type_format("Proxy", "Proxy<${1}>", "Flow core type"),
print_type_format("RangeError", description="Flow core type"),
print_type_format("ReferenceError", description="Flow core type"),
print_type_format("Reflect", description="Flow core type"),
print_type_format("RegExp", description="Flow core type"),
print_type_format("Set", "Set<${1}>", "Flow core type"),
print_type_format("String", description="Flow core type"),
print_type_format("Symbol", description="Flow core type"),
print_type_format("SyntaxError", description="Flow core type"),
print_type_format("TypeError", description="Flow core type"),
print_type_format("Uint16Array", description="Flow core type"),
print_type_format("Uint32Array", description="Flow core type"),
print_type_format("Uint8Array", description="Flow core type"),
print_type_format("Uint8ClampedArray", description="Flow core type"),
print_type_format("URIError", description="Flow core type"),
print_type_format("WeakMap", "WeakMap<${1}, ${2}>", "Flow core type"),
print_type_format("WeakSet", "WeakSet<${1}>", "Flow core type"),
# Core 'private' types
print_type_format(
"ArrayBufferView",
"\$ArrayBufferView",
description="Flow core private type",
),
print_type_format(
"ReadOnlyArray",
"\$ReadOnlyArray<${1}>",
description="Flow core private type",
),
print_type_format(
"SymboIsConcatSpreadable",
"\$SymboIsConcatSpreadable",
description="Flow core private type",
),
print_type_format(
"SymbolHasInstance",
"\$SymbolHasInstance",
description="Flow core private type",
),
print_type_format(
"SymbolIterator",
"\$SymbolIterator",
description="Flow core private type",
),
print_type_format(
"SymbolMatch", "\$SymbolMatch", description="Flow core private type"
),
print_type_format(
"SymbolReplace",
"\$SymbolReplace",
description="Flow core private type",
),
print_type_format(
"SymbolSearch", "\$SymbolSearch", description="Flow core private type"
),
print_type_format(
"SymbolSpecies",
"\$SymbolSpecies",
description="Flow core private type",
),
print_type_format(
"SymbolSplit", "\$SymbolSplit", description="Flow core private type"
),
print_type_format(
"SymbolToPrimitive",
"\$SymbolToPrimitive",
description="Flow core private type",
),
print_type_format(
"SymbolToStringTag",
"\$SymbolToStringTag",
description="Flow core private type",
),
print_type_format(
"SymbolUnscopables",
"\$SymbolUnscopables",
description="Flow core private type",
),
print_type_format(
"TypedArray", "\$TypedArray", description="Flow core private type"
),
print_type_format(
"Proxyrevocable",
"Proxy\$revocable<${1}>",
description="Flow core private type",
),
print_type_format(
"Proxytraps",
"Proxy\$traps<${1}>",
description="Flow core private type",
),
print_type_format(
"RegExpflags", "RegExp\$flags", description="Flow core private type"
),
# React types
print_type_format(
"_ReactClass", "_ReactClass<${1}, ${2}, ${3}, ${4}>", "Flow React type"
),
print_type_format(
"LegacyReactComponent",
"LegacyReactComponent<${1}, ${2}, ${3}>",
"Flow React type",
),
print_type_format("ReactClass", "ReactClass<${1}>", "Flow React type"),
print_type_format(
"ReactPropsChainableTypeChecker", description="Flow React type"
),
print_type_format("ReactPropsCheckType", description="Flow React type"),
print_type_format("ReactPropTypes", description="Flow React type"),
print_type_format(
"SyntheticClipboardEvent", description="Flow React type"
),
print_type_format(
"SyntheticCompositionEvent", description="Flow React type"
),
print_type_format("SyntheticDragEvent", description="Flow React type"),
print_type_format("SyntheticEvent", description="Flow React type"),
print_type_format("SyntheticFocusEvent", description="Flow React type"),
print_type_format("SyntheticInputEvent", description="Flow React type"),
print_type_format("SyntheticKeyboardEvent", description="Flow React type"),
print_type_format("SyntheticMouseEvent", description="Flow React type"),
print_type_format("SyntheticTouchEvent", description="Flow React type"),
print_type_format("SyntheticUIEvent", description="Flow React type"),
print_type_format("SyntheticWheelEvent", description="Flow React type"),
# React 'private' types
print_type_format(
"DefaultPropsOf", "\$DefaultPropsOf<${1}>", "Flow React type"
),
print_type_format("JSXIntrinsics", "\$JSXIntrinsics", "Flow React type"),
print_type_format("PropsOf", "\$PropsOf<${1}>", "Flow React type"),
print_type_format(
"ReactComponent",
"React\$Component<${1}, ${2}, ${3}>",
"Flow React type",
),
print_type_format(
"ReactElement", "React\$Element<${1}>", "Flow React type"
),
print_type_format(
"ReactPropTypesarrayOf", "React\$PropTypes\$arrayOf", "Flow React type"
),
print_type_format(
"ReactPropTypesinstanceOf",
"React\$PropTypes\$instanceOf",
"Flow React type",
),
print_type_format(
"ReactPropTypesobjectOf",
"React\$PropTypes\$objectOf",
"Flow React type",
),
print_type_format(
"ReactPropTypesoneOf", "React\$PropTypes\$oneOf", "Flow React type"
),
print_type_format(
"ReactPropTypesoneOfType",
"React\$PropTypes\$oneOfType",
"Flow React type",
),
print_type_format(
"ReactPropTypesshape", "React\$PropTypes\$shape", "Flow React type"
),
print_type_format(
"React$PureComponent",
"React\$PureComponent<${1}, ${2}, ${3}>",
"Flow React type",
),
# Document Object Model types
print_type_format("AnimationEvent", description="Flow DOM type"),
print_type_format("AnimationEventHandler", description="Flow DOM type"),
print_type_format("AnimationEventListener", description="Flow DOM type"),
print_type_format("AnimationEventTypes", description="Flow DOM type"),
print_type_format("Attr", description="Flow DOM type"),
print_type_format("AudioTrack", description="Flow DOM type"),
print_type_format("AudioTrackList", description="Flow DOM type"),
print_type_format("Blob", description="Flow DOM type"),
print_type_format("BufferDataSource", description="Flow DOM type"),
print_type_format("CanvasDrawingStyles", description="Flow DOM type"),
print_type_format("CanvasFillRule", description="Flow DOM type"),
print_type_format("CanvasGradient", description="Flow DOM type"),
print_type_format("CanvasImageSource", description="Flow DOM type"),
print_type_format("CanvasPattern", description="Flow DOM type"),
print_type_format("CanvasRenderingContext2D", description="Flow DOM type"),
print_type_format("CharacterData", description="Flow DOM type"),
print_type_format("ClientRect", description="Flow DOM type"),
print_type_format("ClientRectList", description="Flow DOM type"),
print_type_format("Comment", description="Flow DOM type"),
print_type_format("CustomElementRegistry", description="Flow DOM type"),
print_type_format("CustomEvent", description="Flow DOM type"),
print_type_format("DataTransfer", description="Flow DOM type"),
print_type_format("DataTransferItem", description="Flow DOM type"),
print_type_format("DataTransferItemList", description="Flow DOM type"),
print_type_format("Document", description="Flow DOM type"),
print_type_format("DocumentFragment", description="Flow DOM type"),
print_type_format("DocumentType", description="Flow DOM type"),
print_type_format("DOMError", description="Flow DOM type"),
print_type_format("DOMImplementation", description="Flow DOM type"),
print_type_format("DOMTokenList", description="Flow DOM type"),
print_type_format("DragEvent", description="Flow DOM type"),
print_type_format("DragEventHandler", description="Flow DOM type"),
print_type_format("DragEventListener", description="Flow DOM type"),
print_type_format("DragEventTypes", description="Flow DOM type"),
print_type_format("Element", description="Flow DOM type"),
print_type_format(
"ElementRegistrationOptions", description="Flow DOM type"
),
print_type_format("Event", description="Flow DOM type"),
print_type_format("EventHandler", description="Flow DOM type"),
print_type_format("EventListener", description="Flow DOM type"),
print_type_format(
"EventListenerOptionsOrUseCapture", description="Flow DOM type"
),
print_type_format("EventTarget", description="Flow DOM type"),
print_type_format("File", description="Flow DOM type"),
print_type_format("FileList", description="Flow DOM type"),
print_type_format("FileReader", description="Flow DOM type"),
print_type_format("HitRegionOptions", description="Flow DOM type"),
print_type_format("HTMLAnchorElement", description="Flow DOM type"),
print_type_format("HTMLAppletElement", description="Flow DOM type"),
print_type_format("HTMLAudioElement", description="Flow DOM type"),
print_type_format("HTMLBaseElement", description="Flow DOM type"),
print_type_format("HTMLButtonElement", description="Flow DOM type"),
print_type_format("HTMLCanvasElement", description="Flow DOM type"),
print_type_format(
"HTMLCollection", "HTMLCollection<${1}>", "Flow DOM type"
),
print_type_format("HTMLDivElement", description="Flow DOM type"),
print_type_format("HTMLElement", description="Flow DOM type"),
print_type_format("HTMLEmbedElement", description="Flow DOM type"),
print_type_format("HTMLFormElement", description="Flow DOM type"),
print_type_format("HTMLIFrameElement", description="Flow DOM type"),
print_type_format("HTMLImageElement", description="Flow DOM type"),
print_type_format("HTMLInputElement", description="Flow DOM type"),
print_type_format("HTMLLabelElement", description="Flow DOM type"),
print_type_format("HTMLLinkElement", description="Flow DOM type"),
print_type_format("HTMLMediaElement", description="Flow DOM type"),
print_type_format("HTMLMenuElement", description="Flow DOM type"),
print_type_format("HTMLMetaElement", description="Flow DOM type"),
print_type_format("HTMLOptGroupElement", description="Flow DOM type"),
print_type_format("HTMLOptionElement", description="Flow DOM type"),
print_type_format("HTMLOptionsCollection", description="Flow DOM type"),
print_type_format("HTMLParagraphElement", description="Flow DOM type"),
print_type_format("HTMLScriptElement", description="Flow DOM type"),
print_type_format("HTMLSelectElement", description="Flow DOM type"),
print_type_format("HTMLSlotElement", description="Flow DOM type"),
print_type_format("HTMLSourceElement", description="Flow DOM type"),
print_type_format("HTMLSpanElement", description="Flow DOM type"),
print_type_format("HTMLStyleElement", description="Flow DOM type"),
print_type_format("HTMLTableCaptionElement", description="Flow DOM type"),
print_type_format("HTMLTableCellElement", description="Flow DOM type"),
print_type_format("HTMLTableElement", description="Flow DOM type"),
print_type_format("HTMLTableRowElement", description="Flow DOM type"),
print_type_format("HTMLTableSectionElement", description="Flow DOM type"),
print_type_format("HTMLTemplateElement", description="Flow DOM type"),
print_type_format("HTMLTextAreaElement", description="Flow DOM type"),
print_type_format("HTMLVideoElement", description="Flow DOM type"),
print_type_format("Image", description="Flow DOM type"),
print_type_format("ImageBitmap", description="Flow DOM type"),
print_type_format("ImageData", description="Flow DOM type"),
print_type_format("KeyboardEvent", description="Flow DOM type"),
print_type_format("KeyboardEventHandler", description="Flow DOM type"),
print_type_format("KeyboardEventListener", description="Flow DOM type"),
print_type_format("KeyboardEventTypes", description="Flow DOM type"),
print_type_format("MediaError", description="Flow DOM type"),
print_type_format("MediaSource", description="Flow DOM type"),
print_type_format("MessageEvent", description="Flow DOM type"),
print_type_format("MouseEvent", description="Flow DOM type"),
print_type_format("MouseEventHandler", description="Flow DOM type"),
print_type_format("MouseEventListener", description="Flow DOM type"),
print_type_format("MouseEventTypes", description="Flow DOM type"),
print_type_format("NamedNodeMap", description="Flow DOM type"),
print_type_format("Node", description="Flow DOM type"),
print_type_format("NodeFilter", description="Flow DOM type"),
print_type_format("NodeFilterCallback", description="Flow DOM type"),
print_type_format("NodeFilterInterface", description="Flow DOM type"),
print_type_format(
"NodeIterator", "NodeIterator<${1}, ${2}>", "Flow DOM type"
),
print_type_format("NodeList", "NodeList<${1}>", "Flow DOM type"),
print_type_format("Path2D", description="Flow DOM type"),
print_type_format("ProgressEvent", description="Flow DOM type"),
print_type_format("ProgressEventHandler", description="Flow DOM type"),
print_type_format("ProgressEventListener", description="Flow DOM type"),
print_type_format("ProgressEventTypes", description="Flow DOM type"),
print_type_format("PromiseRejectionEvent", description="Flow DOM type"),
print_type_format("Range", description="Flow DOM type"),
print_type_format("RenderingContext", description="Flow DOM type"),
print_type_format("Selection", description="Flow DOM type"),
print_type_format("SelectionDirection", description="Flow DOM type"),
print_type_format("SelectionMode", description="Flow DOM type"),
print_type_format("ShadowRoot", description="Flow DOM type"),
print_type_format("SourceBuffer", description="Flow DOM type"),
print_type_format("SourceBufferList", description="Flow DOM type"),
print_type_format("Storage", description="Flow DOM type"),
print_type_format("SVGMatrix", description="Flow DOM type"),
print_type_format("TexImageSource", description="Flow DOM type"),
print_type_format("Text", description="Flow DOM type"),
print_type_format("TextMetrics", description="Flow DOM type"),
print_type_format("TextRange", description="Flow DOM type"),
print_type_format("TextTrack", description="Flow DOM type"),
print_type_format("TextTrackCue", description="Flow DOM type"),
print_type_format("TextTrackCueList", description="Flow DOM type"),
print_type_format("TextTrackList", description="Flow DOM type"),
print_type_format("TimeRanges", description="Flow DOM type"),
print_type_format("Touch", description="Flow DOM type"),
print_type_format("TouchEvent", description="Flow DOM type"),
print_type_format("TouchEventHandler", description="Flow DOM type"),
print_type_format("TouchEventListener", description="Flow DOM type"),
print_type_format("TouchEventTypes", description="Flow DOM type"),
print_type_format("TouchList", description="Flow DOM type"),
print_type_format("TrackDefault", description="Flow DOM type"),
print_type_format("TrackDefaultList", description="Flow DOM type"),
print_type_format("TreeWalker", "TreeWalker<${1}, ${2}>", "Flow DOM type"),
print_type_format("UIEvent", description="Flow DOM type"),
print_type_format("URL", description="Flow DOM type"),
print_type_format("ValidityState", description="Flow DOM type"),
print_type_format("VertexAttribFVSource", description="Flow DOM type"),
print_type_format("VideoTrack", description="Flow DOM type"),
print_type_format("VideoTrackList", description="Flow DOM type"),
print_type_format("WebGLContextAttributes", description="Flow DOM type"),
print_type_format("WebGLContextEvent", description="Flow DOM type"),
print_type_format("WebGLRenderingContext", description="Flow DOM type"),
print_type_format("WheelEvent", description="Flow DOM type"),
print_type_format("WheelEventHandler", description="Flow DOM type"),
print_type_format("WheelEventListener", description="Flow DOM type"),
print_type_format("WheelEventTypes", description="Flow DOM type"),
# Document Object Model 'private' types
print_type_format(
"CustomEventInit", "CustomEvent\$Init", "Flow DOM private type"
),
print_type_format("EventInit", "Event\$Init", "Flow DOM private type"),
# Browser Object Model types
print_type_format("AnalyserNode", description="Flow BOM type"),
print_type_format("AudioBuffer", description="Flow BOM type"),
print_type_format("AudioBufferSourceNode", description="Flow BOM type"),
print_type_format("AudioContext", description="Flow BOM type"),
print_type_format("AudioDestinationNode", description="Flow BOM type"),
print_type_format("AudioListener", description="Flow BOM type"),
print_type_format("AudioNode", description="Flow BOM type"),
print_type_format("AudioParam", description="Flow BOM type"),
print_type_format("BatteryManager", description="Flow BOM type"),
print_type_format("BiquadFilterNode", description="Flow BOM type"),
print_type_format("BodyInit", description="Flow BOM type"),
print_type_format("CacheType", description="Flow BOM type"),
print_type_format("ChannelMergerNode", description="Flow BOM type"),
print_type_format("ChannelSplitterNode", description="Flow BOM type"),
print_type_format("CloseEvent", description="Flow BOM type"),
print_type_format("ConvolverNode", description="Flow BOM type"),
print_type_format("Coordinates", description="Flow BOM type"),
print_type_format("CredentialsType", description="Flow BOM type"),
print_type_format(
"DedicatedWorkerGlobalScope", description="Flow BOM type"
),
print_type_format("DelayNode", description="Flow BOM type"),
print_type_format("DOMParser", description="Flow BOM type"),
print_type_format("DynamicsCompressorNode", description="Flow BOM type"),
print_type_format("FormData", description="Flow BOM type"),
print_type_format("FormDataEntryValue", description="Flow BOM type"),
print_type_format("GainNode", description="Flow BOM type"),
print_type_format("Gamepad", description="Flow BOM type"),
print_type_format("GamepadButton", description="Flow BOM type"),
print_type_format("Geolocation", description="Flow BOM type"),
print_type_format("Headers", description="Flow BOM type"),
print_type_format("HeadersInit", description="Flow BOM type"),
print_type_format("History", description="Flow BOM type"),
print_type_format("Location", description="Flow BOM type"),
print_type_format(
"MediaElementAudioSourceNode", description="Flow BOM type"
),
print_type_format("MediaStream", description="Flow BOM type"),
print_type_format(
"MediaStreamAudioSourceNode", description="Flow BOM type"
),
print_type_format("MediaStreamTrack", description="Flow BOM type"),
print_type_format("MessageChannel", description="Flow BOM type"),
print_type_format("MessagePort", description="Flow BOM type"),
print_type_format("MethodType", description="Flow BOM type"),
print_type_format("MimeType", description="Flow BOM type"),
print_type_format("MimeTypeArray", description="Flow BOM type"),
print_type_format("ModeType", description="Flow BOM type"),
print_type_format("MutationObserver", description="Flow BOM type"),
print_type_format(
"MutationObserverInitRequired", description="Flow BOM type"
),
print_type_format("MutationRecord", description="Flow BOM type"),
print_type_format("Navigator", description="Flow BOM type"),
print_type_format("NavigatorCommon", description="Flow BOM type"),
print_type_format("OscillatorNode", description="Flow BOM type"),
print_type_format("PannerNode", description="Flow BOM type"),
print_type_format("Performance", description="Flow BOM type"),
print_type_format("PerformanceEntry", description="Flow BOM type"),
print_type_format(
"PerformanceEntryFilterOptions", description="Flow BOM type"
),
print_type_format("PerformanceNavigation", description="Flow BOM type"),
print_type_format(
"PerformanceNavigationTiming", description="Flow BOM type"
),
print_type_format(
"PerformanceResourceTiming", description="Flow BOM type"
),
print_type_format("PerformanceTiming", description="Flow BOM type"),
print_type_format("PeriodicWave", description="Flow BOM type"),
print_type_format("Plugin", description="Flow BOM type"),
print_type_format("PluginArray", description="Flow BOM type"),
print_type_format("Position", description="Flow BOM type"),
print_type_format("PositionError", description="Flow BOM type"),
print_type_format("PositionOptions", description="Flow BOM type"),
print_type_format("RedirectType", description="Flow BOM type"),
print_type_format("ReferrerPolicyType", description="Flow BOM type"),
print_type_format("Request", description="Flow BOM type"),
print_type_format("RequestOptions", description="Flow BOM type"),
print_type_format("Response", description="Flow BOM type"),
print_type_format("ResponseOptions", description="Flow BOM type"),
print_type_format("ResponseType", description="Flow BOM type"),
print_type_format("Screen", description="Flow BOM type"),
print_type_format("ScriptProcessorNode", description="Flow BOM type"),
print_type_format("SharedWorker", description="Flow BOM type"),
print_type_format("SharedWorkerGlobalScope", description="Flow BOM type"),
print_type_format("TextDecoder", description="Flow BOM type"),
print_type_format("TextEncoder", description="Flow BOM type"),
print_type_format("URLSearchParams", description="Flow BOM type"),
print_type_format("WaveShaperNode", description="Flow BOM type"),
print_type_format("WebSocket", description="Flow BOM type"),
print_type_format("Worker", description="Flow BOM type"),
print_type_format("WorkerGlobalScope", description="Flow BOM type"),
print_type_format("WorkerLocation", description="Flow BOM type"),
print_type_format("WorkerNavigator", description="Flow BOM type"),
print_type_format("XDomainRequest", description="Flow BOM type"),
print_type_format("XMLHttpRequest", description="Flow BOM type"),
print_type_format(
"XMLHttpRequestEventTarget", description="Flow BOM type"
),
print_type_format("XMLSerializer", description="Flow BOM type"),
# Browser Object Model 'private' types
print_type_format(
"TextDecoderavailableEncodings",
"TextDecoder\$availableEncodings",
"Flow BOM private type",
),
print_type_format(
"TextEncoderavailableEncodings",
"TextEncoder\$availableEncodings",
"Flow BOM private type",
),
# CSS Object Model types
print_type_format("CSSRule", description="Flow CSSOM type"),
print_type_format("CSSRuleList", description="Flow CSSOM type"),
print_type_format("CSSStyleDeclaration", description="Flow CSSOM type"),
print_type_format("CSSStyleSheet", description="Flow CSSOM type"),
print_type_format("MediaList", description="Flow CSSOM type"),
print_type_format("StyleSheet", description="Flow CSSOM type"),
print_type_format("StyleSheetList", description="Flow CSSOM type"),
print_type_format("TransitionEvent", description="Flow CSSOM type"),
# indexedDB types
print_type_format("IDBCursor", description="Flow indexedDB type"),
print_type_format("IDBCursorWithValue", description="Flow indexedDB type"),
print_type_format("IDBDatabase", description="Flow indexedDB type"),
print_type_format("IDBDirection", description="Flow indexedDB type"),
print_type_format("IDBEnvironment", description="Flow indexedDB type"),
print_type_format("IDBFactory", description="Flow indexedDB type"),
print_type_format("IDBIndex", description="Flow indexedDB type"),
print_type_format("IDBKeyRange", description="Flow indexedDB type"),
print_type_format("IDBObjectStore", description="Flow indexedDB type"),
print_type_format("IDBOpenDBRequest", description="Flow indexedDB type"),
print_type_format("IDBRequest", description="Flow indexedDB type"),
print_type_format("IDBTransaction", description="Flow indexedDB type"),
# Node.js types
print_type_format("AssertionError", description="Flow Node.js type"),
print_type_format("Buffer", description="Flow Node.js type"),
print_type_format("Sign", description="Flow Node.js type"),
print_type_format("Verify", description="Flow Node.js type"),
print_type_format("duplexStreamOptions", description="Flow Node.js type"),
print_type_format("EventEmitter", description="Flow Node.js type"),
print_type_format("FSWatcher", description="Flow Node.js type"),
print_type_format("ReadStream", description="Flow Node.js type"),
print_type_format("Stats", description="Flow Node.js type"),
print_type_format("WriteStream", description="Flow Node.js type"),
print_type_format("ClientRequest", description="Flow Node.js type"),
print_type_format("IncomingMessage", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("ServerResponse", description="Flow Node.js type"),
print_type_format("ClientRequest", description="Flow Node.js type"),
print_type_format("IncomingMessage", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("ServerResponse", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("Socket", description="Flow Node.js type"),
print_type_format("Process", description="Flow Node.js type"),
print_type_format(
"readableStreamOptions", description="Flow Node.js type"
),
print_type_format(
"writableStreamOptions", description="Flow Node.js type"
),
print_type_format("Deflate", description="Flow Node.js type"),
print_type_format("DeflateRaw", description="Flow Node.js type"),
print_type_format("Gunzip", description="Flow Node.js type"),
print_type_format("Gzip", description="Flow Node.js type"),
print_type_format("Inflate", description="Flow Node.js type"),
print_type_format("InflateRaw", description="Flow Node.js type"),
print_type_format("Unzip", description="Flow Node.js type"),
print_type_format("Zlib", description="Flow Node.js type"),
# Node.js private types
print_type_format(
"bufferEncoding", "buffer\$Encoding", "Flow Node.js private type"
),
print_type_format(
"bufferNonBufferEncoding",
"buffer\$NonBufferEncoding",
"Flow Node.js private type",
),
print_type_format(
"bufferToJSONRet", "buffer\$ToJSONRet", "Flow Node.js private type"
),
print_type_format(
"child_processChildProcess",
"child_process\$ChildProcess",
"Flow Node.js private type",
),
print_type_format(
"child_processError",
"child_process\$Error",
"Flow Node.js private type",
),
print_type_format(
"child_processexecCallback",
"child_process\$execCallback",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileCallback",
"child_process\$execFileCallback",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileOpts",
"child_process\$execFileOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileSyncOpts",
"child_process\$execFileSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecOpts",
"child_process\$execOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecSyncOpts",
"child_process\$execSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processforkOpts",
"child_process\$forkOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processHandle",
"child_process\$Handle",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnOpts",
"child_process\$spawnOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnRet",
"child_process\$spawnRet",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnSyncOpts",
"child_process\$spawnSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnSyncRet",
"child_process\$spawnSyncRet",
"Flow Node.js private type",
),
print_type_format(
"clustersetupMasterOpts",
"cluster\$setupMasterOpts",
"Flow Node.js private type",
),
print_type_format(
"clusterWorker", "cluster\$Worker", "Flow Node.js private type"
),
print_type_format(
"cryptoCipher", "crypto\$Cipher", "Flow Node.js private type"
),
print_type_format(
"cryptocreateCredentialsDetails",
"crypto\$createCredentialsDetails",
"Flow Node.js private type",
),
print_type_format(
"cryptoCredentials", "crypto\$Credentials", "Flow Node.js private type"
),
print_type_format(
"cryptoDecipher", "crypto\$Decipher", "Flow Node.js private type"
),
print_type_format(
"cryptoDiffieHellman",
"crypto\$DiffieHellman",
"Flow Node.js private type",
),
print_type_format(
"cryptoHash", "crypto\$Hash", "Flow Node.js private type"
),
print_type_format(
"cryptoHmac", "crypto\$Hmac", "Flow Node.js private type"
),
print_type_format(
"cryptoSign", "crypto\$Sign", "Flow Node.js private type"
),
print_type_format(
"cryptoSignprivate_key",
"crypto\$Sign\$private_key",
"Flow Node.js private type",
),
print_type_format(
"cryptoVerify", "crypto\$Verify", "Flow Node.js private type"
),
print_type_format(
"dgramSocket", "dgram\$Socket", "Flow Node.js private type"
),
print_type_format(
"domainDomain", "domain\$Domain", "Flow Node.js private type"
),
print_type_format(
"eventsEventEmitter",
"events\$EventEmitter",
"Flow Node.js private type",
),
print_type_format(
"httpClientRequest", "http\$ClientRequest", "Flow Node.js private type"
),
print_type_format(
"httpIncomingMessage",
"http\$IncomingMessage",
"Flow Node.js private type",
),
print_type_format(
"httpServerResponse",
"http\$ServerResponse",
"Flow Node.js private type",
),
print_type_format(
"netconnectOptions", "net\$connectOptions", "Flow Node.js private type"
),
print_type_format("netServer", "net\$Server", "Flow Node.js private type"),
print_type_format("netSocket", "net\$Socket", "Flow Node.js private type"),
print_type_format(
"netSocketaddress", "net\$Socket\$address", "Flow Node.js private type"
),
print_type_format("osCPU", "os\$CPU", "Flow Node.js private type"),
print_type_format(
"osNetIFAddr", "os\$NetIFAddr", "Flow Node.js private type"
),
print_type_format(
"osUserInfobuffer", "os\$UserInfo\$buffer", "Flow Node.js private type"
),
print_type_format(
"osUserInfostring", "os\$UserInfo\$string", "Flow Node.js private type"
),
print_type_format(
"readlineInterface", "readline\$Interface", "Flow Node.js private type"
),
print_type_format(
"streamDuplex", "stream\$Duplex", "Flow Node.js private type"
),
print_type_format(
"streamPassThrough", "stream\$PassThrough", "Flow Node.js private type"
),
print_type_format(
"streamReadable", "stream\$Readable", "Flow Node.js private type"
),
print_type_format(
"streamStream", "stream\$Stream", "Flow Node.js private type"
),
print_type_format(
"streamTransform", "stream\$Transform", "Flow Node.js private type"
),
print_type_format(
"streamWritable", "stream\$Writable", "Flow Node.js private type"
),
print_type_format(
"string_decoderStringDecoder",
"string_decoder\$StringDecoder",
"Flow Node.js private type",
),
print_type_format("tlsServer", "tls\$Server", "Flow Node.js private type"),
print_type_format(
"tlsTLSSocket", "tls\$TLSSocket", "Flow Node.js private type"
),
print_type_format(
"ttyReadStream", "tty\$ReadStream", "Flow Node.js private type"
),
print_type_format(
"ttyWriteStream", "tty\$WriteStream", "Flow Node.js private type"
),
print_type_format("vmContext", "vm\$Context", "Flow Node.js private type"),
print_type_format("vmScript", "vm\$Script", "Flow Node.js private type"),
print_type_format(
"vmScriptOptions", "vm\$ScriptOptions", "Flow Node.js private type"
),
print_type_format(
"zlibasyncFn", "zlib\$asyncFn", "Flow Node.js private type"
),
print_type_format(
"zliboptions", "zlib\$options", "Flow Node.js private type"
),
print_type_format(
"zlibsyncFn", "zlib\$syncFn", "Flow Node.js private type"
),
# Service Workers types
print_type_format("Cache", description="Flow service worker type"),
print_type_format(
"CacheQueryOptions", description="Flow service worker type"
),
print_type_format("CacheStorage", description="Flow service worker type"),
print_type_format("Client", description="Flow service worker type"),
print_type_format(
"ClientQueryOptions", description="Flow service worker type"
),
print_type_format("Clients", description="Flow service worker type"),
print_type_format("ClientType", description="Flow service worker type"),
print_type_format(
"ExtendableEvent", description="Flow service worker type"
),
print_type_format("FetchEvent", description="Flow service worker type"),
print_type_format(
"ForeignFetchOptions", description="Flow service worker type"
),
print_type_format("FrameType", description="Flow service worker type"),
print_type_format("InstallEvent", description="Flow service worker type"),
print_type_format(
"NavigationPreloadManager", description="Flow service worker type"
),
print_type_format(
"NavigationPreloadState", description="Flow service worker type"
),
print_type_format(
"RegistrationOptions", description="Flow service worker type"
),
print_type_format("RequestInfo", description="Flow service worker type"),
print_type_format("ServiceWorker", description="Flow service worker type"),
print_type_format(
"ServiceWorkerContainer", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerMessageEvent", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerRegistration", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerState", description="Flow service worker type"
),
print_type_format(
"VisibilityState", description="Flow service worker type"
),
print_type_format("WindowClient", description="Flow service worker type"),
print_type_format("WorkerType", description="Flow service worker type"),
# Streams types
print_type_format("PipeToOptions", description="Flow streams type"),
print_type_format("QueuingStrategy", description="Flow streams type"),
print_type_format(
"ReadableByteStreamController", description="Flow streams type"
),
print_type_format("ReadableStream", description="Flow streams type"),
print_type_format(
"ReadableStreamBYOBRequest", description="Flow streams type"
),
print_type_format(
"ReadableStreamController", description="Flow streams type"
),
print_type_format("ReadableStreamReader", description="Flow streams type"),
print_type_format("TextEncodeOptions", description="Flow streams type"),
print_type_format("TextEncoder", description="Flow streams type"),
print_type_format("TransformStream", description="Flow streams type"),
print_type_format("UnderlyingSink", description="Flow streams type"),
print_type_format("UnderlyingSource", description="Flow streams type"),
print_type_format("WritableStream", description="Flow streams type"),
print_type_format(
"WritableStreamController", description="Flow streams type"
),
print_type_format("WritableStreamWriter", description="Flow streams type"),
]
| Pegase745/sublime-flowtype | flowtype/listeners/builtintypes.py | Python | mit | 41,312 |
# Copyright (c) 2013-2014 Will Thames <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class GitHasVersionRule(AnsibleLintRule):
id = '401'
shortdesc = 'Git checkouts must contain explicit version'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['module', 'repeatability', 'ANSIBLE0004']
version_added = 'historic'
def matchtask(self, file, task):
return (task['action']['__ansible_module__'] == 'git' and
task['action'].get('version', 'HEAD') == 'HEAD')
| willthames/ansible-lint | lib/ansiblelint/rules/GitHasVersionRule.py | Python | mit | 1,699 |
# backports.functools_lru_cache v1.5
# https://github.com/jaraco/backports.functools_lru_cache
# Copyright (c) 2014-2018 Jason R. Coombs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Backport of functools.lru_cache from Python 3.3 as published at ActiveState"""
from __future__ import absolute_import
import functools
from collections import namedtuple
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
@functools.wraps(functools.update_wrapper)
def update_wrapper(wrapper,
wrapped,
assigned = functools.WRAPPER_ASSIGNMENTS,
updated = functools.WRAPPER_UPDATES):
"""
Patch two bugs in functools.update_wrapper.
"""
# workaround for http://bugs.python.org/issue3445
assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr))
wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated)
# workaround for https://bugs.python.org/issue17482
wrapper.__wrapped__ = wrapped
return wrapper
class _HashedSeq(list):
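    # Wraps the flattened key tuple so its hash is computed once up front and
    # reused on every dictionary lookup (a list subclass keeps it cheap).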
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes=set([int, str, frozenset, type(None)])):
"""Make a cache key from optionally typed positional and keyword arguments"""
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
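

if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original backport): cache a
    # recursive function and inspect hit/miss statistics.
    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(20))           # 6765; each fib(n) is computed only once
    print(fib.cache_info())  # CacheInfo(hits=18, misses=21, maxsize=32, currsize=21)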
| orbingol/NURBS-Python | geomdl/functools_lru_cache.py | Python | mit | 8,538 |
'''
Created on Sep 22, 2016
@author: rtorres
'''
import os
from flaskiwsapp.settings.baseConfig import BaseConfig
class DevConfig(BaseConfig):
"""Development configuration"""
ENV = 'dev'
DEBUG = True
DEBUG_TB_ENABLED = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'
AUTH0_CALLBACK_URL = 'http://localhost/auth/callback'
AUTH0_CLIENT_ID = ''
AUTH0_CLIENT_SECRET = ''
AUTH0_DOMAIN = ''
APP_DOMAIN = 'localhost'
APP_URL = 'http://%s' % APP_DOMAIN
    SERVER_NAME = 'localhost'
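

# Illustrative usage sketch (assumed app factory, not part of this module):
# the config class is normally handed to Flask via
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(DevConfig)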
| rafasis1986/EngineeringMidLevel | flaskiwsapp/settings/devConfigTemplate.py | Python | mit | 539 |
import socket, threading, thread, os, sys, time
def create(settings):
return IrcBot(settings)
# Revision, yolo
class IrcBot:
_settings = {}
_debug = None
_client = "ArmedGuys IRC Bot"
_version = "0.5"
_env = "Python"
_socket = None
# Channels the bot is in
_channels = []
# Message Loop
_messageThreadRunning = True
_messageThread = None
# Event Queue Loop
_queueThreadRunning = True
_queueThread = None
_queue = None
def __init__(self, settings):
self._settings = settings
self._queue = IrcEventQueue()
if "debug" in settings:
self._debug = DebugLog(settings['nick'])
def connect(self):
if "host" in self._settings and "port" in self._settings:
self._socket = socket.create_connection((self._settings['host'], self._settings['port']))
# Register events
self._queue.RegisterHandler(IrcEvent.PacketRecieved, self.onpacket)
self._queue.RegisterHandler(IrcEvent.MessageRecieved, self.onmessage)
self._queue.RegisterHandler(IrcEvent.PingRecieved, self.onping)
# before sending, create message & queue loops
self.startQueueThread() # start event queue thread
self.startMessageThread() # start message queue thread
# begin connection
if "serverpassword" in self._settings:
self.out("PASS %s\r\n" % self._settings['serverpassword'])
self.out("NICK %s\r\n" % self._settings['nick'])
self.out("USER %s %s bla :%s\r\n" % (self._settings['ident'], self._settings['host'], self._settings['realname']))
def reconnect(self): # reconnect assumes events are all intact, that socket is closed and that queue thread is still running
if self._messageThreadRunning == False and self._queueThreadRunning == True:
self._socket = socket.create_connection((self._settings['host'], self._settings['port']))
# before sending, create message & queue loops
self._messageThreadRunning = True # reset msgthread state
self.startMessageThread() # start message queue thread
# begin connection
if "serverpassword" in self._settings:
self.out("PASS %s\r\n" % self._settings['serverpassword'])
self.out("NICK %s\r\n" % self._settings['nick'])
self.out("USER %s %s bla :%s\r\n" % (self._settings['ident'], self._settings['host'], self._settings['realname']))
def startMessageThread(self):
try:
self._messageThread = threading.Thread(target=self.messageThread)
self._messageThread.start()
except:
print "exception: %s" % str(sys.exec_info())
def startQueueThread(self):
try:
self._queueThread = threading.Thread(target=self.queueThread)
self._queueThread.start()
except:
print "exception: %s" % str(sys.exec_info())
def messageThread(self):
tempbuf = ""
while self._messageThreadRunning == True:
try:
sockbuf = self._socket.recv(4096)
if sockbuf == "": # dead connection
self._messageThreadRunning = False
self._queue.event(IrcEvent.BotLostConnection, None)
self._socket.close()
if "debug" in self._settings:
self._debug.write("BOT LOST CONNECTION", "Unknown reason")
else:
sockbuf = tempbuf + sockbuf
if "\n" in sockbuf: # should always happen
pcks = sockbuf.split("\n") # Splits them up as full IRC Commands, anyone cut off by buffer size gets put in a temp buffer and used next loop
tempbuf = pcks.pop()
for pck in pcks:
pck = pck.rstrip()
if "debug" in self._settings:
self._debug.write("GOT PACKET", pck)
packet = IrcPacket(pck)
self._queue.event(IrcEvent.PacketRecieved, packet)
except:
print "exception: %s\n" % str(sys.exc_info())
self._messageThreadRunning = False
self._socket.close()
self._queue.event(IrcEvent.BotLostConnection, None)
if "debug" in self._settings:
self._debug.write("MESSAGETHREAD EXCEPTION", str(sys.exc_info()))
def queueThread(self):
while self._queueThreadRunning == True:
next = self._queue.next()
self._queue.Handle(next)
time.sleep(0.001)
######################################### EVENT HANDLER HANDLING HANDLE HANDLING HANDLE HAND #############
def RegisterEventHandler(self, type, handler):
self._queue.RegisterHandler(type, handler)
def UnregisterEventHandler(self, type, handler):
self._queue.UnregisterHandler(type, handler)
######################################### EVENT HANDLING #################################################
def onpacket(self, type, data):
if type == IrcEvent.PacketRecieved:
if data.command == "PING":
self._queue.event(IrcEvent.PingRecieved, data.message)
if data.command == "ERROR":
self._queue.event(IrcEvent.IrcError, data)
else: # can't say this is the best implementation, but hey, it woerkz
self._queue.event(IrcEvent.MessageRecieved, data)
def onping(self, type, data):
if type == IrcEvent.PingRecieved:
self.out("PONG :%s\r\n" % data)
def onmessage(self, type, data):
# print "Recieved message of type: %s from %s" % (data.command, data.sender)
if type == IrcEvent.MessageRecieved:
if data.command == "PRIVMSG":
self._queue.event(IrcEvent.PrivmsgRecieved, data)
#print "privmsg reciever: %s" % data.params[0]
if data.params[0][0] != "#":
self._queue.event(IrcEvent.QueryRecieved, data)
else:
self._queue.event(IrcEvent.ChanmsgRecieved, data)
if data.command == "NOTICE":
self._queue.event(IrcEvent.NoticeRecieved, data)
if data.command == "TOPIC":
self._queue.event(IrcEvent.TopicChanged, data)
if data.command == "JOIN":
self._queue.event(IrcEvent.UserJoined, data)
if data.command == "PART":
self._queue.event(IrcEvent.UserLeft, data)
if data.command == "NICK":
self._queue.event(IrcEvent.NickChanged, data)
######################################### BOT CONTROL ####################################################
def exit(self, message):
self.out("QUIT :%s" % message)
self._queueThreadRunning = False
self._messageThreadRunning = False
self._socket.close()
# basic send types
def out(self, data):
if len(data) == 0: return
if "debug" in self._settings:
self._debug.write("SENT PACKET", data.rstrip())
if "\r\n" not in data:
data = data + "\r\n"
if self._socket:
self._socket.send(data)
def msg(self, target, message):
self.out("PRIVMSG %s :%s\r\n" % (target,message))
def notice(self, target, message):
self.out("NOTICE %s :%s\r\n" % (target, message))
# Channel stuff
def join(self, channel):
self._channels.append(channel)
self.out("JOIN :%s\r\n" % channel)
def leave(self, channel):
self.out("PART :%s\r\n" % channel)
try:
self._channels.remove(channel)
except:
pass
# Other stuff
def status(self, status):
if status == "":
self.out("NICK %s\r\n" % self._settings['nick'])
else:
self.out("NICK %s|%s\r\n" % (self._settings['nick'], status))
########################### EVENT QUEUE #########################################
class IrcEvent:
PacketRecieved = 0
MessageRecieved = 1
PingRecieved = 2
NoticeRecieved = 3
PrivmsgRecieved = 4
ChanmsgRecieved = 5
QueryRecieved = 6
TopicChanged = 7
UserJoined = 8
UserLeft = 9
NickChanged = 10
BotLostConnection = 11
IrcError = 12
class IrcEventQueue:
EventHandlers = {}
next = None
_queue = None
def RegisterHandler(self, event, handler):
if event in self.EventHandlers:
self.EventHandlers[event].append(handler)
else:
self.EventHandlers[event] = [handler]
def UnregisterHandler(self, event, handler):
if event in IrcEventQueue.EventHandlers:
try:
self.EventHandlers[event].remove(handler)
except:
pass
def Handle(self, event):
if event[0] in self.EventHandlers:
for e in self.EventHandlers[event[0]]:
e(event[0], event[1])
# Constructor
def __init__(self):
self._queue = self.ThreadsafeQueue()
self.next = self._queue.get
def event(self, type, data): # queue an event
self._queue.enqueue((type, data))
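    # Events travel through the queue as (type, data) tuples; the bot's queue
    # thread pops them in order and fans each one out to the handlers
    # registered for that event type.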
class ThreadsafeQueue:
def __init__(self):
self._eventList = []
self._newEventCondition = threading.Condition()
def enqueue(self, event): # adds an event to the queue
with self._newEventCondition:
self._eventList.append(event)
self._newEventCondition.notify()
def empty(self): # returns True if list is empty
with self._newEventCondition:
return len(self._eventList) == 0
def get(self):
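            # Blocks on the condition variable until an event is available,
            # then pops and returns the oldest one (FIFO order).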
with self._newEventCondition:
while self.empty():
self._newEventCondition.wait()
return self._eventList.pop(0)
########################### BOT COMPONENTS ######################################
class IrcPacket:
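    # Parses one raw IRC line into sender / command / params / message.
    # Illustrative example (derived from the parsing logic below):
    #   ":nick!user@host PRIVMSG #chan :hello there"
    #   -> sender = "nick!user@host", command = "PRIVMSG",
    #      params = ["#chan"], message = "hello there"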
sender = ""
command = "" # command, numerical or text
params = None # any numerical reply params
message = "" # after "last" :
def __init__(self, buf):
self.params = []
if buf[0] == ":": # events generally
self.sender = ""
if ":" in buf[1:]:
d = buf[1:].split(":",1)
cm = d[0].strip()
if " " in cm: # must probably always happen, else will "never" happen
cmpar = cm.split(" ")
self.sender = cmpar[0]
self.command = cmpar[1]
self.params = cmpar[2:]
else:
self.command = cm
self.message = d[1]
else:
cm = buf[1:].strip()
if " " in cm: # must probably always happen, else will "never" happen
cmpar = cm.split(" ")
self.sender = cmpar[0]
self.command = cmpar[1]
self.params = cmpar[2:]
else:
self.command = cm
else:
self.sender = None
if ":" in buf:
d = buf.split(":",1)
cm = d[0].strip()
if " " in cm: # must probably always happen, else will "never" happen
cmpar = cm.split(" ")
self.command = cmpar[0]
self.params = cmpar[1:]
else:
self.command = cm
self.message = d[1]
else:
cm = buf.strip()
if " " in cm: # must probably always happen, else will "never" happen
cmpar = cm.split(" ")
self.command = cmpar[0]
self.params = cmpar[1:]
else:
self.command = cm
class IrcUser:
nick = ""
ident = ""
host = ""
def __init__(self, userstring):
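        # e.g. "nick!ident@host" -> nick="nick", ident="ident", host="host"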
if "!" in userstring:
d = userstring.split('!')
self.nick = d[0]
d = d[1].split("@")
self.ident = d[0]
self.host = d[1]
class DebugLog:
f = None
def __init__(self, prefix):
self.f = open("%s_irc.log" % prefix, "w")
def write(self, prefix, data):
self.f.write("[%s] [%s]: %s\r\n" % (time.time(), prefix, data))
self.f.flush()
############# STANDARD BOT ROUTINES ##############
class StandardBotRoutines:
_bot = None
_botSettings = None
# channels to join
_channels = []
# nickserv password to use
_nickservpassword = None
def __init__(self, bot, settings):
self._bot = bot
self._botSettings = settings
self._bot.RegisterEventHandler(IrcEvent.MessageRecieved, self.onMsgRecieved)
# join channel and nickserv auth
def queueJoinChannels(self, channels):
self._channels = channels
def queueNickServAuth(self, password):
self._nickservpassword = password
# automatic reconnect after internet connection issue
def autoReconnect(self):
self._bot.RegisterEventHandler(IrcEvent.BotLostConnection, self.onLostConn)
def onLostConn(self, type, data):
time.sleep(5)
print "reconnecting..."
self._bot.reconnect()
# handles join and nickserv pw
def onMsgRecieved(self, type, data):
if type == IrcEvent.MessageRecieved and data.command == "376": # end MOTD, auth w/ NickServ and join channels
if self._nickservpassword != None:
self._bot.msg("NickServ", "IDENTIFY %s" % self._nickservpassword)
for channel in self._channels:
self._bot.join(channel)
############# TEST CODE ###############
if __name__ == "__main__":
def bot_lost_connection_test(data1, data2):
print str(data2)
def user_joined(data1, data2):
bot.notice("#Pie-Studios", "Travis CI build currently running!")
bot.exit("Tests complete!")
settings = {
'host': "irc.rizon.net",
'port': 6667,
'nick': 'pyircbot',
'ident': 'pyircbot',
'realname': 'TheLeagueSpecialist',
'debug': False,
}
bot = create(settings)
standard = StandardBotRoutines(bot, settings)
standard.queueJoinChannels(["#Pie-Studios"])
standard.autoReconnect()
bot.RegisterEventHandler(IrcEvent.UserJoined, user_joined)
bot.RegisterEventHandler(IrcEvent.BotLostConnection, bot_lost_connection_test)
bot.connect() | ArmedGuy/pyircbot | pyircbot.py | Python | mit | 15,149 |
from django.conf import settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import View, TemplateView
from socialregistration.clients.oauth import OAuthError
from socialregistration.mixins import SocialRegistration
GENERATE_USERNAME = getattr(settings, 'SOCIALREGISTRATION_GENERATE_USERNAME', False)
USERNAME_FUNCTION = getattr(settings, 'SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION',
'socialregistration.utils.generate_username')
FORM_CLASS = getattr(settings, 'SOCIALREGISTRATION_SETUP_FORM',
'socialregistration.forms.UserForm')
INITAL_DATA_FUNCTION = getattr(settings, 'SOCIALREGISTRATION_INITIAL_DATA_FUNCTION',
None)
class Setup(SocialRegistration, View):
"""
Setup view to create new Django users from third party APIs.
"""
template_name = 'socialregistration/setup.html'
def get_form(self):
"""
Return the form to be used. The return form is controlled
with ``SOCIALREGISTRATION_SETUP_FORM``.
"""
return self.import_attribute(FORM_CLASS)
def get_username_function(self):
"""
Return a function that can generate a username. The function
is controlled with ``SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION``.
"""
return self.import_attribute(USERNAME_FUNCTION)
def get_initial_data(self, request, user, profile, client):
"""
Return initial data for the setup form. The function can be
controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``.
:param request: The current request object
:param user: The unsaved user object
:param profile: The unsaved profile object
:param client: The API client
"""
if INITAL_DATA_FUNCTION:
func = self.import_attribute(INITAL_DATA_FUNCTION)
return func(request, user, profile, client)
return {}
def generate_username_and_redirect(self, request, user, profile, client):
"""
Generate a username and then redirect the user to the correct place.
This method is called when ``SOCIALREGISTRATION_GENERATE_USERNAME``
is set.
:param request: The current request object
:param user: The unsaved user object
:param profile: The unsaved profile object
:param client: The API client
"""
func = self.get_username_function()
user.username = func(user, profile, client)
user.set_unusable_password()
user.save()
profile.user = user
profile.save()
user = profile.authenticate()
self.send_connect_signal(request, user, profile, client)
self.login(request, user)
self.send_login_signal(request, user, profile, client)
self.delete_session_data(request)
return HttpResponseRedirect(self.get_next(request))
def get(self, request):
"""
When signing a new user up - either display a setup form, or
generate the username automatically.
"""
try:
user, profile, client = self.get_session_data(request)
except KeyError:
return self.render_to_response(dict(
error=_("Social profile is missing from your session.")))
if GENERATE_USERNAME:
return self.generate_username_and_redirect(request, user, profile, client)
form = self.get_form()(initial=self.get_initial_data(request, user, profile, client))
return self.render_to_response(dict(form=form))
def post(self, request):
"""
Save the user and profile, login and send the right signals.
"""
try:
user, profile, client = self.get_session_data(request)
except KeyError:
return self.render_to_response(dict(
error=_("A social profile is missing from your session.")))
form = self.get_form()(request.POST, request.FILES,
initial=self.get_initial_data(request, user, profile, client))
if not form.is_valid():
return self.render_to_response(dict(form=form))
user, profile = form.save(request, user, profile, client)
user = profile.authenticate()
self.send_connect_signal(request, user, profile, client)
self.login(request, user)
self.send_login_signal(request, user, profile, client)
self.delete_session_data(request)
return HttpResponseRedirect(self.get_next(request))
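# A minimal sketch (not part of this module) of a username generator for
# ``SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION``; the signature
# ``func(user, profile, client)`` matches the call in
# generate_username_and_redirect above, while the uuid-based body is only an
# illustration.
#
#     import uuid
#
#     def generate_username(user, profile, client):
#         return uuid.uuid4().hex[:30]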
class Logout(View):
"""
Log the user out of Django. This **does not** log the user out
of third party sites.
"""
def get(self, request):
logout(request)
url = getattr(settings, 'LOGOUT_REDIRECT_URL', '/')
return HttpResponseRedirect(url)
class OAuthRedirect(SocialRegistration, View):
"""
Base class for both OAuth and OAuth2 redirects.
:param client: The API client class that should be used.
:param template_name: The error template.
"""
# The OAuth{1,2} client to be used
client = None
# The template to render in case of errors
template_name = None
def post(self, request):
"""
Create a client, store it in the user's session and redirect the user
to the API provider to authorize our app and permissions.
"""
request.session['next'] = self.get_next(request)
client = self.get_client()()
request.session[self.get_client().get_session_key()] = client
try:
return HttpResponseRedirect(client.get_redirect_url())
except OAuthError, error:
return self.render_to_response({'error': error})
class OAuthCallback(SocialRegistration, View):
"""
Base class for OAuth and OAuth2 callback views.
:param client: The API client class that should be used.
:param template_name: The error template.
"""
# The OAuth{1,2} client to be used
client = None
# The template to render in case of errors
template_name = None
def get_redirect(self):
"""
Return a URL that will set up the correct models if the
OAuth flow succeeded. Subclasses **must** override this
method.
"""
raise NotImplementedError
def get(self, request):
"""
Called after the user is redirected back to our application.
Tries to:
- Complete the OAuth / OAuth2 flow
- Redirect the user to another view that deals with login, connecting
or user creation.
"""
try:
client = request.session[self.get_client().get_session_key()]
client.complete(dict(request.GET.items()))
request.session[self.get_client().get_session_key()] = client
return HttpResponseRedirect(self.get_redirect())
except KeyError:
return self.render_to_response({'error': "Session expired."})
except OAuthError, error:
return self.render_to_response({'error': error})
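# A minimal subclass sketch (not part of this module): a concrete callback
# supplies a client class and overrides get_redirect(). ``MyOAuthClient`` is a
# placeholder, not a real socialregistration symbol.
#
#     class MyOAuthCallback(OAuthCallback):
#         client = MyOAuthClient
#
#         def get_redirect(self):
#             return reverse('socialregistration:setup')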
class SetupCallback(SocialRegistration, TemplateView):
"""
Base class for OAuth and OAuth2 login / connects / registration.
"""
template_name = 'socialregistration/setup.error.html'
def get(self, request):
"""
Called after authorization was granted and the OAuth flow
successfully completed.
Tries to:
- Connect the remote account if the user is logged in already
- Log the user in if a local profile of the remote account
exists already
- Create a user and profile object if none of the above succeed
and redirect the user further to either capture some data via
form or generate a username automatically
"""
try:
client = request.session[self.get_client().get_session_key()]
except KeyError:
return self.render_to_response({'error': "Session expired."})
# Get the lookup dictionary to find the user's profile
lookup_kwargs = self.get_lookup_kwargs(request, client)
# Logged in user (re-)connecting an account
if request.user.is_authenticated():
try:
profile = self.get_profile(**lookup_kwargs)
# Make sure that there is only *one* account per profile.
if not profile.user == request.user:
self.delete_session_data(request)
return self.render_to_response({
'error': _('This profile is already connected to another user account.')
})
except self.get_model().DoesNotExist:
profile, created = self.get_or_create_profile(request.user,
save=True, **lookup_kwargs)
self.send_connect_signal(request, request.user, profile, client)
return self.redirect(request)
# Logged out user - let's see if we've got the identity saved already.
# If so - just log the user in. If not, create profile and redirect
# to the setup view
user = self.authenticate(**lookup_kwargs)
# No user existing - create a new one and redirect to the final setup view
if user is None:
user = self.create_user()
profile = self.create_profile(user, **lookup_kwargs)
self.store_user(request, user)
self.store_profile(request, profile)
self.store_client(request, client)
return HttpResponseRedirect(reverse('socialregistration:setup'))
# Inactive user - displaying / redirect to the appropriate place.
if not user.is_active:
return self.inactive_response()
# Active user with existing profile: login, send signal and redirect
self.login(request, user)
profile = self.get_profile(user=user, **lookup_kwargs)
self.send_login_signal(request, user, profile, client)
return self.redirect(request)
| vinco/django-socialregistration | socialregistration/views.py | Python | mit | 10,506 |
# -*- coding: utf-8 -*-
import configs.module
import wsgiref.simple_server
import select
import json
import bot
from urllib import parse
import irc.fullparse
import irc.splitparse
import os.path
def init(options):
m = configs.module.Module(__name__)
if 'wserver' in options['server'].state:
del options['server'].state['wserver']
try:
if 'apiport' in options['server'].entry:
options['server'].state[
'wserver'] = wsgiref.simple_server.make_server(
'', options['server'].entry['apiport'],
application(options['server']))
print(('Opening API server on %d' % options[
'server'].entry['apiport']))
except OSError:
print(('Unable to open API server on %d' % options[
'server'].entry['apiport']))
m.set_help('Access various bot functions from a json API.')
m.add_timer_hook(1 * 1000, timer)
m.add_base_hook('api.action.command', apiactioncommand)
m.add_base_hook('api.path.interface', apipathinterface)
return m
class application:
def __init__(self, server):
self.server = server
def __call__(self, environ, start_response):
ret = {
'status': 'error',
'message': 'unknown',
}
start_response('200 OK',
[('content-type', 'text/html;charset=utf-8')])
path = environ['PATH_INFO'].strip('/')
q = parse.parse_qs(environ['QUERY_STRING'])
action = q['action'][0] if 'action' in q else ''
try:
if path:
ret['message'] = 'unknown request'
ret['status'] = 'error'
self.server.do_base_hook('api.path.%s' % path,
ret, self.server, q, environ)
else:
ret['message'] = 'invalid action'
ret['status'] = 'error'
self.server.do_base_hook('api.action.%s' % action,
ret, self.server, q, environ)
if '_html' in ret:
return [ret['_html'].encode('utf-8')]
except KeyError:
pass
return [json.dumps(ret).encode('utf-8')]
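# Illustrative client-side sketch (not part of this module), assuming a bot
# with apiport set to 8080 on localhost; apiactioncommand below answers
# ``?action=command`` requests with JSON.
#
#     from urllib import request
#
#     def call_api(command, port=8080):
#         url = 'http://localhost:%d/?%s' % (
#             port, parse.urlencode({'action': 'command', 'command': command}))
#         return json.loads(request.urlopen(url).read().decode('utf-8'))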
def apiactioncommand(ret, server, q, environ):
del ret['message']
ip = environ['REMOTE_ADDR']
    if 'command' not in q:
        ret['message'] = 'no command'
        ret['status'] = 'error'
        return
if server.type == 'irc':
def process_message(i):
sp = irc.splitparse.SplitParser(i)
fp = irc.fullparse.FullParse(
server, sp, nomore=True)
return fp.execute(sp.text)
ret['output'] = process_message(
':%s!%s PRIVMSG %s :%s' % (':' + ip, "~api@" + ip,
server.nick,
q['command'][0],
))
elif server.type == 'file':
ret['output'] = server.fp(server, q['command'][0])
ret['status'] = 'good'
def apipathinterface(ret, server, q, environ):
del ret['message']
ret['_html'] = open(os.path.dirname(__file__) + '/interface.html').read()
ret['status'] = 'good'
def timer():
for server in bot.servers():
if 'wserver' not in server.state:
continue
wserver = server.state['wserver']
inr, _, _ = select.select([wserver], [], [], 0.01)
if inr:
wserver.handle_request() | shacknetisp/fourthevaz | modules/core/api/__init__.py | Python | mit | 3,378 |
# Write a function to delete a node (except the tail) in a singly linked list,
# given only access to that node.
#
# Suppose the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node
# with value 3, the linked list should become 1 -> 2 -> 4 after calling your function.
#
# time: O(1)
# space: O(1)
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteNode2(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
curt = node
prev = None
while curt.next is not None:
curt.val = curt.next.val
prev = curt
curt = curt.next
if prev is not None:
prev.next = None
return
def deleteNode1(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
curt = node
while curt.next is not None:
curt.val = curt.next.val
if curt.next.next is None:
curt.next = None
break
curt = curt.next
return
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
return
if __name__ == '__main__':
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n1.next = n2
n2.next = n3
n3.next = n4
sol = Solution()
sol.deleteNode(n1)
print n1.val, n1.next.val, n1.next.next.val
try:
print n1.next.next.next.val
except:
print 'None Type!'
pass
| RobinCPC/algorithm-practice | LinkedList/deleteNode.py | Python | mit | 1,861 |
from django import template
from djangopress.core.util import smart_truncate_chars as _smart_truncate_chars
register = template.Library()
# truncate chars but leaving last word complete
@register.filter(name='smarttruncatechars')
def smart_truncate_chars(value, max_length):
return _smart_truncate_chars(value, max_length) | codefisher/djangopress | djangopress/core/templatetags/smart_truncate_chars.py | Python | mit | 330 |
from __future__ import absolute_import, print_function
import os
import pwd
import grp
import sys
import subprocess
from .command import Command
class AuthorizedKeysCommand(Command):
"""
Get authorized keys for a user using NSS and SSSD.
"""
@staticmethod
def configure_parser(parser):
"""
Configure an argument parser with arguments for this command.
"""
parser.add_argument(
'-u', '--user',
default=os.getenv("USER", None),
required=True,
help="username")
parser.add_argument(
'--include-group',
action='store_true',
help="retrieve ssh keys for everyone in the user's primary group")
def __init__(self, config, args):
"""
Create the command.
"""
self.config = config
self.args = args
def run(self):
"""
Run the command.
"""
# verify the sssd ssh helper is available
if not os.path.exists("/bin/sss_ssh_authorizedkeys"):
print("can't locate sssd ssh helper!", file=sys.stderr)
sys.exit(1)
# determine the users we need to retrieve keys for
users = set([self.args.user])
if self.args.include_group:
try:
# retrieve the user's passwd entry
user_passwd = pwd.getpwnam(self.args.user)
except KeyError as e:
print(
"failed to retrieve user passwd entry: {0}".format(e),
file=sys.stderr)
sys.exit(1)
try:
# retrieve the user's primary group
user_group = grp.getgrgid(user_passwd[3])
except KeyError as e:
print(
"failed to retrieve user's primary group: {0}".format(e),
file=sys.stderr)
sys.exit(1)
# update the list of users
users.update(user_group[3])
# retrieve user keys
for user in users:
try:
# call the SSSD SSH helper script
subprocess.check_call(["/bin/sss_ssh_authorizedkeys", user])
except subprocess.CalledProcessError:
# handle a non-zero exit code
print(
"failed to retrieve keys for user {0}".format(user),
file=sys.stderr)
sys.exit(1)
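# A minimal wiring sketch (not part of this module), assuming the Command base
# class adds nothing beyond the (config, args) constructor used above:
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     AuthorizedKeysCommand.configure_parser(parser)
#     args = parser.parse_args(['--user', 'alice'])
#     AuthorizedKeysCommand(config=None, args=args).run()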
| CtrlC-Root/ssh-ldap-utils | ssh_ldap_utils/authorized_keys.py | Python | mit | 2,487 |
"""
Writes Python egg files.
Supports what's needed for saving and loading components/simulations.
"""
import copy
import os.path
import re
import sys
import zipfile
import pkg_resources
from openmdao.util import eggobserver
__all__ = ('egg_filename', 'write')
# Legal egg strings.
_EGG_NAME_RE = re.compile('[a-zA-Z][_a-zA-Z0-9]*')
_EGG_VERSION_RE = \
re.compile('([a-zA-Z0-9][_a-zA-Z0-9]*)+(\.[_a-zA-Z0-9][_a-zA-Z0-9]*)*')
def egg_filename(name, version):
"""
Returns name for egg file as generated by :mod:`setuptools`.
name: string
Must be alphanumeric.
version: string
Must be alphanumeric.
"""
assert name and isinstance(name, basestring)
match = _EGG_NAME_RE.search(name)
if match is None or match.group() != name:
raise ValueError('Egg name must be alphanumeric')
assert version and isinstance(version, basestring)
match = _EGG_VERSION_RE.search(version)
if match is None or match.group() != version:
raise ValueError('Egg version must be alphanumeric')
name = pkg_resources.to_filename(pkg_resources.safe_name(name))
version = pkg_resources.to_filename(pkg_resources.safe_version(version))
return '%s-%s-py%s.egg' % (name, version, sys.version[:3])
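# Illustrative example (not in the original source): the py-suffix tracks the
# running interpreter, so under Python 2.7 egg_filename('MyComponent', '1.0')
# returns 'MyComponent-1.0-py2.7.egg'.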
def write(name, version, doc, entry_map, src_files, distributions, modules,
dst_dir, logger, observer=None, compress=True):
"""
Write egg in the manner of :mod:`setuptools`, with some differences:
- Writes directly to the zip file, avoiding some intermediate copies.
- Doesn't compile any Python modules.
name: string
Must be an alphanumeric string.
version: string
Must be an alphanumeric string.
doc: string
Used for the `Summary` and `Description` entries in the egg's metadata.
entry_map: dict
A :mod:`pkg_resources` :class:`EntryPoint` map: a dictionary mapping
group names to dictionaries mapping entry point names to
:class:`EntryPoint` objects.
src_files: list
List of non-Python files to include.
distributions: list
List of Distributions this egg depends on. It is used for the `Requires`
entry in the egg's metadata.
modules: list
List of module names not found in a distribution that this egg depends
on. It is used for the `Requires` entry in the egg's metadata and is
also recorded in the 'openmdao_orphans.txt' resource.
dst_dir: string
The directory to write the egg to.
logger: Logger
Used for recording progress, etc.
observer: callable
Will be called via an :class:`EggObserver` intermediary.
Returns the egg's filename.
"""
observer = eggobserver.EggObserver(observer, logger)
egg_name = egg_filename(name, version)
egg_path = os.path.join(dst_dir, egg_name)
distributions = sorted(distributions, key=lambda dist: dist.project_name)
modules = sorted(modules)
sources = []
files = []
size = 0 # Approximate (uncompressed) size. Used to set allowZip64 flag.
# Collect src_files.
for path in src_files:
path = os.path.join(name, path)
files.append(path)
size += os.path.getsize(path)
# Collect Python modules.
for dirpath, dirnames, filenames in os.walk('.', followlinks=True):
dirs = copy.copy(dirnames)
for path in dirs:
if not os.path.exists(os.path.join(dirpath, path, '__init__.py')):
dirnames.remove(path)
for path in filenames:
if path.endswith('.py'):
path = os.path.join(dirpath[2:], path) # Skip leading './'
files.append(path)
size += os.path.getsize(path)
sources.append(path)
# Package info -> EGG-INFO/PKG-INFO
pkg_info = []
pkg_info.append('Metadata-Version: 1.1')
pkg_info.append('Name: %s' % pkg_resources.safe_name(name))
pkg_info.append('Version: %s' % pkg_resources.safe_version(version))
pkg_info.append('Summary: %s' % doc.strip().split('\n')[0])
pkg_info.append('Description: %s' % doc.strip())
pkg_info.append('Author-email: UNKNOWN')
pkg_info.append('License: UNKNOWN')
pkg_info.append('Platform: UNKNOWN')
for dist in distributions:
pkg_info.append('Requires: %s (%s)' % (dist.project_name, dist.version))
for module in modules:
pkg_info.append('Requires: %s' % module)
pkg_info = '\n'.join(pkg_info) + '\n'
sources.append(name + '.egg-info/PKG-INFO')
size += len(pkg_info)
# Dependency links -> EGG-INFO/dependency_links.txt
dependency_links = '\n'
sources.append(name + '.egg-info/dependency_links.txt')
size += len(dependency_links)
# Entry points -> EGG-INFO/entry_points.txt
entry_points = []
for entry_group in sorted(entry_map.keys()):
entry_points.append('[%s]' % entry_group)
for entry_name in sorted(entry_map[entry_group].keys()):
entry_points.append('%s' % entry_map[entry_group][entry_name])
entry_points.append('')
entry_points = '\n'.join(entry_points) + '\n'
sources.append(name + '.egg-info/entry_points.txt')
size += len(entry_points)
# Unsafe -> EGG-INFO/not-zip-safe
not_zip_safe = '\n'
sources.append(name + '.egg-info/not-zip-safe')
size += len(not_zip_safe)
# Requirements -> EGG-INFO/requires.txt
requirements = [str(dist.as_requirement()) for dist in distributions]
requirements = '\n'.join(requirements) + '\n'
sources.append(name + '.egg-info/requires.txt')
size += len(requirements)
# Modules not part of a distribution -> EGG-INFO/openmdao_orphans.txt
orphans = '\n'.join(modules) + '\n'
sources.append(name + '.egg-info/openmdao_orphans.txt')
size += len(orphans)
# Top-level names -> EGG-INFO/top_level.txt
top_level = '%s\n' % name
sources.append(name + '.egg-info/top_level.txt')
size += len(top_level)
# Manifest -> EGG-INFO/SOURCES.txt
sources.append(name + '.egg-info/SOURCES.txt')
sources = '\n'.join(sorted(sources)) + '\n'
size += len(sources)
# Open zipfile.
logger.debug('Creating %s', egg_path)
zip64 = size > zipfile.ZIP64_LIMIT
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
egg = zipfile.ZipFile(egg_path, 'w', compression, zip64)
stats = {'completed_files': 0., 'total_files': float(8 + len(files)),
'completed_bytes': 0., 'total_bytes': float(size)}
# Write egg info.
_write_info(egg, 'PKG-INFO', pkg_info, observer, stats)
_write_info(egg, 'dependency_links.txt', dependency_links, observer, stats)
_write_info(egg, 'entry_points.txt', entry_points, observer, stats)
_write_info(egg, 'not-zip-safe', not_zip_safe, observer, stats)
_write_info(egg, 'requires.txt', requirements, observer, stats)
_write_info(egg, 'openmdao_orphans.txt', orphans, observer, stats)
_write_info(egg, 'top_level.txt', top_level, observer, stats)
_write_info(egg, 'SOURCES.txt', sources, observer, stats)
# Write collected files.
for path in sorted(files):
_write_file(egg, path, observer, stats)
observer.complete(egg_name)
egg.close()
if os.path.getsize(egg_path) > zipfile.ZIP64_LIMIT:
logger.warning('Egg zipfile requires Zip64 support to unzip.')
return egg_name
def _write_info(egg, name, info, observer, stats):
""" Write info string to egg. """
path = os.path.join('EGG-INFO', name)
observer.add(path, stats['completed_files'] / stats['total_files'],
stats['completed_bytes'] / stats['total_bytes'])
egg.writestr(path, info)
stats['completed_files'] += 1
stats['completed_bytes'] += len(info)
def _write_file(egg, path, observer, stats):
""" Write file to egg. """
observer.add(path, stats['completed_files'] / stats['total_files'],
stats['completed_bytes'] / stats['total_bytes'])
egg.write(path)
stats['completed_files'] += 1
stats['completed_bytes'] += os.path.getsize(path)
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/eggwriter.py | Python | mit | 8,098 |
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{prefix}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
'{{first_name_female}}-{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}} {{prefix}} {{last_name}}',
)
formats_male = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{prefix}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
'{{first_name_male}}-{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}} {{prefix}} {{last_name}}',
)
formats = formats_male + formats_female
first_names_male = (
'Adrien',
'Aimé',
'Alain',
'Alexandre',
'Alfred',
'Alphonse',
'André',
'Antoine',
'Arthur',
'Auguste',
'Augustin',
'Benjamin',
'Benoît',
'Bernard',
'Bertrand',
'Charles',
'Christophe',
'Daniel',
'David',
'Denis',
'Édouard',
'Émile',
'Emmanuel',
'Éric',
'Étienne',
'Eugène',
'François',
'Franck',
'Frédéric',
'Gabriel',
'Georges',
'Gérard',
'Gilbert',
'Gilles',
'Grégoire',
'Guillaume',
'Guy',
'William',
'Henri',
'Honoré',
'Hugues',
'Isaac',
'Jacques',
'Jean',
'Jérôme',
'Joseph',
'Jules',
'Julien',
'Laurent',
'Léon',
'Louis',
'Luc',
'Lucas',
'Marc',
'Marcel',
'Martin',
'Matthieu',
'Maurice',
'Michel',
'Nicolas',
'Noël',
'Olivier',
'Patrick',
'Paul',
'Philippe',
'Pierre',
'Raymond',
'Rémy',
'René',
'Richard',
'Robert',
'Roger',
'Roland',
'Sébastien',
'Stéphane',
'Théodore',
'Théophile',
'Thibaut',
'Thibault',
'Thierry',
'Thomas',
'Timothée',
'Tristan',
'Victor',
'Vincent',
'Xavier',
'Yves',
'Zacharie')
first_names_female = (
'Adélaïde',
'Adèle',
'Adrienne',
'Agathe',
'Agnès',
'Aimée',
'Alexandrie',
'Alix',
'Alexandria',
'Alex',
'Alice',
'Amélie',
'Anaïs',
'Anastasie',
'Andrée',
'Anne',
'Anouk',
'Antoinette',
'Arnaude',
'Astrid',
'Audrey',
'Aurélie',
'Aurore',
'Bernadette',
'Brigitte',
'Capucine',
'Caroline',
'Catherine',
'Cécile',
'Céline',
'Célina',
'Chantal',
'Charlotte',
'Christelle',
'Christiane',
'Christine',
'Claire',
'Claudine',
'Clémence',
'Colette',
'Constance',
'Corinne',
'Danielle',
'Denise',
'Diane',
'Dorothée',
'Édith',
'Éléonore',
'Élisabeth',
'Élise',
'Élodie',
'Émilie',
'Emmanuelle',
'Françoise',
'Frédérique',
'Gabrielle',
'Geneviève',
'Hélène',
'Henriette',
'Hortense',
'Inès',
'Isabelle',
'Jacqueline',
'Jeanne',
'Jeannine',
'Joséphine',
'Josette',
'Julie',
'Juliette',
'Laetitia',
'Laure',
'Laurence',
'Lorraine',
'Louise',
'Luce',
'Lucie',
'Lucy',
'Madeleine',
'Manon',
'Marcelle',
'Margaux',
'Margaud',
'Margot',
'Marguerite',
'Margot',
'Margaret',
'Maggie',
'daisy',
'Marianne',
'Marie',
'Marine',
'Marthe',
'Martine',
'Maryse',
'Mathilde',
'Michèle',
'Michelle',
'Michelle',
'Monique',
'Nathalie',
'Nath',
'Nathalie',
'Nicole',
'Noémi',
'Océane',
'Odette',
'Olivie',
'Patricia',
'Paulette',
'Pauline',
'Pénélope',
'Philippine',
'Renée',
'Sabine',
'Simone',
'Sophie',
'Stéphanie',
'Susanne',
'Suzanne',
'Susan',
'Suzanne',
'Sylvie',
'Thérèse',
'Valentine',
'Valérie',
'Véronique',
'Victoire',
'Virginie',
'Zoé',
'Camille',
'Claude',
'Dominique')
first_names = first_names_male + first_names_female
last_names = (
'Martin', 'Bernard', 'Thomas', 'Robert', 'Petit', 'Dubois', 'Richard', 'Garcia', 'Durand', 'Moreau', 'Lefebvre',
'Simon', 'Laurent', 'Michel', 'Leroy', 'Martinez', 'David', 'Fontaine', 'Da Silva', 'Morel', 'Fournier',
'Dupont', 'Bertrand', 'Lambert', 'Rousseau', 'Girard', 'Roux', 'Vincent', 'Lefevre', 'Boyer', 'Lopez', 'Bonnet',
'Andre', 'Francois', 'Mercier', 'Muller', 'Guerin', 'Legrand', 'Sanchez', 'Garnier', 'Chevalier', 'Faure',
'Perez', 'Clement', 'Fernandez', 'Blanc', 'Robin', 'Morin', 'Gauthier', 'Pereira', 'Perrin', 'Roussel', 'Henry',
'Duval', 'Gautier', 'Nicolas', 'Masson', 'Marie', 'Noel', 'Ferreira', 'Lemaire', 'Mathieu', 'Riviere', 'Denis',
'Marchand', 'Rodriguez', 'Dumont', 'Payet', 'Lucas', 'Dufour', 'Dos Santos', 'Joly', 'Blanchard', 'Meunier',
'Rodrigues', 'Caron', 'Gerard', 'Fernandes', 'Brunet', 'Meyer', 'Barbier', 'Leroux', 'Renard', 'Goncalves',
'Gaillard', 'Brun', 'Roy', 'Picard', 'Giraud', 'Roger', 'Schmitt', 'Colin', 'Arnaud', 'Vidal', 'Gonzalez',
'Lemoine', 'Roche', 'Aubert', 'Olivier', 'Leclercq', 'Pierre', 'Philippe', 'Bourgeois', 'Renaud', 'Martins',
'Leclerc', 'Guillaume', 'Lacroix', 'Lecomte', 'Benoit', 'Fabre', 'Carpentier', 'Vasseur', 'Louis', 'Hubert',
'Jean', 'Dumas', 'Rolland', 'Grondin', 'Rey', 'Huet', 'Gomez', 'Dupuis', 'Guillot', 'Berger', 'Moulin',
'Hoarau', 'Menard', 'Deschamps', 'Fleury', 'Adam', 'Boucher', 'Poirier', 'Bertin', 'Charles', 'Aubry',
'Da Costa', 'Royer', 'Dupuy', 'Maillard', 'Paris', 'Baron', 'Lopes', 'Guyot', 'Carre', 'Jacquet', 'Renault',
'Herve', 'Charpentier', 'Klein', 'Cousin', 'Collet', 'Leger', 'Ribeiro', 'Hernandez', 'Bailly', 'Schneider',
'Le Gall', 'Ruiz', 'Langlois', 'Bouvier', 'Gomes', 'Prevost', 'Julien', 'Lebrun', 'Breton', 'Germain', 'Millet',
'Boulanger', 'Remy', 'Le Roux', 'Daniel', 'Marques', 'Maillot', 'Leblanc', 'Le Goff', 'Barre', 'Perrot',
'Leveque', 'Marty', 'Benard', 'Monnier', 'Hamon', 'Pelletier', 'Alves', 'Etienne', 'Marchal', 'Poulain',
'Tessier', 'Lemaitre', 'Guichard', 'Besson', 'Mallet', 'Hoareau', 'Gillet', 'Weber', 'Jacob', 'Collin',
'Chevallier', 'Perrier', 'Michaud', 'Carlier', 'Delaunay', 'Chauvin', 'Alexandre', 'Marechal', 'Antoine',
'Lebon', 'Cordier', 'Lejeune', 'Bouchet', 'Pasquier', 'Legros', 'Delattre', 'Humbert', 'De Oliveira', 'Briand',
'Lamy', 'Launay', 'Gilbert', 'Perret', 'Lesage', 'Gay', 'Nguyen', 'Navarro', 'Besnard', 'Pichon', 'Hebert',
'Cohen', 'Pons', 'Lebreton', 'Sauvage', 'De Sousa', 'Pineau', 'Albert', 'Jacques', 'Pinto', 'Barthelemy',
'Turpin', 'Bigot', 'Lelievre', 'Georges', 'Reynaud', 'Ollivier', 'Martel', 'Voisin', 'Leduc', 'Guillet',
'Vallee', 'Coulon', 'Camus', 'Marin', 'Teixeira', 'Costa', 'Mahe', 'Didier', 'Charrier', 'Gaudin', 'Bodin',
'Guillou', 'Gregoire', 'Gros', 'Blanchet', 'Buisson', 'Blondel', 'Paul', 'Dijoux', 'Barbe', 'Hardy', 'Laine',
'Evrard', 'Laporte', 'Rossi', 'Joubert', 'Regnier', 'Tanguy', 'Gimenez', 'Allard', 'Devaux', 'Morvan', 'Levy',
'Dias', 'Courtois', 'Lenoir', 'Berthelot', 'Pascal', 'Vaillant', 'Guilbert', 'Thibault', 'Moreno', 'Duhamel',
'Colas', 'Masse', 'Baudry', 'Bruneau', 'Verdier', 'Delorme', 'Blin', 'Guillon', 'Mary', 'Coste', 'Pruvost',
'Maury', 'Allain', 'Valentin', 'Godard', 'Joseph', 'Brunel', 'Marion', 'Texier', 'Seguin', 'Raynaud', 'Bourdon',
'Raymond', 'Bonneau', 'Chauvet', 'Maurice', 'Legendre', 'Loiseau', 'Ferrand', 'Toussaint', 'Techer', 'Lombard',
'Lefort', 'Couturier', 'Bousquet', 'Diaz', 'Riou', 'Clerc', 'Weiss', 'Imbert', 'Jourdan', 'Delahaye', 'Gilles',
'Guibert', 'Begue', 'Descamps', 'Delmas', 'Peltier', 'Dupre', 'Chartier', 'Martineau', 'Laroche', 'Leconte',
'Maillet', 'Parent', 'Labbe', 'Potier', 'Bazin', 'Normand', 'Pottier', 'Torres', 'Lagarde', 'Blot', 'Jacquot',
'Lemonnier', 'Grenier', 'Rocher', 'Bonnin', 'Boutin', 'Fischer', 'Munoz', 'Neveu', 'Lacombe', 'Mendes',
'Delannoy', 'Auger', 'Wagner', 'Fouquet', 'Mace', 'Ramos', 'Pages', 'Petitjean', 'Chauveau', 'Foucher', 'Peron',
'Guyon', 'Gallet', 'Rousset', 'Traore', 'Bernier', 'Vallet', 'Letellier', 'Bouvet', 'Hamel', 'Chretien',
'Faivre', 'Boulay', 'Thierry', 'Samson', 'Ledoux', 'Salmon', 'Gosselin', 'Lecoq', 'Pires', 'Leleu', 'Becker',
'Diallo', 'Merle', 'Valette',
)
prefixes = ('de', 'de la', 'Le', 'du')
| danhuss/faker | faker/providers/person/fr_FR/__init__.py | Python | mit | 10,085 |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def BubbleChart():
from ..bubble_chart import BubbleChart
return BubbleChart
class TestBubbleChart:
def test_ctor(self, BubbleChart):
bubble_chart = BubbleChart()
xml = tostring(bubble_chart.to_tree())
expected = """
<bubbleChart>
<axId val="10" />
<axId val="20" />
</bubbleChart>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, BubbleChart):
src = """
<bubbleChart>
<axId val="10" />
<axId val="20" />
</bubbleChart>
"""
node = fromstring(src)
bubble_chart = BubbleChart.from_tree(node)
assert dict(bubble_chart) == {}
| cgimenop/Excel2Testlink | ExcelParser/lib/openpyxl/chart/tests/test_bubble_chart.py | Python | mit | 952 |
"""
This module contains a helper to extract various kinds of primitive data types
from a dictionary of strings.
"""
class StringDictHelper:
"""
Helper class to extract primitive types from a dictionary of strings. This is a port
of Java robotutils class StringmapHelper. The special values 'true' and 'false' (in
any combinations of case) represent boolean True and False. This MUST NOT be changed
as it is part of the 'structured message' used in the robotcomm protocol and in
in configuration and logging - across multiple languages.
"""
def __init__(self, dct):
"""Constructs a helper for the given dict {dct}"""
self._dct = dct
def get_as_str(self, key, default, pattern=None):
"""
Returns a string - either parsed from map of {key} or {defaultValue}.
param key -- key to lookup.
default -- default value to use if the key did not exist, the value was not
parseable or did not match {pattern}. This value does not need
match {pattern}.
pattern -- [If not None] Regex.Pattern object representing valid strings to accept.
"""
ret = str(self._dct.get(key, default))
if pattern:
return ret if pattern.fullmatch(ret) else default
return ret
def get_as_bool(self, key, default):
"""
Returns a bool - either parsed from map of {key} or {default}.
key -- key to lookup.
default -- default value to use if the key did not exist or the value was not
parseable.
"""
val = self._dct.get(key)
ret = default
if val:
val = val.lower()
if val == 'true':
ret = True
elif val == 'false':
ret = False
return ret
def get_as_num(self, key, default, minval=None, maxval=None):
"""
Returns a number - either parsed from map of {key} or {default}.
key -- key to lookup.
        default -- default value to use if the key did not exist, the value was not
parseable or out of bounds. This value does not need to be between
{minval} and {maxval}.
NOTE: The *type* of this default value is used to
determine the type of return value. So, if a floating point value is expected,
specify a float default value!
[minval] -- Optional inclusive minimum to accept.
[maxval] -- Optional inclusive (not exclusive) maximum to accept.
"""
val = self._dct.get(key)
ret = default
if val:
try:
# Below we extract type (int or float or ??) and use it to construct the result!
type_ = type(default)
ret1 = type_(val)
valid = (minval is None or ret1 >= minval) and (maxval is None or ret1 <= maxval)
ret = ret1 if valid else default
except ValueError:
ret = default
return ret
if __name__ == '__main__':
D = dict(a='abc', b='true', c=42, d=1.5)
H = StringDictHelper(D)
AV = H.get_as_str('a', '')
BV = H.get_as_bool('b', False)
CV = H.get_as_num('c', 100)
DV = H.get_as_num('d', 0.0)
assert AV == 'abc'
assert BV is True
assert CV == 42
assert abs(DV-1.5) < 1E-10
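    # Additional illustrative checks (not in the original demo): out-of-range
    # numbers and non-boolean strings fall back to the supplied default.
    assert H.get_as_num('c', 100, minval=0, maxval=10) == 100
    assert H.get_as_bool('a', True) is True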
print("StringDictHelper seems to work")
| josephmjoy/robotics | python_robotutils/robotutils/strmap_helper.py | Python | mit | 3,444 |
from pathlib import Path
import click
from git import Repo
from git.exc import InvalidGitRepositoryError
from logger import logger
from config import config
from utils import PairedObject, PairedProject, get_current_project, PathType
@click.group()
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose messaging')
@click.pass_context
def et(ctx: click.core.Context, verbose: bool):
"""
Primary top-level group command.
Calling directly with no parameters will display help.
"""
ctx.obj = {}
ctx.obj['verbose'] = verbose
@et.command('init', short_help='Initialize a new Env Tracker repository')
@click.argument('directory', default='.',
type=PathType(exists=True, file_okay=False, dir_okay=True, resolve_path=True, allow_dash=False))
@click.option('-n', '--name', type=click.STRING)
def cmd_init(directory: Path, name: str):
"""
Create an empty Git repository that points to an existing repository
"""
try:
existing_project = PairedProject.from_path(directory)
except Exception:
logger.debug('No existing project found - continuing')
else:
raise click.BadParameter(
f'Conflict: specified directory is already linked to {str(existing_project.child_dir)}',
param_hint='DIRECTORY')
## Validate parameters and set defaults
try:
repo = Repo(directory, search_parent_directories=False)
except InvalidGitRepositoryError:
try:
# Check if the directory is a subdir of a git repo and suggest that
repo = Repo(directory, search_parent_directories=True)
except InvalidGitRepositoryError:
raise click.BadParameter('Not a git repository.', param_hint=['directory'])
else:
raise click.BadParameter(f'Not a git repository. Did you mean this?\n\n\t{repo.working_dir}',
param_hint=['directory'])
parent_path = Path(repo.working_dir)
if name:
# names must not contain OS path delimiters
if Path(name).name != name:
raise click.BadParameter('Must not contain path delimiter, e.g. "/" or "\\"', param_hint=['name'])
else:
name = parent_path.name
child_path: Path = Path(config.ET_HOME) / name
to_parent_symlink: Path = child_path / config.PARENT_SYMLINK_NAME
## Attempt to create the child directory
try:
child_path.mkdir(parents=True)
except FileExistsError:
if to_parent_symlink.exists():
raise click.BadParameter(
f'Path "{child_path}" already exists and links to: "{to_parent_symlink.resolve()}"',
param_hint=['name'])
else:
raise click.BadParameter(f'Path "{child_path}" already exists', param_hint=['name'])
## Initialize the child repo
repo = Repo.init(child_path)
to_parent_symlink.symlink_to(parent_path)
repo.index.add([config.PARENT_SYMLINK_NAME])
repo.index.commit('Link project to parent directory')
click.echo(f'Installed new project "{name}", linking "{child_path}" -> "{parent_path}"')
@et.command('link', short_help='Link a file or directory')
@click.argument('file', type=PathType(exists=True, file_okay=True, dir_okay=True, allow_dash=False, writable=True,
readable=True, resolve_path=False))
def cmd_link(file: Path):
"""
Tracks a file in the parent repo.
Moves the specified file to the child repository and symlinks the file back to
its original location.
Validations:
- path exists
- path exists under the parent dir
- path does not exist under the child dir
- parent path is not a symlink
"""
# We check for symlink here because we resolve the file path to init the project
obj_pair = PairedObject.from_path(file)
if obj_pair.is_linked:
raise click.BadParameter(f'Path "{obj_pair.relative_path}" is already linked')
if obj_pair.parent_path.is_symlink():
raise click.BadParameter(f'Path "{file}" is already a symlink', param_hint=['file'])
if not obj_pair.working_from_parent:
raise click.BadParameter(f'Path "{file}" not found under "{obj_pair.project.parent_dir}"',
param_hint=['file'])
if obj_pair.child_path.exists():
raise click.BadParameter(f'Destination path "{obj_pair.child_path}" already exists', param_hint=['file'])
obj_pair.link()
# commit the new file
child_repo = obj_pair.project.child_repo
child_repo.index.add([str(obj_pair.relative_path)])
child_repo.index.commit(f'Initialize tracking for "{obj_pair.relative_path}"')
@et.command('unlink', short_help='Stop tracking a file or directory')
@click.argument('file', type=PathType(exists=True, file_okay=True, dir_okay=True, allow_dash=False, writable=True,
readable=True, resolve_path=False))
def cmd_unlink(file: Path):
"""
Unlinks a tracked file by reverting the changes made by the `link` command
TODO: add an `--all` option to unlink all objects
"""
## Validate parameters and set defaults
obj_pair = PairedObject.from_path(file)
if not obj_pair.is_linked:
raise click.BadParameter('File is not linked', param_hint=['file'])
## Unlink files
obj_pair.unlink()
## Commit changes
child_repo = obj_pair.project.child_repo
child_repo.index.remove([str(obj_pair.relative_path)])
child_repo.index.commit(f'Stop tracking for "{obj_pair.relative_path}"')
@et.command('status', short_help='`git status` on the linked repository')
def cmd_status():
proj = get_current_project()
g = proj.child_repo.git
click.echo(click.style(f'Showing git status for "{proj.child_dir}"', fg='red'))
click.echo()
click.echo(g.status())
@et.command('other', short_help='Output the linked repository directory')
def cmd_other():
"""
Writes the linked directory of the current location to stdout
Example usage:
cd `et other` - changes directory back and forth between linked repositories
"""
proj = get_current_project()
other_dir = proj.child_dir if proj.working_from_parent else proj.parent_dir
click.echo(other_dir)
@et.command('commit', short_help='Commit all changes to the linked directory')
@click.option('-m', '--message', type=click.STRING, default='Saving changes')
def cmd_commit(message):
"""
Commits all changes to the linked repository using `git add -u`
"""
proj = get_current_project()
proj.child_repo.git.add(update=True) # git add -u
proj.child_repo.index.commit(message)
| pnw/env-tracker | main.py | Python | mit | 6,662 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'Common file utilities'
import os
__author__ = 'Andrew Wen'
def file_extension(filename):
"""
    Get the file extension (suffix) of a filename
    :param filename:
:return:
"""
    #return os.path.splitext(filename)[1]
return filename.rsplit('.', 1)[1].lower() | wenanguo/crm | code/pyfw/util/CommonFileUtil.py | Python | mit | 293 |
import datetime
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import ListView
from django.views.generic.base import View
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from feincms.module.mixins import ContentView
from djapps.blogs.models import Blog, Post
class PostPermalinkView(SingleObjectMixin, View):
model = Post
def get_queryset(self):
return self.model.objects.active()
def get(self, request, *args, **kwargs):
post = self.get_object()
return HttpResponseRedirect(
post.get_pretty_url()
)
class PostView(ContentView):
model = Post
context_object_name = 'post'
def get_queryset(self):
return self.model.objects.active().filter(**self.kwargs)
def get_context_data(self, *args, **kwargs):
data = super(PostView, self).get_context_data(*args, **kwargs)
self.request._blogs_current_blog = self.object.blog
self.request._blogs_current_date = self.object.date
self.request._blogs_current_year = int(self.kwargs.get('date__year', 0))
self.request._blogs_current_month = int(self.kwargs.get('date__month', 0))
self.request._blogs_current_day = int(self.kwargs.get('date__day', 0))
return data
def get_object(self):
queryset = self.get_queryset()
obj = queryset.first()
if obj is None:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
class PostListView(ListView):
model = Post
paginate_by = 15
def dispatch(self, *args, **kwargs):
try:
self.range_start = datetime.date(
year=int(self.kwargs.get("date__year", 1)),
month=int(self.kwargs.get("date__month", 1)),
day=int(self.kwargs.get("date__day", 1)),
)
except ValueError:
raise Http404(_("Invalid date"))
return super(PostListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return self.model.objects.active().filter(
**dict(
(k, v) for k,v in self.kwargs.items()
)
)
def get_context_data(self, *args, **kwargs):
data = super(PostListView, self).get_context_data(*args, **kwargs)
data["range_start"] = self.range_start
data["year"] = self.kwargs.get('date__year', None)
data["month"] = self.kwargs.get('date__month', None)
data["day"] = self.kwargs.get('date__day', None)
blogslug = self.kwargs.get('blog__slug', False)
if blogslug:
blog = Blog.objects.filter(slug=blogslug).first()
if blog is None:
raise Http404(_("Blog not found"))
data["blog"] = blog
self.request._blogs_current_blog = blog
self.request._blogs_current_date = self.range_start
self.request._blogs_current_year = int(self.kwargs.get('date__year', 0))
self.request._blogs_current_month = int(self.kwargs.get('date__month', 0))
self.request._blogs_current_day = int(self.kwargs.get('date__day', 0))
return data
| fah-designs/feincms-blogs | blogs/views.py | Python | mit | 3,343 |
"""
We have two special characters. The first character can be represented by one bit 0. The second character can be represented by two bits (10 or 11).
Now given a string represented by several bits. Return whether the last character must be a one-bit character or not. The given string will always end with a zero.
Example 1:
Input:
bits = [1, 0, 0]
Output: True
Explanation:
The only way to decode it is two-bit character and one-bit character. So the last character is one-bit character.
Example 2:
Input:
bits = [1, 1, 1, 0]
Output: False
Explanation:
The only way to decode it is two-bit character and two-bit character. So the last character is NOT one-bit character.
Note:
1 <= len(bits) <= 1000.
bits[i] is always 0 or 1.
"""
class Solution(object):
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
skip_next, curr = False, None
for i in bits:
if skip_next:
skip_next = False
curr = 2
continue
if i == 1:
skip_next = True
curr = 2
else:
skip_next = False
curr = 1
return curr == 1
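# An alternative index-jumping sketch (not part of the original solution set):
# hop one position for a 0 bit and two for a 1 bit; the last character is
# one-bit exactly when the walk stops on the final index.
def is_one_bit_character_jump(bits):
    i = 0
    while i < len(bits) - 1:
        i += bits[i] + 1
    return i == len(bits) - 1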
| franklingu/leetcode-solutions | questions/1-bit-and-2-bit-characters/Solution.py | Python | mit | 1,231 |
"""
XML handler for element id condition
"""
# Standard library modules.
# Third party modules.
# Local modules.
from pyhmsa.spec.condition.elementalid import ElementalID, ElementalIDXray
from pyhmsa.fileformat.xmlhandler.condition.condition import _ConditionXMLHandler
# Globals and constants variables.
class ElementalIDXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(ElementalID, version)
def convert(self, obj):
element = super().convert(obj)
element.find('Element').set('Symbol', obj.symbol) # manually add symbol
return element
class ElementalIDXrayXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(ElementalIDXray, version)
def convert(self, obj):
element = super().convert(obj)
element.find('Element').set('Symbol', obj.symbol) # manually add symbol
return element
| pyhmsa/pyhmsa | pyhmsa/fileformat/xmlhandler/condition/elementalid.py | Python | mit | 925 |
from gi.repository import GObject, Gtk, Gtranslator, PeasGtk
from panel import Panel
import sys
from gettext import gettext as _
class TrobadorPlugin(GObject.Object, Gtranslator.TabActivatable, PeasGtk.Configurable):
__gtype_name__ = "TrobadorPlugin"
tab = GObject.property(type=Gtranslator.Tab)
handler_id = None
project = ''
version = ''
host = ''
default_host = 'trobador.trasno.net'
check = Gtk.CheckButton()
project_entry = Gtk.Entry()
version_entry = Gtk.Entry()
host_entry = Gtk.Entry()
save_button = Gtk.Button(label="Save")
save_host_button = Gtk.Button(label="Save")
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
self.window = self.tab.get_toplevel()
self.create_panel()
TrobadorPlugin.host = self.default_host
self.tab.add_widget(self.panel, "GtrTrobador", _("Trobador"), "results panel", Gtranslator.TabPlacement.RIGHT)
def do_deactivate(self):
print "Removing..."
self.tab.remove_widget(self.panel)
self.tab.disconnect(self.handler_id)
def do_update_state(self):
pass
def check_checkButton_state(self, check):
if self.check.get_active():
print "activate"
self.project_entry.set_editable(True)
self.version_entry.set_editable(True)
self.save_button.set_sensitive(True)
else:
print "deactivate"
self.project_entry.set_text("")
self.version_entry.set_text("")
self.project_entry.set_editable(False)
self.version_entry.set_editable(False)
self.save_button.set_sensitive(False)
TrobadorPlugin.project = ''
TrobadorPlugin.version = ''
def do_create_configure_widget(self):
table = Gtk.Table(8, 2, True)
if not self.check.get_active():
self.project_entry.set_editable(False)
self.version_entry.set_editable(False)
self.save_button.set_sensitive(False)
        #self.check = Gtk.CheckButton("Select project & version")
self.check.set_label("Select project & version")
self.check.set_border_width(6)
self.check.connect("clicked", self.check_checkButton_state)
project_label = Gtk.Label("Project")
#self.proyectoEntry = Gtk.Entry()
self.project_entry.set_text(TrobadorPlugin.project)
version_label = Gtk.Label("Version")
#self.version_entry = Gtk.Entry()
self.version_entry.set_text(TrobadorPlugin.version)
        #save_button = Gtk.Button(label="Save")
self.save_button.set_label("Save")
self.save_host_button.set_label("Save")
hostLabel = Gtk.Label("Host")
if self.host == '':
self.host_entry.set_text(TrobadorPlugin.default_host)
else:
self.host_entry.set_text(TrobadorPlugin.host)
info_label1 = Gtk.Label("Project settings")
info_label2 = Gtk.Label("Host settings")
table.attach(info_label1, 0, 2, 0, 1)
table.attach(self.check, 0, 2, 1, 2)
table.attach(project_label, 0, 1, 2, 3)
table.attach(self.project_entry, 1, 2, 2, 3)
table.attach(version_label, 0, 1, 3, 4)
table.attach(self.version_entry, 1, 2, 3, 4)
table.attach(self.save_button, 0, 1, 4, 5)
table.attach(info_label2, 0, 2, 5, 6)
table.attach(hostLabel, 0, 1, 6, 7)
table.attach(self.host_entry, 1, 2, 6, 7)
table.attach(self.save_host_button, 0, 1, 7, 8)
self.save_button.connect("clicked", self.save_config, self.project_entry.get_text(), self.version_entry.get_text())
self.save_host_button.connect("clicked", self.save_host_config, self.host_entry.get_text())
return table
def save_config(self, save_button, project, version):
TrobadorPlugin.project = self.project_entry.get_text()
TrobadorPlugin.version = self.version_entry.get_text()
def save_host_config(self, save_host_button, host):
if self.host_entry.get_text() != '':
TrobadorPlugin.host = self.host_entry.get_text()
else:
TrobadorPlugin.host = self.default_host
self.host_entry.set_text(TrobadorPlugin.host)
def create_panel(self):
self.panel = Panel()
self.panel.set_host(TrobadorPlugin.default_host)
tree = self.panel.get_tree()
tree.connect("row-activated", self.set_buffer)
self.handler_id = self.tab.connect("showed-message", self.get_translation_unit)
self.panel.show()
def set_buffer(self, tree, row, col):
iterator = self.panel.get_iterator()
# l = tree.get_model()
#rootiter = l.get_iter_first()
selection, iterator = tree.get_selection().get_selected()
if iterator != None:
view = self.window.get_active_view()
if not view or not view.get_editable():
return "no editable"
document = view.get_buffer()
document.begin_user_action()
iters = document.get_selection_bounds()
if iters:
document.delete_interactive(iters[0], iters[1], view.get_editable())
document.insert_interactive_at_cursor(selection.get_value(iterator, 0), -1, view.get_editable())
document.end_user_action()
def get_translation_unit(self, tab, msg):
po_file = GObject.property(type=Gtranslator.Po)
po_file = self.tab.get_po()
print msg.get_msgid()
msg = po_file.get_current_message()
c = msg[0].get_msgid()
self.panel.set_translation_unit(c)
self.panel.set_project(self.project)
self.panel.set_version(self.version)
self.panel.set_host(self.host)
print "hola: " + self.panel.get_host()
# Updating the results
self.panel.update_data()
# ex:et:ts=4:
| tomasVega/trobador | gtranslator-plugin/trobador.py | Python | mit | 5,980 |
from helper import greeting
if "__name__" == "__main__":
greeting('hello') | jay4ek/cs3240-labdemo | hello.py | Python | mit | 75 |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise NotImplementedError
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
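# Worked example of the recycling arithmetic above (illustrative): for
# colors = ['b', 'g', 'r'] and num_colors = 5, multiple = 5 // 3 - 1 = 0 and
# mod = 5 % 3 = 2, so the cycle extends to ['b', 'g', 'r', 'b', 'g'].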
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
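# Illustrative usage (not part of the original module): the ``use`` context
# manager above temporarily overrides an option and restores it afterwards.
#
#     with plot_params.use('x_compat', True):
#         ...  # code here sees plot_params['xaxis.compat'] == True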
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel('')
ax.set_ylabel('')
_label_axis(ax, kind='x', label=b, position='bottom', rotate=True)
_label_axis(ax, kind='y', label=a, position='left')
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
for ax in axes.flat:
setp(ax.get_xticklabels(), fontsize=8)
setp(ax.get_yticklabels(), fontsize=8)
return axes
def _label_axis(ax, kind='x', label='', position='top',
ticks=True, rotate=False):
from matplotlib.artist import setp
if kind == 'x':
ax.set_xlabel(label, visible=True)
ax.xaxis.set_visible(True)
ax.xaxis.set_ticks_position(position)
ax.xaxis.set_label_position(position)
if rotate:
setp(ax.get_xticklabels(), rotation=90)
elif kind == 'y':
ax.yaxis.set_visible(True)
ax.set_ylabel(label, visible=True)
# ax.set_ylabel(a)
ax.yaxis.set_ticks_position(position)
ax.yaxis.set_label_position(position)
return
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
    Parameters
    ----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
    Returns
    -------
ax: Matplotlib axis object
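    Examples
    --------
    Illustrative only (mirrors the parallel_coordinates example further
    below; the URL and column name are taken from that docstring):

    >>> from pandas import read_csv
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
    >>> radviz(df, 'Name')
    >>> plt.show()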
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
    Parameters
    ----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
    Returns
    -------
ax: Matplotlib axis object
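    Examples
    --------
    Illustrative only (uses the same iris CSV as the parallel_coordinates
    example further below):

    >>> from pandas import read_csv
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
    >>> andrews_curves(df, 'Name')
    >>> plt.show()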
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
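    # Each data row (x1, x2, x3, ...) is mapped to the finite Fourier series
    #   f(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + x5*cos(2t) + ...
    # which is evaluated below on `samples` points spanning [-pi, pi).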
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
    Parameters
    ----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
    Returns
    -------
fig: matplotlib figure
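    Examples
    --------
    Illustrative sketch (random data; all names are arbitrary):

    >>> import numpy as np
    >>> from pandas import Series
    >>> s = Series(np.random.uniform(size=100))
    >>> fig = bootstrap_plot(s, size=50, samples=500)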
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
    Parameters
    ----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
    Returns
    -------
ax: Matplotlib axis object
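    Examples
    --------
    Illustrative sketch (a noisy sine series; names are arbitrary):

    >>> import numpy as np
    >>> from pandas import Series
    >>> s = Series(np.sin(np.linspace(-5, 5, 200)) + 0.1 * np.random.randn(200))
    >>> ax = lag_plot(s, lag=1)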
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
    Parameters
    ----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
    Returns
    -------
ax: Matplotlib axis object
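    Examples
    --------
    Illustrative sketch (a pure-noise series should stay inside the grey
    confidence bands):

    >>> import numpy as np
    >>> from pandas import Series
    >>> s = Series(np.random.randn(500))
    >>> ax = autocorrelation_plot(s)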
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
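    # z95 / z99 above are the two-sided 95% / 99% quantiles of the standard
    # normal distribution; the horizontal lines below mark the approximate
    # +/- z / sqrt(n) confidence bands around zero autocorrelation.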
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
    data : Series or DataFrame
        The data to be plotted
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else True
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
# need only a single match
if re.match('^[a-z]+?', self.style) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
from pandas.core.frame import DataFrame
if isinstance(data, (Series, np.ndarray, Index)):
if keep_index is True:
yield self.label, data
else:
yield self.label, np.asarray(data)
elif isinstance(data, DataFrame):
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
# # is this right?
# empty = df[col].count() == 0
# values = df[col].values if not empty else np.zeros(len(df))
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _maybe_right_yaxis(self, ax):
if hasattr(ax, 'right_ax'):
return ax.right_ax
else:
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
new_ax.right_ax = new_ax
if len(orig_ax.get_lines()) == 0: # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
        if self.logx or self.loglog:
            for a in axes:
                a.set_xscale('log')
        if self.logy or self.loglog:
            for a in axes:
                a.set_yscale('log')
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
return self.axes[0]
def _compute_plot_data(self):
numeric_data = self.data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise NotImplementedError
def _add_table(self):
if self.table is False:
return
elif self.table is True:
from pandas.core.frame import DataFrame
if isinstance(self.data, Series):
data = DataFrame(self.data, columns=[self.data.name])
elif isinstance(self.data, DataFrame):
data = self.data
data = data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
            _handle_shared_axes(all_axes, len(all_axes), len(all_axes),
                                nrows, ncols, self.sharex, self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if hasattr(self.data, 'columns'):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
else:
return None
def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
            if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
                if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'right_ax', None) or
getattr(ax, 'left_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
if self.on_right(i):
ax = self._maybe_right_yaxis(ax)
self.axes[i] = ax
else:
ax = self.axes[0]
if self.on_right(i):
ax = self._maybe_right_yaxis(ax)
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if sec_true or all_sec:
self.axes[0] = ax
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
from pandas.core.frame import DataFrame
if isinstance(self.secondary_y, bool):
return self.secondary_y
if (isinstance(self.data, DataFrame) and
isinstance(self.secondary_y, (tuple, list, np.ndarray, Index))):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
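        # Illustrative shapes (example numbers are arbitrary): for 3 plotted
        # series of length 10, a scalar is tiled to (3, 10); a Series or a
        # column name is label-matched and then tiled to (3, 10); a 3-D array
        # must already have shape (3, 2, 10) for asymmetrical bars.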
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
        mpl_ge_1_3_1 = (LooseVersion(str(mpl.__version__)) >=
                        LooseVersion('1.3.1'))
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
self._add_legend_handle(scatter, label)
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
from pandas.core.frame import DataFrame
if isinstance(self.data, (Series, DataFrame)):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
from pandas.core.frame import DataFrame
if (isinstance(self.data, (Series, DataFrame))
and isinstance(self.data.index, DatetimeIndex)):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
                raise ValueError('When stacked is True, each column must be '
                                 'either all positive or all negative. '
                                 '{0} contains both positive and negative '
                                 'values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
            # column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
            # tsplot resample may change data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
from pandas.core.frame import DataFrame
if (isinstance(data.index, DatetimeIndex) and
isinstance(data, DataFrame)):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
data = DataFrame(data.values,
index=data.index.to_period(freq=freq),
columns=data.columns)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
                if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
        self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
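    # Worked example for the offsets above (illustrative): with two series,
    # width=0.5, position=0.5 and align='center' (neither stacked nor
    # subplots), each series gets w = 0.25-wide bars, tickoffset = 0.25 and
    # lim_offset = 0, so the two bars of each group straddle the integer
    # tick positions.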
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
                return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
        elif self.kind == 'barh':
            def f(ax, x, y, w, start=None, log=self.log, **kwds):
                start = start + self.left
                # pass `log` through so barh honours log scaling as well
                return ax.barh(x, y, w, left=start, log=log, **kwds)
else:
raise NotImplementedError
return f
def _make_plot(self):
import matplotlib as mpl
# mpl decided to make their version string unicode across all Python
# versions for mpl >= 1.3 so we have to call str here for python 2
        mpl_le_1_2_1 = (LooseVersion(str(mpl.__version__)) <=
                        LooseVersion('1.2.1'))
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log:
start = 1
if any(y < 1):
# GH3254
start = 0 if mpl_le_1_2_1 else None
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if not self.log: # GH3254+
ax.axhline(0, color='k', linestyle='--')
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
ax.axvline(0, color='k', linestyle='--')
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = np.ravel(self.data.values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Degree')
else:
for ax in self.axes:
ax.set_ylabel('Degree')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
            # Disable label ax sharing. Otherwise, all subplots show the last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid key '{0}' "
"The key must be either {1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
            # flier color isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [com.pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=0, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, y in self._iter_data()]
labels = [com.pprint_thing(l) for l in labels]
if not self.use_index:
labels = [com.pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _post_plot_logic(self):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
        raise ValueError('Invalid chart type given: %s' % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
            raise ValueError("plot kind %r can only be used for "
                             "DataFrame input" % kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if com.is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = x if x is not None else data.index.name
label = kwds.pop('label', label)
series = data[y].copy() # Don't modify
series.index.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
com.is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True
In case subplots=True, share x axis
sharey : boolean, default False
In case subplots=True, share y axis
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
klass_coord=df_coord, klass_ax=df_ax,
klass_unique=df_unique, klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
layout : tuple (optional)
(rows, columns) for the layout of the plot
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
subplots=False, sharex=True, sharey=False, layout=None, # Dataframe unique
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, # Dataframe unique
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
"""
    If no axes object is specified, check whether there are existing figures.
    If there are no existing figures, _gca() will
    create a figure with the default figsize, causing the figsize parameter to
be ignored.
"""
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = getattr(ax, 'left_ax', ax)
# is there harm in this?
if label is None:
label = data.name
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
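# Hedged illustration (not part of the original source): a minimal sketch of how
# the module-level plot_frame/plot_series entry points are reached through the
# public DataFrame.plot / Series.plot API. The data and names are made up.
def _example_plot_entry_points():
    import numpy as np
    from pandas import DataFrame, Series
    df = DataFrame(np.random.randn(20, 2), columns=['a', 'b'])
    # DataFrame.plot dispatches to plot_frame defined above
    axes = df.plot(kind='line', subplots=True, sharex=True)
    # Series.plot dispatches to plot_series; label defaults to the series name
    ax = Series(np.random.randn(20), name='s').plot(kind='bar')
    return axes, ax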
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {'axes', 'dict', 'both'}, default 'dict'
The kind of object to return. 'dict' returns a dictionary
whose values are the matplotlib Lines of the boxplot;
'axes' returns the matplotlib axes the boxplot is drawn on;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a dict mapping columns to ``return_type``
is returned.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [com.pprint_thing(x) for x in keys]
values = [remove_na(v) for v in values]
bp = ax.boxplot(values, **kwds)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
else:
ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if return_type is None:
msg = ("\nThe default value for 'return_type' will change to "
"'axes' in a future release.\n To use the future behavior "
"now, set return_type='axes'.\n To keep the previous "
"behavior and silence this warning, set "
"return_type='dict'.")
warnings.warn(msg, FutureWarning)
return_type = 'dict'
if ax is None:
ax = _gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
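# Hedged illustration (not part of the original source): typical calls into the
# boxplot helper via the public API. Column and group names are made up.
def _example_boxplot_usage():
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'grp': list('aabb'), 'val': np.random.randn(4)})
    # Grouped: one subplot per column, one box per 'grp' value
    df.boxplot(column='val', by='grp')
    # Ungrouped with an explicit return_type avoids the FutureWarning above
    return df.boxplot(column='val', return_type='axes')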
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwargs.setdefault('c', plt.rcParams['patch.facecolor'])
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
ax.grid(grid)
return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : bool, if True, the X axis will be shared amongst all subplots.
sharey : bool, if True, the Y axis will be shared amongst all subplots.
figsize : tuple
The size of the figure to create in inches by default
layout: (optional) a tuple (rows, columns) for the layout of the histograms
bins: integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,
sharex=sharex, sharey=sharey, layout=layout, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com._try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
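# Hedged illustration (not part of the original source): exercising hist_frame
# through the public DataFrame.hist API. The frame below is a made-up example.
def _example_hist_frame():
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'a': np.random.randn(100), 'b': np.random.randn(100)})
    # One panel per numeric column, 20 bins each, laid out on one row
    return df.hist(bins=20, layout=(1, 2), grid=False)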
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None, bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
bins: integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
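# Note: grouped_hist is the engine behind DataFrame.hist / Series.hist when
# by= is given -- hist_frame and hist_series above forward here, and one
# histogram panel is drawn per group key.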
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
    dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case subplots=False
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True, figsize=figsize,
layout=layout)
axes = _flatten(axes)
ret = compat.OrderedDict()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(com.pprint_thing(key))
ret[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
else:
from pandas.tools.merge import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
warnings.warn("figsize='default' is deprecated. Specify figure"
"size by tuple instead", FutureWarning)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(com.pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None, return_type=None,
**kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns - by
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
result = compat.OrderedDict()
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(com.pprint_thing(by))
result[col] = re_plotf
ax.grid(grid)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
def table(ax, data, rowLabels=None, colLabels=None,
**kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
`kwargs`: keywords, optional
keyword arguments which passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column name will be used.
Returns
-------
matplotlib table object
"""
from pandas import DataFrame
if isinstance(data, Series):
data = DataFrame(data, columns=[data.name])
elif isinstance(data, DataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels, colLabels=colLabels, **kwargs)
return table
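# Hedged illustration (not part of the original source): rendering a small
# DataFrame as a matplotlib table on an axes cleared of ticks. Data is made up.
def _example_table():
    import matplotlib.pyplot as plt
    from pandas import DataFrame
    fig, ax = plt.subplots()
    ax.axis('off')
    # Extra keywords such as loc are forwarded to matplotlib.table.table
    return table(ax, DataFrame({'a': [1, 2], 'b': [3, 4]}), loc='center')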
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
        if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of %sx%s must be larger than required size %s' %
(nrows, ncols, nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
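# Worked example: with layout=None and layout_type='box', nplots=3 maps to
# (2, 2) via the lookup table, while nplots=5 falls through to the search
# loop -- k grows to 3, (k - 1) * k = 6 >= 5, so the grid is (3, 2). With an
# explicit layout=(-1, 2) and nplots=5, the -1 is filled in as ceil(5 / 2.) = 3.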
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
        Number of required axes. Axes beyond this count are hidden. Default is nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
          array of Axis objects.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
    layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if com.is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey are ignored."
"These settings must be specified when creating axes", UserWarning)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the same as "
"the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing the passed axes "
"is being cleared", UserWarning)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
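# Note: unlike plt.subplots, _subplots accepts naxes and hides surplus axes --
# e.g. _subplots(naxes=3, layout=(2, 2)) builds a 2x2 grid with the 4th axis
# invisible, while passing a list of exactly naxes existing axes returns them
# as-is (flattened).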
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if sharex and nrows > 1:
            for ax in axarr[:naxes][:-ncols]:  # hide x labels on all but the bottom row
for label in ax.get_xticklabels():
label.set_visible(False)
try:
# set_visible will not be effective if
                    # minor axis has NullLocator and NullFormatter (default)
import matplotlib.ticker as ticker
if isinstance(ax.xaxis.get_minor_locator(), ticker.NullLocator):
ax.xaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.xaxis.get_minor_formatter(), ticker.NullFormatter):
ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_xticklabels(minor=True):
label.set_visible(False)
except Exception: # pragma no cover
pass
ax.xaxis.get_label().set_visible(False)
if sharey and ncols > 1:
for i, ax in enumerate(axarr):
                if (i % ncols) != 0:  # hide y labels on all but the first column
for label in ax.get_yticklabels():
label.set_visible(False)
try:
import matplotlib.ticker as ticker
if isinstance(ax.yaxis.get_minor_locator(), ticker.NullLocator):
ax.yaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.yaxis.get_minor_formatter(), ticker.NullFormatter):
ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_yticklabels(minor=True):
label.set_visible(False)
except Exception: # pragma no cover
pass
ax.yaxis.get_label().set_visible(False)
def _flatten(axes):
if not com.is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, Index)):
return axes.ravel()
return np.array(axes)
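# Note: _flatten normalizes the many shapes callers hold -- a bare Axes becomes
# np.array([ax]), an ndarray or Index is raveled to 1-d, and any other
# list-like is wrapped in an array.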
def _get_all_lines(ax):
lines = ax.get_lines()
# check for right_ax, which can oddly sometimes point back to ax
if hasattr(ax, 'right_ax') and ax.right_ax != ax:
lines += ax.right_ax.get_lines()
# no such risk with left_ax
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(x[0], left)
right = max(x[-1], right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
# top10 = sales['zip'].value_counts()[:10].index
# sales2 = sales[sales.zip.isin(top10)]
# _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
# plt.show()
import matplotlib.pyplot as plt
import pandas.tools.plotting as plots
import pandas.core.frame as fr
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
[4, 9, -3], [2, 5, -1]],
columns=['A', 'B', 'C'])
data.plot(kind='barh', stacked=True)
plt.show()
| dssg/wikienergy | disaggregator/build/pandas/pandas/tools/plotting.py | Python | mit | 116,674 |
from django import forms
from django.forms import Form, ModelForm
from django.utils import timezone
from webapp.models import Task, TaskGroup, TaskGroupSet
from webapp.validators import validate_package
from webapp.widgets import CustomSplitDateTimeWidget
class TaskGroupForm(ModelForm):
class Meta:
model = TaskGroup
fields = '__all__'
exclude = ['raw_csv', 'is_public']
labels = {
'name': 'Group name',
'description': 'Description',
'is_public': 'Public'
}
help_texts = {
'is_public': 'determines whether group is public or not'
}
def __init__(self, *args, **kwargs):
kwargs.pop('edit', None)
super(TaskGroupForm, self).__init__(*args, **kwargs)
class TaskGroupCSVForm(Form):
file = forms.FileField()
upload_csv = forms.IntegerField(initial=1, widget=forms.HiddenInput)
class TaskGroupAccessForm(Form):
grant_single = forms.IntegerField(initial=1, widget=forms.HiddenInput)
username = forms.CharField(
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'username', 'class': 'form-control'})
)
class TaskGroupInviteForm(Form):
send_invitation = forms.CharField(initial=1, widget=forms.HiddenInput)
email = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder': 'E-mail', 'class': 'form-control'}))
class TaskForm(ModelForm):
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
        help_text='Leave blank if there is no deadline',
required=False
)
package = forms.FileField(
label='Package',
help_text='.zip package created according to guidelines',
widget=forms.FileInput,
validators=[validate_package]
)
class Meta:
model = Task
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Task name',
'description_brief': 'Short description',
'tg_set': 'Task set',
'submission_limit': 'Submissions limit',
'result_type': 'Result priority',
'files_count_limit': 'Max. files amount',
'file_size_limit': 'Max. file size'
}
help_texts = {
'description': 'Markdown can be used here',
'description_brief': 'Short description will be shown on the tasks list page',
'tg_set': 'Task set to which this task belongs',
            'result_type': 'Pattern according to which the results list will be displayed.',
'submission_limit': 'Limit of submissions per user. Put 0 if unlimited',
'files_count_limit': 'Maximal amount of files in one submission',
'file_size_limit': 'Maximal size of single file (in bytes)'
}
widgets = {
'package': forms.FileInput
}
def __init__(self, *args, **kwargs):
edit = kwargs.pop('edit', None)
super(TaskForm, self).__init__(*args, **kwargs)
if edit:
self.fields['package'].label = 'New package'
self.fields['package'].required = False
self.fields['tg_set'].queryset = TaskGroupSet.objects.filter(task_group_id=self.instance.task_group_id)
else:
self.fields['deadline'].initial = timezone.now() + timezone.timedelta(days=14)
del self.fields['tg_set']
class InvalidateSubmissionForm(Form):
comment = forms.CharField(
label='Your comment',
widget=forms.Textarea(attrs={'placeholder': 'Type in the reason here'}),
required=True
)
class CopyTaskGroup(Form):
name = forms.CharField(
label='New name',
widget=forms.TextInput(attrs={'placeholder': 'New name'}),
required=True
)
description = forms.CharField(
label='Description',
widget=forms.Textarea(attrs={'placeholder': 'Type in new description (optional)'}),
required=False
)
class TaskGroupSetForm(ModelForm):
class Meta:
model = TaskGroupSet
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Name',
'description': 'Description'
}
class TaskGroupBulkDeadlines(Form):
set_id = forms.IntegerField(
required=True,
widget=forms.HiddenInput()
)
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
required=False,
label='name of the set'
)
def __init__(self, *args, **kwargs):
super(TaskGroupBulkDeadlines, self).__init__(*args, **kwargs)
self.fields['deadline'].label = self.initial.get('set_name')
class FeedbackFrom(Form):
TOPIC = (
('', '- Please select -'),
('proposal', 'I have a proposal'),
('report', 'I want to report a problem'),
('question', 'I have a question'),
('other', 'Other')
)
theme = forms.ChoiceField(label='What happened?', choices=TOPIC)
email = forms.EmailField(
label='',
widget=forms.EmailInput(attrs={'placeholder': 'Contact e-mail'})
)
content = forms.CharField(
label='Write your message here:',
widget=forms.Textarea
)
class InternalLoginForm(Form):
username = forms.CharField(label='Username')
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class InternalRegisterForm(Form):
username = forms.CharField(min_length=3, label='Username')
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
first_name = forms.CharField(min_length=1, label='First name')
last_name = forms.CharField(min_length=1, label='Last name')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetInitForm(Form):
username = forms.CharField(min_length=3, label='Username')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetResetForm(Form):
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
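# Hedged usage sketch (not part of the original module): validating one of the
# plain forms above. Assumes a configured Django project; the credentials are
# made-up values.
def _example_login_validation():
    form = InternalLoginForm(data={'username': 'alice', 'password': 'wonderland'})
    if form.is_valid():
        return form.cleaned_data['username']
    return form.errors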
| algochecker/algochecker-web | webapp/forms.py | Python | mit | 6,858 |
'''
Pypi cache server
Original author: Victor-mortal
'''
import os
import httplib
import urlparse
import logging
import locale
import json
import hashlib
import webob
import gevent
from gevent import wsgi as wsgi_fast, pywsgi as wsgi, monkey
CACHE_DIR = '.cache'
wsgi = wsgi_fast # comment to use pywsgi
host = '0.0.0.0'
port = 8080
class Proxy(object):
"""A WSGI based web proxy application
"""
def __init__(self, chunkSize=4096, timeout=60, dropHeaders=['transfer-encoding'], pypiHost=None, log=None):
"""
@param log: logger of logging library
"""
self.log = log
if self.log is None:
self.log = logging.getLogger('proxy')
self.chunkSize = chunkSize
self.timeout = timeout
self.dropHeaders = dropHeaders
self.pypiHost = pypiHost
def yieldData(self, response, cache_file=None):
while True:
data = response.read(self.chunkSize)
yield data
if cache_file:
cache_file.write(data)
if len(data) < self.chunkSize:
break
if cache_file:
cache_file.close()
def _rewrite(self, req, start_response):
path = req.path_info
if req.query_string:
path += '?' + req.query_string
parts = urlparse.urlparse(path)
headers = req.headers
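        # Cache key: MD5 over the serialized request headers plus the URL path,
        # so the same path fetched with different headers is cached separately.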
md = hashlib.md5()
        md.update(' '.join('%s:%s' % v for v in headers.iteritems()))
md.update(path)
cache_file = os.path.join(CACHE_DIR, md.hexdigest())
if os.path.exists(cache_file):
o = json.load( open(cache_file+'.js', 'rb') )
start_response(o['response'], o['headers'])
return self.yieldData( open(cache_file) )
self.log.debug('Request from %s to %s', req.remote_addr, path)
url = path
conn = httplib.HTTPConnection(self.pypiHost, timeout=self.timeout)
#headers['X-Forwarded-For'] = req.remote_addr
#headers['X-Real-IP'] = req.remote_addr
try:
conn.request(req.method, url, headers=headers, body=req.body)
response = conn.getresponse()
except Exception, e:
msg = str(e)
if os.name == 'nt':
_, encoding = locale.getdefaultlocale()
msg = msg.decode(encoding)
self.log.warn('Bad gateway with reason: %s', msg, exc_info=True)
start_response('502 Bad gateway', [])
return ['Bad gateway']
headers = [(k, v) for (k, v) in response.getheaders()\
if k not in self.dropHeaders]
start_response('%s %s' % (response.status, response.reason),
headers)
json.dump( {'headers': headers, 'response': '%s %s' % (response.status, response.reason)}, open(cache_file+'.js', 'wb'))
return self.yieldData(response, cache_file=open(cache_file, 'wb'))
def __call__(self, env, start_response):
req = webob.Request(env)
return self._rewrite(req, start_response)
if __name__ == '__main__':
if not os.path.isdir(CACHE_DIR):
os.mkdir(CACHE_DIR)
monkey.patch_all()
handler = Proxy(pypiHost='pypi.python.org:80')
wsgi.WSGIServer((host, port), handler).serve_forever()
| rubik/pyg | tests/pypi_cache_server.py | Python | mit | 3,301 |
#!/usr/bin/env python
""" Example program for JDEV Mercurial tutorial """
from optparse import OptionParser
def calculate_result(white_balls, power_ball):
""" Computation is lauched here """
for ball in white_balls:
if ball < 1 or ball > 59:
return -1
if power_ball < 1 or power_ball > 39:
return -1
return 0
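# Note: in this version of the example, calculate_result only validates the
# draw (-1 for an out-of-range ball, 0 otherwise); the actual winning
# probability is not computed yet.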
def main():
""" Program used to compute the integer percent of chance
of winning at the lottery.
Five white balls and a power ball are drawn"""
usage = "Usage: %prog power_ball (5 white balls)"
parser = OptionParser(usage)
(_, args) = parser.parse_args()
if len(args) != 6:
parser.error("incorrect number of arguments")
power_ball = int(args[0])
white_balls = [int(arg) for arg in args[1:]]
result = calculate_result(white_balls, power_ball)
print "White balls : %s" % white_balls
print "Chance ball : %s" % power_ball
print "%d percent chance of winning" % result
return 0
if __name__ == "__main__":
main()
| fjammes/tutorial-git | tp_dvcs/data/lottery_v3.py | Python | cc0-1.0 | 1,056 |
#!/usr/bin/env python
"""PySide port of the network/http example from Qt v4.x"""
import sys
from PySide import QtCore, QtGui, QtNetwork
class HttpWindow(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
self.urlLineEdit = QtGui.QLineEdit("http://www.ietf.org/iesg/1rfc_index.txt")
self.urlLabel = QtGui.QLabel(self.tr("&URL:"))
self.urlLabel.setBuddy(self.urlLineEdit)
self.statusLabel = QtGui.QLabel(self.tr("Please enter the URL of a file "
"you want to download."))
self.quitButton = QtGui.QPushButton(self.tr("Quit"))
self.downloadButton = QtGui.QPushButton(self.tr("Download"))
self.downloadButton.setDefault(True)
self.progressDialog = QtGui.QProgressDialog(self)
self.http = QtNetwork.QHttp(self)
self.outFile = None
self.httpGetId = 0
self.httpRequestAborted = False
self.connect(self.urlLineEdit, QtCore.SIGNAL("textChanged(QString &)"),
self.enableDownloadButton)
self.connect(self.http, QtCore.SIGNAL("requestFinished(int, bool)"),
self.httpRequestFinished)
self.connect(self.http, QtCore.SIGNAL("dataReadProgress(int, int)"),
self.updateDataReadProgress)
self.connect(self.http, QtCore.SIGNAL("responseHeaderReceived(QHttpResponseHeader &)"),
self.readResponseHeader)
self.connect(self.progressDialog, QtCore.SIGNAL("canceled()"),
self.cancelDownload)
self.connect(self.downloadButton, QtCore.SIGNAL("clicked()"),
self.downloadFile)
self.connect(self.quitButton, QtCore.SIGNAL("clicked()"),
self, QtCore.SLOT("close()"))
topLayout = QtGui.QHBoxLayout()
topLayout.addWidget(self.urlLabel)
topLayout.addWidget(self.urlLineEdit)
buttonLayout = QtGui.QHBoxLayout()
buttonLayout.addStretch(1)
buttonLayout.addWidget(self.downloadButton)
buttonLayout.addWidget(self.quitButton)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addLayout(topLayout)
mainLayout.addWidget(self.statusLabel)
mainLayout.addLayout(buttonLayout)
self.setLayout(mainLayout)
self.setWindowTitle(self.tr("HTTP"))
self.urlLineEdit.setFocus()
def downloadFile(self):
url = QtCore.QUrl(self.urlLineEdit.text())
fileInfo = QtCore.QFileInfo(url.path())
fileName = fileInfo.fileName()
if QtCore.QFile.exists(fileName):
QtGui.QMessageBox.information(self, self.tr("HTTP"), self.tr(
"There already exists a file called %s "
"in the current directory.") % (fileName))
return
self.outFile = QtCore.QFile(fileName)
if not self.outFile.open(QtCore.QIODevice.WriteOnly):
QtGui.QMessageBox.information(self, self.tr("HTTP"),
self.tr("Unable to save the file %(name)s: %(error)s.")
% {'name': fileName,
'error': self.outFile.errorString()})
self.outFile = None
return
if url.port() != -1:
self.http.setHost(url.host(), url.port())
else:
self.http.setHost(url.host(), 80)
if url.userName():
self.http.setUser(url.userName(), url.password())
self.httpRequestAborted = False
self.httpGetId = self.http.get(url.path(), self.outFile)
self.progressDialog.setWindowTitle(self.tr("HTTP"))
self.progressDialog.setLabelText(self.tr("Downloading %s.") % (fileName))
self.downloadButton.setEnabled(False)
def cancelDownload(self):
self.statusLabel.setText(self.tr("Download canceled."))
self.httpRequestAborted = True
self.http.abort()
self.downloadButton.setEnabled(True)
def httpRequestFinished(self, requestId, error):
if self.httpRequestAborted:
if self.outFile is not None:
self.outFile.close()
self.outFile.remove()
self.outFile = None
self.progressDialog.hide()
return
if requestId != self.httpGetId:
return
self.progressDialog.hide()
self.outFile.close()
if error:
self.outFile.remove()
QtGui.QMessageBox.information(self, self.tr("HTTP"),
self.tr("Download failed: %s.")
% (self.http.errorString()))
else:
fileName = QtCore.QFileInfo(QtCore.QUrl(self.urlLineEdit.text()).path()).fileName()
self.statusLabel.setText(self.tr("Downloaded %s to current directory.") % (fileName))
self.downloadButton.setEnabled(True)
self.outFile = None
def readResponseHeader(self, responseHeader):
if responseHeader.statusCode() != 200:
QtGui.QMessageBox.information(self, self.tr("HTTP"),
self.tr("Download failed: %s.")
% (responseHeader.reasonPhrase()))
self.httpRequestAborted = True
self.progressDialog.hide()
self.http.abort()
return
def updateDataReadProgress(self, bytesRead, totalBytes):
if self.httpRequestAborted:
return
self.progressDialog.setMaximum(totalBytes)
self.progressDialog.setValue(bytesRead)
def enableDownloadButton(self):
        self.downloadButton.setEnabled(bool(self.urlLineEdit.text()))
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
httpWin = HttpWindow()
sys.exit(httpWin.exec_())
| Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/network/http.py | Python | epl-1.0 | 5,973 |
# Copyright 2012 Kevin Goodsell
#
# This software is licensed under the Eclipse Public License (EPL) V1.0.
from __future__ import with_statement
import time
import unittest
import STAF
class HandleTests(unittest.TestCase):
def assertSTAFResultError(self, rc, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.fail('STAFResultError not raised')
except STAF.STAFResultError, exc:
self.assertEqual(exc.rc, rc)
def testBasicHandle(self):
with STAF.Handle('test handle') as h:
result = h.submit('local', 'ping', 'ping')
self.assertEqual(result, 'PONG')
result = h.submit('local', 'ping', ['ping'])
self.assertEqual(result, 'PONG')
result = h.submit('local', 'service', 'list')
services = dict((s['name'], s) for s in result)
# There's not much reason to check all these, so just pick a few.
self.assertEqual(services['DELAY'],
{'name': 'DELAY', 'executable': None,
'library': '<Internal>'})
self.assertEqual(services['DIAG'],
{'name': 'DIAG', 'executable': None,
'library': '<Internal>'})
self.assertEqual(services['ECHO'],
{'name': 'ECHO', 'executable': None,
'library': '<Internal>'})
# Submit using a list
result = h.submit('local', 'handle',
['list handles name', 'test handle', 'long'])
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
pieces = result[0]
self.assertEqual(pieces['name'], 'test handle')
self.assertEqual(pieces['state'], 'Registered')
self.assertTrue(h.is_registered())
self.assertFalse(h.is_registered())
def testErrors(self):
h = STAF.Handle('test handle')
self.assertSTAFResultError(STAF.errors.UnknownService,
h.submit, 'local', 'doesntexist', 'do magic')
self.assertSTAFResultError(STAF.errors.InvalidRequestString,
h.submit, 'local', 'ping', 'not a ping command')
h.unregister()
self.assertSTAFResultError(STAF.errors.HandleDoesNotExist,
h.submit, 'local', 'ping', 'ping')
# Unregistering a second time should not produce an error.
h.unregister()
def testStaticHandle(self):
with STAF.Handle('helper') as helper:
self.assertFalse(helper.is_static())
handle_num = helper.submit('local', 'handle',
'create handle name static-test')
handle_num = int(handle_num)
h = STAF.Handle(handle_num)
self.assertTrue(h.is_static())
self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG')
# Unregistering a static handle does nothing.
h.unregister()
self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG')
# Delete the static handle
helper.submit('local', 'handle',
['delete handle', str(h.handle_num())])
def testSyncModes(self):
with STAF.Handle('test handle') as h:
# FIRE AND FORGET
req = h.submit('local', 'ping', 'ping', STAF.REQ_FIRE_AND_FORGET)
self.assertTrue(req.isdigit())
time.sleep(2)
# No queued result
self.assertSTAFResultError(STAF.errors.NoQueueElement,
h.submit, 'local', 'queue', 'get type STAF/RequestComplete')
# No retained result
self.assertSTAFResultError(STAF.errors.RequestNumberNotFound,
h.submit, 'local', 'service', ['free request', req])
# QUEUE
req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE)
self.assertTrue(req.isdigit())
time.sleep(2)
# Check queued result
result = h.submit('local', 'queue', 'get type STAF/RequestComplete')
msg = result['message']
self.assertEqual(msg['rc'], '0')
self.assertEqual(msg['requestNumber'], req)
self.assertEqual(msg['result'], 'PONG')
# No retained result
self.assertSTAFResultError(STAF.errors.RequestNumberNotFound,
h.submit, 'local', 'service', ['free request', req])
# RETAIN
req = h.submit('local', 'ping', 'ping', STAF.REQ_RETAIN)
self.assertTrue(req.isdigit())
time.sleep(2)
# No queued result
self.assertSTAFResultError(STAF.errors.NoQueueElement,
h.submit, 'local', 'queue', 'get type STAF/RequestComplete')
# Check retained result
result = h.submit('local', 'service', ['free request', req])
self.assertEqual(result['rc'], '0')
self.assertEqual(result['result'], 'PONG')
# QUEUE AND RETAIN
req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE_RETAIN)
self.assertTrue(req.isdigit())
time.sleep(2)
# Check queued result
result = h.submit('local', 'queue', 'get type STAF/RequestComplete')
msg = result['message']
self.assertEqual(msg['rc'], '0')
self.assertEqual(msg['requestNumber'], req)
self.assertEqual(msg['result'], 'PONG')
# Check retained result
result = h.submit('local', 'service', ['free request', req])
self.assertEqual(result['rc'], '0')
self.assertEqual(result['result'], 'PONG')
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| KevinGoodsell/caduceus | test/test_handle.py | Python | epl-1.0 | 5,924 |
"""Unit test of the AlignmentScan
@author: Kay Kasemir
"""
from __future__ import print_function
import unittest
from scan.commands import Set, CommandSequence
from scan.alignment import AlignmentScan
class AlignmentTest(unittest.TestCase):
def testBasics(self):
align = AlignmentScan("motor_x", 0, 10, 0.5, "seconds", 0.5, "signal",
pre=Set("motor_y", 3),
find_command="FindPeak")
cmds = align.createScan()
print(CommandSequence(cmds))
self.assertEqual(str(cmds), "[Set('Demo:CS:Scan:Fit:Height', 0), Set('motor_y', 3), Loop('motor_x', 0, 10, 0.5, [ Delay(0.5), Log('signal', 'motor_x'), Script('WriteDataToPV', 'motor_x', 'Demo:CS:Scan:Fit:Data:X'), Script('WriteDataToPV', 'signal', 'Demo:CS:Scan:Fit:Data:Y', '-', '1') ]), Script('FindPeak', 'motor_x', 'signal', '-', '1', 'Demo:CS:Scan:Fit:Pos', 'Demo:CS:Scan:Fit:Height', 'Demo:CS:Scan:Fit:Width')]")
if __name__ == "__main__":
unittest.main() | PythonScanClient/PyScanClient | Test/test_alignment.py | Python | epl-1.0 | 1,016 |
import os, requests, time
import mydropbox
edmunds = mydropbox.get_keys('edmunds')
api_key = edmunds['api_key']
api_secret = edmunds['api_secret']
vin = mydropbox.read_dropbox_file(os.path.join('Records', 'Financials', 'Car', 'VIN')).strip()
r = requests.get("https://api.edmunds.com/api/vehicle/v2/vins/%s?&fmt=json&api_key=%s" % (vin, api_key))
car = r.json()
time.sleep(1)
# Pulled from above query
styleid = str(car['years'][0]['styles'][0]['id'])
optionids = []
for optcat in car['options']:
for opt in optcat['options']:
optionids.append(str(opt['id']))
colorids = []
for colorcat in car['colors']:
for opt in colorcat['options']:
colorids.append(str(opt['id']))
# User-supplied
condition = "Clean"
mileage = "6000"
zipcode = "60613"
r = requests.get(
"https://api.edmunds.com/v1/api/tmv/tmvservice/calculateusedtmv" +
"?styleid=%s" % styleid +
''.join(map(lambda optionid: "&optionid=%s" % optionid, optionids)) +
''.join(map(lambda colorid: "&colorid=%s" % colorid, colorids)) +
"&condition=%s" % condition +
"&mileage=%s" % mileage +
"&zip=%s" % zipcode +
"&fmt=json&api_key=%s" % api_key
)
data = r.json()
totalWithOptions = data['tmv']['totalWithOptions']
disp = [
('Used Trade-in', 'usedTradeIn'),
('Used Private Party', 'usedPrivateParty'),
('Used TMV Retail', 'usedTmvRetail')
]
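# Average the three TMV figures (trade-in, private party, retail) into a single
# rough estimate of the car's value.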
total = 0.0
for label, key in disp:
total += totalWithOptions[key]
print("%s: %f" % (label, totalWithOptions[key]))
total /= 3
print("Average: %f" % total)
| smiley325/accounter | ref/edmunds.py | Python | epl-1.0 | 1,537 |
from __future__ import absolute_import
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager
from Screens.InfoBarGenerics import InfoBarNotifications
import Screens.Standby
import Tools.Notifications
from boxbranding import getMachineBrand, getMachineName
class JobView(InfoBarNotifications, Screen, ConfigListScreen):
def __init__(self, session, job, parent=None, cancelable = True, backgroundable = True, afterEventChangeable = True , afterEvent="nothing"):
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Sources.Boolean import Boolean
from Components.ActionMap import ActionMap
Screen.__init__(self, session, parent)
Screen.setTitle(self, _("Job View"))
InfoBarNotifications.__init__(self)
ConfigListScreen.__init__(self, [])
self.parent = parent
self.job = job
if afterEvent:
self.job.afterEvent = afterEvent
self["job_name"] = StaticText(job.name)
self["job_progress"] = Progress()
self["job_task"] = StaticText()
self["summary_job_name"] = StaticText(job.name)
self["summary_job_progress"] = Progress()
self["summary_job_task"] = StaticText()
self["job_status"] = StaticText()
self["finished"] = Boolean()
self["cancelable"] = Boolean(cancelable)
self["backgroundable"] = Boolean(backgroundable)
self["key_blue"] = StaticText(_("Background"))
self.onShow.append(self.windowShow)
self.onHide.append(self.windowHide)
self["setupActions"] = ActionMap(["ColorActions", "SetupActions"],
{
"green": self.ok,
"red": self.abort,
"blue": self.background,
"cancel": self.abort,
"ok": self.ok,
}, -2)
self.settings = ConfigSubsection()
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.settings.afterEvent = ConfigSelection(choices = [("nothing", _("do nothing")), ("close", _("Close")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = self.job.afterEvent or "nothing")
self.job.afterEvent = self.settings.afterEvent.value
self.afterEventChangeable = afterEventChangeable
self.setupList()
self.state_changed()
def setupList(self):
if self.afterEventChangeable:
self["config"].setList( [ getConfigListEntry(_("After event"), self.settings.afterEvent) ])
else:
self["config"].hide()
self.job.afterEvent = self.settings.afterEvent.value
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setupList()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setupList()
def windowShow(self):
job_manager.visible = True
self.job.state_changed.append(self.state_changed)
def windowHide(self):
job_manager.visible = False
if len(self.job.state_changed) > 0:
self.job.state_changed.remove(self.state_changed)
def state_changed(self):
j = self.job
self["job_progress"].range = j.end
self["summary_job_progress"].range = j.end
self["job_progress"].value = j.progress
self["summary_job_progress"].value = j.progress
#print "JobView::state_changed:", j.end, j.progress
self["job_status"].text = j.getStatustext()
if j.status == j.IN_PROGRESS:
self["job_task"].text = j.tasks[j.current_task].name
self["summary_job_task"].text = j.tasks[j.current_task].name
else:
self["job_task"].text = ""
self["summary_job_task"].text = j.getStatustext()
if j.status in (j.FINISHED, j.FAILED):
self.performAfterEvent()
self["backgroundable"].boolean = False
if j.status == j.FINISHED:
self["finished"].boolean = True
self["cancelable"].boolean = False
elif j.status == j.FAILED:
self["cancelable"].boolean = True
def background(self):
if self["backgroundable"].boolean:
self.close(True)
def ok(self):
if self.job.status in (self.job.FINISHED, self.job.FAILED):
self.close(False)
else:
self.background()
def abort(self):
if self.job.status == self.job.NOT_STARTED:
job_manager.active_jobs.remove(self.job)
self.close(False)
elif self.job.status == self.job.IN_PROGRESS and self["cancelable"].boolean == True:
self.job.cancel()
else:
self.close(False)
def performAfterEvent(self):
self["config"].hide()
if self.settings.afterEvent.value == "nothing":
return
elif self.settings.afterEvent.value == "close" and self.job.status == self.job.FINISHED:
self.close(False)
from Screens.MessageBox import MessageBox
if self.settings.afterEvent.value == "deepstandby":
if not Screens.Standby.inTryQuitMainloop:
Tools.Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A sleep timer wants to shut down\nyour %s %s. Shutdown now?") % (getMachineBrand(), getMachineName()), timeout = 20)
elif self.settings.afterEvent.value == "standby":
if not Screens.Standby.inStandby:
Tools.Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A sleep timer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 20)
def checkNotifications(self):
InfoBarNotifications.checkNotifications(self)
if not Tools.Notifications.notifications:
if self.settings.afterEvent.value == "close" and self.job.status == self.job.FAILED:
self.close(False)
def sendStandbyNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
| atvcaptain/enigma2 | lib/python/Screens/TaskView.py | Python | gpl-2.0 | 5,728 |
# -*- coding: utf-8 -*-
#############################################################################
# File : ConfigCheck.py
# Package : rpmlint
# Author : Frederic Lepied
# Created on : Sun Oct 3 21:48:20 1999
# Purpose :
#############################################################################
import AbstractCheck
from Filter import addDetails, printError, printWarning
class ConfigCheck(AbstractCheck.AbstractCheck):
def __init__(self):
AbstractCheck.AbstractCheck.__init__(self, "ConfigCheck")
def check_binary(self, pkg):
config_files = pkg.configFiles()
noreplace_files = pkg.noreplaceFiles()
for c in config_files:
if c.startswith("/var/lib/games/"):
printError(pkg, "score-file-must-not-be-conffile", c)
elif not c.startswith("/etc/") and not c.startswith("/var/"):
printWarning(pkg, "non-etc-or-var-file-marked-as-conffile", c)
if c not in noreplace_files:
printWarning(pkg, "conffile-without-noreplace-flag", c)
# Create an object to enable the auto registration of the test
check = ConfigCheck()
# Add information about checks
addDetails(
'score-file-must-not-be-conffile',
"""A file in /var/lib/games/ is a configuration file. Store your conf
files in /etc instead.""",
'non-etc-or-var-file-marked-as-conffile',
"""A file not in /etc or /var is marked as being a configuration file.
Please put your conf files in /etc or /var.""",
'conffile-without-noreplace-flag',
"""A configuration file is stored in your package without the noreplace flag.
A way to resolve this is to put the following in your SPEC file:
%config(noreplace) /etc/your_config_file_here
""",
)
# ConfigCheck.py ends here
# ex: ts=4 sw=4 et
| aborrero/pkg-rpmlint | ConfigCheck.py | Python | gpl-2.0 | 1,798 |
# configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
from lib.six import six
from _version import __version__
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
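# Doctest-style example (illustrative): any spelling of UTF-8 matches.
# >>> match_utf8('UTF-8'), match_utf8('utf_16')
# (True, False)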
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
    def build(self, o):
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
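# Doctest-style example (illustrative): unrepr evaluates the repr of a
# Python literal back into the corresponding object, and passes empty
# values through unchanged.
# >>> unrepr("['a', 1]")
# ['a', 1]
# >>> unrepr('')
# ''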
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
    An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
        if self._cookie not in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
        # switch off interpolation before we try and fetch anything!
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
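# Doctest-style example (illustrative): with the default 'configparser'
# engine a value refers to another key as %(key)s; the 'template' engine
# uses $key or ${key} instead, with $$ escaping a literal dollar sign.
# >>> cfg = ConfigObj(['home = /opt', 'path = %(home)s/data'])
# >>> cfg['path']
# '/opt/data'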
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, six.string_types):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, six.string_types):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, six.string_types):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
elif isinstance(value, dict) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, six.string_types):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, six.string_types):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
        dict.__delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
        If the function raises an exception, raise the error
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognised keyword arguments you pass to walk will be passed on
        to the function you pass in.
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then it recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, six.string_types):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
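# Doctest-style example (illustrative): a Section iterates scalars first,
# then sections, each in parse order.
# >>> cfg = ConfigObj(['b = 2', 'a = 1', '[sub]', 'c = 3'])
# >>> cfg.scalars, cfg.sections
# (['b', 'a'], ['sub'])
# >>> cfg.keys()
# ['b', 'a', 'sub']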
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning, stacklevel=2)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognised option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, six.string_types):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, six.string_types) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)]))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, six.text_type):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
                # If we get this far, decoding will *probably* raise a
                # UnicodeDecodeError, as the text doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, six.binary_type) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, six.text_type):
return infile.splitlines(True)
elif isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
if six.PY2 and isinstance(line, str):
# don't actually do any decoding, since we're on python 2 and
# returning a bytestring is fine
return self._decode(infile, None)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
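    # Doctest-style example (illustrative): with no encoding specified, a
    # UTF-8 BOM on the first line is detected, stripped and recorded.
    # >>> c = ConfigObj([b'\xef\xbb\xbfkey = value'])
    # >>> c.BOM, c['key']
    # (True, 'value')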
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, six.binary_type) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
        Decode infile to unicode, using the specified encoding.
        If infile is a string, it also needs converting to a list.
"""
if isinstance(infile, six.string_types):
return infile.splitlines(True)
if isinstance(infile, six.binary_type):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, six.binary_type):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, six.binary_type) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, six.string_types):
            # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
                    # the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
        The error will have occurred at ``cur_index``.
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, six.string_types):
if self.stringify:
                # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
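    # Doctest-style examples (illustrative): list syntax on write mirrors
    # the parse rules, and quoting is only added when a value needs it.
    # >>> c = ConfigObj()
    # >>> c._quote([])
    # ','
    # >>> c._quote(['one'])
    # 'one,'
    # >>> c._quote(['a', 'b'])
    # 'a, b'
    # >>> c._quote(' padded ')
    # '" padded "'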
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
# NOTE: note there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
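    # Doctest-style examples (illustrative): _handle_value returns the
    # parsed value plus any inline comment.
    # >>> c = ConfigObj()
    # >>> c._handle_value('1, 2, 3  # numbers')
    # (['1', '2', '3'], '# numbers')
    # >>> c._handle_value(',')
    # ([], None)
    # >>> c._handle_value('"hello"')
    # ('hello', None)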
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
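    # Doctest-style example (illustrative): a triple-quoted value may span
    # physical lines; the line break is preserved in the parsed value.
    # >>> cfg = ConfigObj(['text = """one', 'two"""'])
    # >>> cfg['text']
    # 'one\ntwo'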
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
        # correct settings? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
                # to the already parsed ConfigObj?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
1
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, six.binary_type):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
of a marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (not entry in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, six.string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
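# ---------------------------------------------------------------------------
# Minimal usage sketch for ``validate`` (illustrative only; assumes the
# external ``validate`` module is installed, and the spec keys and values
# below are invented for the example).
if __name__ == '__main__':
    from validate import Validator
    _spec = ['port = integer(0, 65535)', 'host = string']
    _cfg = ConfigObj(['port = 99999'], configspec=_spec)
    _result = _cfg.validate(Validator(), preserve_errors=True)
    # ``_result`` maps each key to True (passed), False (missing) or the
    # exception describing the failure: here 'port' gets a
    # VdtValueTooBigError and the absent 'host' is marked False.
    print(_result)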
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
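# Minimal usage sketch for ``SimpleVal`` (illustrative only; the keys are
# invented).  No external ``validate`` module is needed for presence checks.
if __name__ == '__main__':
    _spec = ['name = whatever', 'email = whatever']
    _cfg = ConfigObj(['name = Fred'], configspec=_spec)
    print(_cfg.validate(SimpleVal()))   # -> e.g. {'name': True, 'email': False}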
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), dict):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
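# Minimal usage sketch for ``flatten_errors`` (illustrative only; assumes the
# external ``validate`` module, and the config lines are invented).
if __name__ == '__main__':
    from validate import Validator
    _cfg = ConfigObj(['port = not-a-number'], configspec=['port = integer'])
    _res = _cfg.validate(Validator(), preserve_errors=True)
    for _sections, _key, _error in flatten_errors(_cfg, _res):
        print('%s: %s' % ('.'.join(_sections + [_key or '[missing]']), _error))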
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
    Each tuple contains two values: a tuple representing the section the value
    is in, and the name of the extra value. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
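# Minimal usage sketch for ``get_extra_values`` (illustrative only; assumes
# the external ``validate`` module).  ``extra_values`` is only populated by a
# ``validate`` call, which is why a Validator is needed first.
if __name__ == '__main__':
    from validate import Validator
    _cfg = ConfigObj(['port = 80', 'debug = yes'], configspec=['port = integer'])
    _cfg.validate(Validator())
    print(get_extra_values(_cfg))   # [((), 'debug')]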
"""*A programming language is a medium of expression.* - Paul Graham"""
| theguardian/JIRA-APPy | lib/configobj/configobj.py | Python | gpl-2.0 | 89,640 |
#! /usr/bin/env python
from ppclass import pp
var1 = pp(\
file="/home/aymeric/Big_Data/DATAPLOT/diagfired.nc",\
var="ps",\
x=None,\
y=10.,\
t=2.)
var1.get()
var2 = pp(\
file="/home/aymeric/Big_Data/DATAPLOT/diagfired.nc",\
var="phisinit",\
x=None,\
y=10.,\
t=2.)
var2.get()
var2 = var2 / 3.72
S = var2.func(var1)
S.p[0].marker = 'o'
S.p[0].linestyle = ''
S.p[0].ylabel = "Surface geopotential height (km)"
S.p[0].ycoeff = 1./1000.
S.p[0].fmt = '%.0f'
S.filename = "function"
S.makeplot()
| aymeric-spiga/planetoplot | examples/ppclass_reference/function.py | Python | gpl-2.0 | 493 |
import time
import net.mapserv as mapserv
import net.charserv as charserv
import commands
import walkto
import logicmanager
import status
import plugins
from collections import deque
from net.inventory import get_item_index, get_storage_index
from utils import extends
from actor import find_nearest_being
from chat import send_whisper as whisper
__all__ = [ 'PLUGIN', 'init' ]
PLUGIN = {
'name': 'manaboy',
'requires': ('chatbot', 'npc', 'autofollow'),
'blocks': (),
}
npcdialog = {
'start_time': -1,
'program': [],
}
_times = {
'follow': 0,
'where' : 0,
'status' : 0,
'inventory' : 0,
'say' : 0,
'zeny' : 0,
'storage' : 0,
}
admins = ['Trav', 'Travolta', 'Komornyik']
allowed_drops = [535, 719, 513, 727, 729, 869]
npc_owner = ''
history = deque(maxlen=10)
storage_is_open = False
def set_npc_owner(nick):
global npc_owner
if plugins.npc.npc_id < 0:
npc_owner = nick
@extends('smsg_being_remove')
def bot_dies(data):
if data.id == charserv.server.account:
mapserv.cmsg_player_respawn()
@extends('smsg_npc_message')
@extends('smsg_npc_choice')
@extends('smsg_npc_close')
@extends('smsg_npc_next')
@extends('smsg_npc_int_input')
@extends('smsg_npc_str_input')
def npc_activity(data):
npcdialog['start_time'] = time.time()
@extends('smsg_npc_message')
def npc_message(data):
if not npc_owner:
return
npc = mapserv.beings_cache.findName(data.id)
m = '[npc] {} : {}'.format(npc, data.message)
whisper(npc_owner, m)
@extends('smsg_npc_choice')
def npc_choice(data):
if not npc_owner:
return
choices = filter(lambda s: len(s.strip()) > 0,
data.select.split(':'))
whisper(npc_owner, '[npc][select] (use !input <number> to select)')
for i, s in enumerate(choices):
whisper(npc_owner, ' {}) {}'.format(i + 1, s))
@extends('smsg_npc_int_input')
@extends('smsg_npc_str_input')
def npc_input(data):
if not npc_owner:
return
t = 'number'
if plugins.npc.input_type == 'str':
t = 'string'
whisper(npc_owner, '[npc][input] (use !input <{}>)'.format(t))
@extends('smsg_storage_status')
def storage_status(data):
print 'storage_status'
global storage_is_open
storage_is_open = True
_times['storage'] = time.time()
if npc_owner:
whisper(npc_owner, '[storage]')
@extends('smsg_storage_items')
@extends('smsg_storage_equip')
def storage_items(data):
if not npc_owner:
return
ls = status.invlists2(max_length=255, source='storage')
for l in ls:
whisper(npc_owner, l)
@extends('smsg_storage_close')
def storage_close(data):
print 'smsg_storage_close'
global storage_is_open
storage_is_open = False
_times['storage'] = 0
def cmd_where(nick, message, is_whisper, match):
if not is_whisper:
return
msg = status.player_position()
whisper(nick, msg)
def cmd_goto(nick, message, is_whisper, match):
if not is_whisper:
return
try:
x = int(match.group(1))
y = int(match.group(2))
except ValueError:
return
set_npc_owner(nick)
plugins.autofollow.follow = ''
mapserv.cmsg_player_change_dest(x, y)
def cmd_goclose(nick, message, is_whisper, match):
if not is_whisper:
return
x = mapserv.player_pos['x']
y = mapserv.player_pos['y']
if message.startswith('!left'):
x -= 1
elif message.startswith('!right'):
x += 1
elif message.startswith('!up'):
y -= 1
elif message.startswith('!down'):
y += 1
set_npc_owner(nick)
plugins.autofollow.follow = ''
mapserv.cmsg_player_change_dest(x, y)
def cmd_pickup(nick, message, is_whisper, match):
if not is_whisper:
return
commands.pickup()
def cmd_drop(nick, message, is_whisper, match):
if not is_whisper:
return
try:
amount = int(match.group(1))
item_id = int(match.group(2))
except ValueError:
return
if nick not in admins:
if item_id not in allowed_drops:
return
index = get_item_index(item_id)
if index > 0:
mapserv.cmsg_player_inventory_drop(index, amount)
def cmd_item_action(nick, message, is_whisper, match):
if not is_whisper:
return
try:
itemId = int(match.group(1))
except ValueError:
return
index = get_item_index(itemId)
if index <= 0:
return
if message.startswith('!equip'):
mapserv.cmsg_player_equip(index)
elif message.startswith('!unequip'):
mapserv.cmsg_player_unequip(index)
elif message.startswith('!use'):
mapserv.cmsg_player_inventory_use(index, itemId)
def cmd_emote(nick, message, is_whisper, match):
if not is_whisper:
return
try:
emote = int(match.group(1))
except ValueError:
return
mapserv.cmsg_player_emote(emote)
def cmd_attack(nick, message, is_whisper, match):
if not is_whisper:
return
target_s = match.group(1)
try:
target = mapserv.beings_cache[int(target_s)]
except (ValueError, KeyError):
target = find_nearest_being(name=target_s,
ignored_ids=walkto.unreachable_ids)
if target is not None:
set_npc_owner(nick)
plugins.autofollow.follow = ''
walkto.walkto_and_action(target, 'attack')
def cmd_say(nick, message, is_whisper, match):
if not is_whisper:
return
msg = match.group(1)
whisper(nick, msg)
def cmd_sit(nick, message, is_whisper, match):
if not is_whisper:
return
plugins.autofollow.follow = ''
mapserv.cmsg_player_change_act(0, 2)
def cmd_turn(nick, message, is_whisper, match):
if not is_whisper:
return
commands.set_direction('', message[6:])
def cmd_follow(nick, message, is_whisper, match):
if not is_whisper:
return
if plugins.autofollow.follow == nick:
plugins.autofollow.follow = ''
else:
set_npc_owner(nick)
plugins.autofollow.follow = nick
def cmd_lvlup(nick, message, is_whisper, match):
if not is_whisper:
return
stat = match.group(1).lower()
stats = {'str': 13, 'agi': 14, 'vit': 15,
'int': 16, 'dex': 17, 'luk': 18}
skills = {'mallard': 45, 'brawling': 350, 'speed': 352,
'astral': 354, 'raging': 355, 'resist': 353}
if stat in stats:
mapserv.cmsg_stat_update_request(stats[stat], 1)
elif stat in skills:
mapserv.cmsg_skill_levelup_request(skills[stat])
def cmd_invlist(nick, message, is_whisper, match):
if not is_whisper:
return
ls = status.invlists(50)
for l in ls:
whisper(nick, l)
def cmd_inventory(nick, message, is_whisper, match):
if not is_whisper:
return
ls = status.invlists2(255)
for l in ls:
whisper(nick, l)
def cmd_status(nick, message, is_whisper, match):
if not is_whisper:
return
all_stats = ('stats', 'hpmp', 'weight', 'points',
'zeny', 'attack', 'skills')
sr = status.stats_repr(*all_stats)
whisper(nick, ' | '.join(sr.values()))
def cmd_zeny(nick, message, is_whisper, match):
if not is_whisper:
return
whisper(nick, 'I have {} GP'.format(mapserv.player_money))
def cmd_talk2npc(nick, message, is_whisper, match):
if not is_whisper:
return
npc_s = match.group(1)
jobs = []
name = ''
try:
jobs = [int(npc_s)]
except ValueError:
name = npc_s
b = find_nearest_being(name=name, type='npc', allowed_jobs=jobs)
if b is None:
return
set_npc_owner(nick)
plugins.autofollow.follow = ''
plugins.npc.npc_id = b.id
mapserv.cmsg_npc_talk(b.id)
def cmd_input(nick, message, is_whisper, match):
if not is_whisper:
return
plugins.npc.cmd_npcinput('', match.group(1))
def cmd_close(nick, message, is_whisper, match):
if not is_whisper:
return
if storage_is_open:
reset_storage()
else:
plugins.npc.cmd_npcclose()
def cmd_history(nick, message, is_whisper, match):
if not is_whisper:
return
for user, cmd in history:
whisper(nick, '{} : {}'.format(user, cmd))
def cmd_store(nick, message, is_whisper, match):
if not is_whisper:
return
if not storage_is_open:
return
try:
amount = int(match.group(1))
item_id = int(match.group(2))
except ValueError:
return
index = get_item_index(item_id)
if index > 0:
mapserv.cmsg_move_to_storage(index, amount)
def cmd_retrieve(nick, message, is_whisper, match):
if not is_whisper:
return
if not storage_is_open:
return
try:
amount = int(match.group(1))
item_id = int(match.group(2))
except ValueError:
return
index = get_storage_index(item_id)
if index > 0:
mapserv.cmsg_move_from_storage(index, amount)
def cmd_help(nick, message, is_whisper, match):
if not is_whisper:
return
    m = ('[@@https://forums.themanaworld.org/viewtopic.php?f=12&t=19673|Forum@@] '
'[@@https://bitbucket.org/rumly111/manachat|Sources@@] '
'Try !commands for list of commands')
whisper(nick, m)
def cmd_commands(nick, message, is_whisper, match):
if not is_whisper:
return
c = []
for cmd in manaboy_commands:
if cmd.startswith('!('):
br = cmd.index(')')
c.extend(cmd[2:br].split('|'))
elif cmd.startswith('!'):
c.append(cmd[1:].split()[0])
c.sort()
whisper(nick, ', '.join(c))
def reset_storage():
mapserv.cmsg_storage_close()
mapserv.cmsg_npc_list_choice(plugins.npc.npc_id, 6)
# =========================================================================
def manaboy_logic(ts):
def reset():
global npc_owner
npc_owner = ''
npcdialog['start_time'] = -1
plugins.npc.cmd_npcinput('', '6')
# plugins.npc.cmd_npcclose()
if storage_is_open and ts > _times['storage'] + 150:
reset_storage()
if npcdialog['start_time'] <= 0:
return
if not storage_is_open and ts > npcdialog['start_time'] + 30.0:
reset()
# =========================================================================
manaboy_commands = {
'!where' : cmd_where,
'!goto (\d+) (\d+)' : cmd_goto,
'!(left|right|up|down)' : cmd_goclose,
'!pickup' : cmd_pickup,
'!drop (\d+) (\d+)' : cmd_drop,
'!equip (\d+)' : cmd_item_action,
'!unequip (\d+)' : cmd_item_action,
'!use (\d+)' : cmd_item_action,
'!emote (\d+)' : cmd_emote,
'!attack (.+)' : cmd_attack,
'!say ((@|#).+)' : cmd_say,
'!sit' : cmd_sit,
'!turn' : cmd_turn,
'!follow' : cmd_follow,
'!lvlup (\w+)' : cmd_lvlup,
'!inventory' : cmd_inventory,
'!invlist' : cmd_invlist,
'!status' : cmd_status,
'!zeny' : cmd_zeny,
'!talk2npc (\w+)' : cmd_talk2npc,
'!input (.+)' : cmd_input,
'!close' : cmd_close,
'!store (\d+) (\d+)' : cmd_store,
'!retrieve (\d+) (\d+)' : cmd_retrieve,
'!(help|info)' : cmd_help,
'!commands' : cmd_commands,
'!history' : cmd_history,
}
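# Example (values invented): a whisper "!goto 42 37" matches the
# '!goto (\d+) (\d+)' pattern above, so cmd_goto is called with a match
# object where group(1) == '42' and group(2) == '37'.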
def chatbot_answer_mod(func):
'''modifies chatbot.answer to remember last 10 commands'''
def mb_answer(nick, message, is_whisper):
if is_whisper:
history.append((nick, message))
return func(nick, message, is_whisper)
return mb_answer
def init(config):
for cmd, action in manaboy_commands.items():
plugins.chatbot.add_command(cmd, action)
plugins.chatbot.answer = chatbot_answer_mod(plugins.chatbot.answer)
logicmanager.logic_manager.add_logic(manaboy_logic)
| mekolat/manachat | plugins/manaboy.py | Python | gpl-2.0 | 11,836 |
"""
WSGI config for geodjango_smurfs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geodjango_smurfs.settings")
application = get_wsgi_application()
| cyberbikepunk/smurfs | geodjango_smurfs/wsgi.py | Python | gpl-2.0 | 409 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import os
import random
import re
import shlex
import string
import sys
import time
from twisted.cred import credentials
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import task
from twisted.internet import utils
from twisted.python import log
from twisted.python import runtime
from twisted.python.procutils import which
from twisted.spread import pb
from buildbot.process.results import SUCCESS
from buildbot.process.results import Results
from buildbot.util import bytes2unicode
from buildbot.util import now
from buildbot.util import unicode2bytes
from buildbot.util.eventual import fireEventually
class SourceStamp:
def __init__(self, branch, revision, patch, repository=''):
self.branch = branch
self.revision = revision
self.patch = patch
self.repository = repository
def output(*msg):
    print(' '.join([str(m) for m in msg]))
class SourceStampExtractor:
def __init__(self, treetop, branch, repository):
self.treetop = treetop
self.repository = repository
self.branch = branch
exes = which(self.vcexe)
if not exes:
output("Could not find executable '{}'.".format(self.vcexe))
sys.exit(1)
self.exe = exes[0]
def dovc(self, cmd):
"""This accepts the arguments of a command, without the actual
command itself."""
env = os.environ.copy()
env['LC_ALL'] = "C"
d = utils.getProcessOutputAndValue(self.exe, cmd, env=env,
path=self.treetop)
d.addCallback(self._didvc, cmd)
return d
def _didvc(self, res, cmd):
(stdout, stderr, code) = res
# 'bzr diff' sets rc=1 if there were any differences.
# cvs does something similar, so don't bother requiring rc=0.
return stdout
def get(self):
"""Return a Deferred that fires with a SourceStamp instance."""
d = self.getBaseRevision()
d.addCallback(self.getPatch)
d.addCallback(self.done)
return d
def readPatch(self, diff, patchlevel):
if not diff:
diff = None
self.patch = (patchlevel, bytes2unicode(diff))
def done(self, res):
if not self.repository:
self.repository = self.treetop
# TODO: figure out the branch and project too
ss = SourceStamp(bytes2unicode(self.branch), self.baserev, self.patch,
repository=self.repository)
return ss
class CVSExtractor(SourceStampExtractor):
patchlevel = 0
vcexe = "cvs"
def getBaseRevision(self):
# this depends upon our local clock and the repository's clock being
# reasonably synchronized with each other. We express everything in
# UTC because the '%z' format specifier for strftime doesn't always
# work.
self.baserev = time.strftime("%Y-%m-%d %H:%M:%S +0000",
time.gmtime(now()))
return defer.succeed(None)
def getPatch(self, res):
# the -q tells CVS to not announce each directory as it works
if self.branch is not None:
# 'cvs diff' won't take both -r and -D at the same time (it
# ignores the -r). As best I can tell, there is no way to make
# cvs give you a diff relative to a timestamp on the non-trunk
# branch. A bare 'cvs diff' will tell you about the changes
# relative to your checked-out versions, but I know of no way to
# find out what those checked-out versions are.
output("Sorry, CVS 'try' builds don't work with branches")
sys.exit(1)
args = ['-q', 'diff', '-u', '-D', self.baserev]
d = self.dovc(args)
d.addCallback(self.readPatch, self.patchlevel)
return d
class SVNExtractor(SourceStampExtractor):
patchlevel = 0
vcexe = "svn"
def getBaseRevision(self):
d = self.dovc(["status", "-u"])
d.addCallback(self.parseStatus)
return d
def parseStatus(self, res):
# svn shows the base revision for each file that has been modified or
# which needs an update. You can update each file to a different
# version, so each file is displayed with its individual base
# revision. It also shows the repository-wide latest revision number
# on the last line ("Status against revision: \d+").
# for our purposes, we use the latest revision number as the "base"
# revision, and get a diff against that. This means we will get
# reverse-diffs for local files that need updating, but the resulting
# tree will still be correct. The only weirdness is that the baserev
# that we emit may be different than the version of the tree that we
# first checked out.
# to do this differently would probably involve scanning the revision
# numbers to find the max (or perhaps the min) revision, and then
# using that as a base.
for line in res.split(b"\n"):
m = re.search(br'^Status against revision:\s+(\d+)', line)
if m:
self.baserev = m.group(1)
return
output(
b"Could not find 'Status against revision' in SVN output: " + res)
sys.exit(1)
def getPatch(self, res):
d = self.dovc(["diff", "-r{}".format(self.baserev)])
d.addCallback(self.readPatch, self.patchlevel)
return d
class BzrExtractor(SourceStampExtractor):
patchlevel = 0
vcexe = "bzr"
def getBaseRevision(self):
d = self.dovc(["revision-info", "-rsubmit:"])
d.addCallback(self.get_revision_number)
return d
def get_revision_number(self, out):
revno, revid = out.split()
self.baserev = 'revid:' + revid
return
def getPatch(self, res):
d = self.dovc(["diff", "-r{}..".format(self.baserev)])
d.addCallback(self.readPatch, self.patchlevel)
return d
class MercurialExtractor(SourceStampExtractor):
patchlevel = 1
vcexe = "hg"
def _didvc(self, res, cmd):
(stdout, stderr, code) = res
if code:
cs = ' '.join(['hg'] + cmd)
if stderr:
stderr = '\n' + stderr.rstrip()
raise RuntimeError("{} returned {} {}".format(cs, code, stderr))
return stdout
@defer.inlineCallbacks
def getBaseRevision(self):
upstream = ""
if self.repository:
upstream = "r'{}'".format(self.repository)
output = ''
try:
output = yield self.dovc(["log", "--template", "{node}\\n", "-r",
"max(::. - outgoing({}))".format(upstream)])
except RuntimeError:
# outgoing() will abort if no default-push/default path is
# configured
if upstream:
raise
# fall back to current working directory parent
output = yield self.dovc(["log", "--template", "{node}\\n", "-r", "p1()"])
m = re.search(br'^(\w+)', output)
if not m:
raise RuntimeError(
"Revision {!r} is not in the right format".format(output))
self.baserev = m.group(0)
def getPatch(self, res):
d = self.dovc(["diff", "-r", self.baserev])
d.addCallback(self.readPatch, self.patchlevel)
return d
class PerforceExtractor(SourceStampExtractor):
patchlevel = 0
vcexe = "p4"
def getBaseRevision(self):
d = self.dovc(["changes", "-m1", "..."])
d.addCallback(self.parseStatus)
return d
def parseStatus(self, res):
#
# extract the base change number
#
m = re.search(br'Change (\d+)', res)
if m:
self.baserev = m.group(1)
return
output(b"Could not find change number in output: " + res)
sys.exit(1)
def readPatch(self, res, patchlevel):
#
# extract the actual patch from "res"
#
if not self.branch:
output("you must specify a branch")
sys.exit(1)
mpatch = ""
found = False
for line in res.split("\n"):
m = re.search('==== //depot/' + self.branch
+ r'/([\w/\.\d\-_]+)#(\d+) -', line)
if m:
mpatch += "--- {}#{}\n".format(m.group(1), m.group(2))
mpatch += "+++ {}\n".format(m.group(1))
found = True
else:
mpatch += line
mpatch += "\n"
if not found:
output(b"could not parse patch file")
sys.exit(1)
self.patch = (patchlevel, mpatch)
def getPatch(self, res):
d = self.dovc(["diff"])
d.addCallback(self.readPatch, self.patchlevel)
return d
class DarcsExtractor(SourceStampExtractor):
patchlevel = 1
vcexe = "darcs"
def getBaseRevision(self):
d = self.dovc(["changes", "--context"])
d.addCallback(self.parseStatus)
return d
def parseStatus(self, res):
self.baserev = res # the whole context file
def getPatch(self, res):
d = self.dovc(["diff", "-u"])
d.addCallback(self.readPatch, self.patchlevel)
return d
class GitExtractor(SourceStampExtractor):
patchlevel = 1
vcexe = "git"
config = None
def getBaseRevision(self):
# If a branch is specified, parse out the rev it points to
# and extract the local name.
if self.branch:
d = self.dovc(["rev-parse", self.branch])
d.addCallback(self.override_baserev)
d.addCallback(self.extractLocalBranch)
return d
d = self.dovc(["branch", "--no-color", "-v", "--no-abbrev"])
d.addCallback(self.parseStatus)
return d
# remove remote-prefix from self.branch (assumes format <prefix>/<branch>)
# this uses "git remote" to retrieve all configured remote names
def extractLocalBranch(self, res):
if '/' in self.branch:
d = self.dovc(["remote"])
d.addCallback(self.fixBranch)
return d
return None
# strip remote prefix from self.branch
def fixBranch(self, remotes):
for l in bytes2unicode(remotes).split("\n"):
r = l.strip()
if r and self.branch.startswith(r + "/"):
self.branch = self.branch[len(r) + 1:]
break
def readConfig(self):
if self.config:
return defer.succeed(self.config)
d = self.dovc(["config", "-l"])
d.addCallback(self.parseConfig)
return d
def parseConfig(self, res):
self.config = {}
for l in res.split(b"\n"):
if l.strip():
parts = l.strip().split(b"=", 2)
if len(parts) < 2:
parts.append('true')
self.config[parts[0]] = parts[1]
return self.config
def parseTrackingBranch(self, res):
        # If we're tracking a remote, consider that the base revision.
remote = self.config.get(b"branch." + self.branch + b".remote")
ref = self.config.get(b"branch." + self.branch + b".merge")
if remote and ref:
remote_branch = ref.split(b"/", 2)[-1]
baserev = remote + b"/" + remote_branch
else:
baserev = b"master"
d = self.dovc(["rev-parse", baserev])
d.addCallback(self.override_baserev)
return d
def override_baserev(self, res):
self.baserev = bytes2unicode(res).strip()
def parseStatus(self, res):
# The current branch is marked by '*' at the start of the
# line, followed by the branch name and the SHA1.
#
# Branch names may contain pretty much anything but whitespace.
m = re.search(br'^\* (\S+)\s+([0-9a-f]{40})', res, re.MULTILINE)
if m:
self.baserev = m.group(2)
self.branch = m.group(1)
d = self.readConfig()
d.addCallback(self.parseTrackingBranch)
return d
output(b"Could not find current GIT branch: " + res)
sys.exit(1)
def getPatch(self, res):
d = self.dovc(["diff", "--src-prefix=a/", "--dst-prefix=b/",
"--no-textconv", "--no-ext-diff", self.baserev])
d.addCallback(self.readPatch, self.patchlevel)
return d
class MonotoneExtractor(SourceStampExtractor):
patchlevel = 0
vcexe = "mtn"
def getBaseRevision(self):
d = self.dovc(["automate", "get_base_revision_id"])
d.addCallback(self.parseStatus)
return d
def parseStatus(self, output):
hash = output.strip()
        if len(hash) != 40:
            # not a 40-character revision id; treat the base as unknown
            self.baserev = None
            return
        self.baserev = hash
def getPatch(self, res):
d = self.dovc(["diff"])
d.addCallback(self.readPatch, self.patchlevel)
return d
def getSourceStamp(vctype, treetop, branch=None, repository=None):
if vctype == "cvs":
cls = CVSExtractor
elif vctype == "svn":
cls = SVNExtractor
elif vctype == "bzr":
cls = BzrExtractor
elif vctype == "hg":
cls = MercurialExtractor
elif vctype == "p4":
cls = PerforceExtractor
elif vctype == "darcs":
cls = DarcsExtractor
elif vctype == "git":
cls = GitExtractor
elif vctype == "mtn":
cls = MonotoneExtractor
elif vctype == "none":
return defer.succeed(SourceStamp("", "", (1, ""), ""))
else:
output("unknown vctype '{}'".format(vctype))
sys.exit(1)
return cls(treetop, branch, repository).get()
def ns(s):
return "{}:{},".format(len(s), s)
def createJobfile(jobid, branch, baserev, patch_level, patch_body, repository,
project, who, comment, builderNames, properties):
# Determine job file version from provided arguments
if properties:
version = 5
elif comment:
version = 4
elif who:
version = 3
else:
version = 2
job = ""
job += ns(str(version))
if version < 5:
job += ns(jobid)
job += ns(branch)
job += ns(str(baserev))
job += ns("{}".format(patch_level))
job += ns(patch_body or "")
job += ns(repository)
job += ns(project)
if (version >= 3):
job += ns(who)
if (version >= 4):
job += ns(comment)
for bn in builderNames:
job += ns(bn)
else:
job += ns(
json.dumps({
'jobid': jobid, 'branch': branch, 'baserev': str(baserev),
'patch_level': patch_level, 'patch_body': patch_body,
'repository': repository, 'project': project, 'who': who,
'comment': comment, 'builderNames': builderNames,
'properties': properties,
}))
return job
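# Illustrative encoding (all values invented): with who, comment and
# properties empty the version-2 format is selected, so
#     createJobfile('17', 'trunk', '1234', 1, 'DIFF', '', '',
#                   None, None, ['b1'], None)
# returns "1:2,2:17,5:trunk,4:1234,1:1,4:DIFF,0:,0:,2:b1,"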
def getTopdir(topfile, start=None):
"""walk upwards from the current directory until we find this topfile"""
if not start:
start = os.getcwd()
here = start
toomany = 20
while toomany > 0:
if os.path.exists(os.path.join(here, topfile)):
return here
next = os.path.dirname(here)
if next == here:
break # we've hit the root
here = next
toomany -= 1
output("Unable to find topfile '{}' anywhere "
"from {} upwards".format(topfile, start))
sys.exit(1)
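# Example (hypothetical paths): getTopdir('.buildbot', '/src/proj/subdir')
# returns '/src/proj' if '/src/proj/.buildbot' exists; otherwise it keeps
# walking upwards, and exits after 20 levels or on reaching the root.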
class RemoteTryPP(protocol.ProcessProtocol):
def __init__(self, job):
self.job = job
self.d = defer.Deferred()
def connectionMade(self):
self.transport.write(unicode2bytes(self.job))
self.transport.closeStdin()
def outReceived(self, data):
sys.stdout.write(bytes2unicode(data))
def errReceived(self, data):
sys.stderr.write(bytes2unicode(data))
def processEnded(self, status_object):
sig = status_object.value.signal
rc = status_object.value.exitCode
if sig is not None or rc != 0:
self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
": sig={}, rc={}".format(sig, rc)))
return
self.d.callback((sig, rc))
class FakeBuildSetStatus:
def callRemote(self, name):
if name == "getBuildRequests":
return defer.succeed([])
raise NotImplementedError()
class Try(pb.Referenceable):
buildsetStatus = None
quiet = False
printloop = False
def __init__(self, config):
self.config = config
self.connect = self.getopt('connect')
if self.connect not in ['ssh', 'pb']:
output("you must specify a connect style: ssh or pb")
sys.exit(1)
self.builderNames = self.getopt('builders')
self.project = self.getopt('project', '')
self.who = self.getopt('who')
self.comment = self.getopt('comment')
def getopt(self, config_name, default=None):
value = self.config.get(config_name)
if value is None or value == []:
value = default
return value
def createJob(self):
# returns a Deferred which fires when the job parameters have been
# created
# generate a random (unique) string. It would make sense to add a
# hostname and process ID here, but a) I suspect that would cause
# windows portability problems, and b) really this is good enough
self.bsid = "{}-{}".format(time.time(), random.randint(0, 1000000))
# common options
branch = self.getopt("branch")
difffile = self.config.get("diff")
if difffile:
baserev = self.config.get("baserev")
if difffile == "-":
diff = sys.stdin.read()
else:
with open(difffile, "r") as f:
diff = f.read()
if not diff:
diff = None
patch = (self.config['patchlevel'], diff)
ss = SourceStamp(
branch, baserev, patch, repository=self.getopt("repository"))
d = defer.succeed(ss)
else:
vc = self.getopt("vc")
if vc in ("cvs", "svn"):
# we need to find the tree-top
topdir = self.getopt("topdir")
if topdir:
treedir = os.path.expanduser(topdir)
else:
topfile = self.getopt("topfile")
if topfile:
treedir = getTopdir(topfile)
else:
output("Must specify topdir or topfile.")
sys.exit(1)
else:
treedir = os.getcwd()
d = getSourceStamp(vc, treedir, branch, self.getopt("repository"))
d.addCallback(self._createJob_1)
return d
def _createJob_1(self, ss):
self.sourcestamp = ss
patchlevel, diff = ss.patch
if diff is None:
raise RuntimeError("There is no patch to try, diff is empty.")
if self.connect == "ssh":
revspec = ss.revision
if revspec is None:
revspec = ""
self.jobfile = createJobfile(
self.bsid, ss.branch or "", revspec, patchlevel, diff,
ss.repository, self.project, self.who, self.comment,
self.builderNames, self.config.get('properties', {}))
def fakeDeliverJob(self):
# Display the job to be delivered, but don't perform delivery.
ss = self.sourcestamp
output("Job:\n\tRepository: {}\n\tProject: {}\n\tBranch: {}\n\t"
"Revision: {}\n\tBuilders: {}\n{}".format(
ss.repository, self.project, ss.branch,
ss.revision,
self.builderNames,
ss.patch[1]))
self.buildsetStatus = FakeBuildSetStatus()
d = defer.Deferred()
d.callback(True)
return d
def deliver_job_ssh(self):
tryhost = self.getopt("host")
tryport = self.getopt("port")
tryuser = self.getopt("username")
trydir = self.getopt("jobdir")
buildbotbin = self.getopt("buildbotbin")
ssh_command = self.getopt("ssh")
if not ssh_command:
ssh_commands = which("ssh")
if not ssh_commands:
raise RuntimeError("couldn't find ssh executable, make sure "
"it is available in the PATH")
argv = [ssh_commands[0]]
else:
# Split the string on whitespace to allow passing options in
# ssh command too, but preserving whitespace inside quotes to
# allow using paths with spaces in them which is common under
# Windows. And because Windows uses backslashes in paths, we
# can't just use shlex.split there as it would interpret them
# specially, so do it by hand.
if runtime.platformType == 'win32':
# Note that regex here matches the arguments, not the
# separators, as it's simpler to do it like this. And then we
# just need to get all of them together using the slice and
# also remove the quotes from those that were quoted.
                argv = [a.strip('"') for a in
                        re.split(r'''([^" ]+|"[^"]+")''', ssh_command)[1::2]]
else:
# Do use standard tokenization logic under POSIX.
argv = shlex.split(ssh_command)
if tryuser:
argv += ["-l", tryuser]
if tryport:
argv += ["-p", tryport]
argv += [tryhost, buildbotbin, "tryserver", "--jobdir", trydir]
pp = RemoteTryPP(self.jobfile)
reactor.spawnProcess(pp, argv[0], argv, os.environ)
d = pp.d
return d
@defer.inlineCallbacks
def deliver_job_pb(self):
user = self.getopt("username")
passwd = self.getopt("passwd")
master = self.getopt("master")
tryhost, tryport = master.split(":")
tryport = int(tryport)
f = pb.PBClientFactory()
d = f.login(credentials.UsernamePassword(unicode2bytes(user), unicode2bytes(passwd)))
reactor.connectTCP(tryhost, tryport, f)
remote = yield d
ss = self.sourcestamp
output("Delivering job; comment=", self.comment)
self.buildsetStatus = \
yield remote.callRemote("try", ss.branch, ss.revision, ss.patch, ss.repository,
self.project, self.builderNames, self.who, self.comment,
self.config.get('properties', {}))
def deliverJob(self):
# returns a Deferred that fires when the job has been delivered
if self.connect == "ssh":
return self.deliver_job_ssh()
if self.connect == "pb":
return self.deliver_job_pb()
raise RuntimeError("unknown connecttype '{}', "
"should be 'ssh' or 'pb'".format(self.connect))
def getStatus(self):
# returns a Deferred that fires when the builds have finished, and
# may emit status messages while we wait
wait = bool(self.getopt("wait"))
if not wait:
output("not waiting for builds to finish")
elif self.connect == "ssh":
output("waiting for builds with ssh is not supported")
else:
self.running = defer.Deferred()
if not self.buildsetStatus:
output("try scheduler on the master does not have the builder configured")
return None
self._getStatus_1() # note that we don't wait for the returned Deferred
if bool(self.config.get("dryrun")):
self.statusDone()
return self.running
return None
@defer.inlineCallbacks
def _getStatus_1(self):
# gather the set of BuildRequests
brs = yield self.buildsetStatus.callRemote("getBuildRequests")
self.builderNames = []
self.buildRequests = {}
# self.builds holds the current BuildStatus object for each one
self.builds = {}
# self.outstanding holds the list of builderNames which haven't
# finished yet
self.outstanding = []
# self.results holds the list of build results. It holds a tuple of
# (result, text)
self.results = {}
# self.currentStep holds the name of the Step that each build is
# currently running
self.currentStep = {}
# self.ETA holds the expected finishing time (absolute time since
# epoch)
self.ETA = {}
for n, br in brs:
self.builderNames.append(n)
self.buildRequests[n] = br
self.builds[n] = None
self.outstanding.append(n)
self.results[n] = [None, None]
self.currentStep[n] = None
self.ETA[n] = None
# get new Builds for this buildrequest. We follow each one until
# it finishes or is interrupted.
br.callRemote("subscribe", self)
# now that those queries are in transit, we can start the
# display-status-every-30-seconds loop
if not self.getopt("quiet"):
self.printloop = task.LoopingCall(self.printStatus)
self.printloop.start(3, now=False)
# these methods are invoked by the status objects we've subscribed to
def remote_newbuild(self, bs, builderName):
if self.builds[builderName]:
self.builds[builderName].callRemote("unsubscribe", self)
self.builds[builderName] = bs
bs.callRemote("subscribe", self, 20)
d = bs.callRemote("waitUntilFinished")
d.addCallback(self._build_finished, builderName)
def remote_stepStarted(self, buildername, build, stepname, step):
self.currentStep[buildername] = stepname
def remote_stepFinished(self, buildername, build, stepname, step, results):
pass
def remote_buildETAUpdate(self, buildername, build, eta):
self.ETA[buildername] = now() + eta
@defer.inlineCallbacks
def _build_finished(self, bs, builderName):
# we need to collect status from the newly-finished build. We don't
# remove the build from self.outstanding until we've collected
# everything we want.
self.builds[builderName] = None
self.ETA[builderName] = None
self.currentStep[builderName] = "finished"
self.results[builderName][0] = yield bs.callRemote("getResults")
self.results[builderName][1] = yield bs.callRemote("getText")
self.outstanding.remove(builderName)
if not self.outstanding:
self.statusDone()
def printStatus(self):
try:
names = sorted(self.buildRequests.keys())
for n in names:
if n not in self.outstanding:
# the build is finished, and we have results
code, text = self.results[n]
t = Results[code]
if text:
t += " ({})".format(" ".join(text))
elif self.builds[n]:
t = self.currentStep[n] or "building"
if self.ETA[n]:
t += " [ETA {}s]".format(self.ETA[n] - now())
else:
t = "no build"
self.announce("{}: {}".format(n, t))
self.announce("")
except Exception:
log.err(None, "printing status")
def statusDone(self):
if self.printloop:
self.printloop.stop()
self.printloop = None
output("All Builds Complete")
# TODO: include a URL for all failing builds
names = sorted(self.buildRequests.keys())
happy = True
for n in names:
code, text = self.results[n]
t = "{}: {}".format(n, Results[code])
if text:
t += " ({})".format(" ".join(text))
output(t)
if code != SUCCESS:
happy = False
if happy:
self.exitcode = 0
else:
self.exitcode = 1
self.running.callback(self.exitcode)
@defer.inlineCallbacks
def getAvailableBuilderNames(self):
# This logs into the master using the PB protocol to
# get the names of the configured builders that can
# be used for the --builder argument
if self.connect == "pb":
user = self.getopt("username")
passwd = self.getopt("passwd")
master = self.getopt("master")
tryhost, tryport = master.split(":")
tryport = int(tryport)
f = pb.PBClientFactory()
d = f.login(credentials.UsernamePassword(unicode2bytes(user), unicode2bytes(passwd)))
reactor.connectTCP(tryhost, tryport, f)
remote = yield d
buildernames = yield remote.callRemote("getAvailableBuilderNames")
output("The following builders are available for the try scheduler: ")
for buildername in buildernames:
output(buildername)
yield remote.broker.transport.loseConnection()
return
if self.connect == "ssh":
output("Cannot get available builders over ssh.")
sys.exit(1)
raise RuntimeError(
"unknown connecttype '{}', should be 'pb'".format(self.connect))
def announce(self, message):
if not self.quiet:
output(message)
@defer.inlineCallbacks
def run_impl(self):
output("using '{}' connect method".format(self.connect))
self.exitcode = 0
# we can't do spawnProcess until we're inside reactor.run(), so force asynchronous execution
yield fireEventually(None)
try:
if bool(self.config.get("get-builder-names")):
yield self.getAvailableBuilderNames()
else:
yield self.createJob()
yield self.announce("job created")
if bool(self.config.get("dryrun")):
yield self.fakeDeliverJob()
else:
yield self.deliverJob()
yield self.announce("job has been delivered")
yield self.getStatus()
if not bool(self.config.get("dryrun")):
yield self.cleanup()
except SystemExit as e:
self.exitcode = e.code
except Exception as e:
log.err(e)
raise
def run(self):
d = self.run_impl()
d.addCallback(lambda res: reactor.stop())
reactor.run()
sys.exit(self.exitcode)
def trapSystemExit(self, why):
why.trap(SystemExit)
self.exitcode = why.value.code
def cleanup(self, res=None):
if self.buildsetStatus:
self.buildsetStatus.broker.transport.loseConnection()
| cmouse/buildbot | master/buildbot/clients/tryclient.py | Python | gpl-2.0 | 32,055 |