| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses (1 value) | stringclasses (15 values) | int32 2–1.05M |
# Generated by Django 2.2.12 on 2020-05-09 06:28
from django.db import migrations


# Can't use fixtures because load_fixtures method is janky with django-tenant-schemas
def load_initial_data(apps, schema_editor):
    Grade = apps.get_model('courses', 'Grade')
    # add some initial data if none has been created yet
    if not Grade.objects.exists():
        Grade.objects.create(
            name="8",
            value=8
        )
        Grade.objects.create(
            name="9",
            value=9
        )
        Grade.objects.create(
            name="10",
            value=10
        )
        Grade.objects.create(
            name="11",
            value=11
        )
        Grade.objects.create(
            name="12",
            value=12
        )


class Migration(migrations.Migration):

    dependencies = [
        ('courses', '0015_auto_20200508_1957'),
    ]

    operations = [
        migrations.RunPython(load_initial_data),
    ]
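
# Hedged sketch (added for illustration, not part of the original migration):
# migrations.RunPython also accepts a reverse function, which would let this
# data migration be unapplied cleanly; a minimal no-op reverse would be:
#
#   def unload_initial_data(apps, schema_editor):
#       pass  # keep the seeded grades on rollback
#
#   operations = [
#       migrations.RunPython(load_initial_data, unload_initial_data),
#   ]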
| timberline-secondary/hackerspace | src/courses/migrations/0016_grades_initialdata.py | Python | gpl-3.0 | 963 |
import re
import sys
import traceback
import copy
import json
from distutils import file_util

from grammalecte.echo import echo


DEF = {}
FUNCTIONS = []
JSREGEXES = {}

WORDLIMITLEFT = r"(?<![\w.,–-])"   # r"(?<![-.,—])\b" seems slower
WORDLIMITRIGHT = r"(?![\w–-])"     # r"\b(?!-—)" seems slower


def prepare_for_eval (s):
    s = re.sub(r"(select|exclude)[(][\\](\d+)", '\\1(dDA, m.start(\\2), m.group(\\2)', s)
    s = re.sub(r"define[(][\\](\d+)", 'define(dDA, m.start(\\1)', s)
    s = re.sub(r"(morph|morphex|displayInfo)[(][\\](\d+)", '\\1((m.start(\\2), m.group(\\2))', s)
    s = re.sub(r"(morph|morphex|displayInfo)[(]", '\\1(dDA, ', s)
    s = re.sub(r"(sugg\w+|switch\w+)\(@", '\\1(m.group(i[4])', s)
    s = re.sub(r"word\(\s*1\b", 'nextword1(s, m.end()', s)                       # word(1)
    s = re.sub(r"word\(\s*-1\b", 'prevword1(s, m.start()', s)                    # word(-1)
    s = re.sub(r"word\(\s*(\d)", 'nextword(s, m.end(), \\1', s)                  # word(n)
    s = re.sub(r"word\(\s*-(\d)", 'prevword(s, m.start(), \\1', s)               # word(-n)
    s = re.sub(r"before\(\s*", 'look(s[:m.start()], ', s)                        # before(s)
    s = re.sub(r"after\(\s*", 'look(s[m.end():], ', s)                           # after(s)
    s = re.sub(r"textarea\(\s*", 'look(s, ', s)                                  # textarea(s)
    s = re.sub(r"before_chk1\(\s*", 'look_chk1(dDA, s[:m.start()], 0, ', s)      # before_chk1(s)
    s = re.sub(r"after_chk1\(\s*", 'look_chk1(dDA, s[m.end():], m.end(), ', s)   # after_chk1(s)
    s = re.sub(r"textarea_chk1\(\s*", 'look_chk1(dDA, s, 0, ', s)                # textarea_chk1(s)
    s = re.sub(r"before0\(\s*", 'look(sx[:m.start()], ', s)                      # before0(s)
    s = re.sub(r"after0\(\s*", 'look(sx[m.end():], ', s)                         # after0(s)
    s = re.sub(r"textarea0\(\s*", 'look(sx, ', s)                                # textarea0(s)
    s = re.sub(r"before0_chk1\(\s*", 'look_chk1(dDA, sx[:m.start()], 0, ', s)    # before0_chk1(s)
    s = re.sub(r"after0_chk1\(\s*", 'look_chk1(dDA, sx[m.end():], m.end(), ', s) # after0_chk1(s)
    s = re.sub(r"textarea0_chk1\(\s*", 'look_chk1(dDA, sx, 0, ', s)              # textarea0_chk1(s)
    s = re.sub(r"isEndOfNG\(\s*\)", 'isEndOfNG(dDA, s[m.end():], m.end())', s)   # isEndOfNG(s)
    s = re.sub(r"\bspell *[(]", '_oDict.isValid(', s)
    s = re.sub(r"[\\](\d+)", 'm.group(\\1)', s)
    return s
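
# Hedged illustration (added comment, not in the original source): the
# substitutions above rewrite rule-file shorthand into evaluable Python, e.g.
#   prepare_for_eval(r"morph(\2, ':V') and word(1, 'de')")
# becomes roughly
#   "morph(dDA, (m.start(2), m.group(2)), ':V') and nextword1(s, m.end(), 'de')"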

def py2js (sCode):
    "convert Python code to JavaScript code"
    # Python 2.x unicode strings
    sCode = re.sub('\\b[ur]"', '"', sCode)
    sCode = re.sub("\\b[ur]'", "'", sCode)
    # operators
    sCode = sCode.replace(" and ", " && ")
    sCode = sCode.replace(" or ", " || ")
    sCode = re.sub("\\bnot\\b", "!", sCode)
    sCode = re.sub("(.+) if (.+) else (.+)", "\\2 ? \\1 : \\3", sCode)
    # boolean
    sCode = sCode.replace("False", "false")
    sCode = sCode.replace("True", "true")
    sCode = sCode.replace("bool", "Boolean")
    # methods
    sCode = sCode.replace(".endswith", ".endsWith")
    sCode = sCode.replace(".find", ".indexOf")
    sCode = sCode.replace(".startswith", ".startsWith")
    sCode = sCode.replace(".lower", ".toLowerCase")
    sCode = sCode.replace(".upper", ".toUpperCase")
    sCode = sCode.replace(".isdigit", "._isDigit")
    sCode = sCode.replace(".isupper", "._isUpperCase")
    sCode = sCode.replace(".islower", "._isLowerCase")
    sCode = sCode.replace(".istitle", "._isTitle")
    sCode = sCode.replace(".capitalize", "._toCapitalize")
    sCode = sCode.replace(".strip", "._trim")
    sCode = sCode.replace(".lstrip", "._trimLeft")
    sCode = sCode.replace(".rstrip", "._trimRight")
    sCode = sCode.replace('.replace("."', ".replace(/\./g")
    sCode = sCode.replace('.replace("..."', ".replace(/\.\.\./g")
    sCode = re.sub('.replace\("([^"]+)" ?,', ".replace(/\\1/g,", sCode)
    # regex
    sCode = re.sub('re.search\("([^"]+)", *(m.group\(\\d\))\)', "(\\2.search(/\\1/) >= 0)", sCode)
    sCode = re.sub(".search\\(/\\(\\?i\\)([^/]+)/\\) >= 0\\)", ".search(/\\1/i) >= 0)", sCode)
    sCode = re.sub('(look\\(sx?[][.a-z:()]*), "\\(\\?i\\)([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = re.sub('(look\\(sx?[][.a-z:()]*), "([^"]+)"', "\\1, /\\2/", sCode)
    sCode = re.sub('(look_chk1\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), "\\(\\?i\\)([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = re.sub('(look_chk1\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), "([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = sCode.replace("(?<!-)", "")  # todo
    # slices
    sCode = sCode.replace("[:m.start()]", ".slice(0,m.index)")
    sCode = sCode.replace("[m.end():]", ".slice(m.end[0])")
    sCode = re.sub("\\[(-?\\d+):(-?\\d+)\\]", ".slice(\\1,\\2)", sCode)
    sCode = re.sub("\\[(-?\\d+):\\]", ".slice(\\1)", sCode)
    sCode = re.sub("\\[:(-?\\d+)\\]", ".slice(0,\\1)", sCode)
    # regex matches
    sCode = sCode.replace(".end()", ".end[0]")
    sCode = sCode.replace(".start()", ".index")
    sCode = sCode.replace("m.group()", "m[0]")
    sCode = re.sub("\\.start\\((\\d+)\\)", ".start[\\1]", sCode)
    sCode = re.sub("m\\.group\\((\\d+)\\)", "m[\\1]", sCode)
    # tuples -> lists
    sCode = re.sub("\((m\.start\[\\d+\], m\[\\d+\])\)", "[\\1]", sCode)
    # regex
    sCode = sCode.replace("\w[\w-]+", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ-]+")
    sCode = sCode.replace(r"/\w/", "/[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ]/")
    sCode = sCode.replace(r"[\w-]", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ-]")
    sCode = sCode.replace(r"[\w,]", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ,]")
    return sCode
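
# Hedged illustration (added comment, not in the original source):
#   py2js('m.group(1).startswith("A") and not bCondMemo')
# yields roughly
#   'm[1].startsWith("A") && ! bCondMemo'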

def uppercase (s, sLang):
    "convert regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'."
    sUp = ""
    state = 0
    for i in range(0, len(s)):
        c = s[i]
        if c == "[":
            state = 1
        if state == 1 and c == "]":
            state = 0
        if c == "<" and i > 3 and s[i-3:i] == "(?P":
            state = 2
        if state == 2 and c == ">":
            state = 0
        if c == "?" and i > 0 and s[i-1:i] == "(" and s[i+1:i+2] != ":":
            state = 5
        if state == 5 and c == ")":
            state = 0
        if c.isalpha() and c.islower() and state == 0:
            if c == "i" and (sLang == "tr" or sLang == "az"):
                sUp += "[İ" + c + "]"
            else:
                sUp += "[" + c.upper() + c + "]"
        elif c.isalpha() and c.islower() and state == 1 and s[i+1:i+2] != "-":
            if s[i-1:i] == "-" and s[i-2:i-1].islower():  # [a-z] -> [a-zA-Z]
                sUp += c + s[i-2:i-1].upper() + "-" + c.upper()
            elif c == "i" and (sLang == "tr" or sLang == "az"):
                sUp += "İ" + c
            else:
                sUp += c.upper() + c
        else:
            sUp += c
        if c == "\\":
            state = 4
        elif state == 4:
            state = 0
    return sUp
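
# Hedged illustration (added comment, not in the original source), matching
# the behaviour described in the docstring above:
#   uppercase("foo", "fr")  ->  "[Ff][Oo][Oo]"
#   uppercase("Bar", "fr")  ->  "B[Aa][Rr]"
#   uppercase("si", "tr")   ->  "[Ss][İi]"   # Turkish dotted capital İ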

def countGroupInRegex (sRegex):
    try:
        return re.compile(sRegex).groups
    except:
        traceback.print_exc()
        echo(sRegex)
    return 0


def createRule (s, nIdLine, sLang, bParagraph):
    "returns rule as list [option name, regex, bCaseInsensitive, identifier, list of actions]"
    global JSREGEXES
    #### OPTIONS
    sRuleId = str(nIdLine) + ("p" if bParagraph else "s")
    sOption = False         # False or [a-z0-9]+ name
    tGroups = None          # code for groups positioning (only useful for JavaScript)
    cCaseMode = 'i'         # i: case insensitive,  s: case sensitive,  u: uppercasing allowed
    cWordLimitLeft = '['    # [: word limit, <: no specific limit
    cWordLimitRight = ']'   # ]: word limit, >: no specific limit
    m = re.match("^__([[<]\\w[]>])(/[a-zA-Z0-9]+|)__ *", s)
    if m:
        if m.group(1):
            cWordLimitLeft = m.group(1)[0]
            cCaseMode = m.group(1)[1]
            cWordLimitRight = m.group(1)[2]
        sOption = m.group(2)[1:] if m.group(2) else False
        s = s[m.end(0):]
    #### REGEX TRIGGER
    i = s.find(" <<-")
    if i == -1:
        print("# Error: no condition at line " + sRuleId)
        return None
    sRegex = s[:i].strip()
    s = s[i+4:]
    # JS groups positioning codes
    m = re.search("@@\\S+", sRegex)
    if m:
        tGroups = groupsPositioningCodeToList(sRegex[m.start()+2:])
        sRegex = sRegex[:m.start()].strip()
    # JS regex
    m = re.search("<js>.+</js>i?", sRegex)
    if m:
        JSREGEXES[sRuleId] = m.group(0)
        sRegex = sRegex[:m.start()].strip()
    if "<js>" in sRegex or "</js>" in sRegex:
        print("# Error: JavaScript regex not delimited at line " + sRuleId)
        return None
    # quotes ?
    if sRegex.startswith('"') and sRegex.endswith('"'):
        sRegex = sRegex[1:-1]
    ## definitions
    for sDef, sRepl in DEF.items():
        sRegex = sRegex.replace(sDef, sRepl)
    ## count number of groups (must be done before modifying the regex)
    nGroup = countGroupInRegex(sRegex)
    if nGroup > 0:
        if not tGroups:
            print("# warning: groups positioning code for JavaScript should be defined at line " + sRuleId)
        else:
            if nGroup != len(tGroups):
                print("# error: groups positioning code irrelevant at line " + sRuleId)
    ## word limit
    if cWordLimitLeft == '[' and not sRegex.startswith(("^", '’', "'", ",")):
        sRegex = WORDLIMITLEFT + sRegex
    if cWordLimitRight == ']' and not sRegex.endswith(("$", '’', "'", ",")):
        sRegex = sRegex + WORDLIMITRIGHT
    ## casing mode
    if cCaseMode == "i":
        bCaseInsensitive = True
        if not sRegex.startswith("(?i)"):
            sRegex = "(?i)" + sRegex
    elif cCaseMode == "s":
        bCaseInsensitive = False
        sRegex = sRegex.replace("(?i)", "")
    elif cCaseMode == "u":
        bCaseInsensitive = False
        sRegex = sRegex.replace("(?i)", "")
        sRegex = uppercase(sRegex, sLang)
    else:
        print("# Unknown case mode [" + cCaseMode + "] at line " + sRuleId)
    ## check regex
    try:
        z = re.compile(sRegex)
    except:
        print("# Regex error at line ", nIdLine)
        echo(sRegex)
        traceback.print_exc()
        return None
    ## groups in non grouping parenthesis
    for x in re.finditer("\(\?:[^)]*\([[\w -]", sRegex):
        print("# Warning: groups inside non grouping parenthesis in regex at line " + sRuleId)
    #### PARSE ACTIONS
    lActions = []
    nAction = 1
    for sAction in s.split(" <<- "):
        t = createAction(sRuleId + "_" + str(nAction), sAction, nGroup)
        nAction += 1
        if t:
            lActions.append(t)
    if not lActions:
        return None
    return [sOption, sRegex, bCaseInsensitive, sRuleId, lActions, tGroups]


def createAction (sIdAction, sAction, nGroup):
    "returns an action to perform as a tuple (condition, action type, action[, iGroup [, message, URL ]])"
    global FUNCTIONS
    m = re.search(r"([-~=])(\d*|)>> ", sAction)
    if not m:
        echo("# No action at line " + sIdAction)
        return None
    #### CONDITION
    sCondition = sAction[:m.start()].strip()
    if sCondition:
        sCondition = prepare_for_eval(sCondition)
        FUNCTIONS.append(("c"+sIdAction, sCondition))
        for x in re.finditer("[.](?:group|start|end)[(](\d+)[)]", sCondition):
            if int(x.group(1)) > nGroup:
                print("# Error in groups in condition at line " + sIdAction + " ("+str(nGroup)+" groups only)")
        if ".match" in sCondition:
            echo("# Error. JS compatibility. Don't use .match() in condition, use .search()")
        sCondition = "c"+sIdAction
    else:
        sCondition = None
    #### iGroup / positioning
    iGroup = int(m.group(2)) if m.group(2) else 0
    if iGroup > nGroup:
        echo("# Selected group > group number in regex at line " + sIdAction)
    #### ACTION
    sAction = sAction[m.end():].strip()
    cAction = m.group(1)
    if cAction == "-":
        ## error
        iMsg = sAction.find(" # ")
        sMsg = sAction[iMsg+3:].strip()
        sAction = sAction[:iMsg].strip()
        sURL = ""
        mURL = re.search("[|] *(https?://.*)", sMsg)
        if mURL:
            sURL = mURL.group(1).strip()
            sMsg = sMsg[:mURL.start(0)].strip()
        if sMsg[0:1] == "=":
            sMsg = prepare_for_eval(sMsg[1:])
            FUNCTIONS.append(("m"+sIdAction, sMsg))
            for x in re.finditer("group[(](\d+)[)]", sMsg):
                if int(x.group(1)) > nGroup:
                    print("# error in groups in message at line " + sIdAction + " ("+str(nGroup)+" groups only)")
            sMsg = "=m"+sIdAction
        else:
            for x in re.finditer(r"\\(\d+)", sMsg):
                if int(x.group(1)) > nGroup:
                    print("# error in groups in message at line " + sIdAction + " ("+str(nGroup)+" groups only)")
            if re.search("[.]\\w+[(]", sMsg):
                print("# error in message at line " + sIdAction + ": This message looks like code. Line should begin with =")
    if sAction[0:1] == "=" or cAction == "=":
        if "define" in sAction and not re.search(r"define\(\\\d+ *, *\[.*\] *\)", sAction):
            print("# error in action at line " + sIdAction + ": second argument for define must be a list of strings")
        sAction = prepare_for_eval(sAction)
        sAction = sAction.replace("m.group(i[4])", "m.group("+str(iGroup)+")")
        for x in re.finditer("group[(](\d+)[)]", sAction):
            if int(x.group(1)) > nGroup:
                print("# error in groups in replacement at line " + sIdAction + " ("+str(nGroup)+" groups only)")
    else:
        for x in re.finditer(r"\\(\d+)", sAction):
            if int(x.group(1)) > nGroup:
                print("# error in groups in replacement at line " + sIdAction + " ("+str(nGroup)+" groups only)")
        if re.search("[.]\\w+[(]", sAction):
            print("# error in action at line " + sIdAction + ": This action looks like code. Line should begin with =")
    if cAction == "-":
        ## error detected
        if sAction[0:1] == "=":
            FUNCTIONS.append(("s"+sIdAction, sAction[1:]))
            sAction = "=s"+sIdAction
        elif sAction.startswith('"') and sAction.endswith('"'):
            sAction = sAction[1:-1]
        return [sCondition, cAction, sAction, iGroup, sMsg, sURL]
    elif cAction == "~":
        ## text preprocessor
        if sAction[0:1] == "=":
            if sAction[1:2] == "@":
                FUNCTIONS.append(("p"+sIdAction, sAction[2:]))
                sAction = "=@p"+sIdAction
            else:
                FUNCTIONS.append(("p"+sIdAction, sAction[1:]))
                sAction = "=p"+sIdAction
        return [sCondition, cAction, sAction, iGroup]
    elif cAction == "=":
        ## disambiguator
        if sAction[0:1] == "=":
            sAction = sAction[1:]
        FUNCTIONS.append(("d"+sIdAction, sAction))
        sAction = "d"+sIdAction
        return [sCondition, cAction, sAction]
    else:
        echo("# Unknown action at line " + sIdAction)
        return None


def regex2js (sRegex):
    "converts Python regex to JS regex and returns JS regex and list of negative lookbefore assertions"
    #   Latin letters: http://unicode-table.com/fr/
    #   0-9
    #   A-Z
    #   a-z
    #   À-Ö     00C0-00D6   (upper case)
    #   Ø-ß     00D8-00DF   (upper case)
    #   à-ö     00E0-00F6   (lower case)
    #   ø-ÿ     00F8-00FF   (lower case)
    #   Ā-ʯ     0100-02AF   (mixed)
    #   -> a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ
    bCaseInsensitive = False
    if "(?i)" in sRegex:
        sRegex = sRegex.replace("(?i)", "")
        bCaseInsensitive = True
    lNegLookBeforeRegex = []
    if WORDLIMITLEFT in sRegex:
        sRegex = sRegex.replace(WORDLIMITLEFT, "")
        lNegLookBeforeRegex = ["[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ.,–-]$"]
    sRegex = sRegex.replace("[\\w", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ")
    sRegex = sRegex.replace("\\w", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ]")
    sRegex = sRegex.replace("[.]", r"\.")
    if not sRegex.startswith("<js>"):
        sRegex = sRegex.replace("/", r"\/")
    m = re.search(r"\(\?<!([^()]+)\)", sRegex)  # Negative lookbefore assertion should always be at the beginning of regex
    if m:
        lNegLookBeforeRegex.append(m.group(1)+"$")
        sRegex = sRegex.replace(m.group(0), "")
    if "(?<" in sRegex:
        echo("# Warning. Lookbefore assertion not changed in:\n ")
        echo(sRegex)
    if sRegex.startswith("<js>"):
        sRegex = sRegex.replace('<js>', '/').replace('</js>i', '/ig').replace('</js>', '/g')
    else:
        sRegex = "/" + sRegex + "/g"
    if bCaseInsensitive and not sRegex.endswith("/ig"):
        sRegex = sRegex + "i"
    if not lNegLookBeforeRegex:
        lNegLookBeforeRegex = None
    return (sRegex, lNegLookBeforeRegex)
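
# Hedged illustration (added comment, not in the original source):
#   regex2js(r"(?i)\w+")
# returns roughly
#   ('/[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ]+/gi', None)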

def pyRuleToJS (lRule):
    lRuleJS = copy.deepcopy(lRule)
    del lRule[-1]  # tGroups positioning codes are useless for Python
    # error messages
    for aAction in lRuleJS[4]:
        if aAction[1] == "-":
            # use no-break spaces (U+00A0) inside French quotation marks; the
            # replacement strings differ from the search strings only by that
            # invisible character, which the plain-text dump had flattened
            aAction[4] = aAction[4].replace("« ", "«\u00a0").replace(" »", "\u00a0»")
    # js regexes
    lRuleJS[1], lNegLookBehindRegex = regex2js( JSREGEXES.get(lRuleJS[3], lRuleJS[1]) )
    lRuleJS.append(lNegLookBehindRegex)
    return lRuleJS

def writeRulesToJSArray_old (lRules):
    s = "[\n"
    for lRule in lRules:
        # [sOption, sRegex, bCaseInsensitive, sRuleId, lActions, aGroups, aNegLookBehindRegex]
        s += ' ["' + lRule[0] + '", ' if lRule[0] else " [false, "
        s += lRule[1] + ", "
        s += "true, " if lRule[2] else "false, "
        s += '"' + lRule[3] + '", '
        s += json.dumps(lRule[4], ensure_ascii=False) + ", "
        s += json.dumps(lRule[5], ensure_ascii=False) + ", "
        s += json.dumps(lRule[6], ensure_ascii=False) + "],\n"
    s += "]"
    return s


def writeRulesToJSArray (lRules):
    s = "[\n"
    for sOption, aRuleGroup in lRules:
        s += ' ["' + sOption + '", [\n' if sOption else " [false, [\n"
        for lRule in aRuleGroup:
            # [sRegex, bCaseInsensitive, sRuleId, lActions, aGroups, aNegLookBehindRegex]
            s += '   [' + lRule[0] + ", "
            s += "true, " if lRule[1] else "false, "
            s += '"' + lRule[2] + '", '
            s += json.dumps(lRule[3], ensure_ascii=False) + ", "
            s += json.dumps(lRule[4], ensure_ascii=False) + ", "
            s += json.dumps(lRule[5], ensure_ascii=False) + "],\n"
        s += " ]],\n"
    s += "]"
    return s


def groupsPositioningCodeToList (sGroupsPositioningCode):
    if not sGroupsPositioningCode:
        return None
    return [ int(sCode) if sCode.isdigit() or (sCode[0:1] == "-" and sCode[1:].isdigit()) else sCode \
             for sCode in sGroupsPositioningCode.split(",") ]
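
# Hedged illustration (added comment, not in the original source): numeric
# codes become ints, anything else stays a string, e.g.
#   groupsPositioningCodeToList("1,2,$")  ->  [1, 2, "$"]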

def displayStats (lRules):
    print("{:>2} {:>18} {:>18} {:>18} {:>18}".format("#", "DISAMBIGUATOR", "TEXT PROCESSOR", "GRAMMAR CHECKING", "RULES"))
    for i in range(len(lRules)):
        d = {'=': 0, '~': 0, '-': 0}
        for aRule in lRules[i]:
            for aAction in aRule[4]:
                d[aAction[1]] = d[aAction[1]] + 1
        print("{:>2} {:>18} {:>18} {:>18} {:>18}".format(i, d['='], d['~'], d['-'], len(lRules[i])))


def mergeRulesByOption (lRules):
    "returns a list of tuples [option, list of rules] keeping the rules order"
    lFinal = []
    lTemp = []
    sOption = None
    for aRule in lRules:
        if aRule[0] != sOption:
            if sOption != None:
                lFinal.append([sOption, lTemp])
            # new tuple
            sOption = aRule[0]
            lTemp = []
        lTemp.append(aRule[1:])
    lFinal.append([sOption, lTemp])
    return lFinal


def make (lRules, sLang, bJavaScript):
    "compile rules"
    # removing comments, zeroing empty lines, creating definitions
    global DEF
    lLine = []
    lTest = []
    for i, sLine in enumerate(lRules):
        if sLine.startswith('#END'):
            break
        elif sLine.startswith("#"):
            lLine.append("")
        elif sLine.startswith("DEF:"):
            m = re.match("DEF: +([a-zA-Z_][a-zA-Z_0-9]*) +(.+)$", sLine.strip())
            if m:
                DEF["{"+m.group(1)+"}"] = m.group(2)
            else:
                print("Error in definition: ", end="")
                echo(sLine.strip())
            lLine.append("")
        elif sLine.startswith("TEST:"):
            lTest.append("{:<8}".format(i+1) + " " + sLine[5:].lstrip())
            lLine.append("")
        elif sLine.startswith("TODO:"):
            lLine.append("")
        elif re.match("[ \t]+$", sLine):
            lLine.append("")
        else:
            lLine.append(sLine)
    # generating test files
    with open("tests/"+sLang+"/gc_test.txt", "w", encoding="utf-8") as hDstPy, \
         open("gc_lang/"+sLang+"/modules-js/tests_data.js", "w", encoding="utf-8") as hDstJS:
        hDstPy.write("# TESTS FOR LANG ["+sLang+"]\n\n")
        hDstJS.write("// JavaScript\n//Tests (generated code, do not edit)\n\nconst aData = [\n")
        for sLine in lTest:
            hDstPy.write(sLine)
            hDstJS.write(' ' + json.dumps(sLine.strip(), ensure_ascii=False) + ',\n')
        hDstJS.write("];\nexports.aData = aData;\n")
    # concatenate multiline commands
    dLineIndex = {}
    sStack = ""
    for i in range(len(lLine)-1, -1, -1):
        if lLine[i].startswith((" ", "\t")):
            sStack = lLine[i].strip() + " " + sStack
            del lLine[i]
        elif lLine[i] == "":
            del lLine[i]
        else:
            lLine[i] = lLine[i].strip() + " " + sStack
            lLine[i] = lLine[i].strip()
            dLineIndex[lLine[i]] = i + 1  # +1 because in a text editor, line numbers begin at 1
            sStack = ""
    # processing
    bParagraph = True
    lParagraphRules = []
    lSentenceRules = []
    lParagraphRulesJS = []
    lSentenceRulesJS = []
    for sLine in lLine:
        if sLine:
            if sLine == "[++]":
                bParagraph = False
            else:
                lRule = createRule(sLine, dLineIndex[sLine], sLang, bParagraph)
                if lRule:
                    if bParagraph:
                        lParagraphRules.append(lRule)
                        lParagraphRulesJS.append(pyRuleToJS(lRule))
                    else:
                        lSentenceRules.append(lRule)
                        lSentenceRulesJS.append(pyRuleToJS(lRule))
    # creating file with all functions callable by rules
    with open("gc_lang/"+sLang+"/modules/gc_tmp_eval.py", "w", encoding="utf-8") as hDstPy, \
         open("gc_lang/"+sLang+"/modules-js/gc_tmp_eval.js", "w", encoding="utf-8") as hDstJS:
        hDstPy.write("# generated code, do not edit\n")
        hDstJS.write("// generated code, do not edit\nvar oEvalFunc = {\n")
        for sFuncName, sReturn in FUNCTIONS:
            cType = sFuncName[0:1]
            if cType == "c":  # condition
                sParams = "s, sx, m, dDA, sCountry"
            elif cType == "m":  # message
                sParams = "s, m"
            elif cType == "s":  # suggestion
                sParams = "s, m"
            elif cType == "p":  # preprocessor
                sParams = "s, m"
            elif cType == "d":  # disambiguator
                sParams = "s, m, dDA"
            else:
                print("# Unknown function type in [" + sFuncName + "]")
                continue
            hDstPy.write("def {} ({}):\n".format(sFuncName, sParams))
            hDstPy.write("    return " + sReturn + "\n")
            hDstJS.write("    {}: function ({})".format(sFuncName, sParams) + " {\n")
            hDstJS.write("        return " + py2js(sReturn) + ";\n")
            hDstJS.write("    },\n")
        hDstJS.write("}\n")
    displayStats([lParagraphRules, lSentenceRules])
    return {"paragraph_rules": mergeRulesByOption(lParagraphRules),
            "sentence_rules": mergeRulesByOption(lSentenceRules),
            "paragraph_rules_JS": writeRulesToJSArray(mergeRulesByOption(lParagraphRulesJS)),
            "sentence_rules_JS": writeRulesToJSArray(mergeRulesByOption(lSentenceRulesJS))}
| SamuelLongchamps/grammalecte | compile_rules.py | Python | gpl-3.0 | 24,371 |
"""Pygame event handler by J.
This module consists of the EventHandler class, which is used to assign
callbacks to events and keypresses in Pygame.
Release: 12.
Licensed under the GNU General Public License, version 3; if this was not
included, you can find it here:
http://www.gnu.org/licenses/gpl-3.0.txt
"""
# TODO:
# - match keys by event.unicode
# - ability to remove event/key/default handlers
# - joystick stuff
import sys
import pygame
MODE_HELD = 0
MODE_ONPRESS = 1
MODE_ONPRESS_REPEAT = 2
MODE_ONDOWN = 3
MODE_ONDOWN_REPEAT = 4
def quit (event):
pygame.quit()
sys.exit()
class EventHandler:
"""Assign callbacks to events and keypresses.
EventHandler(event_handlers = {}, key_handlers = [], suppress_quit = False,
quit_handler = evthandler.quit[, default_cbs],
ignore_locks = True)
event_handlers: (event.type: callbacks) dict.
key_handlers: list of (keys, callbacks, mode) tuples, where:
- keys is a list of (key_ID, mods, exact) tuples or key_ID ints, where:
- key_ID is as used in pygame.
- mods is a modifier bitmask or list of modifier bitmasks to match as
well. 'Matching' a bitmask is having any key it 'contains' pressed;
passing a list does an AND-type comparison, where we check for a
match against every bitmask in the list.
- exact is a bool, determining whether to match the modifiers exactly
(otherwise, it's a match if other modifiers are held as well).
Passing a key_ID is like passing (key_ID, 0, False).
- mode is one of those defined in this module. *_REPEAT modes require two
more arguments in each tuple, both integers greater than 0:
- initial_delay, the number of frames the key must be held down for
until it starts repeating.
- repeat_delay, the number of frames between repeats.
suppress_quit: don't exit (call quit_handler) on a pygame.QUIT event.
quit_handler: handler to attach to pygame.QUIT events; the default function
calls pygame.quit and sys.exit. This is ignored if suppress_quit
is True.
default_cbs: callbacks to call for events with no registered event handlers.
ignore_locks: whether to ignore num lock and caps lock when matching modifiers
for key handlers with exact = True.
In all cases, callbacks is a list of (callback, args) tuples, where args is a
list of arguments to pass to the callback (after any compulsory arguments).
(callback, args) can be reduced to callback if args is empty, and the whole
list can be reduced to just a callback if there's only one and its args list is
empty.
Event callbacks (includes those in default_cbs) take the event as an argument.
Key callbacks take three arguments:
- key_ID or the (key_ID, mods, exact) tuple as passed.
- the type of key event: -1 if the key is being held down, 0 if it was
pressed, 1 if released, 2 if this is a repeat call (simulated keypress).
(This means that for some modes, this argument is always the same.)
- the key modifiers being held at the time of the keypress/release/currently.
(This is a bitmask that can be compared to the pygame.KMOD_* constants.)
The available modes and the extra arguments needed in the key_handler entry are
as follows:
MODE_HELD: the key is currently being held down.
MODE_ONPRESS: the key was pressed or released since the last check.
MODE_ONPRESS_REPEAT: as MODE_ONPRESS, but call the callback repeatedly when
held down for some time.
MODE_ONDOWN: the key was pressed since the last check.
MODE_ONDOWN_REPEAT: as MODE_ONDOWN, but call the callback repeatedly when
held down for some time.
Frames, here, are the number of calls to EventHandler.update.
Note that the callbacks associated with any given key are not called more than
once per frame, even if the key is pressed more than once in the last frame
(could happen with a mode other than MODE_HELD).
METHODS
add_event_handlers
add_key_handlers
update
ATTRIBUTES
event_handlers: (event.type: callbacks) dict of registered event handlers.
default_cbs: callbacks for unhandled events.
key_handlers: (keycode: data) dict of registered key handlers, where data is a
(key_data: callbacks) dict and key_data is keycode or
(keycode, mods, exact) as given.
keys_down: keys pressed between the last two calls to update.
keys_up: keys released between the last two calls to update.
keys_pressed: keys held down at the time of the last call to update.
key_mods: the return value from pygame.key.get_mods at the time of the last
call to update.
events_active: whether event handlers are called.
keys_active: whether key handlers are called.
defaults_active: whether default handlers are called.
"""

    def __init__ (self, event_handlers = {}, key_handlers = [],
                  suppress_quit = False, quit_handler = quit,
                  default_cbs = None, ignore_locks = True):
        self.event_handlers = {}
        self.add_event_handlers(event_handlers)
        self.key_handlers = {}
        self._keys_handled = [set(), set(), set(), set(), set()]
        self.add_key_handlers(key_handlers)
        self.default_cbs = []
        if default_cbs is not None:
            self.add_default_cbs(default_cbs)
        if not suppress_quit:
            self.add_event_handlers({pygame.QUIT: quit_handler})
        self._ignore_locks = ignore_locks
        self.keys_down = set()
        self.keys_up = set()
        self.keys_pressed = set()
        self.key_mods = 0
        self.repeat_count = {}
        self.events_active = self.keys_active = self.defaults_active = True

    def _clean_cbs (self, cbs):
        # expand shorthand callback arguments
        if hasattr(cbs, '__call__'):
            cbs = [cbs]
        return [(cb, ()) if hasattr(cb, '__call__') else cb for cb in cbs]

    def _call_cbs (self, cbs, *args):
        # call callbacks in list of accepted format
        args = tuple(args)
        for cb, extra_args in cbs:
            extra_args = tuple(extra_args)
            cb(*(args + extra_args))

    def _call_key_cbs (self, cbs, key_data, press_type, current_mods):
        # call key callbacks in list of accepted format if modifiers match
        if isinstance(key_data, int):
            # just got a key ID
            key, mods, exact = (key_data, 0, False)
        else:
            # got (key_ID, mods, exact)
            key, mods, exact = key_data
        # check mods match
        if isinstance(mods, int):
            mods = (mods,)
        mods = set(mods)
        # check all wanted mods are currently pressed
        match = all(mod == 0 or mod & current_mods for mod in mods)
        if exact and match:
            # 'subtracting' mods from current_mods gives 0 if current_mods
            # 'contains' no other mods
            subtract = list(mods)
            if self._ignore_locks:
                subtract += [pygame.KMOD_CAPS, pygame.KMOD_NUM]
            match = current_mods & reduce(int.__or__, subtract)
            match = (current_mods - match) == 0
        if match:
            self._call_cbs(cbs, key_data, press_type, current_mods)

    def _call_all_cbs (self, key, press_type, modes, mods):
        # call all callbacks for a key
        for key_data, cb_data_sets in self.key_handlers[key].iteritems():
            for cb_data in cb_data_sets:
                if cb_data[1] in modes:
                    self._call_key_cbs(cb_data[0], key_data, press_type, mods)

    def add_event_handlers (self, event_handlers):
        """Add more event handlers.

        Takes an event_handlers argument in the same form as expected by the
        constructor.

        """
        for e, cbs in event_handlers.iteritems():
            cbs = self._clean_cbs(cbs)
            try:
                self.event_handlers[e] += cbs
            except KeyError:
                self.event_handlers[e] = cbs

    def add_key_handlers (self, key_handlers):
        """Add more key handlers.

        Takes a key_handlers argument in the same form as expected by the
        constructor.

        """
        for x in key_handlers:
            keys, cbs, mode = x[:3]
            cbs = self._clean_cbs(cbs)
            args = list(x[3:])
            for data in keys:
                if isinstance(data, int):
                    # just got a key ID
                    k = data
                else:
                    # got (key_ID, mods, exact)
                    k = data[0]
                if k not in self.key_handlers:
                    self.key_handlers[k] = {}
                if data not in self.key_handlers[k]:
                    self.key_handlers[k][data] = [[cbs] + [mode] + args]
                else:
                    self.key_handlers[k][data].append([cbs] + [mode] + args)
                self._keys_handled[mode].add(k)

    def add_default_cbs (self, cbs):
        """Add more default event callbacks.

        Takes a cbs argument in the same form as the default_cbs argument
        expected by the constructor.

        """
        self.default_cbs += self._clean_cbs(cbs)

    def update (self):
        """Go through the event queue and call callbacks.

        Call this every frame.

        """
        events_active = self.events_active
        keys_active = self.keys_active
        defaults_active = self.defaults_active
        self.keys_down = set()
        down_mods = {}
        self.keys_up = set()
        up_mods = {}
        pressed_mods = pygame.key.get_mods()
        # call event callbacks and compile keypresses
        for event in pygame.event.get():
            if event.type in self.event_handlers:
                cbs = self.event_handlers[event.type]
                # call callbacks registered for this event type
                if events_active:
                    self._call_cbs(cbs, event)
            else:
                # call default callbacks
                if defaults_active:
                    self._call_cbs(self.default_cbs, event)
            if event.type in (pygame.KEYDOWN, pygame.KEYUP):
                # keep track of pressed and released keys
                if event.type == pygame.KEYDOWN:
                    self.keys_down.add(event.key)
                    down_mods[event.key] = event.mod
                else:
                    self.keys_up.add(event.key)
                    up_mods[event.key] = event.mod
        pressed = pygame.key.get_pressed()
        # for some reason this is faster than set(genexpr)
        self.keys_pressed = set([i for i in xrange(len(pressed)) if pressed[i]])
        # update repeated key counts
        held = (self._keys_handled[2] | self._keys_handled[4]) & self.keys_pressed
        for k in set(self.repeat_count) - held:
            # no longer being held
            del self.repeat_count[k]
        for k in held:
            if k in self.repeat_count:
                self.repeat_count[k] += 1
            else:
                self.repeat_count[k] = 0
        # call key callbacks
        if keys_active:
            for k in self._keys_handled[0] & self.keys_pressed:
                self._call_all_cbs(k, -1, (0,), pressed_mods)
            temp = self._keys_handled[1] | self._keys_handled[2]
            called = set()
            for k in (temp | self._keys_handled[3] | self._keys_handled[4]) & self.keys_down:
                called.add(k)
                self._call_all_cbs(k, 0, (1, 2, 3, 4), down_mods[k])
            for k in temp & self.keys_up:
                self._call_all_cbs(k, 1, (1, 2), up_mods[k])
            # keys might have callbacks with different repeat delays/rates, so
            # need to check each set of callbacks individually
            for k, count in self.repeat_count.iteritems():
                if k in called:
                    continue
                for key_data, cb_data in self.key_handlers[k].iteritems():
                    for cb_datum in cb_data:
                        try:
                            cbs, mode, initial, repeat = cb_datum
                        except ValueError:
                            # a key might be used for both repeating and not
                            # repeating modes, and both uses will end up here
                            continue
                        if count >= initial and (count - initial) % repeat == 0:
                            self._call_key_cbs(cbs, key_data, 2, pressed_mods)
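

if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # module): binds SPACE as a held key and ESC to quit, then pumps events
    # once per frame.  Requires pygame; the window size is arbitrary.
    pygame.init()
    pygame.display.set_mode((320, 240))

    def on_space(key, press_type, mods):
        # press_type is -1 for MODE_HELD callbacks
        print 'space is held (mods=%r)' % mods

    handler = EventHandler(key_handlers=[
        ([pygame.K_SPACE], on_space, MODE_HELD),
        ([pygame.K_ESCAPE], lambda key, press_type, mods: quit(None), MODE_ONDOWN),
    ])
    clock = pygame.time.Clock()
    while True:
        handler.update()  # also calls quit() on pygame.QUIT by default
        clock.tick(30)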
| ikn/brjaf | brjaf/ext/evthandler.py | Python | gpl-3.0 | 12,404 |
#!/usr/bin/env python

import os
import argparse
from os.path import join, abspath
from sh import which

import sleuth_automation as sleuth
from jinja2 import Environment, PackageLoader

description = """
This script will create a condor submit file for a batch of SLEUTH runs.
"""

parser = argparse.ArgumentParser(description=description)
parser.add_argument('--sleuth_path', required=True,
                    help='path to SLEUTH directory')
parser.add_argument('--region_dir', required=True,
                    help='path to region dir containing locations')
parser.add_argument('--mpi_cores', default=0, type=int,
                    help="""number of cores available for MPI,
                    if 0 (default) don't use mpi""")
parser.add_argument('--predict_end', type=int, required=True,
                    help='ending year of prediction interval')
parser.add_argument('--montecarlo_iterations', type=int, default=50,
                    help='monte carlo iterations')

args = parser.parse_args()

if args.mpi_cores > 0:
    sleuth.configure(sleuth_path=args.sleuth_path,
                     use_mpi=True, mpi_cores=args.mpi_cores)
else:
    sleuth.configure(sleuth_path=args.sleuth_path,
                     use_mpi=False)

r = sleuth.Region(region_dir=args.region_dir,
                  predict_end=args.predict_end,
                  monte_carlo_iterations=args.montecarlo_iterations)

print("wrote " + r.build())
| sostenibilidad-unam/sleuth_automation | bin/create_sleuth_condor_batch.py | Python | gpl-3.0 | 1,461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import imp
from django.core.management.base import BaseCommand
from optparse import make_option
from django.core.paginator import Paginator
from foo.offset.models import Log
from foo.july.models import BigBook
import logging
from datetime import datetime
from django.db import connection


class Command(BaseCommand):
    help = 'Import data'

    def handle(self, *args, **options):
        """
        Use prepared query on july_bigbook
        """
        key = 'keypage_prepare_fields'
        log = Log.objects.create(name=key,
                                 start=datetime.now(),
                                 stop=datetime.now())
        nb = 0
        keyid = 0
        cursor = connection.cursor()
        try:
            cursor.execute('DEALLOCATE preptwo')
        except:
            pass
        qry = " ".join(["PREPARE preptwo (integer) AS ",
                        "SELECT keyid,nbpages FROM july_bigbook",
                        "WHERE serie= 3 AND keyid > $1",
                        "ORDER BY keyid ASC LIMIT 250"])
        try:
            cursor.execute(qry)
        except:
            pass
        while True:
            cursor.execute('EXECUTE preptwo (%s)' % (keyid))
            books = cursor.fetchall()
            for book in books:
                keyid = book[0]
                # do what you want here
                if book[1] > 500:
                    nb = nb + 1
            if len(books) < 250:
                break
        log.stop = datetime.now()
        log.save()
        print key, log.stop - log.start, nb
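
# Hedged note (added comment, not part of the original command): the loop
# above combines a PostgreSQL server-side prepared statement with keyset
# pagination; each round trip re-runs the plan with the last key seen:
#
#   PREPARE preptwo (integer) AS
#       SELECT keyid, nbpages FROM july_bigbook
#       WHERE serie = 3 AND keyid > $1
#       ORDER BY keyid ASC LIMIT 250;
#   EXECUTE preptwo (0);   -- then EXECUTE preptwo (<last keyid>) repeatedly
#                          -- until fewer than 250 rows come back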
| rodo/django-perf | foo/offset/management/commands/keypage_prepare_fields.py | Python | gpl-3.0 | 2,395 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2018-12-04 15:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('anagrafica', '0049_auto_20181028_1639'),
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('required', models.BooleanField(default=True, verbose_name='Obbligatorio')),
            ],
            options={
                'verbose_name': 'Domanda',
                'verbose_name_plural': 'Domande',
            },
        ),
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('text', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name': 'Questionario di gradimento',
                'verbose_name_plural': 'Questionari di gradimento',
            },
        ),
        migrations.CreateModel(
            name='SurveyResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('response', models.TextField(blank=True, max_length=1000, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Question')),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='anagrafica.Persona')),
            ],
            options={
                'verbose_name': "Risposta dell'utente",
                'verbose_name_plural': 'Risposte degli utenti',
            },
        ),
        migrations.AddField(
            model_name='question',
            name='survey',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey'),
        ),
    ]
| CroceRossaItaliana/jorvik | survey/migrations/0001_initial.py | Python | gpl-3.0 | 2,602 |
# -*- coding: utf-8 -*-

# +---------------------------------------------------------------------------+
# |          01001110 01100101 01110100 01111010 01101111 01100010            |
# |                                                                           |
# |               Netzob : Inferring communication protocols                  |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry              |
# | This program is free software: you can redistribute it and/or modify      |
# | it under the terms of the GNU General Public License as published by      |
# | the Free Software Foundation, either version 3 of the License, or         |
# | (at your option) any later version.                                       |
# |                                                                           |
# | This program is distributed in the hope that it will be useful,           |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of            |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              |
# | GNU General Public License for more details.                              |
# |                                                                           |
# | You should have received a copy of the GNU General Public License         |
# | along with this program. If not, see <http://www.gnu.org/licenses/>.      |
# +---------------------------------------------------------------------------+
# | @url : http://www.netzob.org                                              |
# | @contact : [email protected]                                              |
# | @sponsors : Amossys, http://www.amossys.fr                                |
# |             Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/           |
# +---------------------------------------------------------------------------+

# +---------------------------------------------------------------------------+
# | File contributors :                                                       |
# |       - Georges Bossert <georges.bossert (a) supelec.fr>                  |
# |       - Frédéric Guihéry <frederic.guihery (a) amossys.fr>                |
# +---------------------------------------------------------------------------+

# +---------------------------------------------------------------------------+
# | Standard library imports                                                  |
# +---------------------------------------------------------------------------+
import random
import os

from bitarray import bitarray

# +---------------------------------------------------------------------------+
# | Related third party imports                                               |
# +---------------------------------------------------------------------------+

# +---------------------------------------------------------------------------+
# | Local application imports                                                 |
# +---------------------------------------------------------------------------+
from netzob.Common.Models.Types.AbstractType import AbstractType


class Raw(AbstractType):
    """Raw netzob data type expressed in bytes.

    For instance, we can use this type to parse any raw field of 2 bytes:

    >>> from netzob.all import *
    >>> f = Field(Raw(nbBytes=2))

    or with a specific value (default is little endianness)

    >>> f = Field(Raw('\x01\x02\x03'))
    >>> print f.domain.dataType
    Raw='\\x01\\x02\\x03' ((0, 24))

    >>> f.domain.dataType.endianness = AbstractType.ENDIAN_BIG
    >>> print f.domain.dataType
    Raw='\\x01\\x02\\x03' ((0, 24))

    """

    def __init__(self, value=None, nbBytes=None, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        if value is not None and not isinstance(value, bitarray):
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            value = TypeConverter.convert(value, Raw, BitArray)
        nbBits = self._convertNbBytesinNbBits(nbBytes)
        super(Raw, self).__init__(self.__class__.__name__, value, nbBits, unitSize=unitSize, endianness=endianness, sign=sign)

    def __str__(self):
        if self.value is not None:
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            from netzob.Common.Models.Types.HexaString import HexaString
            return "{0}={1} ({2})".format(self.typeName, repr(TypeConverter.convert(self.value, BitArray, Raw)), self.size)
        else:
            return "{0}={1} ({2})".format(self.typeName, self.value, self.size)

    def __repr__(self):
        if self.value is not None:
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            return str(TypeConverter.convert(self.value, BitArray, self.__class__))
        else:
            return str(self.value)

    def _convertNbBytesinNbBits(self, nbBytes):
        nbMinBit = None
        nbMaxBit = None
        if nbBytes is not None:
            if isinstance(nbBytes, int):
                nbMinBit = nbBytes * 8
                nbMaxBit = nbMinBit
            else:
                if nbBytes[0] is not None:
                    nbMinBit = nbBytes[0] * 8
                if nbBytes[1] is not None:
                    nbMaxBit = nbBytes[1] * 8
        return (nbMinBit, nbMaxBit)

    def generate(self, generationStrategy=None):
        """Generates a random Raw that respects the requested size.

        >>> from netzob.all import *
        >>> a = Raw(nbBytes=(10))
        >>> gen = a.generate()
        >>> print len(gen)
        80

        >>> from netzob.all import *
        >>> a = Raw(nbBytes=(10, 20))
        >>> gen = a.generate()
        >>> print 80 <= len(gen) and len(gen) <= 160
        True

        """
        from netzob.Common.Models.Types.TypeConverter import TypeConverter
        from netzob.Common.Models.Types.BitArray import BitArray

        minSize, maxSize = self.size
        if maxSize is None:
            maxSize = AbstractType.MAXIMUM_GENERATED_DATA_SIZE
        if minSize is None:
            minSize = 0
        generatedSize = random.randint(minSize, maxSize)
        return TypeConverter.convert(os.urandom(generatedSize / 8), Raw, BitArray)

    @staticmethod
    def decode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        return data

    @staticmethod
    def encode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        return data

    @staticmethod
    def canParse(data):
        """Computes if specified data can be parsed as raw which is always the case if the data is at least 1 length and aligned on a byte.

        >>> from netzob.all import *
        >>> Raw.canParse(TypeConverter.convert("hello netzob", ASCII, BitArray))
        True

        The ascii table is defined from 0 to 127:

        >>> Raw.canParse(TypeConverter.convert(128, Decimal, BitArray, src_sign=AbstractType.SIGN_UNSIGNED))
        True

        :param data: the data to check
        :type data: python raw
        :return: True if data can be parsed as a Raw which is always the case (if len(data)>0)
        :rtype: bool
        :raise: TypeError if the data is None
        """
        if data is None:
            raise TypeError("data cannot be None")
        if len(data) == 0:
            return False
        if len(data) % 8 != 0:
            return False
        return True
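
# Hedged usage sketch (added comment, not part of the original module):
# generating a random chunk and checking it parses back as Raw.
#
#   field = Raw(nbBytes=(4, 8))
#   data = field.generate()   # bitarray of 32..64 bits
#   Raw.canParse(data)        # True for any non-empty, byte-aligned data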
| dasbruns/netzob | src/netzob/Common/Models/Types/Raw.py | Python | gpl-3.0 | 7,876 |
import libhfst

transducers = []
istr = libhfst.HfstInputStream()
while not istr.is_eof():
    transducers.append(istr.read())
istr.close()

if not len(transducers) == 3:
    raise RuntimeError('Wrong number of transducers read.')

i = 0
for re in ['föö:bär', '0', '0-0']:
    if not transducers[i].compare(libhfst.regex(re)):
        raise RuntimeError('Transducers are not equivalent.')
    i += 1

if len(transducers) > 0:
    f = libhfst.hfst_stdout()
    i = 0
    transducers[i].write_att(f)
    i += 1
    while i < len(transducers):
        f.write('--\n')
        transducers[i].write_att(f)
        i += 1
| unhammer/hfst3 | python/test/test_streams_2.py | Python | gpl-3.0 | 615 |
# coding: utf-8
import sys  # needed for sys.argv and sys.exit in main() below

from gui import *
import thread
import color
import time
import os

from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import *

import YaoSpeech
from dialogue import *


class dialogue_thread(QThread):
    def __init__(self):
        QThread.__init__(self)
        self.voiceface = VoiceInterface()
        self.voiceface.hear_display = self.display
        self.voiceface.hear_voice = self.talk
        self.dialogue = DialogueManager(self.voiceface, DialogueModelTagging())

    def run(self):
        self.dialogue.run()
        return None

    def listen(self):
        YaoSpeech.listen()
        print "YaoSpeech: listening..."

    def recognize(self):
        result = YaoSpeech.recognize()
        if result == None or result == '':
            return
        result = result.decode('utf-8')
        print "speech recognized: ", result
        self.emit(QtCore.SIGNAL('display(QString, QString)'), u'Human: '+result, '#bb2244')
        self.voiceface.addMessage(result)
        print "YaoSpeech: recognizing..."

    def display(self, msg):
        self.emit(QtCore.SIGNAL('display(QString, QString)'), u'Machine: '+msg, '#00aa00')

    def talk(self, msg):
        msg = msg.replace(u'\'', u'\\\'')
        msg = msg.replace(u'\n', u'\\\n')
        msg = msg.replace(u'\t', u'\n')
        print msg
        YaoSpeech.speak((u'\"'+msg+u'\"').encode('utf-8'))


def main():
    app = QtGui.QApplication(sys.argv)
    gui = GUI()
    thread = QThread()
    dialogue = dialogue_thread()
    gui.connect(dialogue, QtCore.SIGNAL("display(QString, QString)"), gui.display)
    dialogue.connect(gui, QtCore.SIGNAL('listen()'), dialogue.listen)
    dialogue.connect(gui, QtCore.SIGNAL('recognize()'), dialogue.recognize)
    dialogue.start()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| strin/yaotalk | main.py | Python | gpl-3.0 | 1714 |
from django.shortcuts import render
from django.http import HttpResponse

from .models import GeoLocation


def log_location(request):
    """Log a geolocation fix sent via GET parameters.

    :params
        :lat - latitude
        :lon - longitude
        :user_agent - useful for IOT applications that need to log the
                      client that sent the location
    """
    if request.method == 'GET':
        user_agent = request.GET.get('user_agent', 'test')
        try:
            lat = request.GET['lat']
            lon = request.GET['lon']
            GeoLocation.objects.create(user_agent=user_agent, lat=lat, lon=lon)
        except:
            return HttpResponse(0, status=500)
    return HttpResponse(1, status=200)
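
# Hedged usage sketch (added comment, not part of the original view),
# assuming a URL pattern routes to log_location; a client logs a fix via:
#
#   GET /log_location?lat=28.6139&lon=77.2090&user_agent=esp8266-tracker
#
# which returns "1" with HTTP 200 on success, or "0" with HTTP 500 on error.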
| abhijithmannath/ml-ceeri | geo/views.py | Python | gpl-3.0 | 594 |
import io
import os
import pickle
import sys
import tempfile
import unittest

import numpy as np

import pystan
from pystan.tests.helper import get_model
from pystan.experimental import unpickle_fit


class TestPickle(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.pickle_file = os.path.join(tempfile.mkdtemp(), 'stanmodel.pkl')
        cls.model_code = 'parameters {real y;} model {y ~ normal(0,1);}'

    def test_pickle_model(self):
        pickle_file = self.pickle_file
        model_code = self.model_code
        m = pystan.StanModel(model_code=model_code, model_name="normal2")
        module_name = m.module.__name__
        module_filename = m.module.__file__
        with open(pickle_file, 'wb') as f:
            pickle.dump(m, f)
        del m
        del sys.modules[module_name]
        with open(pickle_file, 'rb') as f:
            m = pickle.load(f)
        self.assertTrue(m.model_name.startswith("normal2"))
        self.assertIsNotNone(m.module)
        if not sys.platform.startswith('win'):
            # will fail on Windows
            self.assertNotEqual(module_filename, m.module.__file__)
        fit = m.sampling()
        y = fit.extract()['y']
        assert len(y) == 4000

    def test_pickle_fit(self):
        model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
        sm = pystan.StanModel(model_code=model_code, model_name="normal1")
        # additional error checking
        fit = sm.sampling(iter=100)
        y = fit.extract()['y'].copy()
        self.assertIsNotNone(y)
        # pickle
        pickled_model = pickle.dumps(sm)
        module_name = sm.module.__name__
        del sm
        pickled_fit = pickle.dumps(fit)
        del fit
        # unload module
        if module_name in sys.modules:
            del(sys.modules[module_name])
        # load from file
        sm_from_pickle = pickle.loads(pickled_model)
        fit_from_pickle = pickle.loads(pickled_fit)
        self.assertIsNotNone(fit_from_pickle)
        self.assertTrue((fit_from_pickle.extract()['y'] == y).all())

    def test_pickle_model_and_reload(self):
        pickle_file = self.pickle_file
        pickle_file2 = os.path.join(tempfile.mkdtemp(), 'stanmodel.pkl')
        model_code = self.model_code
        model = pystan.StanModel(model_code=model_code, model_name="normal1")
        with open(pickle_file, 'wb') as f:
            pickle.dump(model, f)
        with open(pickle_file2, 'wb') as f:
            pickle.dump(model, f)
        del model
        with open(pickle_file, 'rb') as f:
            model_from_pickle = pickle.load(f)
        self.assertIsNotNone(model_from_pickle.sampling(iter=100).extract())
        with open(pickle_file2, 'rb') as f:
            model_from_pickle = pickle.load(f)
        self.assertIsNotNone(model_from_pickle.sampling(iter=100).extract())

    def test_model_unique_names(self):
        model_code = self.model_code
        model1 = pystan.StanModel(model_code=model_code, model_name="normal1")
        model2 = pystan.StanModel(model_code=model_code, model_name="normal1")
        self.assertNotEqual(model1.module_name, model2.module_name)


class TestPickleFitOnly(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
        model = get_model("standard_normal_model",
                          model_code, model_name="normal1",
                          verbose=True, obfuscate_model_name=False)
        fit = model.sampling()
        tempfolder = tempfile.mkdtemp()
        cls.pickle_fit = os.path.join(tempfolder, 'stanfit.pkl')
        cls.pickle_extract = os.path.join(tempfolder, 'stanextract.pkl')
        with io.open(cls.pickle_fit, mode="wb") as f:
            pickle.dump(fit, f)
        with io.open(cls.pickle_extract, mode="wb") as f:
            pickle.dump(fit.extract(), f)
        module_name = model.module.__name__
        del model
        del sys.modules[module_name]

    @unittest.expectedFailure
    def test_unpickle_fit_fail(self):
        with io.open(self.pickle_file, "rb") as f:
            pickle.load(f)

    def test_load_fit(self):
        fit, model = unpickle_fit(self.pickle_fit, open_func=io.open, open_kwargs={"mode": "rb"}, return_model=True)
        self.assertIsNotNone(fit)
        self.assertIsNotNone(model)
        self.assertIsNotNone(fit.extract())
        self.assertTrue("y" in fit.extract())
        with io.open(self.pickle_extract, "rb") as f:
            extract = pickle.load(f)
        self.assertTrue(np.all(fit.extract()["y"] == extract["y"]))
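
# Hedged usage note (added comment, not part of the test suite): a StanFit
# can only be unpickled while its model's generated module is importable, so
# a common pattern is to pickle the model and the fit together:
#
#   with open('model_and_fit.pkl', 'wb') as f:
#       pickle.dump({'model': sm, 'fit': fit}, f)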
| ariddell/pystan | pystan/tests/test_pickle.py | Python | gpl-3.0 | 4,607 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

from django import forms
from django.core.validators import validate_email, ValidationError
from slugify import slugify
from django.utils.translation import ugettext as _
from modeltranslation.forms import TranslationModelForm
from django.contrib.auth import get_user_model

from geonode.groups.models import GroupProfile


class GroupForm(TranslationModelForm):

    slug = forms.SlugField(
        max_length=20,
        help_text=_("a short version of the name consisting only of letters, numbers, underscores and hyphens."),
        widget=forms.HiddenInput,
        required=False)

    def clean_slug(self):
        if GroupProfile.objects.filter(
                slug__iexact=self.cleaned_data["slug"]).count() > 0:
            raise forms.ValidationError(
                _("A group already exists with that slug."))
        return self.cleaned_data["slug"].lower()

    def clean_title(self):
        if GroupProfile.objects.filter(
                title__iexact=self.cleaned_data["title"]).count() > 0:
            raise forms.ValidationError(
                _("A group already exists with that name."))
        return self.cleaned_data["title"]

    def clean(self):
        cleaned_data = self.cleaned_data
        name = cleaned_data.get("title")
        slug = slugify(name)
        cleaned_data["slug"] = slug
        return cleaned_data

    class Meta:
        model = GroupProfile
        exclude = ['group']


class GroupUpdateForm(forms.ModelForm):

    def clean_name(self):
        if GroupProfile.objects.filter(
                name__iexact=self.cleaned_data["title"]).count() > 0:
            if self.cleaned_data["title"] == self.instance.name:
                pass  # same instance
            else:
                raise forms.ValidationError(
                    _("A group already exists with that name."))
        return self.cleaned_data["title"]

    class Meta:
        model = GroupProfile
        exclude = ['group']


class GroupMemberForm(forms.Form):
    role = forms.ChoiceField(choices=[
        ("member", "Member"),
        ("manager", "Manager"),
    ])
    user_identifiers = forms.CharField(
        widget=forms.TextInput(
            attrs={'class': 'user-select'}))

    def clean_user_identifiers(self):
        value = self.cleaned_data["user_identifiers"]
        new_members, errors = [], []
        for ui in value.split(","):
            ui = ui.strip()
            try:
                validate_email(ui)
                try:
                    new_members.append(get_user_model().objects.get(email=ui))
                except get_user_model().DoesNotExist:
                    new_members.append(ui)
            except ValidationError:
                try:
                    new_members.append(
                        get_user_model().objects.get(username=ui))
                except get_user_model().DoesNotExist:
                    errors.append(ui)
        if errors:
            message = (
                "The following are not valid email addresses or "
                "usernames: %s; not added to the group" %
                ", ".join(errors))
            raise forms.ValidationError(message)
        return new_members


class GroupInviteForm(forms.Form):
    invite_role = forms.ChoiceField(label="Role", choices=[
        ("member", "Member"),
        ("manager", "Manager"),
    ])
    invite_user_identifiers = forms.CharField(
        label="E-mail addresses list",
        widget=forms.Textarea)

    def clean_user_identifiers(self):
        value = self.cleaned_data["invite_user_identifiers"]
        invitees, errors = [], []
        for ui in value.split(","):
            ui = ui.strip()
            try:
                validate_email(ui)
                try:
                    invitees.append(get_user_model().objects.get(email=ui))
                except get_user_model().DoesNotExist:
                    invitees.append(ui)
            except ValidationError:
                try:
                    invitees.append(get_user_model().objects.get(username=ui))
                except get_user_model().DoesNotExist:
                    errors.append(ui)
        if errors:
            message = (
                "The following are not valid email addresses or "
                "usernames: %s; no invitations sent" %
                ", ".join(errors))
            raise forms.ValidationError(message)
        return invitees
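
# Hedged usage sketch (added comment, not part of the original module): the
# member form accepts a comma-separated mix of usernames and e-mail addresses
# and resolves each to a user where possible, e.g.
#
#   form = GroupMemberForm(data={'role': 'member',
#                                'user_identifiers': 'alice, bob@example.com'})
#   if form.is_valid():
#       members = form.cleaned_data['user_identifiers']  # users and/or emails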
| ingenieroariel/geonode | geonode/groups/forms.py | Python | gpl-3.0 | 5,314 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# === This file is part of RateItSeven ===
#
# Copyright 2015, Paolo de Vathaire <[email protected]>
#
# RateItSeven is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RateItSeven is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RateItSeven. If not, see <http://www.gnu.org/licenses/>.
#
import guessit

from rateItSeven.scan.legacy.filescanner import FileScanner
from rateItSeven.scan.legacy.containers.movieguess import MovieGuess


class MovieScanner(object):
    """
    Scan file system directories for video files
    Find info for each file wrapped into a MovieGuess
    """

    def __init__(self, dir_paths: list):
        self.dir_paths = dir_paths

    def list_movies(self):
        return self.list_videos_in_types(["movie"])

    def list_episodes(self):
        return self.list_videos_in_types(["episode"])

    def list_videos_in_types(self, video_types):
        file_scanner = FileScanner(self.dir_paths)
        for abs_path in file_scanner.absolute_file_paths():
            guess = MovieGuess(guessit.guessit(abs_path), abs_path)
            if guess.is_video_in_types(video_types):
                yield guess
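

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module), assuming guessit is installed; the directory is a placeholder.
    for movie in MovieScanner(["/tmp/videos"]).list_movies():
        print(movie)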
| AltarBeastiful/rateItSeven | rateItSeven/scan/legacy/moviescanner.py | Python | gpl-3.0 | 1,660 |
# -*- coding: utf-8 -*-
'''
@author: Hung-Hsin Chen
@mail: [email protected]

kelly multivariate investment
'''

if __name__ == '__main__':
    pass
| chenhh/PySPPortfolio | PySPPortfolio/KellySPPortfolio.py | Python | gpl-3.0 | 159 |
"""Database interface module.
app/db.py
"""
# standard imports
import os
import sqlite3
from sqlite3 import Error
from ast import literal_eval
# 3rd party imports
from termcolor import cprint
# local imports
from app.room import Office, Living
from app.person import Staff, Fellow
def create_connection(database):
"""Create a database connection to a given db."""
try:
if not os.path.exists(database):
print('{0} database does not exist'.format(database))
else:
conn = sqlite3.connect(database)
return conn
except Error as e:
print('An error occurred: {0}'.format(e.args[0]))
def load_schema(db, db_schema='databases/amity_default.sql'):
"""Create database structure."""
try:
if not os.path.exists(db):
raise Exception('Database {0} does not exist'.format(db))
if not os.path.exists(db_schema):
raise Exception('Schema {0} does not exist'.format(db_schema))
except Exception as e:
return e
else:
with sqlite3.connect(db) as conn:
cur = conn.cursor()
with open(db_schema, 'rt') as f:
schema = f.read()
cur.executescript(schema)
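
# Hedged usage sketch (added comment, not part of the original module),
# assuming the database file and the default schema path exist:
#
#   conn = create_connection('databases/amity.db')
#   load_schema('databases/amity.db')
#   cur = conn.cursor()
#   save_office(amity_offices, cur)   # amity_offices: dict of Office objects
#   conn.commit()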
def save_office(dictoffice, cur):
"""Save office rooms data into database table offices."""
# check for data existence
try:
if dictoffice:
cur.execute('''SELECT COUNT(*) FROM offices''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM offices''')
# save current records
for obj in list(dictoffice.values()):
cur.execute("""INSERT INTO offices(id, name, type, occupants,
MAX_CAPACITY)
VALUES(?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.type_, str(obj.occupants),
obj.MAX_CAPACITY))
except Error as e:
print('Error: {0}'.format(e))
def load_office(dictoffice, cur):
"""Load office rooms data to application."""
cur.execute('''SELECT COUNT(*) FROM offices''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM offices''')
records = cur.fetchall()
for record in records:
dictoffice[record[1]] = Office(record[1], record[0],
literal_eval(record[3]))
        cprint('Offices data loaded successfully.', 'green')
def save_living(dictliving, cur):
"""Save living rooms data into database."""
# check for data existence
try:
if dictliving:
cur.execute('''SELECT COUNT(*) FROM livingspaces''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM livingspaces''')
# save current records
for obj in list(dictliving.values()):
cur.execute("""INSERT INTO livingspaces(id, name, type,
occupants, MAX_CAPACITY)
VALUES(?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.type_, str(obj.occupants),
obj.MAX_CAPACITY))
except Error as e:
print('Error: {0}'.format(e))
def load_living(dictliving, cur):
"""Load living rooms to application."""
cur.execute('''SELECT COUNT(*) FROM livingspaces''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM livingspaces''')
records = cur.fetchall()
for record in records:
dictliving[record[1]] = Living(record[1], record[0],
literal_eval(record[3]))
cprint('Living rooms data loaded successfully.', 'green')
def save_staff(dictstaff, cur):
"""Save staff persons data into database."""
# check for data existence
try:
if dictstaff:
cur.execute('''SELECT COUNT(*) FROM staff''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM staff''')
# save current records
for obj in list(dictstaff.values()):
cur.execute("""INSERT INTO staff(id, name, type, office_space)
VALUES(?, ?, ?, ?)""",
(obj.id, obj.name, obj.role, obj.office_space))
except Error as e:
print('Error: {0}'.format(e))
def load_staff(dictstaff, cur):
"""Load staff to application."""
cur.execute('''SELECT COUNT(*) FROM staff''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM staff''')
records = cur.fetchall()
for record in records:
dictstaff[record[1]] = Staff(record[1], record[0], record[3])
        cprint('Staff data loaded successfully.', 'green')
def save_fellow(dictfellow, cur):
"""Save fellow persons data into database."""
# check for data existence
try:
if dictfellow:
cur.execute('''SELECT COUNT(*) FROM fellows''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM fellows''')
# save current records
for obj in list(dictfellow.values()):
cur.execute("""INSERT INTO fellows(id, name, type,
office_space, living_space, accommodation)
VALUES(?, ?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.role, obj.office_space,
obj.living_space, obj.accommodation))
except Exception as e:
print('Error: {0}'.format(e))
def load_fellow(dictfellow, cur):
"""Load staff to application."""
cur.execute('''SELECT COUNT(*) FROM fellows''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM fellows''')
records = cur.fetchall()
for record in records:
dictfellow[record[1]] = Fellow(record[1], record[0], record[3],
record[5], record[4])
cprint('Fellows data loaded successfully.', 'green')
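# Usage sketch (hypothetical paths; assumes the database file and the schema
# script exist):
#
#   conn = create_connection('databases/amity.db')
#   load_schema('databases/amity.db')
#   cur = conn.cursor()
#   save_office(dict_of_offices, cur)  # dict of Office objects keyed by name
#   conn.commit()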
| Mbarak-Mbigo/cp1_project | app/db.py | Python | gpl-3.0 | 6,719 |
# -*- coding: utf-8 -*-
"""
Django settings for app-framework project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import sys
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import *
# ==============================================================================
# Basic application information (modify according to the instructions)
# ==============================================================================
# Get the values of APP_ID and APP_TOKEN from the BlueKing developer center -> click the app ID -> basic information
APP_ID = 'hello-world'
APP_TOKEN = 'c52de43b-ef43-49b2-8268-b53c5271750a'
# Domain of the BlueKing developer center, e.g. http://paas.bking.com
BK_PAAS_HOST = 'http://paas.bking.com'
# Whether to enable celery tasks
IS_USE_CELERY = True
# Message queue (RabbitMQ) settings for celery in local development
BROKER_URL_DEV = 'amqp://guest:[email protected]:5672/'
# TOCHANGE File paths of the modules that define celery tasks; list of modules to import when celery starts.
CELERY_IMPORTS = (
'home_application.celery_tasks',
)
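# Hedged sketch (hypothetical contents of home_application/celery_tasks.py;
# assumes the celery 3.x / djcelery stack configured further below):
#
#   from celery import task
#
#   @task()
#   def async_hello():
#       return 'hello'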
# ==============================================================================
# Application runtime environment configuration
# ==============================================================================
ENVIRONMENT = os.environ.get('BK_ENV', 'development')
# Basic application info is read from environment variables; when they are unset (e.g. local development), the values filled in at the top of this file are used
APP_ID = os.environ.get('APP_ID', APP_ID)
APP_TOKEN = os.environ.get('APP_TOKEN', APP_TOKEN)
BK_PAAS_HOST = os.environ.get('BK_PAAS_HOST', BK_PAAS_HOST)
# Application access path
SITE_URL = '/'
# Run mode: DEVELOP (development), TEST (testing), PRODUCT (production)
RUN_MODE = 'DEVELOP'
if ENVIRONMENT.endswith('production'):
RUN_MODE = 'PRODUCT'
DEBUG = False
SITE_URL = '/o/%s/' % APP_ID
elif ENVIRONMENT.endswith('testing'):
RUN_MODE = 'TEST'
DEBUG = False
SITE_URL = '/t/%s/' % APP_ID
else:
RUN_MODE = 'DEVELOP'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
# ===============================================================================
# Basic application information
# ===============================================================================
# Application secret key
SECRET_KEY = 'MQtd_0cw&AiY5jT&&#w7%9sCK=HW$O_e%ch4xDd*AaP(xU0s3X'
# CSRF cookie domain; defaults to the current domain
# CSRF_COOKIE_DOMAIN =''
CSRF_COOKIE_PATH = SITE_URL
ALLOWED_HOSTS = ['*']
# ==============================================================================
# Middleware and apps
# ==============================================================================
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
    'account.middlewares.LoginMiddleware',  # login authentication middleware
    'common.middlewares.CheckXssMiddleware',  # XSS attack handling middleware
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# OTHER 3rd Party App
'app_control',
'account',
'home_application',
)
# ==============================================================================
# Django project configuration
# ==============================================================================
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# Project paths
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT, PROJECT_MODULE_NAME = os.path.split(PROJECT_PATH)
BASE_DIR = os.path.dirname(os.path.dirname(PROJECT_PATH))
PYTHON_BIN = os.path.dirname(sys.executable)
# ===============================================================================
# Static resource settings
# ===============================================================================
# After the app is updated online, static resource files (js, css, etc.) may be
# served stale because of browser caching, so every static resource reference
# needs this version number, e.g. <script src="/a.js?v=${STATIC_VERSION}"></script>;
# whenever static resources change, just bump this version before release
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATIC_VERSION = 0.1
# Local static resource directory of the app
STATIC_URL = '%sstatic/' % SITE_URL
ROOT_URLCONF = 'urls'
# ==============================================================================
# Templates
# ==============================================================================
# mako template dir
MAKO_TEMPLATE_DIR = os.path.join(PROJECT_ROOT, 'templates')
MAKO_TEMPLATE_MODULE_DIR = os.path.join(BASE_DIR, 'templates_module', APP_ID)
if RUN_MODE not in ['DEVELOP']:
MAKO_TEMPLATE_MODULE_DIR = os.path.join(PROJECT_ROOT, 'templates_module', APP_ID)
# Django TEMPLATES配置
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# the context to the templates
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.csrf',
                'common.context_processors.mysetting',  # custom template context; exposes STATIC_URL and other variables to pages
'django.template.context_processors.i18n',
],
},
},
]
# ==============================================================================
# session and cache
# ==============================================================================
SESSION_EXPIRE_AT_BROWSER_CLOSE = True  # defaults to False; when True, SESSION_COOKIE_AGE has no effect
SESSION_COOKIE_PATH = SITE_URL  # NOTE do not change this; it could become identical to another app's and break login
# ===============================================================================
# Authentication
# ===============================================================================
AUTH_USER_MODEL = 'account.BkUser'
AUTHENTICATION_BACKENDS = ('account.backends.BkBackend', 'django.contrib.auth.backends.ModelBackend')
LOGIN_URL = "%s/login/?app_id=%s" % (BK_PAAS_HOST, APP_ID)
LOGOUT_URL = '%saccount/logout/' % SITE_URL
LOGIN_REDIRECT_URL = SITE_URL
REDIRECT_FIELD_NAME = "c_url"
# Name of the cookie used to verify login
BK_COOKIE_NAME = 'bk_token'
# Database initialization: admin user list
ADMIN_USERNAME_LIST = ['admin']
# ===============================================================================
# CELERY configuration
# ===============================================================================
if IS_USE_CELERY:
try:
import djcelery
INSTALLED_APPS += (
'djcelery', # djcelery
)
djcelery.setup_loader()
CELERY_ENABLE_UTC = False
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
if "celery" in sys.argv:
DEBUG = False
        # celery message queue (RabbitMQ) settings
BROKER_URL = os.environ.get('BK_BROKER_URL', BROKER_URL_DEV)
if RUN_MODE == 'DEVELOP':
from celery.signals import worker_process_init
@worker_process_init.connect
def configure_workers(*args, **kwargs):
import django
django.setup()
    except Exception:
pass
# ==============================================================================
# logging
# ==============================================================================
# Application logging configuration
BK_LOG_DIR = os.environ.get('BK_LOG_DIR', '/data/paas/apps/logs/')
LOGGING_DIR = os.path.join(BASE_DIR, 'logs', APP_ID)
LOG_CLASS = 'logging.handlers.RotatingFileHandler'
if RUN_MODE == 'DEVELOP':
LOG_LEVEL = 'DEBUG'
elif RUN_MODE == 'TEST':
LOGGING_DIR = os.path.join(BK_LOG_DIR, APP_ID)
LOG_LEVEL = 'INFO'
elif RUN_MODE == 'PRODUCT':
LOGGING_DIR = os.path.join(BK_LOG_DIR, APP_ID)
LOG_LEVEL = 'ERROR'
# Create the log directory automatically
if not os.path.exists(LOGGING_DIR):
try:
os.makedirs(LOGGING_DIR)
    except OSError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s [%(asctime)s] %(pathname)s %(lineno)d %(funcName)s %(process)d %(thread)d \n \t %(message)s \n',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s \n'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'root': {
'class': LOG_CLASS,
'formatter': 'verbose',
'filename': os.path.join(LOGGING_DIR, '%s.log' % APP_ID),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 5
},
'component': {
'class': LOG_CLASS,
'formatter': 'verbose',
'filename': os.path.join(LOGGING_DIR, 'component.log'),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 5
},
'wb_mysql': {
'class': LOG_CLASS,
'formatter': 'verbose',
'filename': os.path.join(LOGGING_DIR, 'wb_mysql.log'),
'maxBytes': 1024 * 1024 * 4,
'backupCount': 5
},
},
'loggers': {
'django': {
'handlers': ['null'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
        # the root logger, used across the whole project
'root': {
'handlers': ['root'],
'level': LOG_LEVEL,
'propagate': True,
},
        # component invocation log
'component': {
'handlers': ['component'],
'level': 'WARN',
'propagate': True,
},
# other loggers...
'django.db.backends': {
'handlers': ['wb_mysql'],
'level': 'DEBUG',
'propagate': True,
},
}
}
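# Usage sketch (assumption: app code running after Django settings load):
#
#   import logging
#   logger = logging.getLogger('root')  # the project-wide logger defined above
#   logger.info('app %s started', APP_ID)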
| tjm-1990/blueking | conf/default.py | Python | gpl-3.0 | 11,273 |
# -*- coding: UTF-8 -*-
# Syntax definition automatically generated by hljs2xt.py
# source: ruby.js
name = 'Ruby'
file_patterns = ['*.ruby', '*.rb', '*.gemspec', '*.podspec', '*.thor', '*.irb']
keyword = """
and false then defined module in return redo if BEGIN retry end for
true self when next until do begin unless END rescue nil else break
undef not super class case require yield alias while ensure elsif or
include attr_reader attr_writer attr_accessor
""".split()
doctag = ('doctag', [RE(r"@[A-Za-z]+")])
class comment:
default_text_color = DELIMITER
rules = [
doctag,
# ignore {'begin': {'pattern': "\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\\b", 'type': 'RegExp'}},
('doctag', [RE(r"(?:TODO|FIXME|NOTE|BUG|XXX):")]),
]
comment0 = ('comment', RE(r"#"), [RE(r"$")], comment)
comment1 = ('comment', RE(r"^\=begin"), [RE(r"^\=end")], comment)
class comment2:
default_text_color = DELIMITER
rules = [
# ignore {'begin': {'pattern': "\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\\b", 'type': 'RegExp'}},
('doctag', [RE(r"(?:TODO|FIXME|NOTE|BUG|XXX):")]),
]
comment2.__name__ = 'comment'
comment3 = ('comment', RE(r"^__END__"), [RE(r"\n$")], comment2)
class _group10:
default_text_color = DELIMITER
rules = [('_group1', RE(r"^\s*=>"), [RE(r"\B|\b")])]
_group10.__name__ = '_group1'
operator_escape = ('operator.escape', [RE(r"\\[\s\S]")])
class subst:
default_text_color = DELIMITER
rules = [('keyword', keyword)]
subst0 = ('subst', RE(r"#\{"), [RE(r"}")], subst)
class string:
default_text_color = DELIMITER
rules = [operator_escape, subst0]
string1 = ('string', RE(r"'"), [RE(r"'")], string)
string2 = ('string', RE(r"\""), [RE(r"\"")], string)
string3 = ('string', RE(r"`"), [RE(r"`")], string)
string4 = ('string', RE(r"%[qQwWx]?\("), [RE(r"\)")], string)
string5 = ('string', RE(r"%[qQwWx]?\["), [RE(r"\]")], string)
string6 = ('string', RE(r"%[qQwWx]?{"), [RE(r"}")], string)
string7 = ('string', RE(r"%[qQwWx]?<"), [RE(r">")], string)
string8 = ('string', RE(r"%[qQwWx]?/"), [RE(r"/")], string)
string9 = ('string', RE(r"%[qQwWx]?%"), [RE(r"%")], string)
string10 = ('string', RE(r"%[qQwWx]?-"), [RE(r"-")], string)
string11 = ('string', RE(r"%[qQwWx]?\|"), [RE(r"\|")], string)
string12 = ('string', RE(r"\B\?(?:\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b"), [RE(r"$")], string)
_group3 = ('_group3', RE(r"#<"), [RE(r">")])
#class _group4:
# default_text_color = DELIMITER
# rules = [
# # ignore {'begin': '([a-zA-Z]\\w*::)?[a-zA-Z]\\w*'},
# ]
class class0:
default_text_color = DELIMITER
rules = [
('keyword', ['class', 'module']),
('title', [RE(r"[A-Za-z_]\w*(?:::\w+)*(?:\?|\!)?")]),
('_group4', RE(r"<\s*"), [RE(r"(?=$|;)")]), #, _group4),
comment0,
comment1,
comment3,
]
class0.__name__ = 'class'
class1 = ('class', RE(r"\b(?:class|module)"), [RE(r"$|;")], class0)
title0 = [
RE(r"[a-zA-Z_]\w*[!?=]?|[-+~]\@|<<|>>|=~|===?|<=>|[<>]=?|\*\*|[-/+%^&*~`|]|\[\]=?"),
]
class params:
default_text_color = DELIMITER
rules = [('keyword', keyword)]
class params1:
default_text_color = DELIMITER
rules = [('params', RE(r"\("), [RE(r"\)")], params)]
params1.__name__ = 'params'
class function:
default_text_color = DELIMITER
rules = [
('keyword', ['def']),
('title', title0),
comment0,
comment1,
comment3,
]
function0 = ('function', RE(r"\b(?:def)"), [RE(r"$|;"), params1], function)
symbol = ('symbol', [RE(r"[a-zA-Z_]\w*(?:\!|\?)?:")])
class symbol0:
default_text_color = DELIMITER
rules = [
string1,
string2,
string3,
string4,
string5,
string6,
string7,
string8,
string9,
string10,
string11,
string12,
# ignore {'begin': '[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?'},
]
symbol0.__name__ = 'symbol'
symbol1 = ('symbol', RE(r":"), [RE(r"$")], symbol0)
number = [
RE(r"(?:\b0[0-7_]+)|(?:\b0x[0-9a-fA-F_]+)|(?:\b[1-9][0-9_]*(?:\.[0-9_]+)?)|[0_]\b"),
]
number0 = ('number', number)
class regexp:
default_text_color = DELIMITER
rules = [operator_escape, subst0]
class _group8:
default_text_color = DELIMITER
rules = [
_group3,
('regexp', RE(r"/"), [RE(r"/[a-z]*")], regexp),
('regexp', RE(r"%r{"), [RE(r"}[a-z]*")], regexp),
('regexp', RE(r"%r\("), [RE(r"\)[a-z]*")], regexp),
('regexp', RE(r"%r!"), [RE(r"![a-z]*")], regexp),
('regexp', RE(r"%r\["), [RE(r"\][a-z]*")], regexp),
comment0,
comment1,
comment3,
]
_group80 = ('_group8', RE(r"(?:!|!=|!==|%|%=|&|&&|&=|\*|\*=|\+|\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\?|\[|\{|\(|\^|\^=|\||\|=|\|\||~)\s*"), [RE(r"$")], _group8)
class _group2:
default_text_color = DELIMITER
rules = [
string1,
string2,
string3,
string4,
string5,
string6,
string7,
string8,
string9,
string10,
string11,
string12,
_group3,
class1,
function0,
symbol,
symbol1,
number0,
# ignore {'begin': '(\\$\\W)|((\\$|\\@\\@?)(\\w+))'},
_group80,
comment0,
comment1,
comment3,
]
class meta0:
default_text_color = DELIMITER
rules = [
('meta', RE(r"^(?:[>?]>|[\w#]+\(\w+\):\d+:\d+>|(?:\w+-)?\d+\.\d+\.\d(?:p\d+)?[^>]+>)"), [RE(r"\B|\b")]),
]
meta0.__name__ = 'meta'
class _group9:
default_text_color = DELIMITER
rules = []
rules = [
('keyword', keyword),
comment0,
comment1,
comment3,
('_group1', _group10, [RE(r"$")], _group2),
('meta', meta0, [RE(r"$")], _group9),
string1,
string2,
string3,
string4,
string5,
string6,
string7,
string8,
string9,
string10,
string11,
string12,
_group3,
class1,
function0,
symbol,
symbol1,
number0,
# ignore {'begin': '(\\$\\W)|((\\$|\\@\\@?)(\\w+))'},
_group80,
comment0,
comment1,
comment3,
]
subst.rules.extend(_group2.rules)
params.rules.extend(_group2.rules)
_group9.rules.extend(_group2.rules)
| editxt/editxt | resources/syntax/ruby.syntax.py | Python | gpl-3.0 | 6,546 |
# Copyright (C) 2017 Xavier Lucas
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
import keyring
def build_option_parser(parser):
parser.add_argument(
"--keyring-service",
metavar="<service>",
default="confluence-cli",
help="Service entry",
)
parser.add_argument(
"--keyring-username",
metavar="<username>",
help="User name",
)
def after_command(app, cmd, result, error):
pass
def before_command(app, cmd):
pass
def initialize(app):
_load_credentials(app, app.options)
def _load_credentials(app, options):
app.username = options.keyring_username
app.password = _get_or_save(options.keyring_service, app.username)
def _get_or_save(service, entry):
value = keyring.get_password(service, entry)
if value is None:
value = getpass.getpass("Password to store: ")
keyring.set_password(service, entry, value)
return value
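# Usage sketch (hypothetical service and user): resolves the password from the
# system keyring, prompting once and caching it for subsequent runs.
#
#   password = _get_or_save('confluence-cli', 'jdoe')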
| xlucas/confluence-python-cli | confluenceclient/plugins/credentials/keyring/plugin.py | Python | gpl-3.0 | 1,553 |
# solflag.py:
#
# Copyright (C) 2007
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
import sys
import math
import numpy
import pylab
import pyrap.tables
import lofar.parmdb
from . import solfetch
def flag(msName, dbName, half_window, threshold, sources=None, storeFlags=True,
updateMain=True, cutoffLow=None, cutoffHigh=None, debug=False):
"""
Solution based flagging.
msName: name of the measurement to flag
dbName: name of solution parameter database
half_window: half the size of the window used in the flagging algorithm
threshold: threshold for the flagging algorithm (median of the absolute
distance to the median); typical values 2, 3, 4
sources: (default None) for directional gains solutions, specify the
source directions that should be considered
storeFlags: (default True) if set to False, the flags will not be
written to the measurement
updateMain: (default True) if set to True, both the FLAG and the
FLAG_ROW column will be updated if set to False, only the
FLAG_ROW column will be updated
cutoffLow: (default None) if set, all values less than or equal to
cutoffLow will be flagged
cutoffHigh: (default None) if set, all values greater than or equal to
cutoffHigh will be flagged
debug: (default False) if set to True, a plot is generated for each
source - station combination that shows what has been flagged.
"""
# Read station names from MS.
antennaTable = pyrap.tables.table("%s/ANTENNA" % msName)
stations = antennaTable.getcol("NAME")
antennaTable.done()
del antennaTable
# Open main MS table.
ms = None
if storeFlags:
ms = pyrap.tables.table(msName, readonly=False)
# Open solution database.
db = lofar.parmdb.parmdb(dbName)
# Get solutions from solution database.
elements = ["0:0", "1:1"]
print("fetching solutions from %s..." % dbName, end=' ')
sys.stdout.flush()
ampl = __fetch(db, elements, stations, sources)
print("done.")
sys.stdout.flush()
# Determine the number of directions.
if sources is None:
n_directions = 1
else:
n_directions = len(sources)
# Get the number of time samples.
n_samples = ampl.shape[-1]
# Flag based on solutions.
print("flagging...")
sys.stdout.flush()
for stat in range(0, len(stations)):
# Allocate flag array for this station.
flags = numpy.zeros(n_samples, bool)
for src in range(0, n_directions):
for el in range(0, len(elements)):
# Create padded data array.
sol = numpy.zeros(n_samples + 2 * half_window)
sol[half_window:half_window + n_samples] = ampl[el][src][stat]
for i in range(0, half_window):
# Mirror at left edge.
idx = min(n_samples - 1, half_window - i)
sol[i] = ampl[el][src][stat][idx]
# Mirror at right edge
idx = max(0, n_samples - 2 - i)
sol[n_samples + half_window + i] = ampl[el][src][stat][idx]
sol_flag = numpy.zeros(n_samples + 2 * half_window, dtype=bool)
# Thresholding.
                if cutoffLow is not None:
                    sol_flag[sol <= cutoffLow] = True
                if cutoffHigh is not None:
                    sol_flag[sol >= cutoffHigh] = True
for i in range(half_window, half_window + n_samples):
# Compute median of the absolute distance to the median.
window = sol[i - half_window:i + half_window + 1]
window_flag = sol_flag[i - half_window:i + half_window + 1]
window_masked = window[~window_flag]
if len(window_masked) < math.sqrt(len(window)):
# Not enough data to get accurate statistics.
continue
median = numpy.median(window_masked)
q = 1.4826 * numpy.median(numpy.abs(window_masked - median))
# Flag sample if it is more than 1.4826 * threshold * the
# median distance away from the median.
if abs(sol[i] - median) > (threshold * q):
sol_flag[i] = True
if debug:
# Get masked x-axis and solutions.
mask = ~sol_flag[half_window:half_window + n_samples]
x_axis = numpy.array(list(range(0, n_samples)))
x_axis = x_axis[mask]
sol_masked = sol[half_window:half_window + n_samples]
sol_masked = sol_masked[mask]
fig_index = stat * n_directions + src + 1
pylab.figure(fig_index)
if el == 0:
pylab.clf()
pylab.subplot("21%d" % (el + 1))
pylab.plot(ampl[el][src][stat], 'r-')
pylab.plot(x_axis, sol_masked, 'go', markersize=4)
# Merge flags based on the solutions for the current direction
# into the station flags.
flags = flags | sol_flag[half_window:half_window + n_samples]
print("(%.2f%%) %s" % (100.0 * numpy.sum(flags) / n_samples, stations[stat]))
sys.stdout.flush()
if storeFlags:
stationTable = ms.query("ANTENNA1 == %d || ANTENNA2 == %d" % (stat, stat), sortlist="TIME,ANTENNA1,ANTENNA2")
baselineIter = pyrap.tables.tableiter(stationTable, ["ANTENNA1", "ANTENNA2"])
for baseline in baselineIter:
assert(baseline.nrows() == len(flags))
# Update row flags
msRowFlags = baseline.getcol("FLAG_ROW")
msRowFlags |= flags
baseline.putcol("FLAG_ROW", msRowFlags)
# Update main flags
if updateMain:
msFlags = baseline.getcol("FLAG")
for i in range(0, n_samples):
msFlags[i, :, :] |= flags[i]
baseline.putcol("FLAG", msFlags)
print("done.")
def __fetch(db, elements, stations, directions=None):
result = None
if directions is None:
for i in range(0, len(elements)):
(ampl, phase) = solfetch.fetch(db, stations, parm="Gain:%s" %
elements[i])
# Allocate result array if necessary.
if result is None:
result = numpy.zeros((len(elements), 1, len(stations),
ampl.shape[-1]))
# Copy solutions into result array.
assert(result[i][0].shape == ampl.shape)
result[i][0] = ampl
else:
for i in range(0, len(elements)):
for j in range(0, len(directions)):
# Fetch solutions.
(ampl, phase) = solfetch.fetch(db, stations,
parm="DirectionalGain:%s" % elements[i],
direction=directions[j])
# Allocate result array if necessary.
if result is None:
result = numpy.zeros((len(elements), len(directions),
len(stations), ampl.shape[-1]))
# Copy solutions into result array.
assert(result[i][j].shape == ampl.shape)
result[i][j] = ampl
return result
| kernsuite-debian/lofar | CEP/Calibration/BBSControl/scripts/solflag.py | Python | gpl-3.0 | 8,453 |
from __future__ import (absolute_import, division, print_function)
from mantid.kernel import logger
import AbinsModules
import six
from mantid.kernel import Atom
class GeneralAbInitioProgramName(type):
def __str__(self):
return self.__name__
# noinspection PyMethodMayBeStatic
@six.add_metaclass(GeneralAbInitioProgramName)
class GeneralAbInitioProgram(object):
"""
A general class which groups all methods which should be inherited or implemented by an ab initio program used
in INS analysis.
"""
def __init__(self, input_ab_initio_filename=None):
self._num_k = None
self._num_atoms = None
self._sample_form = None
self._ab_initio_program = None
self._clerk = AbinsModules.IOmodule(input_filename=input_ab_initio_filename,
group_name=AbinsModules.AbinsParameters.ab_initio_group)
def read_vibrational_or_phonon_data(self):
"""
        This method differs between ab initio programs and has to be overridden by the inheriting class.
This method reads vibrational or phonon data produced by an ab initio program.
This method should do the following:
1) Open file with vibrational or phonon data (CASTEP: foo.phonon). Name of a file should be stored in
self._input_filename. There must be no spaces in the name
of a file. Extension of a file (part of a name after '.') is arbitrary.
        2) The method should read information about frequencies, atomic displacements, k-point vectors,
           weights of k-points and ions from the ab initio file.
3) Method should reconstruct data for symmetry equivalent k-points
(protected method _recover_symmetry_points).
**Notice: this step is not implemented now. At the moment only Gamma point calculations are supported.**
4) Method should determine symmetry equivalent atoms
**Notice: this step is not implemented now.**
5) Method should calculate hash of a file with vibrational or phonon data (protected method _calculateHash).
6) Method should store vibrational or phonon data in an hdf file (inherited method save()). The name of an hdf file is
foo.hdf5 (CASTEP: foo.phonon -> foo.hdf5). In order to save the data to hdf file the following fields
should be set:
self._hdf_filename
self._group_name
self._attributes
self._datasets
The datasets should be a dictionary with the following entries:
"frequencies" - frequencies for all k-points grouped in one numpy.array in cm^-1
"weights" - weights of all k-points in one numpy.array
"k_vectors" - all k-points in one numpy array
**Notice: both symmetry equivalent and inequivalent points should be stored; at
the moment only Gamma point calculations are supported**
"atomic_displacements" - atomic displacements for all atoms and all k-points in one numpy array
"unit_cell" - numpy array with unit cell vectors in Angstroms
The following structured datasets should be also defined:
"atoms" - Python dictionary with the information about ions. Each entry in the
                              dictionary has the format 'atom_n', where n is the index of the
                              atom in the unit cell.
Each entry 'atom_n' in the dictionary is a dictionary with the following
entries:
"symbol" - chemical symbol of the element (for example hydrogen -> H)
"sort" - defines symmetry equivalent atoms, e.g, atoms with the same
sort are symmetry equivalent
**Notice at the moment this parameter is not functional
in LoadCastep**
"coord" - equilibrium position of atom in Angstroms;
it has a form of numpy array with three floats
"mass" - mass of atom
The attributes should be a dictionary with the following entries:
"hash" - hash of a file with the vibrational or phonon data. It should be a string
representation of hash.
"ab_initio_program" - name of the ab initio program which was used to obtain vibrational or
phonon data (for CASTEP -> CASTEP).
"filename" - name of input ab initio file
For more details about these fields please look at the documentation of IOmodule class.
:returns: Method should return an object of type AbinsData.
"""
return None
def load_formatted_data(self):
"""
Loads data from hdf file. After data is loaded it is put into AbinsData object.
:returns: object of type AbinsData
"""
data = self._clerk.load(list_of_datasets=["frequencies", "weights", "k_vectors",
"atomic_displacements", "unit_cell", "atoms"])
datasets = data["datasets"]
self._num_k = datasets["k_vectors"].shape[0]
self._num_atoms = len(datasets["atoms"])
loaded_data = {"frequencies": datasets["frequencies"],
"weights": datasets["weights"],
"k_vectors": datasets["k_vectors"],
"atomic_displacements": datasets["atomic_displacements"],
"unit_cell": datasets["unit_cell"],
"atoms": datasets["atoms"]}
return self._rearrange_data(data=loaded_data)
# Protected methods which should be reused by classes which read ab initio phonon data
def _recover_symmetry_points(self, data=None):
"""
This method reconstructs symmetry equivalent k-points.
:param data: dictionary with the data for only symmetry inequivalent k-points. This methods
adds to this dictionary phonon data for symmetry equivalent k-points.
"""
pass
def _rearrange_data(self, data=None):
"""
This method rearranges data read from input ab initio file.
:param data: dictionary with the data to rearrange
:returns: Returns an object of type AbinsData
"""
k_points = AbinsModules.KpointsData(num_atoms=self._num_atoms, num_k=self._num_k)
# 1D [k] (one entry corresponds to weight of one k-point)
k_points.set({"weights": data["weights"],
# 2D [k][3] (one entry corresponds to one coordinate of particular k-point)
"k_vectors": data["k_vectors"],
# 2D array [k][freq] (one entry corresponds to one frequency for the k-point k)
"frequencies": data["frequencies"],
# 4D array [k][atom_n][freq][3] (one entry corresponds to
# one coordinate for atom atom_n, frequency freq and k-point k )
"atomic_displacements": data["atomic_displacements"],
"unit_cell": data["unit_cell"]
})
atoms = AbinsModules.AtomsDaTa(num_atoms=self._num_atoms)
atoms.set(data["atoms"])
result_data = AbinsModules.AbinsData()
result_data.set(k_points_data=k_points, atoms_data=atoms)
return result_data
def save_ab_initio_data(self, data=None):
"""
Saves ab initio data to an HDF5 file.
:param data: dictionary with data to be saved.
"""
for name in data:
self._clerk.add_data(name=name, value=data[name])
self._clerk.add_file_attributes()
self._clerk.add_attribute("ab_initio_program", self._ab_initio_program)
self._clerk.save()
def get_formatted_data(self):
# try to load ab initio data from *.hdf5 file
try:
if self._ab_initio_program != self._clerk.get_previous_ab_initio_program():
raise ValueError("Different ab initio program was used in the previous calculation. Data in the hdf "
"file will be erased.")
self._clerk.check_previous_data()
ab_initio_data = self.load_formatted_data()
logger.notice(str(ab_initio_data) + " has been loaded from the HDF file.")
        # if loading from the *.hdf5 file failed, read the data directly from the input ab initio file and erase the hdf file
except (IOError, ValueError) as err:
logger.notice(str(err))
self._clerk.erase_hdf_file()
ab_initio_data = self.read_vibrational_or_phonon_data()
logger.notice(str(ab_initio_data) + " from ab initio input file has been loaded.")
return ab_initio_data
def check_isotopes_substitution(self, atoms=None, masses=None, approximate=False):
"""
Updates atomic mass in case of isotopes.
:param atoms: dictionary with atoms to check
:param masses: atomic masses read from an ab initio file
        :param approximate: whether to look for isotopes in an approximate way
"""
num_atoms = len(atoms)
eps = AbinsModules.AbinsConstants.MASS_EPS
if approximate:
isotopes_found = [abs(round(atoms["atom_%s" % i]["mass"]) - round(masses[i])) > eps
for i in range(num_atoms)]
else:
isotopes_found = [abs(atoms["atom_%s" % i]["mass"] - masses[i]) > eps for i in range(num_atoms)]
if any(isotopes_found):
for i in range(num_atoms):
if isotopes_found[i]:
z_num = Atom(symbol=atoms["atom_{}".format(i)]["symbol"]).z_number
a_num = int(round(masses[i]))
try:
temp = Atom(a_number=a_num, z_number=z_num).mass
atoms["atom_{}".format(i)]["mass"] = temp
# no mass for isotopes available; assume no isotopic substitution for this atom
except RuntimeError:
pass
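# Hedged sketch (hypothetical subclass): a concrete loader overrides
# read_vibrational_or_phonon_data() and inherits the caching logic of
# get_formatted_data():
#
#   class LoadFoo(GeneralAbInitioProgram):
#       def __init__(self, input_ab_initio_filename=None):
#           super(LoadFoo, self).__init__(input_ab_initio_filename)
#           self._ab_initio_program = "FOO"
#
#       def read_vibrational_or_phonon_data(self):
#           data = ...  # parse the file into the documented datasets
#           self.save_ab_initio_data(data=data)
#           return self._rearrange_data(data=data)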
| ScreamingUdder/mantid | scripts/AbinsModules/GeneralAbInitioProgram.py | Python | gpl-3.0 | 10,789 |
#!/usr/bin/python
# Welcome to CLRenew, a simple python script that automates mouse clicks to
# renew craigslist postings credit to https://github.com/yuqianli for base code
import pyautogui
import os
# Set a counter to count the # of exceptions that occur
counter = 0
# Start the while loop
while True:
try:
print ("Be sure your active listings page is up and active")
pyautogui.time.sleep(2)
renewButtonLocationX, renewButtonLocationY = pyautogui.locateCenterOnScreen('renew.png')
pyautogui.moveTo(renewButtonLocationX, renewButtonLocationY)
pyautogui.click()
pyautogui.time.sleep(2)
# This part of the loop will depend on your browser binding to go back a page:
pyautogui.keyDown('alt')
pyautogui.press('left')
pyautogui.keyUp('alt')
pyautogui.time.sleep(2)
    # Exception handler for when pyautogui can't locate the renew button on
    # the screen, or when it clicks away by mistake.
    # This section needs work and sometimes fails to function properly.
except Exception:
print ("Exception thrown, calculating course of action")
pyautogui.press('pgdn')
counter += 1
print ("counter =" + str(counter))
if counter >= 3: counter = 0
pyautogui.time.sleep(2)
renewButtonLocationX,renewButtonLocationY = pyautogui.locateCenterOnScreen('page2.png')
pyautogui.moveTo(renewButtonLocationX, renewButtonLocationY)
pyautogui.click()
pyautogui.time.sleep(2)
| calexil/CLRenew | renew.py | Python | gpl-3.0 | 1,501 |
import data
from utils import assert_403, assert_404, assert_200, parse_xml, xpath
PRD = 'prd'
def test_sharing(IndivoClient):
DS = 'ds'
def get_datastore(obj):
if hasattr(obj, DS):
return getattr(obj, DS).values()
return False
def set_datastore(obj, **kwargs):
if hasattr(obj, DS):
ds = getattr(obj, DS)
for kwarg, value in kwargs.items():
if hasattr(ds, kwarg):
setattr(ds, kwarg, value)
return obj
raise ValueError
def alice_setup(record_id, bob_account_id):
allergy_type = {'type' : 'http://indivo.org/vocab/xml/documents#Allergy'}
alice_chrome_client = IndivoClient('chrome', 'chrome')
alice_chrome_client.create_session(data.account)
alice_chrome_client.read_record(record_id=record_id)
alice_chrome_client.get_account_permissions(account_id=data.account['account_id'])
alice_chrome_client.get_account_records(account_id = data.account['account_id'])
# Alice posts a document
# (We save the first doc instead of zero
# due to the contact doc already in alice's account)
alice_chrome_client.post_document(data=data.doc01)
document_id = alice_chrome_client.read_documents().response[PRD]['Document'][1]
# Save the document_id in the client's datastore
alice_chrome_client.ds.document_id = document_id
# Save the first carenet_id in the client's datastore
carenet_id = alice_chrome_client.get_record_carenets().response[PRD]['Carenet'][0]
# post four documents to Alice's record, 2 allergies and 2 immunizations
document_1_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy00)), "/Document/@id")[0]
document_2_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy01)), "/Document/@id")[0]
document_3_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization)), "/Document/@id")[0]
document_4_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization2)), "/Document/@id")[0]
# and one more to test nevershare
document_5_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy02)), "/Document/@id")[0]
# auto-share allergies
alice_chrome_client.post_autoshare(data=allergy_type, carenet_id=carenet_id)
assert_200(alice_chrome_client.get_autoshare_bytype_all(record_id=record_id))
# unshare that one allergy, which should negate the autoshare
alice_chrome_client.delete_carenet_document(record_id = record_id, document_id = document_2_id, carenet_id=carenet_id)
# nevershare the third allergy
alice_chrome_client.document_nevershare_set(record_id = record_id, document_id = document_5_id)
# immunizations are individually shared (well only one of them)
alice_chrome_client.post_carenet_document(document_id = document_3_id, carenet_id=carenet_id)
# Alice shares her contact document(s) with the carenet
contact_doc = parse_xml(alice_chrome_client.read_documents(record_id = record_id, parameters={'type':'Contact'}))
for doc_id in xpath(contact_doc, '/Documents/Document/@id'):
alice_chrome_client.post_carenet_document(record_id = record_id, document_id = doc_id, carenet_id = carenet_id)
# Alice adds bob_account_id to carenet[0]
alice_chrome_client.post_carenet_account(carenet_id = carenet_id, data='account_id=' + bob_account_id + '&write=false')
# Review all accounts within carenet[0]
assert xpath(parse_xml(alice_chrome_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')
alice_chrome_client.get_carenet_apps(carenet_id = carenet_id)
alice_chrome_client.read_allergies(record_id = record_id)
# Finally, return the carenet_id, document_id
# in order to check Bob's access
# and a second document that is disallowed
return carenet_id, [document_1_id, document_3_id], [document_2_id, document_4_id, document_5_id]
def bob_setup(bob_account_id, record_id, carenet_id, allowed_docs, disallowed_docs):
bob_chrome_client = IndivoClient('chrome', 'chrome')
bob_chrome_client.create_session(data.account02)
# SZ: Bob should NOT be able to read the docs directly in the record
for doc_id in allowed_docs+disallowed_docs:
assert_403(bob_chrome_client.read_document(record_id=record_id, document_id=doc_id))
assert_403(bob_chrome_client.get_record_carenets(record_id=record_id))
# Bob should be able to read the allowed docs
for doc_id in allowed_docs:
assert_200(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should not be able to read the disallowed docs
for doc_id in disallowed_docs:
assert_404(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should be able to list docs in the carenet
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id).response[PRD]['Document']
# with a parameter
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id, parameters={'type': 'http://indivo.org/vocab/xml/documents#Allergy'}).response[PRD]['Document']
# Read carenet allergies
assert_200(bob_chrome_client.read_carenet_allergies(carenet_id = carenet_id))
assert_200(bob_chrome_client.read_carenet_problems(carenet_id = carenet_id))
# Read the contact document, this should work
contact_doc = parse_xml(bob_chrome_client.read_carenet_special_document(carenet_id = carenet_id, special_document='contact'))
contact_name = xpath(contact_doc, '/ns:Contact/ns:name/ns:fullName/text()', namespaces={'ns':'http://indivo.org/vocab/xml/documents#'})
assert(contact_name)
bob_chrome_client.get_account_permissions(account_id=bob_account_id)
bob_chrome_client.get_carenet_account_permissions(carenet_id= carenet_id,
record_id=record_id,
account_id=bob_account_id)
# Not yet implemented
#bob_chrome_client.get_carenet_app_permissions(account_id=bob_account_id)
return True
def admin_setup(bob_account_id):
admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
admin_client.set_app_id(data.app_email)
# Create a record for Alice and set her at the owner
record_id = admin_client.create_record(data=data.contact).response[PRD]['Record'][0]
admin_client.set_record_owner(data=data.account['account_id'])
# Create a basic set of carenets
carenet_names = ['Family2', 'Friends2', 'School/Office']
for cname in carenet_names:
admin_client.create_carenet(data='name=' + cname)
# Check to make sure the admin can list the carenets and the accounts within each one
carenets = xpath(parse_xml(admin_client.get_record_carenets(record_id = record_id)),'/Carenets/Carenet/@id')
for carenet_id in carenets:
assert len(xpath(parse_xml(admin_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')) > 0
return record_id
bob_account_id = '[email protected]'
# Admin spawning carenets under Alice's newly created record
record_id = admin_setup(bob_account_id)
    # Given Bob's account id and a record that has been set up for Alice,
# Alice gives Bob the document_id that she'd like to share with him
# Even though Alice gives Bob a document_id, Bob has the ability
# to read all documents within the carenet that Alice added him to
# 2010-09-13 now Alice also shares her contact URL and we check
# that Bob can read it at the special URL
carenet_id, allowed_documents, disallowed_documents = alice_setup(record_id, bob_account_id)
return bob_setup(bob_account_id, record_id, carenet_id, allowed_documents, disallowed_documents)
| newmediamedicine/indivo_server_1_0 | indivo/tests/integration/test_modules/sharing.py | Python | gpl-3.0 | 7,841 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
fig, ax = plt.subplots()
#ax = plt.gca()
#ax.set_autoscale_on(False)
polygons = []
color = []
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
# polygon
my_seg = [[125.12, 539.69, 140.94, 522.43, 100.67, 496.54, 84.85, 469.21, 73.35, 450.52, 104.99, 342.65, 168.27, 290.88, 179.78, 288, 189.84, 286.56, 191.28, 260.67, 202.79, 240.54, 221.48, 237.66, 248.81, 243.42, 257.44, 256.36, 253.12, 262.11, 253.12, 275.06, 299.15, 233.35, 329.35, 207.46, 355.24, 206.02, 363.87, 206.02, 365.3, 210.34, 373.93, 221.84, 363.87, 226.16, 363.87, 237.66, 350.92, 237.66, 332.22, 234.79, 314.97, 249.17, 271.82, 313.89, 253.12, 326.83, 227.24, 352.72, 214.29, 357.03, 212.85, 372.85, 208.54, 395.87, 228.67, 414.56, 245.93, 421.75, 266.07, 424.63, 276.13, 437.57, 266.07, 450.52, 284.76, 464.9, 286.2, 479.28, 291.96, 489.35, 310.65, 512.36, 284.76, 549.75, 244.49, 522.43, 215.73, 546.88, 199.91, 558.38, 204.22, 565.57, 189.84, 568.45, 184.09, 575.64, 172.58, 578.52, 145.26, 567.01, 117.93, 551.19, 133.75, 532.49]]
for seg in my_seg:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
#p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
#ax.add_collection(p)
#p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
p = PatchCollection(polygons, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(100 * np.random.rand(1))
ax.add_collection(p)
ax.autoscale_view()  # make sure the view limits include the added collection
plt.show()
| CivilNet/Gemfield | src/python/pascal_voc_xml2json/seg.py | Python | gpl-3.0 | 1609 |
# Copyright (C) 2008 David Bern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# To do:
# fix avg packet size computation for file header
# find bug where the program just spits out stopping factory and doesn't
# return the deferred callback/errback
from twisted.web import client
from twisted.internet import defer, reactor
from twisted.python import failure, log
from twisted.protocols import basic
from twisted.python.util import InsensitiveDict
from cStringIO import StringIO
from optparse import OptionParser
from urlparse import urlsplit
import base64
import sys
import math
import time
import struct
import re
import hashlib
from md5 import md5
from rmff import *
from rtsp import RTSPClient, RTSPClientFactory
from sdpp import Sdpplin
from asmrp import Asmrp
# http://blogmag.net/blog/read/38/Print_human_readable_file_size
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
# http://lists.mplayerhq.hu/pipermail/mplayer-dev-eng/2008-March/056903.html
def rn5_auth(username, realm, password, nonce, uuid):
    MUNGE_TEMPLATE = '%-.200s%-.200s%-.200sCopyright (C) 1995,1996,1997 RealNetworks, Inc.'
    authstr = "%-.200s:%-.200s:%-.200s" % (username, realm, password)
    first_pass = hashlib.md5(authstr).hexdigest()
    munged = MUNGE_TEMPLATE % (first_pass, nonce, uuid)
    return hashlib.md5(munged).hexdigest()
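# Usage sketch (hypothetical credentials; realm, nonce and uuid come from the
# server's RN5 WWW-Authenticate challenge):
#
#   resp = rn5_auth('user', 'RN5-realm', 'secret', nonce, guid)
#   # sent back in the Authorization: RN5 ..., response="<resp>" header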
class RealChallenge(object):
XOR_TABLE = [ 0x05, 0x18, 0x74, 0xd0, 0x0d, 0x09, 0x02, 0x53, 0xc0, 0x01,
0x05, 0x05, 0x67, 0x03, 0x19, 0x70, 0x08, 0x27, 0x66, 0x10,
0x10, 0x72, 0x08, 0x09, 0x63, 0x11, 0x03, 0x71, 0x08, 0x08,
0x70, 0x02, 0x10, 0x57, 0x05, 0x18, 0x54 ]
def AV_WB32(d):
""" Used by RealChallenge() """
d = d.decode('hex')
return list(struct.unpack('%sB' % len(d), d))
def compute(rc1):
""" Translated from MPlayer's source
Computes the realchallenge response and checksum """
buf = list()
buf.extend( RealChallenge.AV_WB32('a1e9149d') )
buf.extend( RealChallenge.AV_WB32('0e6b3b59') )
rc1 = rc1.strip()
if rc1:
if len(rc1) == 40: rc1 = rc1[:32]
if len(rc1) > 56: rc1 = rc1[:56]
buf.extend( [ ord(i) for i in rc1 ] )
buf.extend( [ 0 for i in range(0, 56 - len(rc1)) ] )
# xor challenge bytewise with xor_table
for i in range(0, len(RealChallenge.XOR_TABLE)):
buf[8 + i] ^= RealChallenge.XOR_TABLE[i];
sum = md5( ''.join([ chr(i) for i in buf ]) )
response = sum.hexdigest() + '01d0a8e3'
chksum = list()
for i in range(0, 8):
chksum.append(response[i * 4])
chksum = ''.join(chksum)
return (response, chksum)
compute = staticmethod(compute)
AV_WB32 = staticmethod(AV_WB32)
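# Usage sketch (hypothetical challenge string): the RealChallenge1 value from
# the server's OPTIONS response is fed in, and the (response, checksum) pair
# is echoed back via the RealChallenge2 header of the first SETUP request
# (see sendNextMessage below):
#
#   response, chksum = RealChallenge.compute('abcdef0123456789abcdef0123456789')
#   # headers['RealChallenge2'] = '%s, sd=%s' % (response, chksum)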
class RDTClient(RTSPClient):
data_received = 0
out_file = None
prev_timestamp = None
prev_stream_num = None
streamids = []
setup_streamids = []
ended_streamids = []
sent_options = False
sent_describe = False
sent_parameter = False
sent_bandwidth = False
sent_realchallenge2 = False
sent_rn5_auth = False
rn5_authdata = None
EOF = 0xff06
LATENCY_REPORT = 0xff08
# RDT Header:
# Packet Flags (1 byte)
# Sequence number / packet type (2 bytes)
# Packet Length (if specified in flags) (2 bytes)
# Flags2 (1 byte)
# Timestamp (4 bytes)
# Total reliable (2 bytes)
# Data --
# packet_flags:
# 0... .... = length included & 0x80: 0
# .1.. .... = need reliable & 0x40: 1
# ..00 000. = Stream ID: 0
# .... ...0 = Is reliable & 0x01: 0
# Flags2:
# 0... .... = Back-to-back & 0x80: 0
# .1.. .... = Slow data & 0x40: 1
# ..00 0011 = Asm Rule & 0x3F: 3
def select_mlti_data(self, mlti_chunk, selection):
""" Takes a MLTI-chunk from an SDP OpaqueData and a rule selection
Returns the codec data based on the given rule selection """
if not mlti_chunk.startswith('MLTI'):
print('MLTI tag missing')
return mlti_chunk
idx = 4 # past 'MLTI'
numrules = struct.unpack('!H', mlti_chunk[idx:idx + 2])[0]
idx += 2
rules = []
for i in range(0, numrules):
rules.append(struct.unpack('!H', mlti_chunk[idx:idx + 2])[0])
idx += 2
        if selection >= numrules:  # rules are indexed 0..numrules-1
            return 0
numcodecs = struct.unpack('!H', mlti_chunk[idx:idx + 2])[0]
idx += 2
codecs = []
for i in range(numcodecs):
codec_length = struct.unpack('!I', mlti_chunk[idx:idx + 4])[0]
idx += 4 # skip over codec length integer
codecs.append(mlti_chunk[idx:idx + codec_length])
idx += codec_length # skip over codec length worth of data
return codecs[rules[selection]]
def handleEndHeaders(self, headers):
if headers.get('realchallenge1'):
self.realchallenge1 = headers['realchallenge1'][0]
elif headers.get('www-authenticate', [''])[0].startswith('RN5'):
            ## hack: resend the DESCRIBE request with auth
            self.sent_describe = False
            print 'RN5 Authentication'
            self.rn5_authdata = {}
            for authdata in headers['www-authenticate'][0][3:].split(','):
                key, value = authdata.split('=')
                # strip the surrounding double quotes
                self.rn5_authdata[key.strip()] = value[1:-1]
if self.content_length is None:
self.sendNextMessage()
def handleSdp(self, data):
""" Called with SDP Response data
Uses the SDP response to construct the file header """
sdp = Sdpplin(data)
header = rmff_header_t()
try: abstract = sdp['Abstract']
except KeyError: abstract = ''
header.fileheader = rmff_fileheader_t(4 + sdp['StreamCount'])
try: title = sdp['Title']
except KeyError: title = ''
try: author = sdp['Author']
except KeyError: author = ''
try: copyright = sdp['Copyright']
except KeyError: copyright = ''
header.cont = rmff_cont_t(title, author,
copyright, abstract)
header.data = rmff_data_t(0, 0)
duration = 0
max_bit_rate = 0
avg_bit_rate = 0
max_packet_size = 0
avg_packet_size = None
self.streammatches = {}
# the rulebook is sometimes truncated and spread across the streams
# not sure if this is common, or even the correct way to handle it
rulebook = ''.join([s['ASMRuleBook'] for s in sdp.streams])
symbols = {'Bandwidth':self.factory.bandwidth,'OldPNMPlayer':'0'}
rulematches, symbols = Asmrp.asmrp_match(rulebook, symbols)
# Avg packet size seems off
for s in sdp.streams:
self.streammatches[s['streamid']] = rulematches
mlti = self.select_mlti_data(s['OpaqueData'], rulematches[0])
# some streams don't have the starttime, but do have endtime
# and other meta data
try: start_time = s['StartTime']
except: start_time = 0
mdpr = rmff_mdpr_t(s['streamid'], s['MaxBitRate'],
s['AvgBitRate'], s['MaxPacketSize'],
s['AvgPacketSize'], start_time,
s['Preroll'], s.duration,
s['StreamName'], s['mimetype'], mlti)
header.streams.append(mdpr)
if s.duration > duration:
duration = s.duration
if mdpr.max_packet_size > max_packet_size:
max_packet_size = mdpr.max_packet_size
max_bit_rate += mdpr.max_bit_rate
avg_bit_rate += mdpr.avg_bit_rate
if avg_packet_size is None:
avg_packet_size = mdpr.avg_packet_size
else:
avg_packet_size = (avg_packet_size + mdpr.avg_packet_size)/2
header.prop = rmff_prop_t(max_bit_rate, avg_bit_rate,
max_packet_size, avg_packet_size,
0, duration, 0, 0, 0, sdp['StreamCount'],
sdp['Flags'])
return header
def heartbeat(self):
target = '%s://%s:%s' % (self.factory.scheme,
self.factory.host,
self.factory.port)
headers = {}
headers['User-Agent'] = self.factory.agent
headers['PlayerStarttime'] = self.factory.PLAYER_START_TIME
headers['CompanyID'] = self.factory.companyID
headers['GUID'] = self.factory.GUID
headers['RegionData'] = '0'
headers['ClientID'] = self.factory.clientID
headers['Pragma'] = 'initiate-session'
self.sendOptions('*', headers)
reactor.callLater(30, self.heartbeat)
def handleContentResponse(self, data, content_type):
""" Called when the entire content-length has been received
Exepect to receive type application/sdp """
f = open('sdp.txt', 'w')
f.write(data)
f.close()
if content_type == 'application/sdp':
reactor.callLater(30, self.heartbeat)
self.out_file = open(self.factory.filename, 'wb')
self.header = self.handleSdp(data)
self.streamids = [i for i in range(self.header.prop.num_streams)]
self.subscribe = ''
for i,rules in self.streammatches.items():
for r in rules:
self.subscribe += 'stream=%s;rule=%s,' % (i,r)
self.subscribe = self.subscribe[:-1] # Removes trailing comma
self.out_file.write(self.header.dump())
self.num_packets = 0
self.data_size = 0
def handleRDTData(self, data, rmff_ph):
self.num_packets += 1
self.data_size += len(data)
rmff_str = str(rmff_ph)
self.data_size += len(rmff_str)
self.out_file.write(rmff_str)
self.out_file.write(data)
def handleStreamEnd(self):
self.header.prop.num_packets = self.num_packets
self.header.data.num_packets = self.num_packets
self.header.data.size += self.data_size
if self.out_file:
self.out_file.seek(0)
self.out_file.write(self.header.dump())
self.out_file.close()
self.factory.success(0)
def handleRDTPacket(self, data):
""" Called with a full RDT data packet """
header, data = data[:10], data[10:]
packet_flags = struct.unpack('B', header[0])[0]
packet_type = struct.unpack('!H', header[1:3])[0]
if packet_type == self.EOF:
# EOF Flags:
# 1... .... = Need reliable: 1
# .000 01.. = Stream ID: 1
# .... ..1. = Packet sent: 1
# .... ...0 = Ext Flag: 0
streamid = (packet_flags >> 2) & 0x1F
if streamid not in self.ended_streamids:
self.ended_streamids.append(streamid)
# Waits for all streamids to send their EOF
if len(self.streamids) != len(self.ended_streamids):
return
self.handleStreamEnd()
return
if packet_type == self.LATENCY_REPORT:
return
timestamp = struct.unpack('!I', header[4:8])[0]
stream_num = (packet_flags >> 1) & 0x1f
flags2 = struct.unpack('B', header[3])[0]
# Creates the rmff_header_t which is
# inserted between packets for output
rmff_ph = rmff_pheader_t()
rmff_ph.length = len(data) + 12 # + 12 for the size of rmff_ph
rmff_ph.stream_number = stream_num
rmff_ph.timestamp = timestamp
if (flags2 & 0x01) == 0 and (self.prev_timestamp != timestamp or self.prev_stream_num != stream_num):
# I believe this flag signifies a stream change
self.prev_timestamp = timestamp
self.prev_stream_num = stream_num
rmff_ph.flags = 2
else:
rmff_ph.flags = 0
self.handleRDTData(data, rmff_ph)
def handleInterleavedData(self, data):
""" Called when an interleaved data frame is received """
self.data_received += len(data)
self.factory.data_received = self.data_received
# Each Interleaved packet can have multiple RDT packets
while len(data) > 0:
# Here we check packet_flags to see if the RDT header includes
# the length of the RDT packet. If it does, we try to handle
# multiple RDT packets.
packet_flags = struct.unpack('B', data[0])[0]
packet_type = struct.unpack('!H', data[1:3])[0]
if packet_type == self.EOF:
self.handleRDTPacket(data)
return
len_included = packet_flags & 0x80 == 0x80
if len_included:
packet_length = struct.unpack('!H', data[3:5])[0]
packet, data = data[:packet_length], data[packet_length:]
self.handleRDTPacket(packet)
else:
# If no length is given, assume remaining data is one packet
self.handleRDTPacket(data)
break
# ----------------------
# Packet Sending Methods
# ----------------------
    def _sendOptions(self, headers=None):
        # use None instead of a mutable default dict, which would be shared
        # (and mutated) across calls
        if headers is None:
            headers = {}
target = '%s://%s:%s' % (self.factory.scheme,
self.factory.host,
self.factory.port)
headers['User-Agent'] = self.factory.agent
headers['ClientChallenge'] = self.factory.CLIENT_CHALLENGE
headers['PlayerStarttime'] = self.factory.PLAYER_START_TIME
headers['CompanyID'] = self.factory.companyID
headers['GUID'] = self.factory.GUID
headers['RegionData'] = '0'
headers['ClientID'] = self.factory.clientID
headers['Pragma'] = 'initiate-session'
self.sendOptions(target, headers)
    def _sendDescribe(self, headers=None):
        if headers is None:
            headers = {}
        target = '%s://%s:%s%s' % (self.factory.scheme,
                                   self.factory.host,
                                   self.factory.port,
                                   self.factory.path)
headers['Accept'] = 'application/sdp'
# headers['Bandwidth'] = str(self.factory.bandwidth)
headers['GUID'] = self.factory.GUID
headers['RegionData'] = '0'
headers['ClientID'] = self.factory.clientID
headers['SupportsMaximumASMBandwidth'] = '1'
headers['Language'] = 'en-US'
headers['Require'] = 'com.real.retain-entity-for-setup'
        ## RN5 auth
        if self.rn5_authdata:
            self.rn5_authdata['username'] = self.factory.username
            self.rn5_authdata['GUID'] = '00000000-0000-0000-0000-000000000000'
            self.rn5_authdata['response'] = \
                rn5_auth(nonce=self.rn5_authdata['nonce'],
                         username=self.factory.username,
                         password=self.factory.password,
                         uuid=self.rn5_authdata['GUID'],
                         realm=self.rn5_authdata['realm'])
            # builds a string like 'RN5 username="foo", realm="bla", ...'
            headers['Authorization'] = 'RN5 ' + ', '.join(
                ['%s="%s"' % (key, val) for key, val in self.rn5_authdata.items()])
if not self.rn5_authdata and self.factory.username is not None:
authstr = '%s:%s' % (self.factory.username,
self.factory.password
if self.factory.password else '')
authstr = base64.b64encode(authstr)
headers['Authorization'] = 'Basic %s' % authstr
self.sendDescribe(target, headers)
    def _sendSetup(self, headers=None, streamid=0):
        if headers is None:
            headers = {}
        target = '%s://%s:%s%s/streamid=%s' % (self.factory.scheme,
                                               self.factory.host,
                                               self.factory.port,
                                               self.factory.path,
                                               streamid)
headers['If-Match'] = self.session
headers['Transport'] = 'x-pn-tng/tcp;mode=play,rtp/avp/tcp;unicast;mode=play'
self.sendSetup(target, headers)
def _sendSetParameter(self, key, value, headers=None):
target = '%s://%s:%s%s' % (self.factory.scheme, self.factory.host,
self.factory.port, self.factory.path)
if headers is None:
headers = {}
headers['Session'] = self.session
headers[key] = value
self.sendSetParameter(target, headers)
    def _sendPlay(self, range='0-', headers=None):
        if headers is None:
            headers = {}
        target = '%s://%s:%s%s' % (self.factory.scheme,
                                   self.factory.host,
                                   self.factory.port,
                                   self.factory.path)
if self.session:
headers['Session'] = self.session
self.sendPlay(range, target, headers)
def sendNextMessage(self):
""" This method goes in order sending messages to the server:
OPTIONS, DESCRIBE, SETUP, SET_PARAMETER, SET_PARAMETER, PLAY
Returns True if it sent a packet, False if it didn't """
if not self.sent_options:
self.sent_options = True
self._sendOptions()
return True
if not self.sent_describe:
            print('sending describe')
self.sent_describe = True
self._sendDescribe()
return True
if len(self.streamids) > len(self.setup_streamids):
headers = {}
if not self.sent_realchallenge2:
self.sent_realchallenge2 = True
challenge_tuple = RealChallenge.compute(self.realchallenge1)
headers['RealChallenge2'] = '%s, sd=%s' % challenge_tuple
# Gets a streamid that hasn't been setup yet
s = [s for s in self.streamids if s not in self.setup_streamids][0]
self.setup_streamids.append(s)
self._sendSetup(streamid=s, headers=headers)
return True
if not self.sent_parameter:
self.sent_parameter = True
self._sendSetParameter('Subscribe', self.subscribe)
return True
if not self.sent_bandwidth:
self.sent_bandwidth = True
self._sendSetParameter('SetDeliveryBandwidth',
'Bandwidth=%s;BackOff=0' % self.factory.bandwidth)
return True
if not self.sent_play:
self.sent_play = True
self._sendPlay()
return True
return False
def success(result):
if result == 0:
print('Success!')
else:
print('Result: %s' % result)
reactor.stop()
def error(failure):
print('Failure!: %s' % failure.getErrorMessage())
reactor.stop()
def progress(factory):
print('Downloaded %s' % sizeof_fmt(factory.data_received))
reactor.callLater(1, progress, factory)
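# --- Editor's sketch, not part of the original client ---
# A standalone decoder for the 10-byte RDT data header that
# RDTClient.handleRDTPacket() unpacks above. Offsets mirror that method;
# the bit-level meanings are assumptions inferred from its flag handling.
def example_parse_rdt_header(header):
    """ Returns (packet_flags, packet_type, timestamp) from a 10-byte RDT header """
    packet_flags = struct.unpack('B', header[0])[0]    # bit 7 set => length field present
    packet_type = struct.unpack('!H', header[1:3])[0]  # compared against EOF / LATENCY_REPORT
    timestamp = struct.unpack('!I', header[4:8])[0]
    return packet_flags, packet_type, timestamp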
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-u', '', dest='url', help='url to download',
metavar='URL')
parser.add_option('-f', '', dest='file', help='file to save to',
metavar='FILENAME')
options, args = parser.parse_args()
if options.url is None:
print('You must enter a url to download\n')
parser.print_help()
exit()
if not options.file:
options.file = re.search('[^/]*$', options.url).group(0)
    if not options.file:
print('Invalid file name specified\n')
parser.print_help()
exit()
log.startLogging(sys.stdout)
factory = RTSPClientFactory(options.url, options.file)
factory.protocol = RDTClient
factory.bandwidth = 99999999999
factory.deferred.addCallback(success).addErrback(error)
reactor.connectTCP(factory.host, factory.port, factory)
reactor.callLater(1, progress, factory)
reactor.run()
| odie5533/Python-RTSP | rdt.py | Python | gpl-3.0 | 21,513 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-05 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rules', '0057_auto_20180302_1312'),
]
operations = [
migrations.AddField(
model_name='source',
name='public_source',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| StamusNetworks/scirius | rules/migrations/0058_source_public_source.py | Python | gpl-3.0 | 440 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note: The deserialization code originally comes from ABE.
from bitcoin import *
from util import print_error
import time
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import StringIO
import mmap
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def map_file(self, file, start): # Initialize with bytes from file
self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
self.read_cursor = start
def seek_file(self, position):
self.read_cursor = position
def close_file(self):
self.input.close()
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
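# --- Editor's sketch, not part of the original file ---
# Round-tripping Bitcoin's variable-length "compact size" integers with the
# BCDataStream class above; the sample values are arbitrary, chosen to
# exercise the 1-, 3-, 5- and 9-byte encodings.
def example_compact_size_roundtrip():
    ds = BCDataStream()
    for n in (0, 252, 253, 65535, 65536, 2 ** 32):
        ds.clear()
        ds.write_compact_size(n)
        assert ds.read_compact_size() == n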
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
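# --- Editor's sketch, not part of the original file ---
# Minimal usage of the Enumeration helper above: values auto-increment unless
# a (name, value) tuple pins them, exactly as the opcodes table below does.
def example_enumeration_usage():
    Color = Enumeration("Color", ["RED", ("GREEN", 5), "BLUE"])
    assert Color.RED == 0 and Color.GREEN == 5 and Color.BLUE == 6
    assert Color.whatis(6) == "BLUE"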
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def parse_redeemScript(bytes):
dec = [ x for x in script_GetOp(bytes.decode('hex')) ]
# 2 of 2
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex') ]
return 2, pubkeys
# 2 of 3
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex'), dec[3][1].encode('hex') ]
return 2, pubkeys
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
        return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def get_address_from_input_script(bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return [], {}, "(None)"
# payto_pubkey
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
return None, {}, "(pubkey)"
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
pubkey = decoded[1][1].encode('hex')
if sig[-2:] == '01':
sig = sig[:-2]
return [pubkey], {pubkey:sig}, public_key_to_bc_address(pubkey.decode('hex'))
else:
print_error("cannot find address in input script", bytes.encode('hex'))
return [], {}, "(None)"
# p2sh transaction, 2 of n
match = [ opcodes.OP_0 ]
while len(match) < len(decoded):
match.append(opcodes.OP_PUSHDATA4)
if match_decoded(decoded, match):
redeemScript = decoded[-1][1]
num = len(match) - 2
signatures = map(lambda x:x[1][:-1].encode('hex'), decoded[1:-1])
dec2 = [ x for x in script_GetOp(redeemScript) ]
# 2 of 2
match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match2):
pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ]
return pubkeys, signatures, hash_160_to_bc_address(hash_160(redeemScript), 5)
# 2 of 3
match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match2):
pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ]
return pubkeys, signatures, hash_160_to_bc_address(hash_160(redeemScript), 5)
print_error("cannot find address in input script", bytes.encode('hex'))
return [], {}, "(None)"
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return True, public_key_to_bc_address(decoded[0][1])
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return False, hash_160_to_bc_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return False, hash_160_to_bc_address(decoded[1][1],5)
return False, "(None)"
class Transaction:
def __init__(self, raw):
self.raw = raw
self.deserialize()
self.inputs = self.d['inputs']
self.outputs = self.d['outputs']
self.outputs = map(lambda x: (x['address'],x['value']), self.outputs)
self.locktime = self.d['lockTime']
def __str__(self):
return self.raw
@classmethod
def from_io(klass, inputs, outputs):
        raw = klass.serialize(inputs, outputs, for_sig = None)  # for_sig=None means serialize without signing
self = klass(raw)
self.inputs = inputs
self.outputs = outputs
return self
@classmethod
def sweep(klass, privkeys, network, to_address, fee):
inputs = []
for privkey in privkeys:
pubkey = public_key_from_private_key(privkey)
address = address_from_private_key(privkey)
u = network.synchronous_get([ ('blockchain.address.listunspent',[address])])[0]
pay_script = klass.pay_script(address)
for item in u:
item['scriptPubKey'] = pay_script
item['redeemPubkey'] = pubkey
item['address'] = address
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
inputs += u
if not inputs:
return
total = sum( map(lambda x:int(x.get('value')), inputs) ) - fee
outputs = [(to_address, total)]
self = klass.from_io(inputs, outputs)
self.sign({ pubkey:privkey })
return self
@classmethod
def multisig_script(klass, public_keys, num=None):
n = len(public_keys)
if num is None: num = n
# supports only "2 of 2", and "2 of 3" transactions
assert num <= n and n in [2,3]
if num==2:
s = '52'
elif num == 3:
s = '53'
else:
raise
for k in public_keys:
s += var_int(len(k)/2)
s += k
        if n == 2:
            s += '52'
        elif n == 3:
            s += '53'
        else:
            raise ValueError("only 2 or 3 public keys are supported")
s += 'ae'
return s
@classmethod
    def pay_script(klass, addr):
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == 70:
script = '76a9' # op_dup, op_hash_160
script += '14' # push 0x14 bytes
script += hash_160.encode('hex')
script += '88ac' # op_equalverify, op_checksig
elif addrtype == 125:
script = 'a9' # op_hash_160
script += '14' # push 0x14 bytes
script += hash_160.encode('hex')
script += '87' # op_equal
        else:
            raise ValueError("unknown address type: %d" % addrtype)
return script
@classmethod
def serialize( klass, inputs, outputs, for_sig = None ):
push_script = lambda x: op_push(len(x)/2) + x
s = int_to_hex(1,4) # version
s += var_int( len(inputs) ) # number of inputs
for i in range(len(inputs)):
txin = inputs[i]
s += txin['prevout_hash'].decode('hex')[::-1].encode('hex') # prev hash
s += int_to_hex(txin['prevout_n'],4) # prev index
signatures = txin.get('signatures', {})
if for_sig is None and not signatures:
script = ''
elif for_sig is None:
pubkeys = txin['pubkeys']
sig_list = ''
for pubkey in pubkeys:
sig = signatures.get(pubkey)
if not sig:
continue
sig = sig + '01'
sig_list += push_script(sig)
if not txin.get('redeemScript'):
script = sig_list
script += push_script(pubkeys[0])
else:
script = '00' # op_0
script += sig_list
redeem_script = klass.multisig_script(pubkeys,2)
assert redeem_script == txin.get('redeemScript')
script += push_script(redeem_script)
elif for_sig==i:
if txin.get('redeemScript'):
script = txin['redeemScript'] # p2sh uses the inner script
else:
script = txin['scriptPubKey'] # scriptsig
else:
script = ''
s += var_int( len(script)/2 ) # script length
s += script
s += "ffffffff" # sequence
s += var_int( len(outputs) ) # number of outputs
for output in outputs:
addr, amount = output
s += int_to_hex( amount, 8) # amount
script = klass.pay_script(addr)
s += var_int( len(script)/2 ) # script length
s += script # script
s += int_to_hex(0,4) # lock time
if for_sig is not None and for_sig != -1:
s += int_to_hex(1, 4) # hash type
return s
def tx_for_sig(self,i):
return self.serialize(self.inputs, self.outputs, for_sig = i)
def hash(self):
        return Hash(self.raw.decode('hex'))[::-1].encode('hex')
def add_signature(self, i, pubkey, sig):
txin = self.inputs[i]
signatures = txin.get("signatures",{})
signatures[pubkey] = sig
txin["signatures"] = signatures
self.inputs[i] = txin
print_error("adding signature for", pubkey)
self.raw = self.serialize( self.inputs, self.outputs )
def is_complete(self):
for i, txin in enumerate(self.inputs):
redeem_script = txin.get('redeemScript')
num, redeem_pubkeys = parse_redeemScript(redeem_script) if redeem_script else (1, [txin.get('redeemPubkey')])
signatures = txin.get("signatures",{})
if len(signatures) == num:
continue
else:
return False
return True
def sign(self, keypairs):
print_error("tx.sign(), keypairs:", keypairs)
for i, txin in enumerate(self.inputs):
# if the input is multisig, parse redeem script
redeem_script = txin.get('redeemScript')
num, redeem_pubkeys = parse_redeemScript(redeem_script) if redeem_script else (1, [txin.get('redeemPubkey')])
# add pubkeys
txin["pubkeys"] = redeem_pubkeys
# get list of already existing signatures
signatures = txin.get("signatures",{})
# continue if this txin is complete
if len(signatures) == num:
continue
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
for pubkey in redeem_pubkeys:
if pubkey in keypairs.keys():
# add signature
sec = keypairs[pubkey]
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
self.add_signature(i, pubkey, sig.encode('hex'))
print_error("is_complete", self.is_complete())
self.raw = self.serialize( self.inputs, self.outputs )
def deserialize(self):
vds = BCDataStream()
vds.write(self.raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = []
for i in xrange(n_vin):
d['inputs'].append(self.parse_input(vds))
n_vout = vds.read_compact_size()
d['outputs'] = []
for i in xrange(n_vout):
d['outputs'].append(self.parse_output(vds, i))
d['lockTime'] = vds.read_uint32()
self.d = d
return self.d
def parse_input(self, vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
if prevout_hash == '00'*32:
d['is_coinbase'] = True
else:
d['is_coinbase'] = False
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
if scriptSig:
pubkeys, signatures, address = get_address_from_input_script(scriptSig)
else:
pubkeys = []
signatures = {}
address = None
d['address'] = address
d['pubkeys'] = pubkeys
d['signatures'] = signatures
return d
def parse_output(self, vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
is_pubkey, address = get_address_from_output_script(scriptPubKey)
d['is_pubkey'] = is_pubkey
d['address'] = address
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def add_extra_addresses(self, txlist):
for i in self.inputs:
if i.get("address") == "(pubkey)":
prev_tx = txlist.get(i.get('prevout_hash'))
if prev_tx:
address, value = prev_tx.outputs[i.get('prevout_n')]
print_error("found pay-to-pubkey address:", address)
i["address"] = address
def has_address(self, addr):
found = False
for txin in self.inputs:
if addr == txin.get('address'):
found = True
break
for txout in self.outputs:
if addr == txout[0]:
found = True
break
return found
def get_value(self, addresses, prevout_values):
# return the balance for that tx
is_relevant = False
is_send = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in self.inputs:
addr = item.get('address')
if addr in addresses:
is_send = True
is_relevant = True
key = item['prevout_hash'] + ':%d'%item['prevout_n']
value = prevout_values.get( key )
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_send: is_partial = False
for item in self.outputs:
addr, value = item
v_out += value
if addr in addresses:
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_send:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
is_send = v < 0
else:
# all inputs are mine
fee = v_out - v_in
return is_relevant, is_send, v, fee
def get_input_info(self):
keys = ['prevout_hash', 'prevout_n', 'address', 'KeyID', 'scriptPubKey', 'redeemScript', 'redeemPubkey', 'pubkeys', 'signatures', 'is_coinbase']
info = []
for i in self.inputs:
item = {}
for k in keys:
v = i.get(k)
if v is not None:
item[k] = v
info.append(item)
return info
def as_dict(self):
import json
out = {
"hex":self.raw,
"complete":self.is_complete()
}
if not self.is_complete():
input_info = self.get_input_info()
out['input_info'] = json.dumps(input_info).replace(' ','')
return out
def required_fee(self, verifier):
# see https://en.bitcoin.it/wiki/Transaction_fees
threshold = 57600000*4
size = len(self.raw)/2
fee = 0
for o in self.outputs:
value = o[1]
if value < DUST_SOFT_LIMIT:
fee += MIN_RELAY_TX_FEE
sum = 0
for i in self.inputs:
age = verifier.get_confirmations(i["prevout_hash"])[0]
sum += i["value"] * age
priority = sum / size
print_error(priority, threshold)
if size < 5000 and fee == 0 and priority > threshold:
return 0
fee += (1 + size / 1000) * MIN_RELAY_TX_FEE
print_error(fee)
return fee
def add_input_info(self, input_info):
for i, txin in enumerate(self.inputs):
item = input_info[i]
txin['scriptPubKey'] = item['scriptPubKey']
txin['redeemScript'] = item.get('redeemScript')
txin['redeemPubkey'] = item.get('redeemPubkey')
txin['KeyID'] = item.get('KeyID')
txin['signatures'] = item.get('signatures',{})
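# --- Editor's sketch, not part of the original file ---
# Serializing a 2-of-2 multisig redeem script with Transaction.multisig_script().
# The pubkeys are dummy 33-byte compressed keys (hex) used for shape only;
# 0x52 is OP_2 and 0xae is OP_CHECKMULTISIG.
def example_multisig_script():
    k1 = '02' + '11' * 32
    k2 = '03' + '22' * 32
    script = Transaction.multisig_script([k1, k2], 2)
    assert script.startswith('52') and script.endswith('52ae')
    return script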
| ROIV/ViorCoin-ElectrumWallet | lib/transaction.py | Python | gpl-3.0 | 27,739 |
# coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import io
import ctypes
import random
import re
import socket
import stat
import tempfile
import time
import traceback
import urllib
import urllib2
import hashlib
import httplib
import urlparse
import uuid
import base64
import zipfile
import datetime
import errno
import ast
import operator
import platform
import sickbeard
import adba
import requests
import certifi
from contextlib import closing
from socket import timeout as SocketTimeout
from sickbeard import logger, classes
from sickbeard.common import USER_AGENT
from sickbeard import db
from sickbeard.notifiers import synoindex_notifier
from sickrage.helper.common import http_code_description, media_extensions, pretty_file_size, subtitle_extensions, episode_num
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickrage.show.Show import Show
from itertools import izip, cycle
import shutil
import shutil_custom
import xml.etree.ElementTree as ET
import json
shutil.copyfile = shutil_custom.copyfile_custom
# pylint: disable=protected-access
# Access to a protected member of a client class
urllib._urlopener = classes.SickBeardURLopener()
def fixGlob(path):
path = re.sub(r'\[', '[[]', path)
return re.sub(r'(?<!\[)\]', '[]]', path)
def indentXML(elem, level=0):
"""
Does our pretty printing, makes Matt very happy
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indentXML(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def remove_non_release_groups(name):
"""
    Remove known non-release-group tags from a release name
"""
if not name:
return name
# Do not remove all [....] suffixes, or it will break anime releases ## Need to verify this is true now
# Check your database for funky release_names and add them here, to improve failed handling, archiving, and history.
# select release_name from tv_episodes WHERE LENGTH(release_name);
# [eSc], [SSG], [GWC] are valid release groups for non-anime
removeWordsList = {
r'\[rartv\]$': 'searchre',
r'\[rarbg\]$': 'searchre',
r'\[eztv\]$': 'searchre',
r'\[ettv\]$': 'searchre',
r'\[cttv\]$': 'searchre',
r'\[vtv\]$': 'searchre',
r'\[EtHD\]$': 'searchre',
r'\[GloDLS\]$': 'searchre',
r'\[silv4\]$': 'searchre',
r'\[Seedbox\]$': 'searchre',
r'\[PublicHD\]$': 'searchre',
r'\[AndroidTwoU\]$': 'searchre',
r'\[brassetv]\]$': 'searchre',
r'\.\[BT\]$': 'searchre',
r' \[1044\]$': 'searchre',
r'\.RiPSaLoT$': 'searchre',
r'\.GiuseppeTnT$': 'searchre',
r'\.Renc$': 'searchre',
r'\.gz$': 'searchre',
r'(?<![57])\.1$': 'searchre',
r'-NZBGEEK$': 'searchre',
r'-Siklopentan$': 'searchre',
r'-Chamele0n$': 'searchre',
r'-Obfuscated$': 'searchre',
r'-\[SpastikusTV\]$': 'searchre',
r'-RP$': 'searchre',
r'-20-40$': 'searchre',
r'\.\[www\.usabit\.com\]$': 'searchre',
r'^\[www\.Cpasbien\.pe\] ': 'searchre',
r'^\[www\.Cpasbien\.com\] ': 'searchre',
r'^\[ www\.Cpasbien\.pw \] ': 'searchre',
r'^\.www\.Cpasbien\.pw': 'searchre',
r'^\[www\.newpct1\.com\]': 'searchre',
r'^\[ www\.Cpasbien\.com \] ': 'searchre',
r'- \{ www\.SceneTime\.com \}$': 'searchre',
r'^\{ www\.SceneTime\.com \} - ': 'searchre',
r'^\]\.\[www\.tensiontorrent.com\] - ': 'searchre',
r'^\]\.\[ www\.tensiontorrent.com \] - ': 'searchre',
r'- \[ www\.torrentday\.com \]$': 'searchre',
r'^\[ www\.TorrentDay\.com \] - ': 'searchre',
r'\[NO-RAR\] - \[ www\.torrentday\.com \]$': 'searchre',
}
_name = name
for remove_string, remove_type in removeWordsList.iteritems():
if remove_type == 'search':
_name = _name.replace(remove_string, '')
elif remove_type == 'searchre':
_name = re.sub(r'(?i)' + remove_string, '', _name)
return _name
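# --- Editor's sketch, not part of the original file ---
# How the suffix-stripping table above behaves; the release names are made up.
def example_remove_non_release_groups():
    assert remove_non_release_groups('Show.S01E01.720p-NZBGEEK') == 'Show.S01E01.720p'
    assert remove_non_release_groups('Show.S01E02.x264[rartv]') == 'Show.S01E02.x264'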
def isMediaFile(filename):
"""
Check if named file may contain media
:param filename: Filename to check
:return: True if this is a known media file, False if not
"""
# ignore samples
try:
if re.search(r'(^|[\W_])(?<!shomin.)(sample\d*)[\W_]', filename, re.I):
return False
# ignore RARBG release intro
if re.search(r'^RARBG\.\w+\.(mp4|avi|txt)$', filename, re.I):
return False
        # ignore macOS "resource fork" metadata files
if filename.startswith('._'):
return False
sepFile = filename.rpartition(".")
if re.search('extras?$', sepFile[0], re.I):
return False
if sepFile[2].lower() in media_extensions:
return True
else:
return False
except TypeError as error: # Not a string
logger.log('Invalid filename. Filename must be a string. %s' % error, logger.DEBUG) # pylint: disable=no-member
return False
def isRarFile(filename):
"""
Check if file is a RAR file, or part of a RAR set
:param filename: Filename to check
:return: True if this is RAR/Part file, False if not
"""
archive_regex = r'(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
if re.search(archive_regex, filename):
return True
return False
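# --- Editor's sketch, not part of the original file ---
# The archive regex above matches plain .rar files and only the *first*
# volume of a multi-part set, so later parts are not queued twice.
def example_is_rar_file():
    assert isRarFile('show.rar')
    assert isRarFile('show.part01.rar')
    assert not isRarFile('show.part02.rar')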
def isBeingWritten(filepath):
"""
Check if file has been written in last 60 seconds
:param filepath: Filename to check
:return: True if file has been written recently, False if none
"""
# Return True if file was modified within 60 seconds. it might still be being written to.
ctime = max(ek(os.path.getctime, filepath), ek(os.path.getmtime, filepath))
if ctime > time.time() - 60:
return True
return False
def remove_file_failed(failed_file):
"""
Remove file from filesystem
:param file: File to remove
"""
try:
ek(os.remove, failed_file)
except Exception:
pass
def makeDir(path):
"""
Make a directory on the filesystem
:param path: directory to make
:return: True if success, False if failure
"""
if not ek(os.path.isdir, path):
try:
ek(os.makedirs, path)
# do the library update for synoindex
synoindex_notifier.addFolder(path)
except OSError:
return False
return True
def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
"""
Contacts indexer to check for information on shows by showid
:param regShowName: Name of show
:param indexer: Which indexer to use
:param indexer_id: Which indexer ID to look for
:param ui: Custom UI for indexer use
:return:
"""
showNames = [re.sub('[. -]', ' ', regShowName)]
    # Query each indexer (or only the given one) and build the list of results
    for i in sickbeard.indexerApi().indexers if not indexer else [int(indexer)]:
lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
if ui is not None:
lINDEXER_API_PARMS['custom_ui'] = ui
t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)
for name in showNames:
logger.log(u"Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)
try:
search = t[indexer_id] if indexer_id else t[name]
except Exception:
continue
try:
seriesname = search[0]['seriesname']
except Exception:
seriesname = None
try:
series_id = search[0]['id']
except Exception:
series_id = None
if not (seriesname and series_id):
continue
ShowObj = Show.find(sickbeard.showList, int(series_id))
# Check if we can find the show in our list (if not, it's not the right show)
if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
return seriesname, i, int(series_id)
elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
return seriesname, i, int(indexer_id)
if indexer:
break
return None, None, None
def listMediaFiles(path):
"""
Get a list of files possibly containing media in a path
:param path: Path to check for files
:return: list of files
"""
    if not path or not ek(os.path.isdir, path):
return []
files = []
for curFile in ek(os.listdir, path):
fullCurFile = ek(os.path.join, path, curFile)
# if it's a folder do it recursively
if ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
files += listMediaFiles(fullCurFile)
elif isMediaFile(curFile):
files.append(fullCurFile)
return files
def copyFile(srcFile, destFile):
"""
Copy a file from source to destination
:param srcFile: Path of source file
:param destFile: Path of destination file
"""
ek(shutil.copyfile, srcFile, destFile)
try:
ek(shutil.copymode, srcFile, destFile)
except OSError:
pass
def moveFile(srcFile, destFile):
"""
Move a file from source to destination
:param srcFile: Path of source file
:param destFile: Path of destination file
"""
try:
ek(shutil.move, srcFile, destFile)
fixSetGroupID(destFile)
except OSError:
copyFile(srcFile, destFile)
ek(os.unlink, srcFile)
def link(src, dst):
"""
Create a file link from source to destination.
TODO: Make this unicode proof
:param src: Source file
:param dst: Destination file
"""
if os.name == 'nt':
if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0:
raise ctypes.WinError()
else:
ek(os.link, src, dst)
def hardlinkFile(srcFile, destFile):
"""
Create a hard-link (inside filesystem link) between source and destination
:param srcFile: Source file
:param destFile: Destination file
"""
try:
ek(link, srcFile, destFile)
fixSetGroupID(destFile)
except Exception as e:
logger.log(u"Failed to create hardlink of %s at %s. Error: %r. Copying instead"
% (srcFile, destFile, ex(e)), logger.WARNING)
copyFile(srcFile, destFile)
def symlink(src, dst):
"""
Create a soft/symlink between source and destination
:param src: Source file
:param dst: Destination file
"""
if os.name == 'nt':
if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if ek(os.path.isdir, src) else 0) in [0, 1280]:
raise ctypes.WinError()
else:
ek(os.symlink, src, dst)
def moveAndSymlinkFile(srcFile, destFile):
"""
Move a file from source to destination, then create a symlink back from destination from source. If this fails, copy
the file from source to destination
:param srcFile: Source file
:param destFile: Destination file
"""
try:
ek(shutil.move, srcFile, destFile)
fixSetGroupID(destFile)
ek(symlink, destFile, srcFile)
except Exception as e:
logger.log(u"Failed to create symlink of %s at %s. Error: %r. Copying instead"
% (srcFile, destFile, ex(e)), logger.WARNING)
copyFile(srcFile, destFile)
def make_dirs(path):
"""
Creates any folders that are missing and assigns them the permissions of their
parents
"""
logger.log(u"Checking if the path %s already exists" % path, logger.DEBUG)
if not ek(os.path.isdir, path):
# Windows, create all missing folders
if os.name == 'nt' or os.name == 'ce':
try:
logger.log(u"Folder %s didn't exist, creating it" % path, logger.DEBUG)
ek(os.makedirs, path)
except (OSError, IOError) as e:
logger.log(u"Failed creating %s : %r" % (path, ex(e)), logger.ERROR)
return False
# not Windows, create all missing folders and set permissions
else:
sofar = ''
folder_list = path.split(os.path.sep)
# look through each subfolder and make sure they all exist
for cur_folder in folder_list:
sofar += cur_folder + os.path.sep
# if it exists then just keep walking down the line
if ek(os.path.isdir, sofar):
continue
try:
logger.log(u"Folder %s didn't exist, creating it" % sofar, logger.DEBUG)
ek(os.mkdir, sofar)
# use normpath to remove end separator, otherwise checks permissions against itself
chmodAsParent(ek(os.path.normpath, sofar))
# do the library update for synoindex
synoindex_notifier.addFolder(sofar)
except (OSError, IOError) as e:
logger.log(u"Failed creating %s : %r" % (sofar, ex(e)), logger.ERROR)
return False
return True
def rename_ep_file(cur_path, new_path, old_path_length=0):
"""
Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
left that are now empty.
:param cur_path: The absolute path to the file you want to move/rename
:param new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
:param old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
"""
# new_dest_dir, new_dest_name = ek(os.path.split, new_path) # @UnusedVariable
if old_path_length == 0 or old_path_length > len(cur_path):
# approach from the right
cur_file_name, cur_file_ext = ek(os.path.splitext, cur_path) # @UnusedVariable
else:
# approach from the left
cur_file_ext = cur_path[old_path_length:]
cur_file_name = cur_path[:old_path_length]
if cur_file_ext[1:] in subtitle_extensions:
# Extract subtitle language from filename
sublang = ek(os.path.splitext, cur_file_name)[1][1:]
# Check if the language extracted from filename is a valid language
if sublang in sickbeard.subtitles.subtitle_code_filter():
cur_file_ext = '.' + sublang + cur_file_ext
# put the extension on the incoming file
new_path += cur_file_ext
make_dirs(ek(os.path.dirname, new_path))
# move the file
try:
logger.log(u"Renaming file from %s to %s" % (cur_path, new_path))
ek(shutil.move, cur_path, new_path)
except (OSError, IOError) as e:
logger.log(u"Failed renaming %s to %s : %r" % (cur_path, new_path, ex(e)), logger.ERROR)
return False
# clean up any old folders that are empty
delete_empty_folders(ek(os.path.dirname, cur_path))
return True
def delete_empty_folders(check_empty_dir, keep_dir=None):
"""
Walks backwards up the path and deletes any empty folders found.
:param check_empty_dir: The path to clean (absolute path to a folder)
:param keep_dir: Clean until this path is reached
"""
# treat check_empty_dir as empty when it only contains these items
ignore_items = []
logger.log(u"Trying to clean any empty folders under " + check_empty_dir)
# as long as the folder exists and doesn't contain any files, delete it
while ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
check_files = ek(os.listdir, check_empty_dir)
if not check_files or (len(check_files) <= len(ignore_items) and all(
[check_file in ignore_items for check_file in check_files])):
# directory is empty or contains only ignore_items
try:
logger.log(u"Deleting empty folder: " + check_empty_dir)
# need shutil.rmtree when ignore_items is really implemented
ek(os.rmdir, check_empty_dir)
# do the library update for synoindex
synoindex_notifier.deleteFolder(check_empty_dir)
except OSError as e:
logger.log(u"Unable to delete %s. Error: %r" % (check_empty_dir, repr(e)), logger.WARNING)
break
check_empty_dir = ek(os.path.dirname, check_empty_dir)
else:
break
def fileBitFilter(mode):
"""
Strip special filesystem bits from file
:param mode: mode to check and strip
:return: required mode for media file
"""
for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
if mode & bit:
mode -= bit
return mode
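# --- Editor's sketch, not part of the original file ---
# fileBitFilter() strips execute and set-uid/gid bits, so a mode of 0700
# comes back as 0600 before being applied to a media file.
def example_file_bit_filter():
    assert fileBitFilter(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) == stat.S_IRUSR | stat.S_IWUSR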
def chmodAsParent(childPath):
"""
Retain permissions of parent for childs
(Does not work for Windows hosts)
:param childPath: Child Path to change permissions to sync from parent
"""
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek(os.path.dirname, childPath)
if not parentPath:
logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
return
childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
parentPathStat = ek(os.stat, parentPath)
parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])
childPathStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])
if ek(os.path.isfile, childPath):
childMode = fileBitFilter(parentMode)
else:
childMode = parentMode
if childPath_mode == childMode:
return
childPath_owner = childPathStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
return
try:
ek(os.chmod, childPath, childMode)
logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode),
logger.DEBUG)
except OSError:
logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.DEBUG)
def fixSetGroupID(childPath):
"""
    Inherit the set-group-ID (SGID) bit from the parent directory
(does not work on Windows hosts)
:param childPath: Path to inherit SGID permissions from parent
"""
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek(os.path.dirname, childPath)
parentStat = ek(os.stat, parentPath)
parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])
childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
if parentMode & stat.S_ISGID:
parentGID = parentStat[stat.ST_GID]
childStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
childGID = childStat[stat.ST_GID]
if childGID == parentGID:
return
childPath_owner = childStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
logger.DEBUG)
return
try:
ek(os.chown, childPath, -1, parentGID) # @UndefinedVariable - only available on UNIX
logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % childPath, logger.DEBUG)
except OSError:
logger.log(
u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (
childPath, parentGID), logger.ERROR)
def is_anime_in_show_list():
"""
Check if any shows in list contain anime
:return: True if global showlist contains Anime, False if not
"""
for show in sickbeard.showList:
if show.is_anime:
return True
return False
def update_anime_support():
"""Check if we need to support anime, and if we do, enable the feature"""
sickbeard.ANIMESUPPORT = is_anime_in_show_list()
def get_absolute_number_from_season_and_episode(show, season, episode):
"""
Find the absolute number for a show episode
:param show: Show object
:param season: Season number
:param episode: Episode number
:return: The absolute number
"""
absolute_number = None
if season and episode:
main_db_con = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
sql_results = main_db_con.select(sql, [show.indexerid, season, episode])
if len(sql_results) == 1:
absolute_number = int(sql_results[0]["absolute_number"])
logger.log(u"Found absolute number {absolute} for show {show} {ep}".format
(absolute=absolute_number, show=show.name,
ep=episode_num(season, episode)), logger.DEBUG)
else:
logger.log(u"No entries for absolute number for show {show} {ep}".format
(show=show.name, ep=episode_num(season, episode)), logger.DEBUG)
return absolute_number
def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
episodes = []
season = None
if len(absolute_numbers):
if not show and indexer_id:
show = Show.find(sickbeard.showList, indexer_id)
for absolute_number in absolute_numbers if show else []:
ep = show.getEpisode(None, None, absolute_number=absolute_number)
if ep:
episodes.append(ep.episode)
            season = ep.season  # this always takes the last found season, so episodes that cross a season border are not handled well
return season, episodes
def sanitizeSceneName(name, anime=False):
"""
Takes a show name and returns the "scenified" version of it.
    :param anime: Some shows have an apostrophe in their name (e.g. Kuroko's Basketball) that must be kept for anime searches.
:return: A string containing the scene version of the show name given.
"""
if not name:
return ''
bad_chars = u',:()!?\u2019'
if not anime:
bad_chars += u"'"
# strip out any bad chars
for x in bad_chars:
name = name.replace(x, "")
# tidy up stuff that doesn't belong in scene names
name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.')
name = re.sub(r"\.\.*", ".", name)
if name.endswith('.'):
name = name[:-1]
return name
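# --- Editor's sketch, not part of the original file ---
# What the scene-name cleanup above produces for a typical show title.
def example_sanitize_scene_name():
    assert sanitizeSceneName("Law & Order: SVU") == "Law.and.Order.SVU"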
_binOps = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.div,
ast.Mod: operator.mod
}
def arithmeticEval(s):
"""
A safe eval supporting basic arithmetic operations.
:param s: expression to evaluate
:return: value
"""
node = ast.parse(s, mode='eval')
def _eval(node):
if isinstance(node, ast.Expression):
return _eval(node.body)
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.BinOp):
return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
else:
raise Exception('Unsupported type {}'.format(node))
return _eval(node.body)
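# --- Editor's sketch, not part of the original file ---
# arithmeticEval() only walks the whitelisted AST nodes above, so plain
# arithmetic evaluates while anything else (names, calls) raises.
def example_arithmetic_eval():
    assert arithmeticEval('2 + 3 * 4') == 14
    try:
        arithmeticEval('__import__("os")')
        assert False
    except Exception:
        pass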
def create_https_certificates(ssl_cert, ssl_key):
"""
    Create self-signed HTTPS certificates and store them at the paths 'ssl_cert' and 'ssl_key'
:param ssl_cert: Path of SSL certificate file to write
:param ssl_key: Path of SSL keyfile to write
:return: True on success, False on failure
"""
# assert isinstance(ssl_key, unicode)
# assert isinstance(ssl_cert, unicode)
try:
from OpenSSL import crypto # @UnresolvedImport
from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
serial # @UnresolvedImport
except Exception:
logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING)
return False
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 1024)
careq = createCertRequest(cakey, CN='Certificate Authority')
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
cname = 'SickRage'
pkey = createKeyPair(TYPE_RSA, 1024)
req = createCertRequest(pkey, CN=cname)
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
# pylint: disable=no-member
# Module has no member
io.open(ssl_key, 'wb').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
io.open(ssl_cert, 'wb').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except Exception:
logger.log(u"Error creating SSL key and certificate", logger.ERROR)
return False
return True
def backupVersionedFile(old_file, version):
"""
Back up an old version of a file
:param old_file: Original file, to take a backup from
:param version: Version of file to store in backup
:return: True if success, False if failure
"""
numTries = 0
new_file = old_file + '.' + 'v' + str(version)
while not ek(os.path.isfile, new_file):
if not ek(os.path.isfile, old_file):
logger.log(u"Not creating backup, %s doesn't exist" % old_file, logger.DEBUG)
break
try:
logger.log(u"Trying to back up %s to %s" % (old_file, new_file), logger.DEBUG)
shutil.copy(old_file, new_file)
logger.log(u"Backup done", logger.DEBUG)
break
except Exception as e:
logger.log(u"Error while trying to back up %s to %s : %r" % (old_file, new_file, ex(e)), logger.WARNING)
numTries += 1
time.sleep(1)
logger.log(u"Trying again.", logger.DEBUG)
if numTries >= 10:
logger.log(u"Unable to back up %s to %s please do it manually." % (old_file, new_file), logger.ERROR)
return False
return True
def restoreVersionedFile(backup_file, version):
"""
Restore a file version to original state
:param backup_file: File to restore
:param version: Version of file to restore
:return: True on success, False on failure
"""
numTries = 0
new_file, _ = ek(os.path.splitext, backup_file)
restore_file = new_file + '.' + 'v' + str(version)
if not ek(os.path.isfile, new_file):
logger.log(u"Not restoring, %s doesn't exist" % new_file, logger.DEBUG)
return False
try:
logger.log(u"Trying to backup %s to %s.r%s before restoring backup"
% (new_file, new_file, version), logger.DEBUG)
shutil.move(new_file, new_file + '.' + 'r' + str(version))
except Exception as e:
logger.log(u"Error while trying to backup DB file %s before proceeding with restore: %r"
% (restore_file, ex(e)), logger.WARNING)
return False
while not ek(os.path.isfile, new_file):
if not ek(os.path.isfile, restore_file):
logger.log(u"Not restoring, %s doesn't exist" % restore_file, logger.DEBUG)
break
try:
logger.log(u"Trying to restore file %s to %s" % (restore_file, new_file), logger.DEBUG)
shutil.copy(restore_file, new_file)
logger.log(u"Restore done", logger.DEBUG)
break
except Exception as e:
logger.log(u"Error while trying to restore file %s. Error: %r" % (restore_file, ex(e)), logger.WARNING)
numTries += 1
time.sleep(1)
logger.log(u"Trying again. Attempt #: %s" % numTries, logger.DEBUG)
if numTries >= 10:
logger.log(u"Unable to restore file %s to %s" % (restore_file, new_file), logger.WARNING)
return False
return True
# generates a md5 hash of a file
def md5_for_file(filename, block_size=2 ** 16):
"""
Generate an md5 hash for a file
:param filename: File to generate md5 hash for
:param block_size: Block size to use (defaults to 2^16)
:return MD5 hexdigest on success, or None on failure
"""
# assert isinstance(filename, unicode)
try:
with io.open(filename, 'rb') as f:
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
except Exception:
return None
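# --- Editor's sketch, not part of the original file ---
# md5_for_file() against a known vector: md5("hello") is the well-known
# 5d41402abc4b2a76b9719d911017c592 digest.
def example_md5_for_file():
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.write('hello')
    tmp.close()
    assert md5_for_file(tmp.name) == '5d41402abc4b2a76b9719d911017c592'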
def get_lan_ip():
"""Returns IP of system"""
try:
return [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][0]
except Exception:
return socket.gethostname()
def check_url(url):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
"""
# see also http://stackoverflow.com/questions/2924422
# http://stackoverflow.com/questions/1140661
good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]
host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]
try:
conn = httplib.HTTPConnection(host)
conn.request('HEAD', path)
return conn.getresponse().status in good_codes
except StandardError:
return None
def anon_url(*url):
"""
Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
"""
return '' if None in url else '%s%s' % (sickbeard.ANON_REDIRECT, ''.join(str(s) for s in url))
"""
Encryption
==========
By Pedro Jose Pereira Vieito <[email protected]> (@pvieito)
* If encryption_version==0 then return data without encryption
* The keys should be unique for each device
To add a new encryption_version:
1) Code your new encryption_version
2) Update the last encryption_version available in webserve.py
3) Remember to maintain old encryption versions and key generators for retrocompatibility
"""
# Key Generators
unique_key1 = hex(uuid.getnode() ** 2) # Used in encryption v1
# Encryption Functions
def encrypt(data, encryption_version=0, _decrypt=False):
# Version 1: Simple XOR encryption (this is not very secure, but works)
if encryption_version == 1:
if _decrypt:
return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(unique_key1)))
else:
return base64.encodestring(
''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(unique_key1)))).strip()
# Version 2: Simple XOR encryption (this is not very secure, but works)
elif encryption_version == 2:
if _decrypt:
return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(sickbeard.ENCRYPTION_SECRET)))
else:
return base64.encodestring(
''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(sickbeard.ENCRYPTION_SECRET)))).strip()
# Version 0: Plain text
else:
return data
def decrypt(data, encryption_version=0):
return encrypt(data, encryption_version, _decrypt=True)
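# --- Editor's sketch, not part of the original file ---
# The XOR scheme above is symmetric, so encrypt() followed by decrypt()
# with the same version returns the original plaintext.
def example_xor_roundtrip():
    token = encrypt('s3cret', encryption_version=1)
    assert decrypt(token, encryption_version=1) == 's3cret'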
def full_sanitizeSceneName(name):
return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().lstrip()
def _check_against_names(nameInQuestion, show, season=-1):
showNames = []
if season in [-1, 1]:
showNames = [show.name]
showNames.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))
for showName in showNames:
nameFromList = full_sanitizeSceneName(showName)
if nameFromList == nameInQuestion:
return True
return False
def get_show(name, tryIndexers=False):
if not sickbeard.showList:
return
showObj = None
fromCache = False
if not name:
return showObj
try:
# check cache for show
cache = sickbeard.name_cache.retrieveNameFromCache(name)
if cache:
fromCache = True
showObj = Show.find(sickbeard.showList, int(cache))
# try indexers
if not showObj and tryIndexers:
showObj = Show.find(
sickbeard.showList, searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])
# try scene exceptions
if not showObj:
ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
if ShowID:
showObj = Show.find(sickbeard.showList, int(ShowID))
# add show to cache
if showObj and not fromCache:
sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
except Exception as e:
logger.log(u"Error when attempting to find show: %s in SickRage. Error: %r " % (name, repr(e)), logger.DEBUG)
return showObj
def is_hidden_folder(folder):
"""
Returns True if folder is hidden.
    On Linux-based systems hidden folders start with a dot; on Windows the hidden file attribute is checked.
:param folder: Full path of folder to check
"""
def is_hidden(filepath):
name = ek(os.path.basename, ek(os.path.abspath, filepath))
return name.startswith('.') or has_hidden_attribute(filepath)
def has_hidden_attribute(filepath):
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
if ek(os.path.isdir, folder):
if is_hidden(folder):
return True
return False
def real_path(path):
"""
Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.
"""
return ek(os.path.normpath, ek(os.path.normcase, ek(os.path.realpath, path)))
def validateShow(show, season=None, episode=None):
indexer_lang = show.lang
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(show.indexer).api_params.copy()
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
if show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(show.indexer).indexer(**lINDEXER_API_PARMS)
if season is None and episode is None:
return t
return t[show.indexerid][season][episode]
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
pass
def set_up_anidb_connection():
"""Connect to anidb"""
if not sickbeard.USE_ANIDB:
logger.log(u"Usage of anidb disabled. Skiping", logger.DEBUG)
return False
    if not (sickbeard.ANIDB_USERNAME and sickbeard.ANIDB_PASSWORD):
        logger.log(u"anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
return False
if not sickbeard.ADBA_CONNECTION:
def anidb_logger(msg):
return logger.log(u"anidb: %s " % msg, logger.DEBUG)
try:
sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
except Exception as e:
logger.log(u"anidb exception msg: %r " % repr(e), logger.WARNING)
return False
try:
if not sickbeard.ADBA_CONNECTION.authed():
sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
else:
return True
except Exception as e:
logger.log(u"anidb exception msg: %r " % repr(e), logger.WARNING)
return False
return sickbeard.ADBA_CONNECTION.authed()
def makeZip(fileList, archive):
"""
Create a ZIP of files
:param fileList: A list of file names - full path each name
:param archive: File name for the archive with a full path
"""
try:
a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for f in fileList:
a.write(f)
a.close()
return True
except Exception as e:
logger.log(u"Zip creation error: %r " % repr(e), logger.ERROR)
return False
def extractZip(archive, targetDir):
"""
Unzip a file to a directory
:param fileList: A list of file names - full path each name
:param archive: The file name for the archive with a full path
"""
try:
if not ek(os.path.exists, targetDir):
ek(os.mkdir, targetDir)
zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
for member in zip_file.namelist():
filename = ek(os.path.basename, member)
# skip directories
if not filename:
continue
# copy file (taken from zipfile's extract)
source = zip_file.open(member)
target = file(ek(os.path.join, targetDir, filename), "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
zip_file.close()
return True
except Exception as e:
logger.log(u"Zip extraction error: %r " % repr(e), logger.ERROR)
return False
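# Illustrative sketch (not part of the original module): makeZip() and
# extractZip() round-trip a set of files; the paths below are hypothetical.
def _example_zip_roundtrip():
    if makeZip(['/tmp/a.txt', '/tmp/b.txt'], '/tmp/bundle.zip'):
        # extractZip flattens the archive: only basenames land in targetDir
        extractZip('/tmp/bundle.zip', '/tmp/unpacked')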
def backupConfigZip(fileList, archive, arcname=None):
"""
Store the config file as a ZIP
:param fileList: List of files to store
:param archive: ZIP file name
:param arcname: Archive path
:return: True on success, False on failure
"""
try:
a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for f in fileList:
a.write(f, ek(os.path.relpath, f, arcname))
a.close()
return True
except Exception as e:
logger.log(u"Zip creation error: %r " % repr(e), logger.ERROR)
return False
def restoreConfigZip(archive, targetDir):
"""
Restores a Config ZIP file back in place
:param archive: ZIP filename
:param targetDir: Directory to restore to
:return: True on success, False on failure
"""
try:
if not ek(os.path.exists, targetDir):
ek(os.mkdir, targetDir)
else:
def path_leaf(path):
head, tail = ek(os.path.split, path)
return tail or ek(os.path.basename, head)
bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
shutil.move(targetDir, ek(os.path.join, ek(os.path.dirname, targetDir), bakFilename))
zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
for member in zip_file.namelist():
zip_file.extract(member, targetDir)
zip_file.close()
return True
except Exception as e:
logger.log(u"Zip extraction error: %r" % ex(e), logger.ERROR)
shutil.rmtree(targetDir)
return False
def mapIndexersToShow(showObj):
mapped = {}
# init mapped indexers object
for indexer in sickbeard.indexerApi().indexers:
mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0
main_db_con = db.DBConnection()
sql_results = main_db_con.select(
"SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
[showObj.indexerid, showObj.indexer])
# for each mapped entry
for curResult in sql_results:
nlist = [i for i in curResult if i is not None]
        # Check if it's mapped with both tvdb and tvrage.
if len(nlist) >= 4:
logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
break
else:
sql_l = []
for indexer in sickbeard.indexerApi().indexers:
if indexer == showObj.indexer:
mapped[indexer] = showObj.indexerid
continue
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
try:
mapped_show = t[showObj.name]
except Exception:
logger.log(u"Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
continue
if mapped_show and len(mapped_show) == 1:
logger.log(u"Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
indexer).name + " for show: " + showObj.name, logger.DEBUG)
mapped[indexer] = int(mapped_show[0]['id'])
logger.log(u"Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)
sql_l.append([
"INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
[showObj.indexerid, showObj.indexer, int(mapped_show[0]['id']), indexer]])
if len(sql_l) > 0:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
return mapped
def touchFile(fname, atime=None):
"""
Touch a file (change modification date)
:param fname: Filename to touch
:param atime: Specific access time (defaults to None)
:return: True on success, False on failure
"""
if atime is not None:
try:
with file(fname, 'a'):
os.utime(fname, (atime, atime))
return True
        except (OSError, IOError) as e:
if e.errno == errno.ENOSYS:
logger.log(u"File air date stamping not available on your OS. Please disable setting", logger.DEBUG)
elif e.errno == errno.EACCES:
logger.log(u"File air date stamping failed(Permission denied). Check permissions for file: %s" % fname, logger.ERROR)
else:
logger.log(u"File air date stamping failed. The error is: %r" % ex(e), logger.ERROR)
return False
def _getTempDir():
"""
Returns the [system temp dir]/tvdb_api-u501 (or
tvdb_api-myuser)
"""
import getpass
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return ek(os.path.join, tempfile.gettempdir(), "sickrage")
return ek(os.path.join, tempfile.gettempdir(), "sickrage-%s" % uid)
def _setUpSession(session, headers):
"""
Returns a session initialized with default cache and parameter settings
:param session: session object to (re)use
:param headers: Headers to pass to session
:return: session object
"""
# request session
    # Let's try without caching sessions to disk for a while
# cache_dir = sickbeard.CACHE_DIR or _getTempDir()
# session = CacheControl(sess=session, cache=caches.FileCache(ek(os.path.join, cache_dir, 'sessions'), use_dir_lock=True), cache_etags=False)
# request session clear residual referer
# pylint: disable=superfluous-parens
# These extra parens are necessary!
if 'Referer' in session.headers and 'Referer' not in (headers or {}):
session.headers.pop('Referer')
# request session headers
session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
if headers:
session.headers.update(headers)
# request session ssl verify
session.verify = certifi.old_where() if sickbeard.SSL_VERIFY else False
# request session proxies
if 'Referer' not in session.headers and sickbeard.PROXY_SETTING:
logger.log(u"Using global proxy: " + sickbeard.PROXY_SETTING, logger.DEBUG)
scheme, address = urllib2.splittype(sickbeard.PROXY_SETTING)
address = sickbeard.PROXY_SETTING if scheme else 'http://' + sickbeard.PROXY_SETTING
session.proxies = {
"http": address,
"https": address,
}
session.headers.update({'Referer': address})
if 'Content-Type' in session.headers:
session.headers.pop('Content-Type')
return session
def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=None, json=False, need_bytes=False):
"""
Returns a byte-string retrieved from the url provider.
"""
session = _setUpSession(session, headers)
if params and isinstance(params, (list, dict)):
for param in params:
if isinstance(params[param], unicode):
params[param] = params[param].encode('utf-8')
session.params = params
try:
# decide if we get or post data to server
if post_data:
if isinstance(post_data, (list, dict)):
for param in post_data:
if isinstance(post_data[param], unicode):
post_data[param] = post_data[param].encode('utf-8')
session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
resp = session.post(url, data=post_data, timeout=timeout, allow_redirects=True, verify=session.verify)
else:
resp = session.get(url, timeout=timeout, allow_redirects=True, verify=session.verify)
if not resp.ok:
logger.log(u"Requested getURL %s returned status code is %s: %s"
% (url, resp.status_code, http_code_description(resp.status_code)), logger.DEBUG)
return None
except (SocketTimeout, TypeError) as e:
logger.log(u"Connection timed out (sockets) accessing getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except (requests.exceptions.HTTPError, requests.exceptions.TooManyRedirects) as e:
logger.log(u"HTTP error in getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.ConnectionError as e:
logger.log(u"Connection error to getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.Timeout as e:
logger.log(u"Connection timed out accessing getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.ContentDecodingError:
logger.log(u"Content-Encoding was gzip, but content was not compressed. getURL: %s" % url, logger.DEBUG)
logger.log(traceback.format_exc(), logger.DEBUG)
return None
except Exception as e:
logger.log(u"Unknown exception in getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
logger.log(traceback.format_exc(), logger.WARNING)
return None
return (resp.text, resp.content)[need_bytes] if not json else resp.json()
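# Illustrative sketch (not part of the original module): the three return
# modes of getURL(); the URLs are hypothetical and each call yields None on error.
def _example_getURL_modes():
    text = getURL('http://example.com/feed')                      # unicode text
    raw = getURL('http://example.com/file.bin', need_bytes=True)  # raw bytes
    obj = getURL('http://example.com/api', json=True)             # decoded JSON
    return text, raw, obj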
def download_file(url, filename, session=None, headers=None):
"""
Downloads a file specified
:param url: Source URL
:param filename: Target file on filesystem
:param session: request session to use
:param headers: override existing headers in request session
:return: True on success, False on failure
"""
session = _setUpSession(session, headers)
session.stream = True
try:
with closing(session.get(url, allow_redirects=True, verify=session.verify)) as resp:
if not resp.ok:
logger.log(u"Requested download url %s returned status code is %s: %s"
% (url, resp.status_code, http_code_description(resp.status_code)), logger.DEBUG)
return False
try:
with io.open(filename, 'wb') as fp:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
fp.flush()
chmodAsParent(filename)
except Exception:
logger.log(u"Problem setting permissions or writing file to: %s" % filename, logger.WARNING)
except (SocketTimeout, TypeError) as e:
remove_file_failed(filename)
logger.log(u"Connection timed out (sockets) while loading download URL %s Error: %r" % (url, ex(e)), logger.WARNING)
        return False
except (requests.exceptions.HTTPError, requests.exceptions.TooManyRedirects) as e:
remove_file_failed(filename)
logger.log(u"HTTP error %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except requests.exceptions.ConnectionError as e:
remove_file_failed(filename)
logger.log(u"Connection error %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except requests.exceptions.Timeout as e:
remove_file_failed(filename)
logger.log(u"Connection timed out %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except EnvironmentError as e:
remove_file_failed(filename)
logger.log(u"Unable to save the file: %r " % ex(e), logger.WARNING)
return False
except Exception:
remove_file_failed(filename)
logger.log(u"Unknown exception while loading download URL %s : %r" % (url, traceback.format_exc()), logger.WARNING)
return False
return True
def get_size(start_path='.'):
"""
Find the total dir and filesize of a path
:param start_path: Path to recursively count size
:return: total filesize
"""
if not ek(os.path.isdir, start_path):
return -1
total_size = 0
for dirpath, _, filenames in ek(os.walk, start_path):
for f in filenames:
fp = ek(os.path.join, dirpath, f)
try:
total_size += ek(os.path.getsize, fp)
except OSError as e:
logger.log(u"Unable to get size for file %s Error: %r" % (fp, ex(e)), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
return total_size
def generateApiKey():
""" Return a new randomized API_KEY"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
def remove_article(text=''):
"""Remove the english articles from a text string"""
return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)
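# Illustrative sketch (not part of the original module): remove_article()
# strips only a leading English article and leaves the rest of the title alone.
def _example_remove_article():
    assert remove_article('The Walking Dead') == 'Walking Dead'
    assert remove_article('An Idiot Abroad') == 'Idiot Abroad'
    assert remove_article('Archer') == 'Archer'  # no leading article: unchanged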
def generateCookieSecret():
"""Generate a new cookie secret"""
return base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)
def verify_freespace(src, dest, oldfile=None):
"""
Checks if the target system has enough free space to copy or move a file.
:param src: Source filename
:param dest: Destination path
:param oldfile: File to be replaced (defaults to None)
:return: True if there is enough space for the file, False if there isn't. Also returns True if the OS doesn't support this option
"""
if not isinstance(oldfile, list):
oldfile = [oldfile]
logger.log(u"Trying to determine free space on destination drive", logger.DEBUG)
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = ek(os.statvfs, path)
free = st.f_bavail * st.f_frsize
return free
elif os.name == 'nt': # Windows
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
logger.log(u"Unable to determine free space, something went wrong", logger.WARNING)
raise ctypes.WinError()
return free.value
else:
logger.log(u"Unable to determine free space on your OS")
return True
if not ek(os.path.isfile, src):
logger.log(u"A path to a file is required for the source. " + src + " is not a file.", logger.WARNING)
return True
try:
diskfree = disk_usage(dest)
except Exception:
logger.log(u"Unable to determine free space, so I will assume there is enough.", logger.WARNING)
return True
neededspace = ek(os.path.getsize, src)
if oldfile:
for f in oldfile:
if ek(os.path.isfile, f.location):
diskfree += ek(os.path.getsize, f.location)
if diskfree > neededspace:
return True
else:
logger.log(u"Not enough free space: Needed: %s bytes ( %s ), found: %s bytes ( %s )"
% (neededspace, pretty_file_size(neededspace), diskfree, pretty_file_size(diskfree)), logger.WARNING)
return False
# https://gist.github.com/thatalextaylor/7408395
def pretty_time_delta(seconds):
sign_string = '-' if seconds < 0 else ''
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
time_delta = sign_string
if days > 0:
time_delta += ' %dd' % days
if hours > 0:
time_delta += ' %dh' % hours
if minutes > 0:
time_delta += ' %dm' % minutes
if seconds > 0:
time_delta += ' %ds' % seconds
return time_delta
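# Illustrative sketch (not part of the original module): pretty_time_delta()
# renders a signed breakdown with zero components omitted, so each present
# unit is prefixed with a space and an input of 0 yields an empty string.
def _example_pretty_time_delta():
    assert pretty_time_delta(93784) == ' 1d 2h 3m 4s'
    assert pretty_time_delta(-61) == '- 1m 1s'
    assert pretty_time_delta(0) == ''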
def isFileLocked(checkfile, writeLockCheck=False):
"""
Checks to see if a file is locked. Performs three checks
1. Checks if the file even exists
2. Attempts to open the file for reading. This will determine if the file has a write lock.
Write locks occur when the file is being edited or copied to, e.g. a file copy destination
3. If the readLockCheck parameter is True, attempts to rename the file. If this fails the
file is open by some other process for reading. The file can be read, but not written to
or deleted.
:param file: the file being checked
:param writeLockCheck: when true will check if the file is locked for writing (prevents move operations)
"""
checkfile = ek(os.path.abspath, checkfile)
if not ek(os.path.exists, checkfile):
return True
try:
f = io.open(checkfile, 'rb')
f.close()
except IOError:
return True
if writeLockCheck:
lockFile = checkfile + ".lckchk"
if ek(os.path.exists, lockFile):
ek(os.remove, lockFile)
try:
ek(os.rename, checkfile, lockFile)
time.sleep(1)
ek(os.rename, lockFile, checkfile)
except (OSError, IOError):
return True
return False
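# Illustrative sketch (not part of the original module): checking a download
# destination before moving it; the path is hypothetical. Note the function
# also returns True when the file does not exist at all.
def _example_isFileLocked():
    if not isFileLocked('/tmp/episode.mkv', writeLockCheck=True):
        pass  # safe to move or rename the file here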
def getDiskSpaceUsage(diskPath=None):
"""
returns the free space in human readable bytes for a given path or False if no path given
:param diskPath: the filesystem path being checked
"""
if diskPath and ek(os.path.exists, diskPath):
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(diskPath), None, None, ctypes.pointer(free_bytes))
return pretty_file_size(free_bytes.value)
else:
st = ek(os.statvfs, diskPath)
return pretty_file_size(st.f_bavail * st.f_frsize)
else:
return False
def getTVDBFromID(indexer_id, indexer):
session = requests.Session()
tvdb_id = ''
if indexer == 'IMDB':
url = "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s" % indexer_id
data = getURL(url, session=session, need_bytes=True)
if data is None:
return tvdb_id
try:
tree = ET.fromstring(data)
for show in tree.getiterator("Series"):
tvdb_id = show.findtext("seriesid")
except SyntaxError:
pass
return tvdb_id
elif indexer == 'ZAP2IT':
url = "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?zap2it=%s" % indexer_id
data = getURL(url, session=session, need_bytes=True)
if data is None:
return tvdb_id
try:
tree = ET.fromstring(data)
for show in tree.getiterator("Series"):
tvdb_id = show.findtext("seriesid")
except SyntaxError:
pass
return tvdb_id
elif indexer == 'TVMAZE':
url = "http://api.tvmaze.com/shows/%s" % indexer_id
data = getURL(url, session=session, json=True)
if data is None:
return tvdb_id
tvdb_id = data['externals']['thetvdb']
return tvdb_id
else:
return tvdb_id
def get_showname_from_indexer(indexer, indexer_id, lang='en'):
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
if lang:
lINDEXER_API_PARMS['language'] = lang
logger.log(u"" + str(sickbeard.indexerApi(indexer).name) + ": " + repr(lINDEXER_API_PARMS))
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
s = t[int(indexer_id)]
    if hasattr(s, 'data'):
return s.data.get('seriesname')
return None
def is_ip_private(ip):
    # RFC 1918 private blocks plus the loopback range; dots are escaped so the
    # patterns match literal separators only.
    priv_lo = re.compile(r"^127\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    priv_8 = re.compile(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    priv_16 = re.compile(r"^192\.168\.\d{1,3}\.\d{1,3}$")
    priv_12 = re.compile(r"^172\.(1[6-9]|2[0-9]|3[0-1])\.\d{1,3}\.\d{1,3}$")
    return priv_lo.match(ip) or priv_8.match(ip) or priv_16.match(ip) or priv_12.match(ip)
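# Illustrative sketch (not part of the original module): is_ip_private()
# is truthy for RFC 1918 and loopback addresses and falsy otherwise.
def _example_is_ip_private():
    assert is_ip_private('192.168.1.10')
    assert is_ip_private('10.0.0.1')
    assert is_ip_private('172.16.5.4')
    assert not is_ip_private('8.8.8.8')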
| p0psicles/SickRage | sickbeard/helpers.py | Python | gpl-3.0 | 59,494 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 The ProteinDF development team.
# see also AUTHORS and README if provided.
#
# This file is a part of the ProteinDF software package.
#
# The ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
from .superposer import Superposer
from .matrix import Matrix
from .atomgroup import AtomGroup
from .atom import Atom
from .functions import load_msgpack
from .position import Position
from .error import BrInputError
# from .xyz import Xyz
import os
import math
import re
import logging
logger = logging.getLogger(__name__)
class Modeling:
_ACE_ALA_NME_path_base = os.path.join(
os.environ.get('PDF_HOME', '.'),
'data',
"ACE_ALA_NME_{}.brd")
_ACE_ALA_NME_comformers = ["trans1", "trans2", "cis1", "cis2"]
def __init__(self):
self._ACE_ALA_NME = {}
for comformer in self._ACE_ALA_NME_comformers:
brd_path = self._ACE_ALA_NME_path_base.format(comformer)
# print(comformer, brd_path)
atomgroup = AtomGroup(load_msgpack(brd_path))
assert(atomgroup.get_number_of_all_atoms() > 0)
self._ACE_ALA_NME[comformer] = atomgroup
def _get_ACE_ALA_NME(self, comformer):
assert(comformer in self._ACE_ALA_NME_comformers)
return self._ACE_ALA_NME[comformer]
# -----------------------------------------------------------------
def get_ACE_simple(self, next_aa):
"""
隣のC-alphaの位置をメチル基にする。
"""
answer = AtomGroup()
CAs = next_aa.pickup_atoms('CA')
if len(CAs) > 0:
answer.set_atom('CA', CAs[0])
else:
raise BrInputError(next_aa,
'cannot found "CA" atom on building ACE.')
Cs = next_aa.pickup_atoms('C')
if len(Cs) > 0:
answer.set_atom('C', Cs[0])
else:
raise BrInputError(next_aa,
'cannot found "C" atom on building ACE.')
Os = next_aa.pickup_atoms('O')
if len(Os) > 0:
answer.set_atom('O', Os[0])
else:
raise BrInputError(next_aa,
'cannot found "O" atom on building ACE.')
answer |= self.add_methyl(answer['CA'], answer['C'])
answer.path = '/ACE'
return answer
def get_NME_simple(self, next_aa):
"""
隣のC-alphaの位置をメチル基にする。
"""
answer = AtomGroup()
CAs = next_aa.pickup_atoms('CA')
if len(CAs) > 0:
answer.set_atom('CA', CAs[0])
else:
raise BrInputError(next_aa,
'cannot found "CA" atom on building NME.')
Ns = next_aa.pickup_atoms('N')
if len(Ns) > 0:
answer.set_atom('N', Ns[0])
else:
raise BrInputError(next_aa,
'cannot found "N" atom on building NME.')
Hs = next_aa.pickup_atoms('H')
if len(Hs) > 0:
answer.set_atom('H', Hs[0])
else:
# for proline
CDs = next_aa.pickup_atoms('CD')
if len(CDs) > 0:
dummy_H = Atom(CDs[0])
dummy_H.symbol = 'H'
answer.set_atom('H', dummy_H)
else:
raise BrInputError(next_aa,
'cannot found "H" or "CD" atom(for proline) on building NME.')
answer |= self.add_methyl(answer['CA'], answer['N'])
answer.path = '/NME'
return answer
# -----------------------------------------------------------------
def get_ACE(self, res, next_aa=None):
"""
template (ACE-ALA-NME) format:
HH3[1-3]-CH3-C - N-CA(HA)-C- N-CH3-HH3[1-3]
|| | | || |
O H CB O H
"""
AAN = None
rmsd_min = 1000.0
for comformer in self._ACE_ALA_NME_comformers:
ref_AAN = self._get_ACE_ALA_NME(comformer)
(matched, rmsd) = self._match_ACE(ref_AAN, res, next_aa)
# print(comformer, rmsd)
if rmsd < rmsd_min:
rmsd_min = rmsd
AAN = matched
        if rmsd_min > 1.0:
            logger.warning("RMSD value is too large: {}".format(rmsd_min))
answer = AtomGroup(AAN['1'])
answer.path = '/ACE'
return answer
def _match_ACE(self, AAN, res, next_aa):
'''AAN (ACE-ALA-NME)
'''
assert(isinstance(AAN, AtomGroup))
assert(isinstance(res, AtomGroup))
(AAN_part, res_part) = self._match_residues(AAN['2'], res)
# for ACE
if next_aa is not None:
if next_aa.has_atom('N'):
AAN_part.set_atom('N2', AAN['3']['N'])
res_part.set_atom('N2', next_aa['N'])
if next_aa.has_atom('H'):
AAN_part.set_atom('NH2', AAN['3']['H'])
res_part.set_atom('NH2', next_aa['H'])
if next_aa.has_atom('CA'):
AAN_part.set_atom('CH3', AAN['3']['CH3'])
res_part.set_atom('CH3', next_aa['CA'])
sp = Superposer(AAN_part, res_part)
rmsd = sp.rmsd
matched_AAN = sp.superimpose(AAN)
return (matched_AAN, rmsd)
def get_NME(self, res, next_aa=None):
"""
template (ACE-ALA-NME) format:
HH3[1-3]-CH3-C - N-CA(HA)-C- N-CH3-HH3[1-3]
|| | | || |
O H CB O H
"""
AAN = None
rmsd_min = 1000.0
for comformer in self._ACE_ALA_NME_comformers:
ref_AAN = self._get_ACE_ALA_NME(comformer)
(matched, rmsd) = self._match_NME(ref_AAN, res, next_aa)
# print(comformer, rmsd)
if rmsd < rmsd_min:
rmsd_min = rmsd
AAN = matched
        if rmsd_min > 1.0:
            logger.warning("RMSD value is too large: {}".format(rmsd_min))
answer = AtomGroup(AAN['3'])
answer.path = '/NME'
return answer
def _match_NME(self, AAN, res, next_aa):
'''AAN (ACE-ALA-NME)
'''
assert(isinstance(AAN, AtomGroup))
assert(isinstance(res, AtomGroup))
(AAN_part, res_part) = self._match_residues(AAN['2'], res)
# for NME
if next_aa is not None:
if next_aa.has_atom('C'):
AAN_part.set_atom('C2', AAN['1']['C'])
res_part.set_atom('C2', next_aa['C'])
if next_aa.has_atom('O'):
AAN_part.set_atom('O2', AAN['1']['O'])
res_part.set_atom('O2', next_aa['O'])
if next_aa.has_atom('CA'):
AAN_part.set_atom('CH3', AAN['1']['CH3'])
res_part.set_atom('CH3', next_aa['CA'])
sp = Superposer(AAN_part, res_part)
rmsd = sp.rmsd
matched_AAN = sp.superimpose(AAN)
return (matched_AAN, rmsd)
def _match_residues(self, res1, res2, max_number_of_atoms=-1):
"""
2つのアミノ酸残基のN, H, CA, HA, C, Oの原子を突き合わせる。
アミノ酸残基がプロリンだった場合は、CDの炭素をHに命名する。
GLYはHA1, HA2とあるので突き合せない。
"""
atom_names = ['CA', 'O', 'C', 'N', 'CB', 'HA']
if max_number_of_atoms == -1:
max_number_of_atoms = len(atom_names)
ans_res1 = AtomGroup()
ans_res2 = AtomGroup()
for atom_name in atom_names:
pickup_atoms1 = res1.pickup_atoms(atom_name)
if len(pickup_atoms1) > 0:
pickup_atoms2 = res2.pickup_atoms(atom_name)
if len(pickup_atoms2) > 0:
ans_res1.set_atom(atom_name, pickup_atoms1[0])
ans_res2.set_atom(atom_name, pickup_atoms2[0])
if ans_res1.get_number_of_atoms() >= max_number_of_atoms:
break
# match amino-'H'
if ans_res1.get_number_of_atoms() < max_number_of_atoms:
res1_H = None
res2_H = None
if res1.has_atom('H'):
res1_H = res1['H']
elif res1.has_atom('CD'):
# for proline
res1_H = res1['CD']
if res2.has_atom('H'):
res2_H = res2['H']
elif res2.has_atom('CD'):
res2_H = res2['CD']
if ((res1_H is not None) and (res2_H is not None)):
ans_res1.set_atom('H', res1_H)
ans_res2.set_atom('H', res2_H)
return (ans_res1, ans_res2)
# -----------------------------------------------------------------
def add_methyl(self, C1, C2):
"""
-CH3の水素を付加
C1に水素を付加
"""
assert(isinstance(C1, Atom))
assert(isinstance(C2, Atom))
ethane = AtomGroup()
ethane.set_atom('C1', Atom(symbol='C', name='C1',
position=Position(0.00000, 0.00000, 0.00000)))
ethane.set_atom('H11', Atom(symbol='H', name='H11',
position=Position(-0.85617, -0.58901, -0.35051)))
ethane.set_atom('H12', Atom(symbol='H', name='H12',
position=Position(-0.08202, 1.03597, -0.35051)))
ethane.set_atom('H13', Atom(symbol='H', name='H13',
position=Position(0.93818, -0.44696, -0.35051)))
ethane.set_atom('C2', Atom(symbol='C', name='C2',
position=Position(0.00000, 0.00000, 1.47685)))
ethane.set_atom('H21', Atom(symbol='H', name='H21',
position=Position(-0.93818, 0.44696, 1.82736)))
ethane.set_atom('H22', Atom(symbol='H', name='H22',
position=Position(0.85617, 0.58901, 1.82736)))
ethane.set_atom('H23', Atom(symbol='H', name='H23',
position=Position(0.08202, -1.03597, 1.82736)))
inC21 = C2.xyz - C1.xyz
refC21 = ethane['C2'].xyz - ethane['C1'].xyz
shift = C1.xyz - ethane['C1'].xyz
rot = self.arbitary_rotate_matrix(inC21, refC21)
ethane.rotate(rot)
ethane.shift_by(shift)
assert(C1.xyz == ethane['C1'].xyz)
answer = AtomGroup()
answer.set_atom('H11', ethane['H11'])
answer.set_atom('H12', ethane['H12'])
answer.set_atom('H13', ethane['H13'])
return answer
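    # Illustrative usage sketch (not part of the original class): given two
    # bonded carbon Atom objects c1 and c2, add_methyl() returns the three
    # hydrogens completing a methyl group on c1:
    #
    #   hydrogens = modeling.add_methyl(c1, c2)  # AtomGroup with H11/H12/H13
    #   residue |= hydrogens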
# -----------------------------------------------------------------
def get_NH3(self, angle=0.5 * math.pi, length=1.0):
pi23 = math.pi * 2.0 / 3.0 # (pi * 2/3)
sin23 = math.sin(pi23)
cos23 = math.cos(pi23)
# pi43 = math.pi * 4.0 / 3.0 # (pi * 4/3)
# sin43 = math.sin(pi43)
# cos43 = math.cos(pi43)
sin_input = math.sin(angle)
cos_input = math.cos(angle)
        # rotate by 120 degrees about the z-axis
# z1_rot = Matrix(3, 3)
# z1_rot.set(0, 0, cos23)
# z1_rot.set(0, 1, -sin23)
# z1_rot.set(1, 0, sin23)
# z1_rot.set(1, 1, cos23)
# z1_rot.set(2, 2, 1.0)
        # rotate by 240 degrees about the z-axis
# z2_rot = Matrix(3, 3)
# z2_rot.set(0, 0, cos43)
# z2_rot.set(0, 1, -sin43)
# z2_rot.set(1, 0, sin43)
# z2_rot.set(1, 1, cos43)
# z2_rot.set(2, 2, 1.0)
        # rotate about the y-axis
# y_rot = Matrix(3, 3)
# y_rot.set(0, 0, cos_input)
# y_rot.set(0, 2, -sin_input)
# y_rot.set(2, 0, sin_input)
# y_rot.set(2, 2, cos_input)
# y_rot.set(1, 1, 1.0)
# pos_H1 = Position(1.0, 0.0, 0.0)
# pos_H1.rotate(y_rot)
# pos_H1 *= length
# pos_H2 = Position(1.0, 0.0, 0.0)
# pos_H2.rotate(y_rot)
# pos_H2.rotate(z1_rot)
# pos_H2 *= length
# pos_H3 = Position(1.0, 0.0, 0.0)
# pos_H3.rotate(y_rot)
# pos_H3.rotate(z2_rot)
# pos_H3 *= length
        # in the X-Z plane, open by 'angle' from the Y axis
xz_rot = Matrix(3, 3)
xz_rot.set(0, 0, cos_input)
xz_rot.set(0, 2, -sin_input)
xz_rot.set(2, 0, sin_input)
xz_rot.set(2, 2, cos_input)
xz_rot.set(1, 1, 1.0)
        # in the X-Y plane, open by 120 degrees about the Z axis
xy_rot = Matrix(3, 3)
xy_rot.set(0, 0, cos23)
xy_rot.set(0, 1, -sin23)
xy_rot.set(1, 0, sin23)
xy_rot.set(1, 1, cos23)
xy_rot.set(2, 2, 1.0)
pos_H1 = Position(0.0, 0.0, 1.0)
pos_H1.rotate(xz_rot)
pos_H2 = Position(0.0, 0.0, 1.0)
pos_H2.rotate(xz_rot)
pos_H2.rotate(xy_rot)
pos_H3 = Position(0.0, 0.0, 1.0)
pos_H3.rotate(xz_rot)
pos_H3.rotate(xy_rot)
pos_H3.rotate(xy_rot)
pos_H1 *= length
pos_H2 *= length
pos_H3 *= length
NH3 = AtomGroup()
N = Atom(symbol='N',
position=Position(0.0, 0.0, 0.0))
H1 = Atom(symbol='H',
position=pos_H1)
H2 = Atom(symbol='H',
position=pos_H2)
H3 = Atom(symbol='H',
position=pos_H3)
# X1 = Atom(symbol = 'X',
# position = Position(1.0, 0.0, 0.0))
# X2 = Atom(symbol = 'X',
# position = Position(0.0, 1.0, 0.0))
# X3 = Atom(symbol = 'X',
# position = Position(0.0, 0.0, 1.0))
NH3.set_atom('N', N)
NH3.set_atom('H1', H1)
NH3.set_atom('H2', H2)
NH3.set_atom('H3', H3)
# NH3.set_atom('X1', X1)
# NH3.set_atom('X2', X2)
# NH3.set_atom('X3', X3)
return NH3
# -----------------------------------------------------------------
def select_residues(self, chain, from_resid, to_resid):
        '''
        Return a contiguous range of amino-acid residues.
        '''
answer = AtomGroup()
for resid, res in chain.groups():
resid = int(resid)
if from_resid <= resid <= to_resid:
answer |= res
return answer
# -----------------------------------------------------------------
def arbitary_rotate_matrix(self, in_a, in_b):
"""
ベクトルaをbへ合わせる回転行列(3x3)を返す
"""
assert(isinstance(in_a, Position))
assert(isinstance(in_b, Position))
a = Position(in_a)
b = Position(in_b)
a.norm()
b.norm()
cos_theta = a.dot(b)
sin_theta = math.sqrt(1 - cos_theta * cos_theta)
n = a.cross(b)
n.norm()
nx = n.x
ny = n.y
nz = n.z
rot = Matrix(3, 3)
rot.set(0, 0, nx * nx * (1.0 - cos_theta) + cos_theta)
rot.set(0, 1, nx * ny * (1.0 - cos_theta) + nz * sin_theta)
rot.set(0, 2, nx * nz * (1.0 - cos_theta) - ny * sin_theta)
rot.set(1, 0, nx * ny * (1.0 - cos_theta) - nz * sin_theta)
rot.set(1, 1, ny * ny * (1.0 - cos_theta) + cos_theta)
        rot.set(1, 2, ny * nz * (1.0 - cos_theta) + nx * sin_theta)
rot.set(2, 0, nx * nz * (1.0 - cos_theta) + ny * sin_theta)
rot.set(2, 1, ny * nz * (1.0 - cos_theta) - nx * sin_theta)
rot.set(2, 2, nz * nz * (1.0 - cos_theta) + cos_theta)
return rot
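    # Illustrative sketch (not part of the original class): the matrix above is
    # the Rodrigues rotation about n = a x b, so applying it to a unit vector a
    # should reproduce b (assuming a and b are not (anti)parallel, where the
    # cross product vanishes):
    #
    #   rot = modeling.arbitary_rotate_matrix(Position(1, 0, 0), Position(0, 1, 0))
    #   v = Position(1, 0, 0)
    #   v.rotate(rot)  # v is now (approximately) (0, 1, 0)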
# -----------------------------------------------------------------
def get_last_index(self, res):
answer = 0
re_obj = re.compile('([0-9]+)')
for key, atom in res.atoms():
m = re_obj.search(key)
if m is not None:
num = m.group(0)
num = int(num)
answer = max(num, answer)
return answer
# -----------------------------------------------------------------
def neutralize_Nterm(self, res):
answer = None
if res.name == "PRO":
answer = self._neutralize_Nterm_PRO(res)
else:
answer = self._neutralize_Nterm(res)
return answer
def _neutralize_Nterm(self, res):
"""
N末端側を中性化するためにCl-(AtomGroup)を返す
H1, N2, HXT(or H3)が指定されている必要があります。
"""
ag = AtomGroup()
ag.set_atom('N', res['N'])
ag.set_atom('H1', res['H1'])
ag.set_atom('H2', res['H2'])
if res.has_atom('HXT'):
ag.set_atom('H3', res['HXT'])
elif res.has_atom('H3'):
ag.set_atom('H3', res['H3'])
pos = self._get_neutralize_pos_NH3_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
answer.set_atom('Cl', Cl)
return answer
def _neutralize_Nterm_PRO(self, res):
"""in case of 'PRO', neutralize N-term
"""
ag = AtomGroup()
ag.set_atom('N', res['N'])
ag.set_atom('H2', res['H2'])
if res.has_atom('HXT'):
ag.set_atom('H1', res['HXT'])
elif res.has_atom('H3'):
ag.set_atom('H1', res['H3'])
pos = self._get_neutralize_pos_NH2_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
answer.set_atom('Cl', Cl)
return answer
def neutralize_Cterm(self, res):
"""
C末端側を中性化するためにNa+(AtomGroup)を返す
"""
ag = AtomGroup()
ag.set_atom('C', res['C'])
ag.set_atom('O1', res['O'])
ag.set_atom('O2', res['OXT'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
answer.set_atom('Na', Na)
return answer
# -----------------------------------------------------------------
def neutralize_GLU(self, res):
ag = AtomGroup()
ag.set_atom('C', res['CD'])
ag.set_atom('O1', res['OE1'])
ag.set_atom('O2', res['OE2'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Na'.format(key + 1), Na)
return answer
def neutralize_ASP(self, res):
ag = AtomGroup()
ag.set_atom('C', res['CG'])
ag.set_atom('O1', res['OD1'])
ag.set_atom('O2', res['OD2'])
pos = self._get_neutralize_pos_COO_type(ag)
answer = AtomGroup()
Na = Atom(symbol='Na',
name='Na',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Na'.format(key + 1), Na)
return answer
def neutralize_LYS(self, res):
ag = AtomGroup()
ag.set_atom('N', res['NZ'])
ag.set_atom('H1', res['HZ1'])
ag.set_atom('H2', res['HZ2'])
ag.set_atom('H3', res['HZ3'])
pos = self._get_neutralize_pos_NH3_type(ag)
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Cl'.format(key + 1), Cl)
return answer
def neutralize_ARG(self, res, case=0):
"""
case: 0; 中央
case: 1; NH1側
case: 2; NH2側
"""
case = int(case)
pos = Position()
if case == 0:
length = 3.0
NH1 = res['NH1']
NH2 = res['NH2']
CZ = res['CZ']
M = Position(0.5 * (NH1.xyz.x + NH2.xyz.x),
0.5 * (NH1.xyz.y + NH2.xyz.y),
0.5 * (NH1.xyz.z + NH2.xyz.z))
vCM = M - CZ.xyz
vCM.norm()
pos = CZ.xyz + length * vCM
elif case == 1:
length = 2.0
HH11 = res['HH11']
HH12 = res['HH12']
N = res['NH1']
M = Position(0.5 * (HH11.xyz.x + HH12.xyz.x),
0.5 * (HH11.xyz.y + HH12.xyz.y),
0.5 * (HH11.xyz.z + HH12.xyz.z))
vNM = M - N.xyz
vNM.norm()
pos = N.xyz + length * vNM
elif case == 2:
length = 2.0
HH21 = res['HH21']
HH22 = res['HH22']
N = res['NH2']
M = Position(0.5 * (HH21.xyz.x + HH22.xyz.x),
0.5 * (HH21.xyz.y + HH22.xyz.y),
0.5 * (HH21.xyz.z + HH22.xyz.z))
vNM = M - N.xyz
vNM.norm()
pos = N.xyz + length * vNM
else:
pass
answer = AtomGroup()
Cl = Atom(symbol='Cl',
name='Cl',
position=pos)
key = self.get_last_index(res)
answer.set_atom('{}_Cl'.format(key + 1), Cl)
return answer
# ------------------------------------------------------------------
def neutralize_FAD(self, ag):
print("neutralize_FAD")
print(ag)
answer = AtomGroup()
POO1 = AtomGroup()
POO1.set_atom('P', ag['P'])
# amber format: OP1, pdb: O1P
if ag.has_atom('O1P'):
POO1.set_atom('O1', ag['O1P'])
elif ag.has_atom('OP1'):
POO1.set_atom('O1', ag['OP1'])
else:
            raise BrInputError(ag, 'cannot find "O1P"/"OP1" atom on neutralizing FAD.')
# amber format: OP2, pdb: O2P
if ag.has_atom('O2P'):
POO1.set_atom('O2', ag['O2P'])
elif ag.has_atom('OP2'):
POO1.set_atom('O2', ag['OP2'])
else:
            raise BrInputError(ag, 'cannot find "O2P"/"OP2" atom on neutralizing FAD.')
Na1 = Atom(symbol='Na',
name='Na',
position=self._get_neutralize_pos_POO_type(POO1))
POO2 = AtomGroup()
POO2.set_atom('P', ag['PA'])
POO2.set_atom('O1', ag['O1A']) # amber format: OA1, pdb: O1A
POO2.set_atom('O2', ag['O2A']) # amber format: OA2, pdb: O2A
Na2 = Atom(symbol='Na',
name='Na',
position=self._get_neutralize_pos_POO_type(POO2))
key = self.get_last_index(ag)
answer.set_atom('{}_Na1'.format(key + 1), Na1)
answer.set_atom('{}_Na2'.format(key + 1), Na2)
return answer
# ------------------------------------------------------------------
def _get_neutralize_pos_NH3_type(self, ag):
length = 3.187
H1 = ag['H1']
H2 = ag['H2']
H3 = ag['H3']
N = ag['N']
        # compute the centroid of the three hydrogens
M = Position((H1.xyz.x + H2.xyz.x + H3.xyz.x) / 3.0,
(H1.xyz.y + H2.xyz.y + H3.xyz.y) / 3.0,
(H1.xyz.z + H2.xyz.z + H3.xyz.z) / 3.0)
vNM = M - N.xyz
vNM.norm()
return N.xyz + length * vNM
def _get_neutralize_pos_NH2_type(self, ag):
length = 3.187
H1 = ag['H1']
H2 = ag['H2']
N = ag['N']
vNH1 = H1.xyz - N.xyz
vNH2 = H2.xyz - N.xyz
vM = 0.5 * (vNH1 + vNH2)
vM.norm()
answer = N.xyz + length * vM
return answer
def _get_neutralize_pos_COO_type(self, ag):
length = 2.521
O1 = ag['O1']
O2 = ag['O2']
C = ag['C']
        # compute the midpoint of the two oxygens
M = Position(0.5 * (O1.xyz.x + O2.xyz.x),
0.5 * (O1.xyz.y + O2.xyz.y),
0.5 * (O1.xyz.z + O2.xyz.z))
vCM = M - C.xyz
vCM.norm()
return C.xyz + length * vCM
# -----------------------------------------------------------------
def _get_neutralize_pos_POO_type(self, ag):
length = 2.748
O1 = ag['O1']
O2 = ag['O2']
P = ag['P']
M = Position(0.5 * (O1.xyz.x + O2.xyz.x),
0.5 * (O1.xyz.y + O2.xyz.y),
0.5 * (O1.xyz.z + O2.xyz.z))
vPM = M - P.xyz
vPM.norm()
return P.xyz + length * vPM
if __name__ == "__main__":
import doctest
doctest.testmod()
| ProteinDF/ProteinDF_bridge | proteindf_bridge/modeling.py | Python | gpl-3.0 | 24,765 |
#!/usr/bin/python
def kvadraticka(a, b, c):
    # stub: returns a placeholder constant instead of the roots of a*x^2 + b*x + c = 0
    return 20
def kubicka(a, b, c, d):
    # stub: returns a placeholder constant instead of solving the cubic
    return 77
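# Hedged sketch (not part of the original exercise): the stubs above return
# placeholder constants; a real quadratic solver for a*x^2 + b*x + c = 0
# would use the discriminant, for example:
def kvadraticka_vzorec(a, b, c):
    import math
    d = b * b - 4 * a * c  # discriminant
    if d < 0:
        return ()  # no real roots
    if d == 0:
        return (-b / (2.0 * a),)
    odmocnina = math.sqrt(d)  # square root of the discriminant
    return ((-b + odmocnina) / (2.0 * a), (-b - odmocnina) / (2.0 * a))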
x = 16
y = 'ahoj'
print 'Inicializuji se'  # "Initializing"
| tlapicka/2013-2014.3A | rovnice.py | Python | gpl-3.0 | 145 |
"""Business logic for all URLs in the ``main`` application.
For details on what each function is responsible for, see ``main/urls.py``.
That module documents both URL-to-function mappings and the exact
responsiblities of each function.
"""
from django.core import urlresolvers
from django import http
def index(request): # pylint: disable=W0613
"""Redirect user to ELTS application."""
return http.HttpResponseRedirect(urlresolvers.reverse('elts.views.index'))
| Ichimonji10/elts | apps/main/views.py | Python | gpl-3.0 | 472 |
# -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import loader
loader.init()
import gevent
import requests
from client import interface, event
from client.captcha import recaptcha
def send_input(e, input):
gevent.spawn_later(0.1, interface.call, 'input', 'answer', id=input.id, answer={'captcha': 'invalid test recaptcha answer'})
def test_recaptcha():
browser = requests.session()
resp = browser.get("http://www.google.com/recaptcha/demo/")
challenge_id = recaptcha.parse(resp.text)
result, challenge = recaptcha.solve(browser, challenge_id)
data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": result}
resp = browser.post("http://www.google.com/recaptcha/demo/", data=data)
try:
assert "Correct" in resp.text or "Incorrect" in resp.text or "Richtig" in resp.text or "Falsch" in resp.text or "Rangt." in resp.text or u"Rétt!" in resp.text or u"Feil." in resp.text or u"Fel." in resp.text
except:
print resp.text
raise
test_recaptcha.setUp = lambda: event.add("input:request", send_input)
test_recaptcha.tearDown = lambda: event.remove('input:request', send_input)
if __name__ == '__main__':
test_recaptcha()
| MoroGasper/client | tests/test_recaptcha.py | Python | gpl-3.0 | 1,831 |
# Copyright (C) 2016 Red Hat, Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test cases for the commissaire.handlers.status module.
"""
import json
import mock
import etcd
import falcon
from . import TestCase
from mock import MagicMock
from commissaire.handlers import status
from commissaire.middleware import JSONify
class Test_Status(TestCase):
"""
Tests for the Status model.
"""
def test_status_creation(self):
"""
Verify Status model.
"""
# Make sure status has required inputs
self.assertRaises(
TypeError,
status.Status
)
        # Make sure a Status is created as expected
status_model = status.Status(
etcd={}, investigator={})
        self.assertEqual(type(str()), type(status_model.to_json()))
class Test_StatusResource(TestCase):
"""
Tests for the Status resource.
"""
astatus = ('{"etcd": {"status": "OK"}, "investigator": {"status": '
'"OK", "info": {"size": 1, "in_use": 1, "errors": []}}}')
def before(self):
self.api = falcon.API(middleware=[JSONify()])
self.return_value = MagicMock(etcd.EtcdResult)
self.resource = status.StatusResource()
self.api.add_route('/api/v0/status', self.resource)
def test_status_retrieve(self):
"""
Verify retrieving Status.
"""
with mock.patch('cherrypy.engine.publish') as _publish:
child = MagicMock(value='')
self.return_value._children = [child]
self.return_value.leaves = self.return_value._children
_publish.return_value = [[self.return_value, None]]
body = self.simulate_request('/api/v0/status')
self.assertEqual(self.srmock.status, falcon.HTTP_200)
self.assertEqual(
json.loads(self.astatus),
json.loads(body[0]))
| cooktheryan/commissaire | test/test_handlers_status.py | Python | gpl-3.0 | 2,512 |
import os
from iotile.core.dev import ComponentRegistry
from iotile.ship.recipe import RecipeObject
from iotile.ship.exceptions import RecipeNotFoundError
class RecipeManager:
"""A class that maintains a list of installed recipes and recipe actions.
    It allows fetching recipes by name and automatically building RecipeObjects
from textual descriptions.
The RecipeManager maintains a dictionary of RecipeAction objects that it
compiles from all installed iotile packages. It passes this dictionary to
any Recipe that is created from it so the recipe can find any recipe
actions that it needs.
The RecipeManager finds RecipeActions by looking for plugins that
are registered with pkg_resources.
"""
def __init__(self):
self._recipe_actions = {}
self._recipe_resources = {}
self._recipes = {}
reg = ComponentRegistry()
for name, action in reg.load_extensions('iotile.recipe_action', product_name='build_step'):
self._recipe_actions[name] = action
for name, resource in reg.load_extensions('iotile.recipe_resource', product_name='build_resource'):
self._recipe_resources[name] = resource
def is_valid_action(self, name):
"""Check if a name describes a valid action.
Args:
name (str): The name of the action to check
Returns:
bool: Whether the action is known and valid.
"""
return self._recipe_actions.get(name, None) is not None
def is_valid_recipe(self, recipe_name):
"""Check if a recipe is known and valid.
Args:
name (str): The name of the recipe to check
Returns:
bool: Whether the recipe is known and valid.
"""
return self._recipes.get(recipe_name, None) is not None
def add_recipe_folder(self, recipe_folder, whitelist=None):
"""Add all recipes inside a folder to this RecipeManager with an optional whitelist.
Args:
recipe_folder (str): The path to the folder of recipes to add.
whitelist (list): Only include files whose os.basename() matches something
on the whitelist
"""
if whitelist is not None:
whitelist = set(whitelist)
if recipe_folder == '':
recipe_folder = '.'
for yaml_file in [x for x in os.listdir(recipe_folder) if x.endswith('.yaml')]:
if whitelist is not None and yaml_file not in whitelist:
continue
recipe = RecipeObject.FromFile(os.path.join(recipe_folder, yaml_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
for ship_file in [x for x in os.listdir(recipe_folder) if x.endswith('.ship')]:
if whitelist is not None and ship_file not in whitelist:
continue
recipe = RecipeObject.FromArchive(os.path.join(recipe_folder, ship_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
def add_recipe_actions(self, recipe_actions):
"""Add additional valid recipe actions to RecipeManager
args:
recipe_actions (list): List of tuples. First value of tuple is the classname,
second value of tuple is RecipeAction Object
"""
for action_name, action in recipe_actions:
self._recipe_actions[action_name] = action
def get_recipe(self, recipe_name):
"""Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
"""
if recipe_name.endswith('.yaml'):
recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)
else:
recipe = self._recipes.get(recipe_name)
if recipe is None:
raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])
return recipe
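# Illustrative sketch (not part of the original module): typical usage is to
# build a manager, point it at a folder of .yaml/.ship recipes, and fetch one
# by name; the folder path and recipe name below are hypothetical.
def _example_recipe_lookup():
    manager = RecipeManager()
    manager.add_recipe_folder('recipes', whitelist=['program.yaml'])
    if manager.is_valid_recipe('program'):
        return manager.get_recipe('program')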
| iotile/coretools | iotileship/iotile/ship/recipe_manager.py | Python | gpl-3.0 | 4,144 |
#!/usr/bin/env python2.7
# Author: echel0n <[email protected]>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unittest
import sickrage
from sickrage.core.tv.show import TVShow
from tests import SiCKRAGETestDBCase
class XEMBasicTests(SiCKRAGETestDBCase):
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append(curShow)
except Exception:
pass
def loadFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append(curShow)
except Exception as e:
print "There was an error creating the show"
def test_formating(self):
name = "Game.of.Thrones.S03.720p.HDTV.x264-CtrlHD"
release = "Game of Thrones"
# m = re.match('(?P<ep_ab_num>(?>\d{1,3})(?![ip])).+', name)
escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(release))
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
# print(u"Checking if show " + name + " matches " + curRegex)
match = re.search(curRegex, name, re.I)
# if match:
# print(u"Matched " + curRegex + " to " + name)
if __name__ == "__main__":
print "=================="
print "STARTING - XEM SCENE NUMBERING TESTS"
print "=================="
print "######################################################################"
unittest.main()
| gborri/SickRage | tests/test_xem.py | Python | gpl-3.0 | 2,900 |
from django.db import models
from django.utils.timezone import now
# Create your models here.
class Pagina(models.Model):
titulo = models.CharField(max_length=200, unique=True)
contenido = models.TextField()
imagen = models.ImageField(upload_to='pagina_img', default='media/default.png')
orden = models.PositiveIntegerField()
publicar = models.BooleanField()
    creado = models.DateField(default=now)
slug = models.SlugField(default='Slug default')
def __str__(self):
return self.titulo
| jenriquezdeceano/asit.mx | src/pagina/models.py | Python | gpl-3.0 | 502 |
import numpy as np
import exceptions
import healpy
from file import read
def read_map(filename, HDU=0, field=0, nest=False):
"""Read Healpix map
all columns of the specified HDU are read into a compound numpy MASKED array
if nest is not None, the map is converted if need to NEST or RING ordering.
this function requires healpy"""
m, h = read(filename, HDU=HDU, return_header=True)
try:
m = m[field]
except exceptions.KeyError:
m = m.values()[field]
nside = healpy.npix2nside(m.size)
    if nest is not None:
if h.get('ORDERING', False):
if h['ORDERING'] == 'NESTED' and not nest:
idx = healpy.ring2nest(nside,np.arange(m.size,dtype=np.int32))
m = m[idx]
elif h['ORDERING'] == 'RING' and nest:
idx = healpy.nest2ring(nside,np.arange(m.size,dtype=np.int32))
m = m[idx]
return healpy.ma(m)
def read_mask(filename, HDU=0, field=0, nest=False):
m = read_map(filename, HDU, field, nest)
return np.logical_not(m.filled()).astype(np.bool)
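# Illustrative sketch (not part of the original module): reading a map in a
# fixed ordering and deriving a boolean mask; the filenames are hypothetical.
def _example_read():
    m = read_map('wmap_temperature.fits', nest=False)  # converted to RING if needed
    mask = read_mask('wmap_mask.fits', nest=False)     # True where the map is zero
    return m, mask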
| zonca/pycfitsio | pycfitsio/healpix.py | Python | gpl-3.0 | 1,092 |
from __future__ import (absolute_import, division, print_function)
import unittest
import sys
from sans.gui_logic.models.beam_centre_model import BeamCentreModel
from sans.common.enums import FindDirectionEnum, SANSInstrument
if sys.version_info.major == 3:
from unittest import mock
else:
import mock
class BeamCentreModelTest(unittest.TestCase):
def setUp(self):
self.result = {'pos1':300, 'pos2':-300}
self.centre_finder_instance = mock.MagicMock(return_value = self.result)
self.SANSCentreFinder = mock.MagicMock(return_value = self.centre_finder_instance)
self.beam_centre_model = BeamCentreModel(self.SANSCentreFinder)
def test_that_model_initialises_with_correct_values(self):
self.assertEqual(self.beam_centre_model.max_iterations, 10)
self.assertEqual(self.beam_centre_model.r_min, 60)
self.assertEqual(self.beam_centre_model.r_max, 280)
self.assertEqual(self.beam_centre_model.left_right, True)
self.assertEqual(self.beam_centre_model.up_down, True)
self.assertEqual(self.beam_centre_model.tolerance, 0.000125)
self.assertEqual(self.beam_centre_model.lab_pos_1, '')
self.assertEqual(self.beam_centre_model.lab_pos_2, '')
self.assertEqual(self.beam_centre_model.hab_pos_2, '')
self.assertEqual(self.beam_centre_model.hab_pos_1, '')
self.assertEqual(self.beam_centre_model.scale_1, 1000)
self.assertEqual(self.beam_centre_model.scale_2, 1000)
self.assertEqual(self.beam_centre_model.COM, False)
self.assertEqual(self.beam_centre_model.verbose, False)
self.assertEqual(self.beam_centre_model.q_min, 0.01)
self.assertEqual(self.beam_centre_model.q_max, 0.1)
def test_that_can_update_model_values(self):
self.beam_centre_model.scale_2 = 1.0
self.assertEqual(self.beam_centre_model.scale_2, 1.0)
def test_that_correct_values_are_set_for_LARMOR(self):
self.beam_centre_model.reset_to_defaults_for_instrument(SANSInstrument.LARMOR)
self.assertEqual(self.beam_centre_model.scale_1, 1.0)
def test_that_correct_values_are_set_for_LOQ(self):
self.beam_centre_model.reset_to_defaults_for_instrument(SANSInstrument.LOQ)
self.assertEqual(self.beam_centre_model.r_max, 200)
def test_that_find_beam_centre_calls_centre_finder_once_when_COM_is_False(self):
state = mock.MagicMock()
self.beam_centre_model.find_beam_centre(state)
self.SANSCentreFinder.return_value.assert_called_once_with(state, r_min=self.beam_centre_model.r_min,
r_max=self.beam_centre_model.r_max,
max_iter= self.beam_centre_model.max_iterations,
x_start=self.beam_centre_model.lab_pos_1,
y_start=self.beam_centre_model.lab_pos_2,
tolerance=self.beam_centre_model.tolerance,
find_direction=FindDirectionEnum.All,
reduction_method=True,
verbose=False)
self.assertEqual(state.convert_to_q.q_min, self.beam_centre_model.q_min)
self.assertEqual(state.convert_to_q.q_max, self.beam_centre_model.q_max)
def test_that_find_beam_centre_calls_centre_finder_twice_when_COM_is_TRUE(self):
state = mock.MagicMock()
self.beam_centre_model.COM = True
self.beam_centre_model.find_beam_centre(state)
self.assertEqual(self.SANSCentreFinder.return_value.call_count, 2)
self.SANSCentreFinder.return_value.assert_called_with(state, r_min=self.beam_centre_model.r_min,
r_max=self.beam_centre_model.r_max,
max_iter= self.beam_centre_model.max_iterations,
x_start=self.result['pos1'],
y_start=self.result['pos2'],
tolerance=self.beam_centre_model.tolerance,
find_direction=FindDirectionEnum.All,
reduction_method=True,
verbose=False)
self.SANSCentreFinder.return_value.assert_any_call(state, r_min=self.beam_centre_model.r_min,
r_max=self.beam_centre_model.r_max,
max_iter=self.beam_centre_model.max_iterations,
x_start=self.beam_centre_model.lab_pos_1,
y_start=self.beam_centre_model.lab_pos_2,
tolerance=self.beam_centre_model.tolerance,
find_direction=FindDirectionEnum.All,
reduction_method=False)
if __name__ == '__main__':
unittest.main()
| ScreamingUdder/mantid | scripts/test/SANS/gui_logic/beam_centre_model_test.py | Python | gpl-3.0 | 5,587 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Hardwarerelease.medium'
db.add_column('ashop_hardwarerelease', 'medium', self.gf('django.db.models.fields.CharField')(default=0, max_length=10), keep_default=False)
def backwards(self, orm):
# Deleting field 'Hardwarerelease.medium'
db.delete_column('ashop_hardwarerelease', 'medium')
models = {
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'multiple': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'7437b6be-ab03-4a9d-af4e-dbdd430c819e'", 'max_length': '36'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.release': {
'Meta': {'ordering': "('releasedate',)", 'object_name': 'Release'},
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Profession']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_label'", 'to': "orm['alibrary.Label']"}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'ashop.baseproduct': {
'Meta': {'ordering': "['name']", 'object_name': 'Baseproduct', '_ormbases': ['shop.Product']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'needs_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'baseproduct_picture'", 'null': 'True', 'to': "orm['filer.Image']"}),
'picture_listing': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'baseproduct_picture_listing'", 'null': 'True', 'to': "orm['filer.Image']"}),
'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'}),
'subline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'weight': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ashop.downloadrelease': {
'Meta': {'ordering': "['name']", 'object_name': 'Downloadrelease', '_ormbases': ['ashop.Releaseproduct']},
'releaseproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ashop.Releaseproduct']", 'unique': 'True', 'primary_key': 'True'})
},
'ashop.hardwarerelease': {
'Meta': {'ordering': "['name']", 'object_name': 'Hardwarerelease', '_ormbases': ['ashop.Releaseproduct']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'needs_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'releaseproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ashop.Releaseproduct']", 'unique': 'True', 'primary_key': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ashop.releaseproduct': {
'Meta': {'ordering': "['name']", 'object_name': 'Releaseproduct', '_ormbases': ['shop.Product']},
'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releaseproduct_release'", 'to': "orm['alibrary.Release']"})
},
'ashop.singleproduct': {
'Meta': {'object_name': 'SingleProduct', 'db_table': "'cmsplugin_singleproduct'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ashop.Baseproduct']"}),
'style': ('django.db.models.fields.CharField', [], {'default': "'l'", 'max_length': '24'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_shop.product_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['ashop']
| hzlf/openbroadcast | website/apps/ashop/migrations/0015_auto__add_field_hardwarerelease_medium.py | Python | gpl-3.0 | 23,666 |
# -*- coding: utf-8 -*-
import kivymd.snackbar as Snackbar
from kivy.app import App
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty
from kivy.uix.image import Image
from kivymd.bottomsheet import MDListBottomSheet, MDGridBottomSheet
from kivymd.button import MDIconButton
from kivymd.label import MDLabel
from kivymd.list import ILeftBody, ILeftBodyTouch, IRightBodyTouch
from kivymd.navigationdrawer import NavigationDrawer
from kivymd.selectioncontrols import MDCheckbox
from kivymd.theming import ThemeManager
from kivymd.dialog import MDDialog
from kivymd.time_picker import MDTimePicker
from kivymd.date_picker import MDDatePicker
from kivymd.material_resources import DEVICE_TYPE
main_widget_kv = '''
#:import Toolbar kivymd.toolbar.Toolbar
#:import ThemeManager kivymd.theming.ThemeManager
#:import NavigationDrawer kivymd.navigationdrawer.NavigationDrawer
#:import MDCheckbox kivymd.selectioncontrols.MDCheckbox
#:import MDSwitch kivymd.selectioncontrols.MDSwitch
#:import MDList kivymd.list.MDList
#:import OneLineListItem kivymd.list.OneLineListItem
#:import TwoLineListItem kivymd.list.TwoLineListItem
#:import ThreeLineListItem kivymd.list.ThreeLineListItem
#:import OneLineAvatarListItem kivymd.list.OneLineAvatarListItem
#:import OneLineIconListItem kivymd.list.OneLineIconListItem
#:import OneLineAvatarIconListItem kivymd.list.OneLineAvatarIconListItem
#:import SingleLineTextField kivymd.textfields.SingleLineTextField
#:import MDSpinner kivymd.spinner.MDSpinner
#:import MDCard kivymd.card.MDCard
#:import MDSeparator kivymd.card.MDSeparator
#:import MDDropdownMenu kivymd.menu.MDDropdownMenu
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:import colors kivymd.color_definitions.colors
#:import SmartTile kivymd.grid.SmartTile
#:import MDSlider kivymd.slider.MDSlider
#:import MDTabbedPanel kivymd.tabs.MDTabbedPanel
#:import MDTab kivymd.tabs.MDTab
#:import MDProgressBar kivymd.progressbar.MDProgressBar
#:import MDAccordion kivymd.accordion.MDAccordion
#:import MDAccordionItem kivymd.accordion.MDAccordionItem
#:import MDThemePicker kivymd.theme_picker.MDThemePicker
#:import MDBottomNavigation kivymd.tabs.MDBottomNavigation
#:import MDBottomNavigationItem kivymd.tabs.MDBottomNavigationItem
BoxLayout:
orientation: 'vertical'
Toolbar:
id: toolbar
title: 'KivyMD Kitchen Sink'
background_color: app.theme_cls.primary_color
background_palette: 'Primary'
background_hue: '500'
left_action_items: [['menu', lambda x: app.nav_drawer.toggle()]]
right_action_items: [['dots-vertical', lambda x: app.nav_drawer.toggle()]]
ScreenManager:
id: scr_mngr
Screen:
name: 'bottomsheet'
MDRaisedButton:
text: "Open List Bottom Sheet"
opposite_colors: True
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.6}
on_release: app.show_example_bottom_sheet()
MDRaisedButton:
text: "Open grid bottom sheet"
opposite_colors: True
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.3}
on_release: app.show_example_grid_bottom_sheet()
Screen:
name: 'button'
BoxLayout:
size_hint: None, None
size: '88dp', '48dp'
padding: '12dp'
pos_hint: {'center_x': 0.75, 'center_y': 0.8}
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Disable buttons"
size_hint_x:None
width: '56dp'
MDCheckbox:
id: disable_the_buttons
MDIconButton:
icon: 'sd'
pos_hint: {'center_x': 0.25, 'center_y': 0.8}
disabled: disable_the_buttons.active
MDFlatButton:
text: 'MDFlatButton'
pos_hint: {'center_x': 0.5, 'center_y': 0.6}
disabled: disable_the_buttons.active
MDRaisedButton:
text: "MDRaisedButton"
elevation_normal: 2
opposite_colors: True
pos_hint: {'center_x': 0.5, 'center_y': 0.4}
disabled: disable_the_buttons.active
MDFloatingActionButton:
id: float_act_btn
icon: 'plus'
opposite_colors: True
elevation_normal: 8
pos_hint: {'center_x': 0.5, 'center_y': 0.2}
disabled: disable_the_buttons.active
Screen:
name: 'card'
MDCard:
size_hint: None, None
size: dp(320), dp(180)
pos_hint: {'center_x': 0.5, 'center_y': 0.7}
MDCard:
size_hint: None, None
size: dp(320), dp(180)
pos_hint: {'center_x': 0.5, 'center_y': 0.3}
BoxLayout:
orientation:'vertical'
padding: dp(8)
MDLabel:
text: 'Title'
theme_text_color: 'Secondary'
font_style:"Title"
size_hint_y: None
height: dp(36)
MDSeparator:
height: dp(1)
MDLabel:
text: 'Body'
theme_text_color: 'Primary'
Screen:
name: 'slider'
BoxLayout:
MDSlider:
id: hslider
min:0
max:100
value: 10
MDSlider:
id: vslider
orientation:'vertical'
min:0
max:100
value: hslider.value
Screen:
name: 'dialog'
MDRaisedButton:
text: "Open dialog"
size_hint: None, None
size: 3 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
opposite_colors: True
on_release: app.show_example_dialog()
Screen:
name: 'grid'
ScrollView:
do_scroll_x: False
GridLayout:
cols: 3
row_default_height: (self.width - self.cols*self.spacing[0])/self.cols
row_force_default: True
size_hint_y: None
height: 8 * dp(100) # /1 * self.row_default_height
padding: dp(4), dp(4)
spacing: dp(4)
SmartTileWithLabel:
mipmap: True
source: './assets/african-lion-951778_1280.jpg'
text: "African Lion"
SmartTile:
mipmap: True
source: './assets/beautiful-931152_1280.jpg'
SmartTile:
mipmap: True
source: './assets/african-lion-951778_1280.jpg'
SmartTile:
mipmap: True
source: './assets/guitar-1139397_1280.jpg'
SmartTile:
mipmap: True
source: './assets/robin-944887_1280.jpg'
SmartTile:
mipmap: True
source: './assets/kitten-1049129_1280.jpg'
SmartTile:
mipmap: True
source: './assets/light-bulb-1042480_1280.jpg'
SmartTile:
mipmap: True
source: './assets/tangerines-1111529_1280.jpg'
Screen:
name: 'list'
ScrollView:
do_scroll_x: False
MDList:
id: ml
OneLineListItem:
text: "One-line item"
TwoLineListItem:
text: "Two-line item"
secondary_text: "Secondary text here"
ThreeLineListItem:
text: "Three-line item"
secondary_text: "This is a multi-line label where you can fit more text than usual"
OneLineAvatarListItem:
text: "Single-line item with avatar"
AvatarSampleWidget:
source: './assets/avatar.png'
TwoLineAvatarListItem:
type: "two-line"
text: "Two-line item..."
secondary_text: "with avatar"
AvatarSampleWidget:
source: './assets/avatar.png'
ThreeLineAvatarListItem:
type: "three-line"
text: "Three-line item..."
secondary_text: "...with avatar..." + '\\n' + "and third line!"
AvatarSampleWidget:
source: './assets/avatar.png'
OneLineIconListItem:
text: "Single-line item with left icon"
IconLeftSampleWidget:
id: li_icon_1
icon: 'star-circle'
TwoLineIconListItem:
text: "Two-line item..."
secondary_text: "...with left icon"
IconLeftSampleWidget:
id: li_icon_2
icon: 'comment-text'
ThreeLineIconListItem:
text: "Three-line item..."
secondary_text: "...with left icon..." + '\\n' + "and third line!"
IconLeftSampleWidget:
id: li_icon_3
icon: 'sd'
OneLineAvatarIconListItem:
text: "Single-line + avatar&icon"
AvatarSampleWidget:
source: './assets/avatar.png'
IconRightSampleWidget:
TwoLineAvatarIconListItem:
text: "Two-line item..."
secondary_text: "...with avatar&icon"
AvatarSampleWidget:
source: './assets/avatar.png'
IconRightSampleWidget:
ThreeLineAvatarIconListItem:
text: "Three-line item..."
secondary_text: "...with avatar&icon..." + '\\n' + "and third line!"
AvatarSampleWidget:
source: './assets/avatar.png'
IconRightSampleWidget:
Screen:
name: 'menu'
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
text: 'Open menu'
opposite_colors: True
pos_hint: {'center_x': 0.1, 'center_y': 0.9}
on_release: MDDropdownMenu(items=app.menu_items, width_mult=4).open(self)
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
text: 'Open menu'
opposite_colors: True
pos_hint: {'center_x': 0.1, 'center_y': 0.1}
on_release: MDDropdownMenu(items=app.menu_items, width_mult=4).open(self)
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
text: 'Open menu'
opposite_colors: True
pos_hint: {'center_x': 0.9, 'center_y': 0.1}
on_release: MDDropdownMenu(items=app.menu_items, width_mult=4).open(self)
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
text: 'Open menu'
opposite_colors: True
pos_hint: {'center_x': 0.9, 'center_y': 0.9}
on_release: MDDropdownMenu(items=app.menu_items, width_mult=4).open(self)
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
text: 'Open menu'
opposite_colors: True
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
on_release: MDDropdownMenu(items=app.menu_items, width_mult=4).open(self)
Screen:
name: 'progress'
MDCheckbox:
id: chkbox
size_hint: None, None
size: dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.4}
active: True
MDSpinner:
id: spinner
size_hint: None, None
size: dp(46), dp(46)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
active: True if chkbox.active else False
Screen:
name: 'progressbars'
BoxLayout:
orientation:'vertical'
padding: '8dp'
MDSlider:
id:progress_slider
min:0
max:100
value: 40
MDProgressBar:
value: progress_slider.value
MDProgressBar:
reversed: True
value: progress_slider.value
BoxLayout:
MDProgressBar:
orientation:"vertical"
reversed: True
value: progress_slider.value
MDProgressBar:
orientation:"vertical"
value: progress_slider.value
Screen:
name: 'selectioncontrols'
MDCheckbox:
id: grp_chkbox_1
group: 'test'
size_hint: None, None
size: dp(48), dp(48)
pos_hint: {'center_x': 0.25, 'center_y': 0.5}
MDCheckbox:
id: grp_chkbox_2
group: 'test'
size_hint: None, None
size: dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
MDSwitch:
size_hint: None, None
size: dp(36), dp(48)
pos_hint: {'center_x': 0.75, 'center_y': 0.5}
active: False
Screen:
name: 'snackbar'
MDRaisedButton:
text: "Create simple snackbar"
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.75}
opposite_colors: True
on_release: app.show_example_snackbar('simple')
MDRaisedButton:
text: "Create snackbar with button"
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
opposite_colors: True
on_release: app.show_example_snackbar('button')
MDRaisedButton:
text: "Create snackbar with a lot of text"
size_hint: None, None
size: 5 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.25}
opposite_colors: True
on_release: app.show_example_snackbar('verylong')
Screen:
name: 'textfields'
ScrollView:
BoxLayout:
orientation: 'vertical'
size_hint_y: None
height: dp(1000)
BoxLayout:
size_hint_y: None
height: dp(400)
padding: dp(48)
orientation: 'vertical'
spacing: 10
SingleLineTextField:
hint_text: "No helper text"
SingleLineTextField:
hint_text: "Helper text on focus"
message: "This will disappear when you click off"
message_mode: "on_focus"
SingleLineTextField:
hint_text: "Persistent helper text"
message: "Text is always here"
message_mode: "persistent"
SingleLineTextField:
id: text_field_error
hint_text: "Helper text on error (Hit Enter with two characters here)"
message: "Two is my least favorite number"
message_mode: "on_error"
SingleLineTextField:
hint_text: "Max text length = 10"
max_text_length: 10
SingleLineTextField:
hint_text: "required = True"
required: True
message_mode: "on_error"
BoxLayout:
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Body1 label"
halign: 'center'
MDLabel:
font_style: 'Body2'
theme_text_color: 'Primary'
text: "Body2 label"
halign: 'center'
BoxLayout:
MDLabel:
font_style: 'Caption'
theme_text_color: 'Primary'
text: "Caption label"
halign: 'center'
MDLabel:
font_style: 'Subhead'
theme_text_color: 'Primary'
text: "Subhead label"
halign: 'center'
BoxLayout:
MDLabel:
font_style: 'Title'
theme_text_color: 'Primary'
text: "Title label"
halign: 'center'
MDLabel:
font_style: 'Headline'
theme_text_color: 'Primary'
text: "Headline label"
halign: 'center'
MDLabel:
font_style: 'Display1'
theme_text_color: 'Primary'
text: "Display1 label"
halign: 'center'
size_hint_y: None
height: self.texture_size[1] + dp(4)
MDLabel:
font_style: 'Display2'
theme_text_color: 'Primary'
text: "Display2 label"
halign: 'center'
size_hint_y: None
height: self.texture_size[1] + dp(4)
MDLabel:
font_style: 'Display3'
theme_text_color: 'Primary'
text: "Display3 label"
halign: 'center'
size_hint_y: None
height: self.texture_size[1] + dp(4)
MDLabel:
font_style: 'Display4'
theme_text_color: 'Primary'
text: "Display4 label"
halign: 'center'
size_hint_y: None
height: self.texture_size[1] + dp(4)
BoxLayout:
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Primary color"
halign: 'center'
MDLabel:
font_style: 'Body1'
theme_text_color: 'Secondary'
text: "Secondary color"
halign: 'center'
BoxLayout:
MDLabel:
font_style: 'Body1'
theme_text_color: 'Hint'
text: "Hint color"
halign: 'center'
MDLabel:
font_style: 'Body1'
theme_text_color: 'Error'
text: "Error color"
halign: 'center'
MDLabel:
font_style: 'Body1'
theme_text_color: 'Custom'
text_color: (0,1,0,.4)
text: "Custom"
halign: 'center'
Screen:
name: 'theming'
BoxLayout:
orientation: 'vertical'
size_hint_y: None
height: dp(80)
center_y: self.parent.center_y
MDRaisedButton:
size_hint: None, None
size: 3 * dp(48), dp(48)
center_x: self.parent.center_x
text: 'Change theme'
on_release: MDThemePicker().open()
opposite_colors: True
pos_hint: {'center_x': 0.5}
MDLabel:
text: "Current: " + app.theme_cls.theme_style + ", " + app.theme_cls.primary_palette
theme_text_color: 'Primary'
pos_hint: {'center_x': 0.5}
halign: 'center'
Screen:
name: 'toolbar'
Toolbar:
title: "Simple toolbar"
pos_hint: {'center_x': 0.5, 'center_y': 0.75}
background_color: get_color_from_hex(colors['Teal']['500'])
background_palette: 'Teal'
background_hue: '500'
Toolbar:
title: "Toolbar with right buttons"
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
background_color: get_color_from_hex(colors['Amber']['700'])
background_palette: 'Amber'
background_hue: '700'
right_action_items: [['content-copy', lambda x: None]]
Toolbar:
title: "Toolbar with left and right buttons"
pos_hint: {'center_x': 0.5, 'center_y': 0.25}
background_color: get_color_from_hex(colors['DeepPurple']['A400'])
background_palette: 'DeepPurple'
background_hue: 'A400'
left_action_items: [['arrow-left', lambda x: None]]
right_action_items: [['lock', lambda x: None], \
['camera', lambda x: None], \
['play', lambda x: None]]
Screen:
name: 'tabs'
MDTabbedPanel:
id: tab_panel
tab_display_mode:'text'
MDTab:
name: 'music'
text: "Music" # Why are these not set!!!
icon: "playlist-play"
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Here is my music list :)"
halign: 'center'
MDTab:
name: 'movies'
text: 'Movies'
icon: "movie"
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Show movies here :)"
halign: 'center'
BoxLayout:
size_hint_y:None
height: '48dp'
padding: '12dp'
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Use icons"
size_hint_x:None
width: '64dp'
MDCheckbox:
on_state: tab_panel.tab_display_mode = 'icons' if tab_panel.tab_display_mode=='text' else 'text'
Screen:
name: 'accordion'
BoxLayout:
MDAccordion:
orientation:'vertical'
size_hint_x: None
width: '240dp'
MDAccordionItem:
id: accordion_item
title:'Item 1'
icon: 'home'
ScrollView:
MDList:
OneLineListItem:
text: "Subitem 1"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 2"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 3"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
MDAccordionItem:
title:'Item 2'
icon: 'earth'
ScrollView:
MDList:
OneLineListItem:
text: "Subitem 4"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 5"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 6"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
MDAccordionItem:
title:'Item 3'
icon: 'account'
ScrollView:
MDList:
OneLineListItem:
text: "Subitem 7"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 8"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
OneLineListItem:
text: "Subitem 9"
theme_text_color: 'Custom'
text_color: [1,1,1,1]
MDLabel:
text: 'Content'
theme_text_color: 'Primary'
Screen:
name: 'pickers'
BoxLayout:
spacing: dp(40)
orientation: 'vertical'
size_hint_x: None
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
BoxLayout:
orientation: 'vertical'
# size_hint: (None, None)
MDRaisedButton:
text: "Open time picker"
size_hint: None, None
size: 3 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
opposite_colors: True
on_release: app.show_example_time_picker()
MDLabel:
id: time_picker_label
theme_text_color: 'Primary'
size_hint: None, None
size: dp(48)*3, dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
BoxLayout:
size: dp(48)*3, dp(48)
size_hint: (None, None)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
MDLabel:
theme_text_color: 'Primary'
text: "Start on previous time"
size_hint: None, None
size: dp(130), dp(48)
MDCheckbox:
id: time_picker_use_previous_time
size_hint: None, None
size: dp(48), dp(48)
BoxLayout:
orientation: 'vertical'
MDRaisedButton:
text: "Open date picker"
size_hint: None, None
size: 3 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
opposite_colors: True
on_release: app.show_example_date_picker()
MDLabel:
id: date_picker_label
theme_text_color: 'Primary'
size_hint: None, None
size: dp(48)*3, dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
BoxLayout:
size: dp(48)*3, dp(48)
size_hint: (None, None)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
MDLabel:
theme_text_color: 'Primary'
text: "Start on previous date"
size_hint: None, None
size: dp(130), dp(48)
MDCheckbox:
id: date_picker_use_previous_date
size_hint: None, None
size: dp(48), dp(48)
Screen:
name: 'bottom_navigation'
MDBottomNavigation:
id: bottom_navigation_demo
MDBottomNavigationItem:
name: 'octagon'
text: "Warning"
icon: "alert-octagon"
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Warning!"
halign: 'center'
MDBottomNavigationItem:
name: 'banking'
text: "Bank"
icon: 'bank'
BoxLayout:
orientation: 'vertical'
size_hint_y: None
padding: dp(48)
spacing: 10
SingleLineTextField:
hint_text: "You can put any widgets here"
message: "Hello :)"
message_mode: "on_focus"
MDBottomNavigationItem:
name: 'bottom_navigation_desktop_1'
text: "Hello"
icon: 'alert'
id: bottom_navigation_desktop_1
BoxLayout:
orientation: 'vertical'
size_hint_y: None
padding: dp(48)
spacing: 10
SingleLineTextField:
hint_text: "Hello again"
MDBottomNavigationItem:
name: 'bottom_navigation_desktop_2'
text: "Food"
icon: 'food'
id: bottom_navigation_desktop_2
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Cheese!"
halign: 'center'
<KitchenSinkNavDrawer>
title: "NavigationDrawer"
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Bottom sheets"
on_release: app.root.ids.scr_mngr.current = 'bottomsheet'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Buttons"
on_release: app.root.ids.scr_mngr.current = 'button'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Cards"
on_release: app.root.ids.scr_mngr.current = 'card'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Dialogs"
on_release: app.root.ids.scr_mngr.current = 'dialog'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Grid lists"
on_release: app.root.ids.scr_mngr.current = 'grid'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Lists"
on_release: app.root.ids.scr_mngr.current = 'list'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Menus"
on_release: app.root.ids.scr_mngr.current = 'menu'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Progress & activity"
on_release: app.root.ids.scr_mngr.current = 'progress'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Progress bars"
on_release: app.root.ids.scr_mngr.current = 'progressbars'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Selection controls"
on_release: app.root.ids.scr_mngr.current = 'selectioncontrols'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Snackbars"
on_release: app.root.ids.scr_mngr.current = 'snackbar'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Text fields"
on_release: app.root.ids.scr_mngr.current = 'textfields'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Themes"
on_release: app.root.ids.scr_mngr.current = 'theming'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Toolbars"
on_release: app.root.ids.scr_mngr.current = 'toolbar'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Sliders"
on_release: app.root.ids.scr_mngr.current = 'slider'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Tabs"
on_release: app.root.ids.scr_mngr.current = 'tabs'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Accordion"
on_release: app.root.ids.scr_mngr.current = 'accordion'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Pickers"
on_release: app.root.ids.scr_mngr.current = 'pickers'
NavigationDrawerIconButton:
icon: 'checkbox-blank-circle'
text: "Bottom Navigation"
on_release: app.root.ids.scr_mngr.current = 'bottom_navigation'
'''
class KitchenSinkNavDrawer(NavigationDrawer):
pass
class KitchenSink(App):
theme_cls = ThemeManager()
nav_drawer = ObjectProperty()
previous_date = ObjectProperty()
menu_items = [
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
{'viewclass': 'MDMenuItem',
'text': 'Example item'},
]
def build(self):
main_widget = Builder.load_string(main_widget_kv)
# self.theme_cls.theme_style = 'Dark'
main_widget.ids.text_field_error.bind(
on_text_validate=self.set_error_message,
on_focus=self.set_error_message)
self.nav_drawer = KitchenSinkNavDrawer()
self.bottom_navigation_remove_mobile(main_widget)
return main_widget
def bottom_navigation_remove_mobile(self, widget):
# Removes some items from bottom-navigation demo when on mobile
if DEVICE_TYPE == 'mobile':
widget.ids.bottom_navigation_demo.remove_widget(widget.ids.bottom_navigation_desktop_2)
if DEVICE_TYPE == 'mobile' or DEVICE_TYPE == 'tablet':
widget.ids.bottom_navigation_demo.remove_widget(widget.ids.bottom_navigation_desktop_1)
def show_example_snackbar(self, snack_type):
if snack_type == 'simple':
Snackbar.make("This is a snackbar!")
elif snack_type == 'button':
Snackbar.make("This is a snackbar", button_text="with a button!",
button_callback=lambda *args: 2)
elif snack_type == 'verylong':
Snackbar.make("This is a very very very very very very very long "
"snackbar!",
button_text="Hello world")
def show_example_dialog(self):
content = MDLabel(font_style='Body1',
theme_text_color='Secondary',
text="This is a dialog with a title and some text. "
"That's pretty awesome right!",
valign='top')
content.bind(size=content.setter('text_size'))
self.dialog = MDDialog(title="This is a test dialog",
content=content,
size_hint=(.8, None),
height=dp(200),
auto_dismiss=False)
self.dialog.add_action_button("Dismiss",
action=lambda *x: self.dialog.dismiss())
self.dialog.open()
def get_time_picker_data(self, instance, time):
self.root.ids.time_picker_label.text = str(time)
self.previous_time = time
def show_example_time_picker(self):
self.time_dialog = MDTimePicker()
self.time_dialog.bind(time=self.get_time_picker_data)
if self.root.ids.time_picker_use_previous_time.active:
try:
self.time_dialog.set_time(self.previous_time)
except AttributeError:
pass
self.time_dialog.open()
def set_previous_date(self, date_obj):
self.previous_date = date_obj
self.root.ids.date_picker_label.text = str(date_obj)
def show_example_date_picker(self):
if self.root.ids.date_picker_use_previous_date.active:
pd = self.previous_date
try:
MDDatePicker(self.set_previous_date,
pd.year, pd.month, pd.day).open()
except AttributeError:
MDDatePicker(self.set_previous_date).open()
else:
MDDatePicker(self.set_previous_date).open()
def show_example_bottom_sheet(self):
bs = MDListBottomSheet()
bs.add_item("Here's an item with text only", lambda x: x)
bs.add_item("Here's an item with an icon", lambda x: x,
icon='clipboard-account')
bs.add_item("Here's another!", lambda x: x, icon='nfc')
bs.open()
def show_example_grid_bottom_sheet(self):
bs = MDGridBottomSheet()
bs.add_item("Facebook", lambda x: x,
icon_src='./assets/facebook-box.png')
bs.add_item("YouTube", lambda x: x,
icon_src='./assets/youtube-play.png')
bs.add_item("Twitter", lambda x: x,
icon_src='./assets/twitter.png')
bs.add_item("Da Cloud", lambda x: x,
icon_src='./assets/cloud-upload.png')
bs.add_item("Camera", lambda x: x,
icon_src='./assets/camera.png')
bs.open()
def set_error_message(self, *args):
        self.root.ids.text_field_error.error = (
            len(self.root.ids.text_field_error.text) == 2)
def on_pause(self):
return True
def on_stop(self):
pass
class AvatarSampleWidget(ILeftBody, Image):
pass
class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton):
pass
class IconRightSampleWidget(IRightBodyTouch, MDCheckbox):
pass
if __name__ == '__main__':
KitchenSink().run()
| jmadajian/CinemaFlowWorks | Examples/kitchen_sink.py | Python | gpl-3.0 | 41,711 |
'''
Construct and manipulate multilayer representations of configuration vectors
Created by: Ankit Khambhati
Change Log
----------
2016/02/03 - Implement functions to construct multilayer networks
'''
import numpy as np
import scipy.sparse as sp
from ....Common import errors
from ...Transforms import configuration
def ml_modularity_matr(conn_matr, gamma, omega, null):
"""
    Find the multilayer modularity matrix of a network and an associated
    null model. The method assumes sequential linking between layers with
    homogeneous weights.
Parameters
----------
conn_matr: numpy.ndarray
Connection matrix over multiple layers
Has shape: [n_layers x n_conns]
gamma: float
Intra-layer resolution parameter, typical values around 1.0
omega: float
Inter-layer resolution parameter, typical values around 1.0
null: str
        Choose a null model type: ['none', 'temporal',
'connectional', 'nodal']
Returns
-------
ml_mod_matr: numpy.ndarray
Multilayer modularity matrix
Has shape: [n_nodes*n_layers x n_nodes*n_layers]
twomu: float
Total edge weight in the network
"""
# Standard param checks
errors.check_type(conn_matr, np.ndarray)
errors.check_type(gamma, float)
errors.check_type(omega, float)
errors.check_type(null, str)
# Check conn_matr dimensions
if not len(conn_matr.shape) == 2:
raise ValueError('%r does not have two-dimensions' % conn_matr)
n_layers = conn_matr.shape[0]
n_conns = conn_matr.shape[1]
n_nodes = int(np.floor(np.sqrt(2*n_conns))+1)
    # Check null model specification
valid_null_types = ['none', 'temporal', 'connectional', 'nodal']
null = null.lower()
if null not in valid_null_types:
        raise ValueError('%r is not one of %r' % (null, valid_null_types))
# Initialize multilayer matrix
B = np.zeros((n_nodes*n_layers, n_nodes*n_layers))
twomu = 0
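    # Editor's note on the null models implemented below: 'temporal' permutes
    # the order of layers, 'connectional' permutes node identities within each
    # layer, and 'nodal' (handled further down) permutes the identity links
    # between consecutive layers.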
if null == 'temporal':
rnd_layer_ix = np.random.permutation(n_layers)
conn_matr = conn_matr[rnd_layer_ix, :]
if null == 'connectional':
rnd_node_ix = np.random.permutation(n_nodes)
rnd_node_iy = np.random.permutation(n_nodes)
ix, iy = np.mgrid[0:n_nodes, 0:n_nodes]
for ll, conn_vec in enumerate(conn_matr):
A = configuration.convert_conn_vec_to_adj_matr(conn_vec)
if null == 'connectional':
A = A[rnd_node_ix[ix], rnd_node_iy[iy]]
A = np.triu(A, k=1)
A += A.T
# Compute node degree
k = np.sum(A, axis=0)
        twom = np.sum(k)  # Total intra-layer edge weight (2m for this layer)
        twomu += twom  # Accumulate total edge weight across layers
# NG Null-model
if twom < 1e-6:
P = np.dot(k.reshape(-1, 1), k.reshape(1, -1)) / 1.0
else:
P = np.dot(k.reshape(-1, 1), k.reshape(1, -1)) / twom
# Multi-slice modularity matrix
start_ix = ll*n_nodes
end_ix = (ll+1)*n_nodes
B[start_ix:end_ix, start_ix:end_ix] = A - gamma*P
# Add inter-slice degree
    twomu += 2*omega*n_nodes*(n_layers-1)
# Add the sequential inter-layer model
interlayer = sp.spdiags(np.ones((2, n_nodes*n_layers)),
[-n_nodes, n_nodes],
n_nodes*n_layers, n_nodes*n_layers).toarray()
if null == 'nodal':
null_layer = np.random.permutation(np.diag(np.ones(n_nodes)))
        for ll in range(n_layers-1):
interlayer[ll*n_nodes:(ll+1)*n_nodes,
(ll+1)*n_nodes:(ll+2)*n_nodes] = null_layer
interlayer = np.triu(interlayer, k=1)
interlayer += interlayer.T
B = B + omega*interlayer
B = np.triu(B, k=1)
B += B.T
ml_mod_matr = B
return ml_mod_matr, twomu
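# Example usage (editor's sketch, not part of the original module): build the
# supra-modularity matrix for a random multilayer network and hand it to a
# generalized Louvain solver, scaling the raw modularity by 1/twomu.
#
#     import numpy as np
#     n_layers, n_nodes = 4, 10
#     n_conns = n_nodes * (n_nodes - 1) // 2
#     conn_matr = np.random.rand(n_layers, n_conns)
#     B, twomu = ml_modularity_matr(conn_matr, 1.0, 1.0, 'none')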
| akhambhati/Echobase | Echobase/Network/Partitioning/Module/modularity.py | Python | gpl-3.0 | 3,949 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import math
import espressomd
import espressomd.interactions
import espressomd.shapes
import tests_common
@utx.skipIfMissingFeatures(["LENNARD_JONES_GENERIC"])
class ShapeBasedConstraintTest(ut.TestCase):
box_l = 30.
system = espressomd.System(box_l=3 * [box_l])
def tearDown(self):
self.system.part.clear()
self.system.constraints.clear()
def pos_on_surface(self, theta, v, semiaxis0, semiaxis1,
semiaxis2, center=np.array([15, 15, 15])):
"""Return position on ellipsoid surface."""
pos = np.array([semiaxis0 * np.sqrt(1. - v**2) * np.cos(theta),
semiaxis1 * np.sqrt(1. - v**2) * np.sin(theta),
semiaxis2 * v])
return pos + center
def test_hollow_conical_frustum(self):
"""
Test implementation of conical frustum shape.
"""
R1 = 5.0
R2 = 10.0
LENGTH = 15.0
D = 2.4
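        # Editor's note: z(y) maps a radial position y on the frustum mantle
        # to its height, running linearly from +LENGTH/2 at y = R1 to
        # -LENGTH/2 at y = R2.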
        def z(y, r1, r2, l):
            return l / (r1 - r2) * y + l / 2. - l * r1 / (r1 - r2)
shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
0, 0, 1], r1=R1, r2=R2, thickness=0.0, length=LENGTH)
y_vals = np.linspace(R1, R2, 100)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], 0.0)
shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
0, 0, 1], r1=R1, r2=R2, thickness=D, length=LENGTH, direction=-1)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], 0.5 * D)
np.testing.assert_almost_equal(np.copy(shape.center), [0.0, 0.0, 0.0])
np.testing.assert_almost_equal(np.copy(shape.axis), [0, 0, 1])
self.assertEqual(shape.r1, R1)
self.assertEqual(shape.r2, R2)
self.assertEqual(shape.thickness, D)
self.assertEqual(shape.length, LENGTH)
self.assertEqual(shape.direction, -1)
shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
0, 0, 1], r1=R1, r2=R2, thickness=D, length=LENGTH)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], -0.5 * D)
# check sign of dist
shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
0, 0, 1], r1=R1, r2=R1, thickness=D, length=LENGTH)
self.assertLess(shape.calc_distance(
position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
0, 0, 1], r1=R1, r2=R1, thickness=D, length=LENGTH, direction=-1)
self.assertGreater(shape.calc_distance(
position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
def test_sphere(self):
"""Checks geometry of an inverted sphere
"""
rad = self.box_l / 2.0
sphere_shape = espressomd.shapes.Sphere(
center=3 * [rad],
radius=rad,
direction=-1)
phi_steps = 11
theta_steps = 11
for distance in {-1.2, 2.6}:
for phi in range(phi_steps):
phi_angle = phi / phi_steps * 2.0 * math.pi
for theta in range(theta_steps):
theta_angle = theta / theta_steps * math.pi
pos = np.array(
[math.cos(phi_angle) * math.sin(theta_angle)
* (rad + distance),
math.sin(phi_angle) * math.sin(theta_angle)
* (rad + distance),
math.cos(theta_angle) * (rad + distance)]) + rad
shape_dist, _ = sphere_shape.calc_distance(
position=pos.tolist())
self.assertAlmostEqual(shape_dist, -distance)
def test_ellipsoid(self):
"""Checks that distance of particles on the ellipsoid constraint's surface is zero.
For the case of a spherical ellipsoid, also several non-zero distances are tested.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
system.part.add(pos=[0., 0., 0.], type=0)
# abuse generic LJ to measure distance via the potential V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=7., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
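        # Editor's note: the generic LJ potential has the form
        # V(r) = eps * (b1 * (sig/r)**e1 - b2 * (sig/r)**e2), so with e1=-1,
        # b1=1, b2=0 and eps=sig=1 it reduces to V(r) = r, making the measured
        # energy equal to the particle-surface distance.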
N = 10
# check oblate ellipsoid
semiaxes = [2.18, 5.45]
e = espressomd.shapes.Ellipsoid(
a=semiaxes[0],
b=semiaxes[1],
center=3 * [self.box_l / 2.],
direction=+1)
constraint_e = espressomd.constraints.ShapeBasedConstraint(
shape=e, particle_type=1, penetrable=True)
const1 = system.constraints.add(constraint_e)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
pos = self.pos_on_surface(
theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
system.part[0].pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], 0., places=6)
system.constraints.remove(const1)
# check prolate ellipsoid
semiaxes = [3.61, 2.23]
e = espressomd.shapes.Ellipsoid(
a=semiaxes[0],
b=semiaxes[1],
center=3 * [self.box_l / 2.],
direction=+1)
constraint_e = espressomd.constraints.ShapeBasedConstraint(
shape=e, particle_type=1, penetrable=True)
const1 = system.constraints.add(constraint_e)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
pos = self.pos_on_surface(
theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
system.part[0].pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], 0., places=6)
# check sphere (multiple distances from surface)
# change ellipsoid parameters instead of creating a new constraint
e.a = 1.
e.b = 1.
radii = np.linspace(1., 6.5, 7)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
for r in radii:
pos = self.pos_on_surface(theta, v, r, r, r)
system.part[0].pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], r - 1.)
# Reset the interaction to zero
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_cylinder(self):
"""Tests if shape based constraints can be added to a system both by
(1) defining a constraint object which is then added
(2) and via keyword arguments.
Checks that cylinder constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
rad = self.box_l / 2.0
length = self.box_l / 2.0
system.part.add(id=0, pos=[rad, 1.02, rad], type=0)
# check force calculation of a cylinder without top and bottom
interaction_dir = -1 # constraint is directed inwards
cylinder_shape = espressomd.shapes.Cylinder(
center=3 * [rad],
axis=[0, 0, 1],
direction=interaction_dir,
radius=rad,
length=self.box_l + 5) # +5 in order to have no top or bottom
penetrability = False # impenetrable
outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=cylinder_shape, particle_type=1, penetrable=penetrability)
outer_cylinder_wall = system.constraints.add(outer_cylinder_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)
# test summed forces on cylinder wall
self.assertAlmostEqual(
-1.0 * outer_cylinder_wall.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.02),
places=10) # minus for Newton's third law
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l - 1.02
system.part.add(id=1, pos=[rad, y_part2, rad], type=0)
system.integrator.run(0)
dist_part2 = self.box_l - y_part2
self.assertAlmostEqual(outer_cylinder_wall.total_force()[2], 0.0)
self.assertAlmostEqual(
outer_cylinder_wall.total_normal_force(),
2 *
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=dist_part2))
# Test the geometry of a cylinder with top and bottom
cylinder_shape_finite = espressomd.shapes.Cylinder(
center=3 * [rad],
axis=[0, 0, 1],
direction=1,
radius=rad,
length=length)
phi_steps = 11
for distance in {-3.6, 2.8}:
for z in range(int(self.box_l)):
center = np.array([rad, rad, z])
start_point = np.array([rad, 2 * rad - distance, z])
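                # Editor's note: start_point lies on the y-axis through the
                # cylinder center, displaced by `distance` inward from the
                # lateral surface (outward for negative values); rotating it
                # about the axis below samples the whole mantle at height z.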
for phi in range(phi_steps):
# Rotation around the axis of the cylinder
phi_angle = phi / phi_steps * 2.0 * math.pi
phi_rot_matrix = np.array(
[[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
[math.sin(phi_angle), math.cos(phi_angle), 0.0],
[0.0, 0.0, 1.0]])
phi_rot_point = np.dot(
phi_rot_matrix, start_point - center) + center
shape_dist, _ = cylinder_shape_finite.calc_distance(
position=phi_rot_point.tolist())
dist = -distance
if distance > 0.0:
if z < (self.box_l - length) / 2.0 + distance:
dist = (self.box_l - length) / 2.0 - z
elif z > (self.box_l + length) / 2.0 - distance:
dist = z - (self.box_l + length) / 2.0
else:
dist = -distance
else:
if z < (self.box_l - length) / 2.0:
z_dist = (self.box_l - length) / 2.0 - z
dist = math.sqrt(z_dist**2 + distance**2)
elif z > (self.box_l + length) / 2.0:
z_dist = z - (self.box_l + length) / 2.0
dist = math.sqrt(z_dist**2 + distance**2)
else:
dist = -distance
self.assertAlmostEqual(shape_dist, dist)
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_spherocylinder(self):
"""Checks that spherocylinder constraints with LJ interactions exert
forces on a test particle (that is, the constraints do what they should)
using geometrical parameters of (1) an infinite cylinder and (2) a
finite spherocylinder.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
system.part.add(
id=0, pos=[self.box_l / 2.0, 1.02, self.box_l / 2.0], type=0)
# check force calculation of spherocylinder constraint
# (1) infinite cylinder
interaction_dir = -1 # constraint is directed inwards
spherocylinder_shape = espressomd.shapes.SpheroCylinder(
center=3 * [self.box_l / 2.0],
axis=[0, 0, 1],
direction=interaction_dir,
radius=self.box_l / 2.0,
length=self.box_l + 5) # +5 in order to have no top or bottom
penetrability = False # impenetrable
outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
system.constraints.add(outer_cylinder_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)
# test summed forces on cylinder wall
self.assertAlmostEqual(
-1.0 * outer_cylinder_constraint.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.02),
places=10) # minus for Newton's third law
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l - 1.02
system.part.add(
id=1, pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
system.integrator.run(0)
dist_part2 = self.box_l - y_part2
self.assertAlmostEqual(outer_cylinder_constraint.total_force()[2], 0.0)
self.assertAlmostEqual(outer_cylinder_constraint.total_normal_force(),
2 * tests_common.lj_force(
espressomd, cutoff=2.0, offset=0.,
eps=1.0, sig=1.0, r=dist_part2))
# Reset
system.part.clear()
system.constraints.clear()
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
# (2) finite spherocylinder
system.part.clear()
interaction_dir = -1 # constraint is directed inwards
spherocylinder_shape = espressomd.shapes.SpheroCylinder(
center=3 * [self.box_l / 2.0],
axis=[0, 1, 0],
direction=interaction_dir,
radius=10.0,
length=6.0)
penetrability = True # penetrable
inner_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
system.constraints.add(inner_cylinder_constraint)
# V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
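        # Why these parameters give V(r) = r: the generic LJ form is
        # eps * (b1*(sig/r)**e1 - b2*(sig/r)**e2), so e1=-1, e2=0, b1=1, b2=0
        # with eps = sig = 1 reduce to a linear potential, and the energies
        # checked below equal the distance to the constraint surface.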
# check hemispherical caps (multiple distances from surface)
N = 10
radii = np.linspace(1., 10., 10)
system.part.add(pos=[0., 0., 0.], type=0)
for i in range(6):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
for r in radii:
pos = self.pos_on_surface(theta, v, r, r, r) + [0, 3, 0]
system.part[0].pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], 10. - r)
# Reset
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_wall_forces(self):
"""Tests if shape based constraints can be added to a system both by
(1) defining a constraint object which is then added
        and (2) via keyword arguments.
Checks that wall constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.part.add(id=0, pos=[5., 1.21, 0.83], type=0)
# Check forces are initialized to zero
f_part = system.part[0].f
self.assertEqual(f_part[0], 0.)
self.assertEqual(f_part[1], 0.)
self.assertEqual(f_part[2], 0.)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=1.5, sigma=1.0, cutoff=2.0, shift=0)
shape_xz = espressomd.shapes.Wall(normal=[0., 1., 0.], dist=0.)
shape_xy = espressomd.shapes.Wall(normal=[0., 0., 1.], dist=0.)
# (1)
constraint_xz = espressomd.constraints.ShapeBasedConstraint(
shape=shape_xz, particle_type=1)
wall_xz = system.constraints.add(constraint_xz)
# (2)
wall_xy = system.constraints.add(shape=shape_xy, particle_type=2)
system.integrator.run(0) # update forces
f_part = system.part[0].f
self.assertEqual(f_part[0], 0.)
self.assertAlmostEqual(
f_part[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.21),
places=10)
self.assertAlmostEqual(
f_part[2],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# test summed forces on walls
self.assertAlmostEqual(
-1.0 * wall_xz.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.21),
places=10) # minus for Newton's third law
self.assertAlmostEqual(
-1.0 * wall_xy.total_force()[2],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# check whether total_normal_force is correct
self.assertAlmostEqual(
wall_xy.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# this one is closer and should get the mindist()
system.part.add(pos=[5., 1.20, 0.82], type=0)
self.assertAlmostEqual(constraint_xz.min_dist(), system.part[1].pos[1])
self.assertAlmostEqual(wall_xz.min_dist(), system.part[1].pos[1])
self.assertAlmostEqual(wall_xy.min_dist(), system.part[1].pos[2])
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_slitpore(self):
"""Checks that slitpore constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
# check force calculation of slitpore constraint
slitpore_shape = espressomd.shapes.Slitpore(
channel_width=5,
lower_smoothing_radius=2,
upper_smoothing_radius=3,
pore_length=15,
pore_mouth=20,
pore_width=10,
dividing_plane=self.box_l / 2)
slitpore_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=slitpore_shape, particle_type=1, penetrable=True)
system.constraints.add(slitpore_constraint)
# V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
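        # Same trick as in test_spherocylinder: the parameters reduce the
        # generic LJ to V(r) = r, so the constraint exerts a unit-magnitude
        # force along the surface normal, matching the reference vectors.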
system.part.add(pos=[0., 0., 0.], type=0)
x = self.box_l / 2.0
d = 1 - np.sqrt(2) / 2
parameters = [
([x, x, 1.], -4., [0., 0., -1.]), # outside channel
([x, x, 15.], 5., [-1., 0., 0.]), # inside channel
([x, x, 5.], 0., [0., 0., 0.]), # on channel bottom surface
([x - 5., x, 15.], 0., [0., 0., 0.]), # on channel side surface
([x + 5., x, 15.], 0., [0., 0., 0.]), # on channel side surface
([x - 5. + 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]), # lower circle
([x + 5. - 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]), # lower circle
([x - 5. - 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]), # upper circle
([x + 5. + 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]), # upper circle
([1., x, 20.], 0., [0., 0., 0.]), # on inner wall surface
([x, x, 25.], 0., [0., 0., 0.]), # on outer wall surface
([x, x, 27.], -2., [0., 0., 1.]), # outside wall
]
for pos, ref_mindist, ref_force in parameters:
system.part[0].pos = pos
system.integrator.run(recalc_forces=True, steps=0)
obs_mindist = slitpore_constraint.min_dist()
self.assertAlmostEqual(obs_mindist, ref_mindist, places=10)
if (ref_mindist == 0. and obs_mindist != 0.):
# force direction on a circle is not well-defined due to
# numerical instability
continue
np.testing.assert_almost_equal(
np.copy(slitpore_constraint.total_force()), ref_force, 10)
# Reset
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_rhomboid(self):
"""Checks that rhomboid constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should)
using the geometrical parameters of (1) a cuboid and (2) a rhomboid.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
# check force calculation of rhomboid constraint
# (1) using a cuboid
interaction_dir = +1 # constraint is directed outwards
length = np.array([-5.0, 6.0, 7.0]) # dimension of the cuboid
corner = np.array(3 * [self.box_l / 2.0])
rhomboid_shape = espressomd.shapes.Rhomboid(
corner=corner,
a=[length[0], 0.0, 0.0], # cube
b=[0.0, length[1], 0.0],
c=[0.0, 0.0, length[2]],
direction=interaction_dir
)
penetrability = False # impenetrable
rhomboid_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=rhomboid_shape, particle_type=1, penetrable=penetrability)
rhomboid_constraint = system.constraints.add(rhomboid_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.part.add(id=0, pos=[self.box_l / 2.0 + length[0] / 2.0,
self.box_l / 2.0 + length[1] / 2.0,
self.box_l / 2.0 - 1], type=0)
system.integrator.run(0) # update forces
f_part = system.part[0].f
self.assertEqual(rhomboid_constraint.min_dist(), 1.)
self.assertEqual(f_part[0], 0.)
self.assertEqual(f_part[1], 0.)
self.assertAlmostEqual(
-f_part[2],
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
x_range = 12
y_range = 12
z_range = 12
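        # Brute-force check: scan a 12x12x12 grid of points around the cuboid
        # and compare calc_distance() with an independently computed distance.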
for x in range(x_range):
for y in range(y_range):
for z in range(z_range):
pos = np.array(
[x + (self.box_l + length[0] - x_range) / 2.0,
y + (self.box_l + length[1] - y_range) / 2.0,
z + (self.box_l + length[2] - z_range) / 2.0])
shape_dist, shape_dist_vec = rhomboid_shape.calc_distance(
position=pos.tolist())
outside = False
edge_case = False
dist_vec = np.array([0.0, 0.0, 0.0])
# check if outside or inside
if(pos[0] < (self.box_l + length[0] - abs(length[0])) / 2.0 or
pos[0] > (self.box_l + length[0] + abs(length[0])) / 2.0 or
pos[1] < (self.box_l + length[1] - abs(length[1])) / 2.0 or
pos[1] > (self.box_l + length[1] + abs(length[1])) / 2.0 or
pos[2] < (self.box_l + length[2] - abs(length[2])) / 2.0 or
pos[2] > (self.box_l + length[2] + abs(length[2])) / 2.0):
outside = True
if outside:
for i in range(3):
if pos[i] < (self.box_l + length[i] -
abs(length[i])) / 2.0:
dist_vec[i] = pos[i] - (
self.box_l + length[i] - abs(length[i])) / 2.0
elif pos[i] > (self.box_l + length[i] + abs(length[i])) / 2.0:
dist_vec[i] = pos[i] - (
self.box_l + length[i] + abs(length[i])) / 2.0
else:
dist_vec[i] = 0.0
dist = np.linalg.norm(dist_vec)
else:
dist = self.box_l
c1 = pos - corner
c2 = corner + length - pos
abs_c1c2 = np.abs(np.concatenate((c1, c2)))
dist = np.amin(abs_c1c2)
where = np.argwhere(dist == abs_c1c2)
if len(where) > 1:
edge_case = True
for which in where:
if which < 3:
dist_vec[which] = dist * np.sign(c1[which])
else:
dist_vec[which - 3] = -dist * \
np.sign(c2[which - 3])
dist *= -interaction_dir
if edge_case:
for i in range(3):
if shape_dist_vec[i] != 0.0:
self.assertAlmostEqual(
abs(shape_dist_vec[i]), abs(dist_vec[i]))
else:
self.assertAlmostEqual(shape_dist_vec[0], dist_vec[0])
self.assertAlmostEqual(shape_dist_vec[1], dist_vec[1])
self.assertAlmostEqual(shape_dist_vec[2], dist_vec[2])
self.assertAlmostEqual(shape_dist, dist)
# (2) using a rhomboid
rhomboid_shape.a = [5., 5., 0.] # rhomboid
rhomboid_shape.b = [0., 0., 5.]
rhomboid_shape.c = [0., 5., 0.]
system.part[0].pos = [self.box_l / 2.0 + 2.5,
self.box_l / 2.0 + 2.5,
self.box_l / 2.0 - 1]
system.integrator.run(0) # update forces
self.assertEqual(rhomboid_constraint.min_dist(), 1.)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
system.part[0].pos = system.part[0].pos - [0., 1., 0.]
system.integrator.run(0) # update forces
self.assertAlmostEqual(
rhomboid_constraint.min_dist(), 1.2247448714, 10)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.2247448714),
places=10)
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_torus(self):
"""Checks that torus constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
interaction_dir = 1 # constraint is directed inwards
radius = self.box_l / 4.0
tube_radius = self.box_l / 6.0
part_offset = 1.2
system.part.add(
id=0, pos=[self.box_l / 2.0, self.box_l / 2.0 + part_offset, self.box_l / 2.0], type=0)
# check force calculation of cylinder constraint
torus_shape = espressomd.shapes.Torus(
center=3 * [self.box_l / 2.0],
normal=[0, 0, 1],
direction=interaction_dir,
radius=radius,
tube_radius=tube_radius)
penetrability = False # impenetrable
torus_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=torus_shape, particle_type=1, penetrable=penetrability)
torus_wall = system.constraints.add(torus_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(torus_constraint.min_dist(),
radius - tube_radius - part_offset)
# test summed forces on torus wall
self.assertAlmostEqual(
torus_wall.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=torus_constraint.min_dist()),
places=10)
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l / 2.0 + 2.0 * radius - part_offset
system.part.add(
id=1, pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
system.integrator.run(0)
self.assertAlmostEqual(torus_wall.total_force()[1], 0.0)
self.assertAlmostEqual(torus_wall.total_normal_force(), 2 * tests_common.lj_force(
espressomd, cutoff=2.0, offset=0., eps=1.0, sig=1.0,
r=radius - tube_radius - part_offset))
# Test the geometry of the shape directly
phi_steps = 11
theta_steps = 11
center = np.array([self.box_l / 2.0,
self.box_l / 2.0,
self.box_l / 2.0])
tube_center = np.array([self.box_l / 2.0,
self.box_l / 2.0 + radius,
self.box_l / 2.0])
for distance in {1.02, -0.7}:
start_point = np.array([self.box_l / 2.0,
self.box_l / 2.0 + radius -
tube_radius - distance,
self.box_l / 2.0])
for phi in range(phi_steps):
for theta in range(theta_steps):
# Rotation around the tube
theta_angle = theta / theta_steps * 2.0 * math.pi
theta_rot_matrix = np.array(
[[1.0, 0.0, 0.0],
[0.0, math.cos(theta_angle), -math.sin(theta_angle)],
[0.0, math.sin(theta_angle), math.cos(theta_angle)]])
theta_rot_point = np.dot(
theta_rot_matrix,
start_point - tube_center)
theta_rot_point += tube_center
# Rotation around the center of the torus
phi_angle = phi / phi_steps * 2.0 * math.pi
phi_rot_matrix = np.array(
[[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
[math.sin(phi_angle), math.cos(phi_angle), 0.0],
[0.0, 0.0, 1.0]])
phi_rot_point = np.dot(
phi_rot_matrix,
theta_rot_point - center) + center
shape_dist, _ = torus_shape.calc_distance(
position=phi_rot_point.tolist())
self.assertAlmostEqual(shape_dist, distance)
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
if __name__ == "__main__":
ut.main()
| KaiSzuttor/espresso | testsuite/python/constraint_shape_based.py | Python | gpl-3.0 | 36,175 |
"""
Setup Module
This module is used to make a distribution of
the game using distutils.
"""
from distutils.core import setup
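# Typical usage (sketch): build a source distribution or install from the
# project root with the standard distutils commands:
#   python setup.py sdist
#   python setup.py install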
setup(
name = 'Breakout',
version = '1.0',
description = 'A remake of the classic video game',
author = 'Derek Morey',
author_email = '[email protected]',
license = 'GPL',
url = 'https://github.com/Oisota/Breakout',
download_url = 'https://github.com/Oisota/Breakout/archive/master.zip',
keywords = ['breakout', 'arcade', 'game', 'pygame', 'python',],
platforms = ['linux', 'windows'],
scripts = ['breakout.py','breakout-editor.py'],
packages = ['breakout','breakout.game','breakout.utils','breakout.editor'],
package_data = {'breakout':['assets/images/*.gif',
'assets/images/*.png',
'assets/sounds/*.wav',
'assets/levels/*.json']},
requires = ['sys', 'os', 'random', 'tkinter', 'pygame', 'json'],
classifiers = ['Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Framework :: Pygame',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Games/Entertainment',
'Topic :: Games/Entertainment :: Arcade'],
long_description =
"""
Breakout
--------
This is a remake of the classic game Breakout. I made this game for the sole
purpose of educating myself about python, pygame, and game development in general.
Feel free to use or modify my code in any way.
"""
)
| Oisota/Breakout | setup.py | Python | gpl-3.0 | 2,098 |
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import socket
import os
import re
import time
import hashlib
import requests
import json
from hashlib import sha256
from urlparse import urljoin
from urllib import quote
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import electrum
from electrum import bitcoin
from electrum.bitcoin import *
from electrum.mnemonic import Mnemonic
from electrum import version
from electrum.wallet import Wallet_2of3
from electrum.i18n import _
from electrum.plugins import BasePlugin, run_hook, hook
from electrum_gui.qt.util import *
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum_gui.qt.amountedit import AmountEdit
from electrum_gui.qt.main_window import StatusBarButton
from decimal import Decimal
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/', debug=False):
self.base_url = base_url
self.debug = debug
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print '%s %s %s' % (method, url, data)
response = requests.request(method, url, **kwargs)
if self.debug:
print response.text
print
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
"""
        Get the details of a particular cosigner.
        :param id: the id of the cosigner
"""
return self.send_request('get', 'cosigner/%s' % quote(id))
def sign(self, id, transaction, otp):
"""
        Ask the server to sign a transaction for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
        Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
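# Usage sketch (hypothetical values; requires network access to the service):
#   tos = server.get_terms_of_service()
#   r = server.create(xpub_hot, xpub_cold, 'user@example.com')
#   server.auth(r['id'], 123456)  # 123456 = code from Google Authenticator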
class Wallet_2fa(Wallet_2of3):
wallet_type = '2fa'
def get_action(self):
xpub1 = self.master_public_keys.get("x1/")
xpub2 = self.master_public_keys.get("x2/")
xpub3 = self.master_public_keys.get("x3/")
if xpub2 is None and not self.storage.get('use_trustedcoin'):
return 'show_disclaimer'
if xpub2 is None:
return 'create_extended_seed'
if xpub3 is None:
return 'create_remote_key'
if not self.accounts:
return 'create_accounts'
def make_seed(self):
return Mnemonic('english').make_seed(num_bits=256, prefix=SEED_PREFIX)
def estimated_fee(self, tx):
fee = Wallet_2of3.estimated_fee(self, tx)
x = run_hook('extra_fee', tx)
if x: fee += x
return fee
def get_tx_fee(self, tx):
fee = Wallet_2of3.get_tx_fee(self, tx)
x = run_hook('extra_fee', tx)
if x: fee += x
return fee
class Plugin(BasePlugin):
wallet = None
def __init__(self, x, y):
BasePlugin.__init__(self, x, y)
self.seed_func = lambda x: bitcoin.is_new_seed(x, SEED_PREFIX)
self.billing_info = None
self.is_billing = False
def constructor(self, s):
return Wallet_2fa(s)
def is_available(self):
if not self.wallet:
return False
if self.wallet.storage.get('wallet_type') == '2fa':
return True
return False
def requires_settings(self):
return True
def set_enabled(self, enabled):
self.wallet.storage.put('use_' + self.name, enabled)
def is_enabled(self):
if not self.is_available():
return False
if self.wallet.master_private_keys.get('x2/'):
return False
return True
def make_long_id(self, xpub_hot, xpub_cold):
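        # sorting the xpubs makes the id independent of argument order, so
        # both cosigners derive the same wallet id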
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
def get_user_id(self):
xpub_hot = self.wallet.master_public_keys["x1/"]
xpub_cold = self.wallet.master_public_keys["x2/"]
long_id = self.make_long_id(xpub_hot, xpub_cold)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(self, xpub, s):
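        # derive a child of `xpub` with the (non-standard) long index `s`,
        # then reserialize it under the mainnet xpub version prefix
        # ("0488B21E") with zeroed depth, fingerprint and child number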
_, _, _, c, cK = deserialize_xkey(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
xpub2 = ("0488B21E" + "00" + "00000000" + "00000000").decode("hex") + c2 + cK2
return EncodeBase58Check(xpub2)
def make_billing_address(self, num):
long_id, short_id = self.get_user_id()
xpub = self.make_xpub(billing_xpub, long_id)
_, _, _, c, cK = deserialize_xkey(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
address = public_key_to_bc_address( cK )
return address
def create_extended_seed(self, wallet, window):
seed = wallet.make_seed()
if not window.show_seed(seed, None):
return
if not window.verify_seed(seed, None, self.seed_func):
return
password = window.password_dialog()
wallet.storage.put('seed_version', wallet.seed_version, True)
wallet.storage.put('use_encryption', password is not None, True)
words = seed.split()
n = len(words)/2
wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_cosigner_xpub(' '.join(words[n:]), 'x2/')
msg = [
_('Your wallet file is:') + " %s"%os.path.abspath(wallet.storage.path),
_('You need to be online in order to complete the creation of your wallet.'),
_('If you generated your seed on an offline computer, click on "%s" to close this window, move your wallet file to an online computer and reopen it with Electrum.') % _('Close'),
_('If you are online, click on "%s" to continue.') % _('Next')
]
return window.question('\n\n'.join(msg), no_label=_('Close'), yes_label=_('Next'))
def show_disclaimer(self, wallet, window):
msg = [
_("Two-factor authentication is a service provided by TrustedCoin.") + ' ',
_("It uses a multi-signature wallet, where you own 2 of 3 keys.") + ' ',
_("The third key is stored on a remote server that signs transactions on your behalf.") + ' ',
_("To use this service, you will need a smartphone with Google Authenticator.") + '\n\n',
_("A small fee will be charged on each transaction that uses the remote server.") + ' ',
_("You may check and modify your billing preferences once the installation is complete.") + '\n\n',
_("Note that your coins are not locked in this service.") + ' ',
_("You may withdraw your funds at any time and at no cost, without the remote server, by using the 'restore wallet' option with your wallet seed.") + '\n\n',
_('The next step will generate the seed of your wallet.') + ' ',
_('This seed will NOT be saved in your computer, and it must be stored on paper.') + ' ',
_('To be safe from malware, you may want to do this on an offline computer, and move your wallet later to an online computer.')
]
icon = QPixmap(':icons/trustedcoin.png')
if not window.question(''.join(msg), icon=icon):
return False
self.wallet = wallet
self.set_enabled(True)
return True
def restore_third_key(self, wallet):
long_user_id, short_id = self.get_user_id()
xpub3 = self.make_xpub(signing_xpub, long_user_id)
wallet.add_master_public_key('x3/', xpub3)
@hook
def do_clear(self):
self.is_billing = False
@hook
def load_wallet(self, wallet):
self.trustedcoin_button = StatusBarButton( QIcon(":icons/trustedcoin.png"), _("Network"), self.settings_dialog)
self.window.statusBar().addPermanentWidget(self.trustedcoin_button)
self.xpub = self.wallet.master_public_keys.get('x1/')
self.user_id = self.get_user_id()[1]
t = threading.Thread(target=self.request_billing_info)
t.setDaemon(True)
t.start()
@hook
def close_wallet(self):
self.window.statusBar().removeWidget(self.trustedcoin_button)
@hook
def get_wizard_action(self, window, wallet, action):
if hasattr(self, action):
return getattr(self, action)
@hook
def installwizard_restore(self, window, storage):
if storage.get('wallet_type') != '2fa':
return
seed = window.enter_seed_dialog("Enter your seed", None, func=self.seed_func)
if not seed:
return
wallet = Wallet_2fa(storage)
self.wallet = wallet
password = window.password_dialog()
wallet.add_seed(seed, password)
words = seed.split()
n = len(words)/2
wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_cosigner_seed(' '.join(words[n:]), 'x2/', password)
self.restore_third_key(wallet)
wallet.create_main_account(password)
# disable plugin
self.set_enabled(False)
return wallet
def create_remote_key(self, wallet, window):
self.wallet = wallet
self.window = window
        if wallet.storage.get('wallet_type') != '2fa':
            raise Exception('create_remote_key: wallet is not of type 2fa')
email = self.accept_terms_of_use(window)
if not email:
return
xpub_hot = wallet.master_public_keys["x1/"]
xpub_cold = wallet.master_public_keys["x2/"]
# Generate third key deterministically.
long_user_id, self.user_id = self.get_user_id()
xpub3 = self.make_xpub(signing_xpub, long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub_hot, xpub_cold, email)
except socket.error:
self.window.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
raise e
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
self.window.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
            try:
                assert _id == self.user_id, ("user id error", _id, self.user_id)
                assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
            except Exception as e:
                self.window.show_message(str(e))
                return
if not self.setup_google_auth(self.window, self.user_id, otp_secret):
return
self.wallet.add_master_public_key('x3/', xpub3)
return True
def need_server(self, tx):
from electrum.account import BIP32_Account
# Detect if the server is needed
long_id, short_id = self.get_user_id()
xpub3 = self.wallet.master_public_keys['x3/']
for x in tx.inputs_to_sign():
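            # in Electrum's xpubkey serialization an 'ff' prefix marks a
            # BIP32 extended public key; only those can derive from x3/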
if x[0:2] == 'ff':
xpub, sequence = BIP32_Account.parse_xpubkey(x)
if xpub == xpub3:
return True
return False
@hook
def send_tx(self, tx):
self.print_error("twofactor:send_tx")
if self.wallet.storage.get('wallet_type') != '2fa':
return
if not self.need_server(tx):
self.print_error("twofactor: xpub3 not needed")
self.auth_code = None
return
self.auth_code = self.auth_dialog()
@hook
def before_send(self):
# request billing info before forming the transaction
self.billing_info = None
self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info)
self.waiting_dialog.start()
self.waiting_dialog.wait()
if self.billing_info is None:
self.window.show_message('Could not contact server')
return True
return False
@hook
def extra_fee(self, tx):
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
        # trustedcoin won't charge if the total input value is lower than their fee
price = int(self.price_per_tx.get(1))
assert price <= 100000
if tx.input_value() < price:
self.print_error("not charging for this tx")
return 0
return price
@hook
def make_unsigned_transaction(self, tx):
price = self.extra_fee(tx)
if not price:
return
tx.outputs.append(('address', self.billing_info['billing_address'], price))
@hook
def sign_transaction(self, tx, password):
self.print_error("twofactor:sign")
if self.wallet.storage.get('wallet_type') != '2fa':
self.print_error("twofactor: aborting")
return
self.long_user_id, self.user_id = self.get_user_id()
if not self.auth_code:
return
if tx.is_complete():
return
tx_dict = tx.as_dict()
raw_tx = tx_dict["hex"]
try:
r = server.sign(self.user_id, raw_tx, self.auth_code)
except Exception as e:
tx.error = str(e)
return
self.print_error( "received answer", r)
if not r:
return
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
def auth_dialog(self ):
d = QDialog(self.window)
d.setModal(1)
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def settings_widget(self, window):
return EnterButton(_('Settings'), self.settings_dialog)
def settings_dialog(self):
self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info, self.show_settings_dialog)
self.waiting_dialog.start()
def show_settings_dialog(self, success):
if not success:
self.window.show_message(_('Server not reachable.'))
return
d = QDialog(self.window)
d.setWindowTitle("TrustedCoin Information")
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(":icons/trustedcoin.png"))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transactions using this dialog.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
v = self.price_per_tx.get(1)
grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
grid.addWidget(QLabel(self.window.format_amount(v) + ' ' + self.window.base_unit()), 0, 1)
i = 1
if 10 not in self.price_per_tx:
self.price_per_tx[10] = 10 * self.price_per_tx.get(1)
for k, v in sorted(self.price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
grid.addWidget(QLabel("%d x "%k + self.window.format_amount(v/k) + ' ' + self.window.base_unit()), i, 1)
b = QPushButton(_("Buy"))
b.clicked.connect(lambda b, k=k, v=v: self.on_buy(k, v, d))
grid.addWidget(b, i, 2)
i += 1
n = self.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        # transfer button
#def on_transfer():
# server.transfer_credit(self.user_id, recipient, otp, signature_callback)
# pass
#b = QPushButton(_("Transfer"))
#b.clicked.connect(on_transfer)
#grid.addWidget(b, 1, 2)
#grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
#grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def on_buy(self, k, v, d):
d.close()
if self.window.pluginsdialog:
self.window.pluginsdialog.close()
uri = "bitcoin:" + self.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
self.is_billing = True
self.window.pay_from_URI(uri)
self.window.payto_e.setFrozen(True)
self.window.message_e.setFrozen(True)
self.window.amount_e.setFrozen(True)
def request_billing_info(self):
billing_info = server.get(self.user_id)
billing_address = self.make_billing_address(billing_info['billing_index'])
assert billing_address == billing_info['billing_address']
self.billing_info = billing_info
self.price_per_tx = dict(self.billing_info['price_per_tx'])
return True
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
window.set_layout(vbox)
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = QTextEdit()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
vbox.addStretch()
accept_button = OkButton(window, _('Accept'))
accept_button.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(window), accept_button))
def request_TOS():
tos = server.get_terms_of_service()
self.TOS = tos
window.emit(SIGNAL('twofactor:TOS'))
def on_result():
tos_e.setText(self.TOS)
window.connect(window, SIGNAL('twofactor:TOS'), on_result)
t = threading.Thread(target=request_TOS)
t.setDaemon(True)
t.start()
regexp = r"[^@]+@[^@]+\.[^@]+"
email_e.textChanged.connect(lambda: accept_button.setEnabled(re.match(regexp,email_e.text()) is not None))
email_e.setFocus(True)
if not window.exec_():
return
email = str(email_e.text())
return email
def setup_google_auth(self, window, _id, otp_secret):
vbox = QVBoxLayout()
window.set_layout(vbox)
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
vbox.addWidget(QLabel("Please scan this QR code in Google Authenticator."))
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
            label = QLabel("This wallet is already registered, but it was never authenticated. To finalize your registration, please enter your Google Authenticator code. If you do not have this code, delete the wallet file and start a new registration.")
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(QLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
hbox.addWidget(pw)
hbox.addStretch(1)
vbox.addLayout(hbox)
b = OkButton(window, _('Next'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(window), b))
pw.textChanged.connect(lambda: b.setEnabled(len(pw.text())==6))
while True:
if not window.exec_():
return False
otp = pw.get_amount()
try:
server.auth(_id, otp)
return True
except:
QMessageBox.information(self.window, _('Message'), _('Incorrect password'), _('OK'))
pw.setText('')
| edb1rd/BTC | plugins/trustedcoin.py | Python | gpl-3.0 | 25,459 |
# Generated by Django 3.0.4 on 2020-04-15 23:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('config', '0026_hardware_id_not_unique')]
operations = [
migrations.AlterField(
model_name='device',
name='last_ip',
field=models.GenericIPAddressField(
blank=True,
db_index=True,
help_text=(
'indicates the IP address logged from the '
'last request coming from the device'
),
null=True,
),
),
migrations.AlterField(
model_name='device',
name='management_ip',
field=models.GenericIPAddressField(
blank=True,
db_index=True,
help_text='ip address of the management interface, if available',
null=True,
),
),
]
| nemesisdesign/openwisp2 | openwisp_controller/config/migrations/0027_add_indexes_on_ip_fields.py | Python | gpl-3.0 | 982 |
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from nltk import word_tokenize, sent_tokenize
from pypln.backend.celery_task import PyPLNTask
class Tokenizer(PyPLNTask):
def process(self, document):
text = document['text']
tokens = word_tokenize(text)
sentences = [word_tokenize(sent) for sent in sent_tokenize(text)]
return {'tokens': tokens, 'sentences': sentences}
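# Example (sketch) of the mapping this worker produces, assuming the NLTK
# punkt models are installed:
#   {'text': 'Hello world. Bye.'} ->
#     {'tokens': ['Hello', 'world', '.', 'Bye', '.'],
#      'sentences': [['Hello', 'world', '.'], ['Bye', '.']]}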
| fccoelho/pypln.backend | pypln/backend/workers/tokenizer.py | Python | gpl-3.0 | 1,140 |
# -*- coding: utf-8 -*-
# Author: Matías Bordese
"""Tests for the unified diff parser process."""
import os.path
import unittest2
from nlg4patch.unidiff import parser
class TestUnidiffParser(unittest2.TestCase):
"""Tests for Unified Diff Parser."""
def setUp(self):
samples_dir = os.path.dirname(os.path.realpath(__file__))
self.sample_file = os.path.join(samples_dir, 'sample.diff')
self.sample_bad_file = os.path.join(samples_dir, 'sample_bad.diff')
def test_parse_sample(self):
"""Parse sample file."""
with open(self.sample_file) as diff_file:
res = parser.parse_unidiff(diff_file)
# one file in the patch
self.assertEqual(len(res), 1)
# three hunks
self.assertEqual(len(res[0]), 3)
# Hunk 1: five additions, no deletions, no modifications
self.assertEqual(res[0][0].added, 6)
self.assertEqual(res[0][0].modified, 0)
self.assertEqual(res[0][0].deleted, 0)
# Hunk 2: no additions, 6 deletions, 2 modifications
self.assertEqual(res[0][1].added, 0)
self.assertEqual(res[0][1].modified, 2)
self.assertEqual(res[0][1].deleted, 6)
# Hunk 3: four additions, no deletions, no modifications
self.assertEqual(res[0][2].added, 4)
self.assertEqual(res[0][2].modified, 0)
self.assertEqual(res[0][2].deleted, 0)
# Check file totals
self.assertEqual(res[0].added, 10)
self.assertEqual(res[0].modified, 2)
self.assertEqual(res[0].deleted, 6)
def test_parse_malformed_diff(self):
"""Parse malformed file."""
with open(self.sample_bad_file) as diff_file:
self.assertRaises(parser.UnidiffParseException,
parser.parse_unidiff, diff_file)
| DrDub/nlg4patch | nlg4patch/unidiff/tests/test_parser.py | Python | gpl-3.0 | 1,824 |
# -*- coding: utf-8 -*-
from resources.lib.parser import cParser
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.gui.gui import cGui
from resources.lib.util import cUtil
from resources.lib.handler.ParameterHandler import ParameterHandler
SITE_IDENTIFIER = 'bundesliga_de'
SITE_NAME = 'Bundesliga.de'
SITE_ICON = 'bl.png'
URL_MAIN = 'http://www.bundesliga.de'
URL_TV = 'http://www.bundesliga.de/de/service/?action=teaserbox&type=video&language=de&amount=25&category='
URL_GET_STREAM = 'http://btd-flv-lbwww-01.odmedia.net/bundesliga/'
def load():
oGui = cGui()
__createMainMenuItem(oGui, 'Aktuell', 'new')
__createMainMenuItem(oGui, 'Spieltag', 'spieltag')
__createMainMenuItem(oGui, 'Stars', 'stars')
__createMainMenuItem(oGui, 'Stories', 'stories')
__createMainMenuItem(oGui, 'Historie', 'historie')
__createMainMenuItem(oGui, 'Partner', 'partner')
__createMainMenuItem(oGui, 'Vereine', 'clubs')
oGui.setEndOfDirectory()
def __createMainMenuItem(oGui, sTitle, sPlaylistId):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('listVideos')
oGuiElement.setTitle(sTitle)
oOutputParameterHandler = ParameterHandler()
oOutputParameterHandler.setParam('playlistId', sPlaylistId)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def listVideos():
oGui = cGui()
params = ParameterHandler()
if (params.exist('playlistId')):
sPlaylistId = params.getValue('playlistId')
if not params.exist('sUrl'):
sUrl = URL_TV + str(sPlaylistId)
else:
sUrl = params.getValue('sUrl')
if sPlaylistId == 'spieltag':
oParser = cParser()
if not params.exist('saison'):
oRequest = cRequestHandler('http://www.bundesliga.de/de/bundesliga-tv/index.php')
sHtmlContent = oRequest.request()
sPattern = 'data-season="([^"]+)" class="active grey-gradient"'
aResult = oParser.parse(sHtmlContent, sPattern)
saison = aResult[1][0]
else:
saison = params.getValue('saison')
oRequest = cRequestHandler(sUrl+'&season='+saison+'&matchday=1')
sHtmlContent = oRequest.request()
if sHtmlContent.find('"message":"nothing found"') != -1:
return False
            # selected season
for matchDay in range(1,35):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('listVideos')
oGuiElement.setTitle('%s Spieltag Saison %s' % (matchDay,saison))
                sMatchUrl = sUrl + '&season=' + saison + '&matchday=' + str(matchDay)
oOutputParameterHandler = ParameterHandler()
                oOutputParameterHandler.setParam('sUrl', sMatchUrl)
oOutputParameterHandler.setParam('saison', saison)
oOutputParameterHandler.setParam('matchDay', matchDay)
oOutputParameterHandler.setParam('playlistId', 'spieltagEinzeln')
oGui.addFolder(oGuiElement, oOutputParameterHandler)
            # older season
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('listVideos')
lastSaison = str(int(saison) - 1)
oGuiElement.setTitle('* Saison %s/%s *' % (lastSaison,saison))
oOutputParameterHandler = ParameterHandler()
oOutputParameterHandler.setParam('sUrl', sUrl)
oOutputParameterHandler.setParam('saison', lastSaison)
oOutputParameterHandler.setParam('playlistId', 'spieltag')
oGui.addFolder(oGuiElement, oOutputParameterHandler)
elif sPlaylistId == 'clubs':
sPattern = '<li data-club="([^"]+)" data-name="([^"]+)".*?src="([^"]+)"'
oRequest = cRequestHandler('http://www.bundesliga.de/de/bundesliga-tv/index.php')
sHtmlContent = oRequest.request()
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
return False
for aEntry in aResult[1]:
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('listVideos')
oGuiElement.setTitle((aEntry[1]))
sThumbnail = URL_MAIN + str(aEntry[2]).replace('variant27x27.','')
oGuiElement.setThumbnail(sThumbnail)
                sClubUrl = sUrl + '&club=' + str(aEntry[0])
oOutputParameterHandler = ParameterHandler()
                oOutputParameterHandler.setParam('sUrl', sClubUrl)
oOutputParameterHandler.setParam('playlistId', 'clubVideos')
oGui.addFolder(oGuiElement, oOutputParameterHandler)
else:
sPattern = 'btd-teaserbox-entry.*?<a href="([^"]+)".*?<h3 class=.*?>([^<]+)<.*?src="([^"]+).*?class="teaser-text">([^<]+)'
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sHtmlContent = sHtmlContent.replace('\\"','"').replace('\\/','/')
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
return False
for aEntry in aResult[1]:
sThumbnail = URL_MAIN + str(aEntry[2])
sUrl = URL_MAIN + str(aEntry[0])
sTitle = cUtil().unescape(str(aEntry[1]).decode('unicode-escape')).encode('utf-8')
sDescription = cUtil().unescape(str(aEntry[3]).decode('unicode-escape')).encode('utf-8')
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('play')
oGuiElement.setTitle(sTitle)
oGuiElement.setDescription(sDescription)
oGuiElement.setThumbnail(sThumbnail)
oOutputParameterHandler = ParameterHandler()
oOutputParameterHandler.setParam('sUrl', sUrl)
oOutputParameterHandler.setParam('sTitle', sTitle)
oGui.addFolder(oGuiElement, oOutputParameterHandler, bIsFolder = False)
oGui.setView('movies')
oGui.setEndOfDirectory()
def play():
params = ParameterHandler()
if (params.exist('sUrl') and params.exist('sTitle')):
sUrl = params.getValue('sUrl')
sTitle = params.getValue('sTitle')
print sUrl
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = ': "([^\."]+)\.flv"'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sStreamUrl = URL_GET_STREAM + str(aResult[1][0])+'_HD.flv?autostart=true'
result = {}
result['streamUrl'] = sStreamUrl
result['resolved'] = True
return result
return False
| dbiesecke/plugin.video.xstream | sites/bundesliga_de.py | Python | gpl-3.0 | 7,292 |
import maya.cmds;mc = maya.cmds
import pymel.core;pm = pymel.core
from pytaya.core.general import listForNone
from pytd.util.logutils import logMsg
from pytd.util.sysutils import grouper
def fileNodesFromObjects(oObjList):
return fileNodesFromShaders(shadersFromObjects(oObjList))
def fileNodesFromShaders(oMatList):
oFileNodeList = set()
for oMat in oMatList:
oFileNodeList.update(oMat.listHistory(type="file"))
return list(oFileNodeList)
def shadersFromObjects(objList, connectedTo=""):
sAttrName = connectedTo
if not objList:
return []
oMatSgList = shadingGroupsFromObjects(objList)
oMatList = []
for oMatSg in oMatSgList:
sName = oMatSg.attr(sAttrName).name() if connectedTo else oMatSg.name()
oMatList.extend(pm.ls(listForNone(mc.listConnections(sName, source=True,
destination=False)),
type=mc.listNodeTypes('shader', ex="texture")))
return oMatList
def shadingGroupsFromObjects(objList):
oShdGrpList = set()
for obj in objList:
oObj = obj if isinstance(obj, pm.PyNode) else pm.PyNode(obj)
oShdGrpList.update(shadingGroupsForObject(oObj))
return list(oShdGrpList)
def shadingGroupsForObject(oObj, warn=True):
oShdGrpList = []
oShape = None
if isinstance(oObj, pm.general.MeshFace):
indiceList = oObj.indices()
for oShdEng in oObj.listHistory(type="shadingEngine"):
if set(indiceList).intersection(set(oShdEng.members()[0].indices())):
oShdGrpList.append(oShdEng)
elif isinstance(oObj, pm.general.NurbsSurfaceFace):
oShape = oObj.node()
elif isinstance(oObj, pm.nt.Transform):
oShape = oObj.getShape()
elif isinstance(oObj, (pm.nt.Mesh, pm.nt.NurbsSurface)):
oShape = oObj
elif warn:
logMsg("Can't get shading groups from {}".format(repr(oObj)) , warning=True)
if not oShdGrpList:
if oShape:
oShdGrpList = oShape.shadingGroups()
if not oShdGrpList:
oShdGrpList = oShape.connections(type="shadingEngine")
return oShdGrpList
def conformShadingNetworkToNamespace(oMeshList, sNamespaceToMatch , **kwargs):
bForce = kwargs.get("force", False)
oShadingGroupMembersDct = {}
oMatNotConformList = []
for oShape in oMeshList:
# print "\nfor shape: ", oShape
oMatSGList = shadingGroupsForObject(oShape)
for oMatSG in oMatSGList:
# print "for shadingGroup: ", oMatSG
oMatList = pm.ls(oMatSG.inputs(), type=mc.listNodeTypes('shader', ex="texture"))
oMat = oMatList[0]
##ignore shadingGroups where materials are defaultNode
if oMat.isDefaultNode():
continue
##ignore shadingGroups where materials are already in namespace to match
sMatNamespace = oMat.namespace()
# print "sMatNamespace", sMatNamespace
# print "sNamespaceToMatch", sNamespaceToMatch
if sMatNamespace == sNamespaceToMatch:
continue
else:
oMatNotConformList.append(oMat)
oMembers = oMatSG.members()
for oMember in oMembers:
# print "member :", oMember
if oMember.node() == oShape:
oShadingGroupMembersDct.setdefault(oMatSG, []).append(oMember)
# for k, v in oShadingGroupMembersDct.iteritems():
# print "for shadingGroup: ", k, ", specific members are: ", v
if oMatNotConformList:
if bForce:
pass
else:
result = pm.confirmDialog(title='Materials not conform to Namespace...'
, message="Found materials not conform to Namespace,\nCopy Shading Network, Conform to Namespace & Assign ?"
, button=["OK", 'Cancel']
, defaultButton='Cancel'
, cancelButton='Cancel'
, dismissString='Cancel')
if result == "Cancel":
pm.warning("Materials Namespace conformation cancelled.")
return bForce
else:
bForce = True
else:
if sNamespaceToMatch:
logMsg('Materials already conformed to Namespace: "{0}"'.format(sNamespaceToMatch) , warning=True)
return bForce
##Force current namespace to the one to match to duplicate in this namespace
mc.namespace(set=":")
mc.namespace(set=sNamespaceToMatch if sNamespaceToMatch else ":")
oMatNotConformList = []
oShapeAssignedList = []
for oMatSG, oMembers in oShadingGroupMembersDct.iteritems():
oNewMatSGs = pm.duplicate(oMatSG, rr=True, un=True)
oNewMatSG = oNewMatSGs[0]
# print "old shadingGroup: ", oMatSG
# print "new shadingGroup: ", oNewMatSGs[0]
# print "oMembers", oMembers
# print oMembers[0]
for oMember in oMembers:
oShape = oMember.node()
if oShape not in oShapeAssignedList:
oShapeAssignedList.append(oShape)
try:
pm.sets(oNewMatSG, e=True, forceElement=oShape)
logMsg('Material "{0}" assigned first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
except:
logMsg('Could not assign material "{0}" first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
try:
pm.sets(oNewMatSG, e=True, forceElement=oMembers)
logMsg('Material "{0}" assigned to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
except:
logMsg('Could not assign material "{0}" to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
mc.namespace(set=":")
return bForce
def transferUvAndShaders(oSrcGrp, oDestGrp):
notCompatibleShapeList = []
sSourceNameSpace = oSrcGrp.namespace()
notFoundList = []
transferList = []
oTargetList = pm.ls(oDestGrp, dag=True, tr=True)
#searchCount = len(oTargetList)
for oTargetXfm in oTargetList:
oShape = oTargetXfm.getShape(ni=True)
if isinstance(oShape, pm.nt.Mesh):
sXfmName = oTargetXfm.nodeName()
sSourceName = sSourceNameSpace + sXfmName
oSourceXfm = pm.PyNode(sSourceName)
if oSourceXfm:
transferList.append((oSourceXfm, oTargetXfm))
# print oSourceXfm, oTargetXfm
else:
notFoundList.append(oTargetXfm)
print 'No match found for "{0}"'.format(sXfmName)
print "Searching... {0}".format(oTargetXfm.nodeName())
# oSet = fncTools.checkSet("noMatchFound")
# if notFoundList:
# pm.sets(oSet, addElement=notFoundList)
result = pm.confirmDialog(title='Transfer Uvs',
                              message='Found {0}/{1} mismatches:'.format(len(notFoundList), len(transferList)),
button=['Ok', 'Cancel'],
defaultButton='Cancel',
cancelButton='Cancel',
dismissString='Cancel')
if result == 'Cancel':
return
else :
for oSourceXfm, oTargetXfm in transferList:
oSourceShape = oSourceXfm.getShape(ni=True)
oHistList = oTargetXfm.listHistory()
oShapeList = pm.ls(oHistList, type="mesh")
oTargetShape = None
bShapeOrig = False
oTargetCurrentShape = oTargetXfm.getShape(ni=True)
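            # if the target mesh has construction history, transfer onto its
            # intermediate "orig" shape (temporarily un-flagging it) so the
            # history keeps evaluating on top of the new UVs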
if len(oShapeList) > 1:
for oShape in oShapeList:
if oShape.getAttr("intermediateObject") and oShape.attr("worldMesh").outputs():
bShapeOrig = True
oShape.setAttr("intermediateObject", False)
oTargetShape = oShape
break
else:
oTargetShape = oTargetCurrentShape
if oTargetShape:
try:
print ('transferring uvs and shaders from "{0}" to "{1}"'
.format(oSourceShape, oTargetShape))
if oTargetCurrentShape.numVertices() != oSourceShape.numVertices():
notCompatibleShapeList.extend([oSourceShape, oTargetCurrentShape])
pm.transferAttributes(oSourceShape, oTargetShape, transferPositions=0,
transferNormals=0, transferUVs=2, transferColors=2,
sampleSpace=5, sourceUvSpace="map1", targetUvSpace="map1",
searchMethod=3, flipUVs=0, colorBorders=1)
pm.transferShadingSets(oSourceShape, oTargetShape, sampleSpace=0, searchMethod=3)
pm.delete(oTargetShape, ch=True)
finally:
if bShapeOrig:
oTargetShape.setAttr("intermediateObject", True)
pm.select(clear=True)
pm.select(oSourceShape, r=True)
pm.select(oTargetCurrentShape, tgl=True)
pm.transferShadingSets(sampleSpace=1, searchMethod=3)
# oSet = fncTools.checkSet("Shapes_Without_Same_Topology")
# if notCompatibleShapeList:
# pm.sets(oSet, addElement=notCompatibleShapeList)
# pm.select(notCompatibleShapeList)
#         pm.warning("The selected nodes may potentially have problems when transferring UVs and materials.")
return notFoundList, notCompatibleShapeList
def averageVertexColorsToMaterial(oMatList="NoEntry"):
if oMatList == "NoEntry":
oMatList = pm.selected()
if not oMatList:
logMsg("Nothing is selected. Select meshes to apply vertex color." , warning=True)
return
for oMat in oMatList:
logMsg("Processing {0}".format(repr(oMat)))
try:
colorAttr = oMat.attr("color")
except pm.MayaAttributeError:
logMsg("\tNo color attribute found.")
continue
try:
oSG = oMat.shadingGroups()[0]
except IndexError:
print "\tNo ShadingGroup found."
continue
oMemberList = oSG.members()
if not oMemberList:
logMsg("\tShadingGroup is empty.")
continue
pm.select(oMemberList, r=True)
pm.mel.ConvertSelectionToVertices()
sSelectedVerts = mc.ls(sl=True)
pm.refresh()
try:
vtxColorList = tuple(grouper(3, mc.polyColorPerVertex(sSelectedVerts, q=True, rgb=True)))
except:
logMsg("\tNo vertex colors found.")
continue
numVtx = len(vtxColorList)
rSum = 0.0
gSum = 0.0
bSum = 0.0
for r, g, b in vtxColorList:
rSum += r
gSum += g
bSum += b
if rSum + gSum + bSum > 0.0:
avrVtxColor = (rSum / numVtx, gSum / numVtx, bSum / numVtx)
try:
colorAttr.disconnect()
colorAttr.set(avrVtxColor)
except Exception, e:
logMsg("\t{0}".format(e))
def duplicateShadersPerObject(oMatList):
oNewMatList = []
for oMat in oMatList:
oShadEngList = oMat.outputs(type="shadingEngine")
if not oShadEngList:
continue
oShadEng = oShadEngList[0]
oShadEngMemberList = oShadEng.members()
oMemberByGeoObjDct = {}
for member in oShadEngMemberList:
oMesh = member.node() if isinstance(member, pm.MeshFace) else member
oMemberByGeoObjDct.setdefault(oMesh, []).append(member)
count = len(oMemberByGeoObjDct)
if count <= 1:
continue
oMemberByGeoObjDct.popitem()
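        # one arbitrary object keeps the original material (popitem above);
        # every other object gets its own duplicated material below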
for oShadingMembers in oMemberByGeoObjDct.itervalues():
oNewMat = pm.duplicate(oMat, inputConnections=True)[0]
# pm.select(oShadingMembers, replace=True)
# pm.hyperShade(assign=oNewMat)
oSG = pm.sets(renderable=True, noSurfaceShader=True, empty=True, name=oNewMat.nodeName() + "SG")
oNewMat.attr("outColor") >> oSG.attr("surfaceShader")
pm.sets(oSG, forceElement=oShadingMembers)
oNewMatList.append(oNewMat)
return oNewMatList
| sebcourtois/pypeline-tool-devkit | pytaya/core/rendering.py | Python | gpl-3.0 | 12,494 |
# -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
# testing if we are preserving task_data between wf jumps
def main_wf(current):
current.task_data['from_main'] = True
current.output['from_jumped'] = current.task_data.get('from_jumped')
assert current.workflow.name == 'jump_to_wf'
def jumped_wf(current):
current.output['from_main'] = current.task_data['from_main']
current.task_data['from_jumped'] = True
assert current.workflow.name == 'jump_to_wf2'
def set_external_wf(current):
current.task_data['external_wf'] = 'jump_to_wf2'
| zetaops/zengine | tests/views/jump_to.py | Python | gpl-3.0 | 692 |
TestNet = False
Address = "1MjeEv3WDgycrEaaNeSESrWvRfkU6s81TX"
workerEndpoint = "3333"
DonationPercentage = 0.0
Upnp = True
BitcoindConfigPath = "/opt/bitcoin/bitcoindata/bitcoin.conf"
WORKER_STATUS_REFRESH_TIME = 10
dbService = {}
workerStatus = {}
NodeService = {
'authentication': 'http://127.0.0.1:8080/service/node/authentication.htm'
}
DbOptions = {
'type': 'sql',
'engine': 'mysql',
'dbopts': {
'host': '127.0.0.1',
'db': 'antpooldb',
'user': 'antpool',
'password': 'antpool',
}
}
| bitmaintech/p2pool | config.py | Python | gpl-3.0 | 545 |
#!/usr/bin/env python3
import asm, datetime, os
import dbfread
"""
Import script for Shelterpro databases in DBF format
Requires my hack to dbfread to support VFP9 -
copy parseC in FieldParser.py and rename it parseV, then remove
encoding so it's just a binary string that can be ignored.
Requires address.dbf, addrlink.dbf, animal.dbf, incident.dbf, license.dbf, note.dbf, person.dbf, shelter.dbf, vacc.dbf
Will also look in PATH/images/IMAGEKEY.[jpg|JPG] for animal photos if available.
29th December, 2016 - 2nd April 2020
"""
PATH = "/home/robin/tmp/asm3_import_data/shelterpro_bc2243"
START_ID = 100
INCIDENT_IMPORT = False
LICENCE_IMPORT = False
PICTURE_IMPORT = True
VACCINATION_IMPORT = True
NOTE_IMPORT = True
SHELTER_IMPORT = True
SEPARATE_ADDRESS_TABLE = True
IMPORT_ANIMALS_WITH_NO_NAME = True
""" when faced with a field type it doesn't understand, dbfread can produce an error
'Unknown field type xx'. This parser returns anything unrecognised as binary data """
class ExtraFieldParser(dbfread.FieldParser):
def parse(self, field, data):
try:
return dbfread.FieldParser.parse(self, field, data)
except ValueError:
return data
def open_dbf(name):
return asm.read_dbf(name)
def gettype(animaldes):
spmap = {
"DOG": 2,
"CAT": 11
}
species = animaldes.split(" ")[0]
if species in spmap:
return spmap[species]
else:
return 2
def gettypeletter(aid):
tmap = {
2: "D",
10: "A",
11: "U",
12: "S"
}
return tmap[aid]
def getsize(size):
if size == "VERY":
return 0
elif size == "LARGE":
return 1
elif size == "MEDIUM":
return 2
else:
return 3
def getdateage(age, arrivaldate):
""" Returns a date adjusted for age. Age can be one of
ADULT, PUPPY, KITTEN, SENIOR """
d = arrivaldate
if d == None: d = datetime.datetime.today()
if age == "ADULT":
d = d - datetime.timedelta(days = 365 * 2)
if age == "SENIOR":
d = d - datetime.timedelta(days = 365 * 7)
if age == "KITTEN":
d = d - datetime.timedelta(days = 60)
if age == "PUPPY":
d = d - datetime.timedelta(days = 60)
return d
owners = []
ownerlicences = []
logs = []
movements = []
animals = []
animalvaccinations = []
animalcontrol = []
animalcontrolanimals = []
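# lookup maps from source-database keys to the created ASM records / raw address and note data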
ppa = {}
ppo = {}
ppi = {}
addresses = {}
addrlink = {}
notes = {}
asm.setid("adoption", START_ID)
asm.setid("animal", START_ID)
asm.setid("animalcontrol", START_ID)
asm.setid("log", START_ID)
asm.setid("owner", START_ID)
if VACCINATION_IMPORT: asm.setid("animalvaccination", START_ID)
if LICENCE_IMPORT: asm.setid("ownerlicence", START_ID)
if PICTURE_IMPORT: asm.setid("media", START_ID)
if PICTURE_IMPORT: asm.setid("dbfs", START_ID)
# Remove existing
print("\\set ON_ERROR_STOP\nBEGIN;")
print("DELETE FROM adoption WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
print("DELETE FROM animal WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
print("DELETE FROM owner WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
if INCIDENT_IMPORT: print("DELETE FROM animalcontrol WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
if VACCINATION_IMPORT: print("DELETE FROM animalvaccination WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
if LICENCE_IMPORT: print("DELETE FROM ownerlicence WHERE ID >= %d AND CreatedBy = 'conversion';" % START_ID)
if PICTURE_IMPORT: print("DELETE FROM media WHERE ID >= %d;" % START_ID)
if PICTURE_IMPORT: print("DELETE FROM dbfs WHERE ID >= %d;" % START_ID)
# Create a transfer owner
to = asm.Owner()
owners.append(to)
to.OwnerSurname = "Other Shelter"
to.OwnerName = to.OwnerSurname
# Create an unknown owner
uo = asm.Owner()
owners.append(uo)
uo.OwnerSurname = "Unknown Owner"
uo.OwnerName = uo.OwnerSurname
# Load up data files
if SEPARATE_ADDRESS_TABLE:
caddress = open_dbf("address")
caddrlink = open_dbf("addrlink")
canimal = open_dbf("animal")
if LICENCE_IMPORT: clicense = open_dbf("license")
cperson = open_dbf("person")
if SHELTER_IMPORT: cshelter = open_dbf("shelter")
if VACCINATION_IMPORT: cvacc = open_dbf("vacc")
if INCIDENT_IMPORT: cincident = open_dbf("incident")
if NOTE_IMPORT: cnote = open_dbf("note")
if PICTURE_IMPORT: cimage = open_dbf("image")
# Addresses if we have a separate file
if SEPARATE_ADDRESS_TABLE:
for row in caddress:
addresses[row["ADDRESSKEY"]] = {
"address": asm.strip(row["ADDRESSSTR"]) + " " + asm.strip(row["ADDRESSST2"]) + " " + asm.strip(row["ADDRESSST3"]),
"city": asm.strip(row["ADDRESSCIT"]),
"state": asm.strip(row["ADDRESSSTA"]),
"zip": asm.strip(row["ADDRESSPOS"])
}
# The link between addresses and people
for row in caddrlink:
addrlink[row["EVENTKEY"]] = row["ADDRLINKAD"]
# People
for row in cperson:
o = asm.Owner()
owners.append(o)
personkey = 0
# Sometimes called UNIQUE
if "PERSONKEY" in row: personkey = row["PERSONKEY"]
elif "UNIQUE" in row: personkey = row["UNIQUE"]
ppo[personkey] = o
o.OwnerForeNames = asm.strip(row["FNAME"])
o.OwnerSurname = asm.strip(row["LNAME"])
o.OwnerName = o.OwnerTitle + " " + o.OwnerForeNames + " " + o.OwnerSurname
# Find the address if it's in a separate table
if SEPARATE_ADDRESS_TABLE:
if personkey in addrlink:
addrkey = addrlink[personkey]
if addrkey in addresses:
add = addresses[addrkey]
o.OwnerAddress = add["address"]
o.OwnerTown = add["city"]
o.OwnerCounty = add["state"]
o.OwnerPostcode = add["zip"]
else:
# Otherwise, address fields are in the person table
o.OwnerAddress = row["ADDR1"].encode("ascii", "xmlcharrefreplace") + "\n" + row["ADDR2"].encode("ascii", "xmlcharrefreplace")
o.OwnerTown = row["CITY"]
o.OwnerCounty = row["STATE"]
o.OwnerPostcode = row["POSTAL_ID"]
if asm.strip(row["EMAIL"]) != "(": o.EmailAddress = asm.strip(row["EMAIL"])
if row["HOME_PH"] != 0: o.HomeTelephone = asm.strip(row["HOME_PH"])
if row["WORK_PH"] != 0: o.WorkTelephone = asm.strip(row["WORK_PH"])
if row["THIRD_PH"] != 0: o.MobileTelephone = asm.strip(row["THIRD_PH"])
o.IsACO = asm.cint(row["ACO_IND"])
o.IsStaff = asm.cint(row["STAFF_IND"])
o.IsVolunteer = asm.cint(row["VOL_IND"])
o.IsDonor = asm.cint(row["DONOR_IND"])
o.IsMember = asm.cint(row["MEMBER_IND"])
o.IsBanned = asm.cint(row["NOADOPT"] == "T" and "1" or "0")
if "FOSTERS" in row: o.IsFosterer = asm.cint(row["FOSTERS"])
# o.ExcludeFromBulkEmail = asm.cint(row["MAILINGSAM"]) # Not sure this is correct
# Animals
for row in canimal:
if not IMPORT_ANIMALS_WITH_NO_NAME and row["PETNAME"].strip() == "": continue
a = asm.Animal()
animals.append(a)
ppa[row["ANIMALKEY"]] = a
a.AnimalTypeID = gettype(row["ANIMLDES"])
a.SpeciesID = asm.species_id_for_name(row["ANIMLDES"].split(" ")[0])
a.AnimalName = asm.strip(row["PETNAME"]).title()
if a.AnimalName.strip() == "":
a.AnimalName = "(unknown)"
age = row["AGE"].split(" ")[0]
added = asm.now()
if "ADDEDDATET" in row and row["ADDEDDATET"] is not None: added = row["ADDEDDATET"]
if "DOB" in row: a.DateOfBirth = row["DOB"]
if a.DateOfBirth is None: a.DateOfBirth = getdateage(age, added)
a.DateBroughtIn = added
a.LastChangedDate = a.DateBroughtIn
a.CreatedDate = a.DateBroughtIn
a.EntryReasonID = 4
a.generateCode(gettypeletter(a.AnimalTypeID))
a.ShortCode = row["ANIMALKEY"]
a.Neutered = asm.cint(row["FIX"])
a.Declawed = asm.cint(row["DECLAWED"])
a.IsNotAvailableForAdoption = 0
a.ShelterLocation = 1
a.Sex = asm.getsex_mf(asm.strip(row["GENDER"]))
a.Size = getsize(asm.strip(row["WEIGHT"]))
a.BaseColourID = asm.colour_id_for_names(asm.strip(row["FURCOLR1"]), asm.strip(row["FURCOLR2"]))
a.IdentichipNumber = asm.strip(row["MICROCHIP"])
    if a.IdentichipNumber != "": a.Identichipped = 1
comments = "Original breed: " + asm.strip(row["BREED1"]) + "/" + asm.strip(row["CROSSBREED"]) + ", age: " + age
comments += ",Color: " + asm.strip(row["FURCOLR1"]) + "/" + asm.strip(row["FURCOLR2"])
comments += ", Coat: " + asm.strip(row["COAT"])
comments += ", Collar: " + asm.strip(row["COLLRTYP"])
a.BreedID = asm.breed_id_for_name(asm.strip(row["BREED1"]))
a.Breed2ID = a.BreedID
a.BreedName = asm.breed_name_for_id(a.BreedID)
if row["PUREBRED"] == "0":
a.Breed2ID = asm.breed_id_for_name(asm.strip(row["CROSSBREED"]))
if a.Breed2ID == 1: a.Breed2ID = 442
a.BreedName = "%s / %s" % ( asm.breed_name_for_id(a.BreedID), asm.breed_name_for_id(a.Breed2ID) )
a.HiddenAnimalDetails = comments
# Make everything non-shelter until it's in the shelter file
a.NonShelterAnimal = 1
a.Archived = 1
# If the row has an original owner
if row["PERSOWNR"] in ppo:
o = ppo[row["PERSOWNR"]]
a.OriginalOwnerID = o.ID
# Shelterpro records Deceased as Status == 2 as far as we can tell
if row["STATUS"] == 2:
a.DeceasedDate = a.DateBroughtIn
a.PTSReasonID = 2 # Died
# Vaccinations
if VACCINATION_IMPORT:
for row in cvacc:
if not row["ANIMALKEY"] in ppa: continue
a = ppa[row["ANIMALKEY"]]
# Each row contains a vaccination
av = asm.AnimalVaccination()
animalvaccinations.append(av)
vaccdate = row["VACCEFFECT"]
if vaccdate is None:
vaccdate = a.DateBroughtIn
av.AnimalID = a.ID
av.VaccinationID = 8
if row["VACCTYPE"].find("DHLPP") != -1: av.VaccinationID = 8
if row["VACCTYPE"].find("BORDETELLA") != -1: av.VaccinationID = 6
if row["VACCTYPE"].find("RABIES") != -1: av.VaccinationID = 4
av.DateRequired = vaccdate
av.DateOfVaccination = vaccdate
av.DateExpires = row["VACCEXPIRA"]
av.Manufacturer = row["VACCMANUFA"]
av.BatchNumber = row["VACCSERIAL"]
av.Comments = "Name: %s, Issue: %s" % (row["VACCDRUGNA"], row["VACCISSUED"])
# Run through the shelter file and create any movements/euthanisation info
if SHELTER_IMPORT:
for row in cshelter:
a = None
if row["ANIMALKEY"] in ppa:
a = ppa[row["ANIMALKEY"]]
arivdate = row["ARIVDATE"]
a.ShortCode = asm.strip(row["ANIMALKEY"])
a.ShelterLocationUnit = asm.strip(row["KENNEL"])
a.NonShelterAnimal = 0
if arivdate is not None:
a.DateBroughtIn = arivdate
a.LastChangedDate = a.DateBroughtIn
a.CreatedDate = a.DateBroughtIn
a.generateCode(gettypeletter(a.AnimalTypeID))
a.ShortCode = asm.strip(row["ANIMALKEY"])
else:
# Couldn't find an animal record, bail
continue
o = None
if row["OWNERATDIS"] in ppo:
o = ppo[row["OWNERATDIS"]]
dispmeth = asm.strip(row["DISPMETH"])
dispdate = row["DISPDATE"]
# Apply other fields
if row["ARIVREAS"] == "QUARANTINE":
a.IsQuarantine = 1
elif row["ARIVREAS"] == "STRAY":
if a.AnimalTypeID == 2: a.AnimalTypeID = 10
if a.AnimalTypeID == 11: a.AnimalTypeID = 12
a.EntryReasonID = 7
# Adoptions
if dispmeth == "ADOPTED":
if a is None or o is None: continue
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 1
m.MovementDate = dispdate
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementType = 1
movements.append(m)
# Reclaims
elif dispmeth == "RETURN TO OWNER":
if a is None or o is None: continue
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 5
m.MovementDate = dispdate
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementType = 5
movements.append(m)
# Released or Other
elif dispmeth == "RELEASED" or dispmeth == "OTHER":
if a is None or o is None: continue
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = 0
m.MovementType = 7
m.MovementDate = dispdate
m.Comments = dispmeth
a.Archived = 1
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementID = m.ID
a.ActiveMovementType = 7
movements.append(m)
# Holding
elif dispmeth == "" and row["ANIMSTAT"] == "HOLDING":
a.IsHold = 1
a.Archived = 0
# Deceased
elif dispmeth == "DECEASED":
a.DeceasedDate = dispdate
a.PTSReasonID = 2 # Died
a.Archived = 1
# Euthanized
elif dispmeth == "EUTHANIZED":
a.DeceasedDate = dispdate
a.PutToSleep = 1
a.PTSReasonID = 4 # Sick/Injured
a.Archived = 1
# If the outcome is blank, it's on the shelter
elif dispmeth == "":
a.Archived = 0
# It's the name of an organisation that received the animal
else:
if a is None: continue
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = to.ID
m.MovementType = 3
m.MovementDate = dispdate
m.Comments = dispmeth
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementType = 3
movements.append(m)
if LICENCE_IMPORT:
for row in clicense:
a = None
if row["ANIMALKEY"] in ppa:
a = ppa[row["ANIMALKEY"]]
o = None
if row["LICENSEOWN"] in ppo:
o = ppo[row["LICENSEOWN"]]
if a is not None and o is not None:
if row["LICENSEEFF"] is None:
continue
ol = asm.OwnerLicence()
ownerlicences.append(ol)
ol.AnimalID = a.ID
ol.OwnerID = o.ID
ol.IssueDate = row["LICENSEEFF"]
ol.ExpiryDate = row["LICENSEEXP"]
if ol.ExpiryDate is None: ol.ExpiryDate = ol.IssueDate
ol.LicenceNumber = asm.strip(row["LICENSE"])
ol.LicenceTypeID = 2 # Unaltered dog
if a.Neutered == 1:
ol.LicenceTypeID = 1 # Altered dog
if PICTURE_IMPORT:
for row in cimage:
a = None
if not row["ANIMALKEY"] in ppa:
continue
a = ppa[row["ANIMALKEY"]]
imdata = None
if os.path.exists(PATH + "/images/%s.jpg" % row["IMAGEKEY"]):
f = open(PATH + "/images/%s.jpg" % row["IMAGEKEY"], "rb")
imdata = f.read()
f.close()
if imdata is not None:
asm.animal_image(a.ID, imdata)
# Incidents
if INCIDENT_IMPORT:
for row in cincident:
ac = asm.AnimalControl()
animalcontrol.append(ac)
calldate = row["DATETIMEAS"]
if calldate is None: calldate = row["DATETIMEOR"]
if calldate is None: calldate = asm.now()
ac.CallDateTime = calldate
ac.IncidentDateTime = calldate
ac.DispatchDateTime = calldate
ac.CompletedDate = row["DATETIMEOU"]
if ac.CompletedDate is None: ac.CompletedDate = calldate
if row["CITIZENMAK"] in ppo:
ac.CallerID = ppo[row["CITIZENMAK"]].ID
if row["OWNERATORI"] in ppo:
ac.OwnerID = ppo[row["OWNERATORI"]].ID
ac.IncidentCompletedID = 2
if row["FINALOUTCO"] == "ANIMAL PICKED UP":
ac.IncidentCompletedID = 2
elif row["FINALOUTCO"] == "OTHER":
ac.IncidentCompletedID = 6 # Does not exist in default data
ac.IncidentTypeID = 1
incidentkey = 0
if "INCIDENTKE" in row: incidentkey = row["INCIDENTKE"]
elif "KEY" in row: incidentkey = row["KEY"]
comments = "case: %s\n" % incidentkey
comments += "outcome: %s\n" % asm.strip(row["FINALOUTCO"])
comments += "precinct: %s\n" % asm.strip(row["PRECINCT"])
ac.CallNotes = comments
ac.Sex = 2
if "ANIMALKEY" in row:
if row["ANIMALKEY"] in ppa:
a = ppa[row["ANIMALKEY"]]
animalcontrolanimals.append("INSERT INTO animalcontrolanimal (AnimalControlID, AnimalID) VALUES (%s, %s);" % (ac.ID, a.ID))
# Notes as log entries
if NOTE_IMPORT:
for row in cnote:
eventtype = row["EVENTTYPE"]
eventkey = row["EVENTKEY"]
notedate = row["NOTEDATE"]
memo = row["NOTEMEMO"]
if eventtype in [ 1, 3 ]: # animal/intake or case notes
if not eventkey in ppa: continue
linkid = ppa[eventkey].ID
ppa[eventkey].HiddenAnimalDetails += "\n" + memo
l = asm.Log()
logs.append(l)
l.LogTypeID = 3
l.LinkID = linkid
l.LinkType = 0
l.Date = notedate
if l.Date is None:
l.Date = asm.now()
l.Comments = memo
elif eventtype in [ 2, 5, 10 ]: # person, case and incident notes
if not eventkey in ppi: continue
linkid = ppi[eventkey].ID
ppi[eventkey].CallNotes += "\n" + memo
l = asm.Log()
logs.append(l)
l.LogTypeID = 3
l.LinkID = linkid
l.LinkType = 6
l.Date = notedate
if l.Date is None:
l.Date = asm.now()
l.Comments = memo
# Run back through the animals, if we have any that are still
# on shelter after 2 years, add an adoption to an unknown owner
#asm.adopt_older_than(animals, movements, uo.ID, 365*2)
# Now that everything else is done, output stored records
for a in animals:
print(a)
for av in animalvaccinations:
print(av)
for o in owners:
print(o)
for l in logs:
print(l)
for m in movements:
print(m)
for ol in ownerlicences:
print(ol)
for ac in animalcontrol:
print(ac)
for aca in animalcontrolanimals:
print(aca)
asm.stderr_summary(animals=animals, animalvaccinations=animalvaccinations, logs=logs, owners=owners, movements=movements, ownerlicences=ownerlicences, animalcontrol=animalcontrol)
print("DELETE FROM configuration WHERE ItemName LIKE 'DBView%';")
print("COMMIT;")
| bobintetley/asm3 | import/shelterpro_dbf.py | Python | gpl-3.0 | 18,704 |
import time
start = time.time()
def prime_factors(n):
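    """Return the distinct prime factors of n in ascending order, by trial division."""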
l = []
i = 2
while n != 1:
if n % i == 0:
l.append(i)
while n % i == 0:
n = n // i
i += 1
return l
print(prime_factors(600851475143)[-1])
elapsed = time.time() - start
print("time elapsed:", elapsed, "seconds")
| libranwizard/projecteuler | euler3.py | Python | gpl-3.0 | 344 |
"""TBD
"""
import os
import sys
import time
import subprocess
import xmlrpc.client
class ApplicationMonitor(object):
"""Responsible for launching, monitoring, and terminating the FLDIGI application process, using subprocess.Popen()
:param hostname: The FLDIGI XML-RPC server's IP address or hostname (usually localhost / 127.0.0.1)
:type hostname: str (path to folder)
:param port: The port in which FLDIGI's XML-RPC server is listening on.
:type port: int
.. note:: Commandline arguments can be found on the following links:
* `Official Documentation page <http://www.w1hkj.com/FldigiHelp-3.21/html/command_line_switches_page.html/>`_
* `Man page for FLDIGI <https://www.dragonflybsd.org/cgi/web-man?command=fldigi§ion=1/>`_
"""
def __init__(self, hostname='127.0.0.1', port=7362):
self.platform = sys.platform
self.hostname = hostname
self.port = int(port)
if self.platform not in ['linux', 'win32', 'darwin']:
raise Exception('You\'re probably using an OS that is unsupported. Sorry about that. I take pull requests.')
self.client = xmlrpc.client.ServerProxy('http://{}:{}/'.format(self.hostname, self.port))
self.process = None
def start(self, headless=False, wfall_only=False):
"""Start fldigi in the background
:param headless: if True, starts the FLDIGI application in headless mode (POSIX only! Doesn't work in Windows)
:type headless: bool
:param wfall_only: If True, start FLDIGI in 'waterfall-only' mode. (POSIX only! Doesn't work in Windows)
:type wfall_only: bool
:Example:
>>> import pyfldigi
>>> c = pyfldigi.Client()
>>> app = pyfldigi.ApplicationMonitor(headless=True)
>>> app.start()
>>> # At this point, fldigi should be running in headless mode.
>>> c.modem.name # Ask FLDIGI which modem it's currently using
'CW'
"""
args = [self._get_path()]
if self.platform == 'win32':
# Currently, the app crashes if I pass in any params from the windows commandline.
# For now just ignore all of the params if running this under windows.
pass
else:
args.extend(['--arq-server-address', self.hostname])
args.extend(['--arq-server-port', str(self.port)])
if headless is True:
if self.platform == 'win32':
raise Exception('cannot run headless with win32. Headless mode is only supported on Linux.')
else: # Assumes cygwin, linux, and darwin can utilize xvfb to create a fake x server
args.insert(0, 'xvfb-run') # http://manpages.ubuntu.com/manpages/zesty/man1/xvfb-run.1.html
args.append('-display')
args.append(':99')
else:
if wfall_only is True: # consider this modal with 'headless'
args.append('--wfall-only')
# args.extend(['-title', 'fldigi']) # Set the title to something predictable.
self.process = subprocess.Popen(args)
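        # Poll the XML-RPC server for up to 10 seconds until fldigi answers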
start = time.time()
        while True:
try:
if self.client.fldigi.name() == 'fldigi':
break
except ConnectionRefusedError:
pass
if time.time() - start >= 10:
break
time.sleep(0.5)
def stop(self, save_options=True, save_log=True, save_macros=True, force=True):
"""Attempts to gracefully shut down fldigi. Returns the error code.
:Example:
>>> import pyfldigi
>>> app = pyfldigi.ApplicationMonitor()
>>> app.start()
>>> time.sleep(10) # wait a bit
>>> app.stop()
"""
bitmask = int('0b{}{}{}'.format(int(save_macros), int(save_log), int(save_options)), 0)
self.client.fldigi.terminate(bitmask)
if self.process is not None:
error_code = self.process.wait(timeout=2)
if force is True:
if error_code is None:
self.process.terminate() # attempt to terminate
error_code = self.process.wait(timeout=2)
if error_code is None:
error_code = self.process.kill()
self.process = None
return error_code
def kill(self):
"""Kills fldigi.
.. warning::
Please try and use stop() before doing this to shut down fldigi gracefully.
Consider kill() the last resort.
:Example:
>>> import pyfldigi
>>> app = pyfldigi.ApplicationMonitor()
>>> app.start()
>>> time.sleep(10) # wait a bit
>>> app.kill() # kill the process
"""
if self.process is not None:
self.process.kill()
self.process = None
# TODO: Interpret error codes and raise custom exceptions
def _get_path(self):
if self.platform == 'win32':
# Below is a clever way to return a list of fldigi versions. This would fail if the user
# did not install fldigi into Program Files.
fldigi_versions = [d for d in os.listdir(os.environ["ProgramFiles(x86)"]) if 'fldigi' in d.lower()]
if len(fldigi_versions) == 0:
raise FileNotFoundError('Cannot find the path to fldigi. Is it installed?')
elif len(fldigi_versions) == 1:
path = os.path.join(os.environ["ProgramFiles(x86)"], fldigi_versions[0])
# Check to see if fldigi.exe is in the folder
if 'fldigi.exe' in os.listdir(path):
return os.path.join(path, 'fldigi.exe')
else:
raise Exception('Found more than one version of fldigi. Uninstall one.')
else: # Assume all other OS's are smart enough to place fldigi in PATH
return 'fldigi'
def is_running(self):
"""Uses the python subprocess module object to see if FLDIGI is still running.
.. warning::
If the AppMonitor did not start FLDIGI, then this function will not return True. The method
only works if FLDIGI was launched using start().
:return: Returns whether or not the FLDIGI application is running
:rtype: bool
"""
        if self.process is None:
            return False
        p = self.process.poll()  # None while still running; the exit code once it has finished
        if p is None:
            return True
        self.returncode = p
        return False
if __name__ == '__main__':
a = ApplicationMonitor()
a.start()
for i in range(0, 5):
print(a.is_running())
time.sleep(1)
errorCode = a.stop()
print(errorCode)
| KM4YRI/pyFldigi | pyfldigi/appmonitor.py | Python | gpl-3.0 | 6,913 |
# PPFem: An educational finite element code
# Copyright (C) 2015 Matthias Rambausek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ppfem.user_elements
import ppfem.user_equations
import ppfem.quadrature
from ppfem.user_elements import *
from ppfem.user_equations import *
from ppfem.quadrature import *
from ppfem.mesh.mesh import Mesh
from ppfem.geometry import Point, Vertex, Line, Face, Cell, Mapping
from ppfem.fem.assembler import DefaultSystemAssembler
from ppfem.fem.form import Functional, LinearForm, BilinearForm, FormCollection
from ppfem.fem.function import FEFunction, FunctionEvaluator
from ppfem.fem.function_space import FunctionSpace
from ppfem.fem.partial_differential_equation import PDE
__all__ = ["Mesh", "Point", "Line", "Vertex", "Face", "Cell", "Mapping", "FunctionSpace", "Functional",
"LinearForm", "BilinearForm", "FormCollection", "DefaultSystemAssembler", "FEFunction", "FunctionEvaluator",
"PDE"]
__all__ += ppfem.user_elements.__all__ + ppfem.quadrature.__all__ + ppfem.user_equations.__all__
| mrambausek/PPFem | ppfem/__init__.py | Python | gpl-3.0 | 1,648 |
from random import random, seed
import numpy as np
from skued import biexponential, exponential, with_irf
seed(23)
def test_exponential_tzero_limits():
"""Test that the output of ``exponential`` has the correct time-zero"""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp = 5 * random() + 5 # between 5 and 10
tconst = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
# Check that all values before time-zero are the amplitude
assert np.all(np.equal(I[t < tzero], amp))
assert np.all(np.less(I[t > tzero], amp))
def test_exponential_positivity():
"""Test that the output of ``exponential`` is always positive."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp = 5 * random() + 5 # between 5 and 10
tconst = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
assert np.all(I > 0)
def test_exponential_amplitude():
"""Test that the output of ``exponential`` is at most ``amp``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp = 5 * random() + 5 # between 5 and 10
tconst = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
assert np.all(np.less_equal(I, amp))
def test_exponential_offset():
"""Test that the output of ``exponential`` is at lest ``offset``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp = 5 * random() + 5 # between 5 and 10
tconst = random() + 0.3 # between 0.3 and 1.3
offset = 15
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=tzero, amp=amp, tconst=tconst, offset=offset)
assert np.all(np.greater_equal(I, offset))
def test_biexponential_tzero_limits():
"""Test that the output of ``biexponential`` has the correct time-zero"""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
# Check that all values before time-zero are the amplitude
assert np.all(np.equal(I[t < tzero], amp1 + amp2))
assert np.all(np.less(I[t > tzero], amp1 + amp2))
def test_biexponential_positivity():
"""Test that the output of ``biexponential`` is always positive."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
assert np.all(I > 0)
def test_biexponential_amplitude():
"""Test that the output of ``biexponential`` is at most ``amp1 + amp2``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
assert np.all(np.less_equal(I, amp1 + amp2))
def test_biexponential_offset():
"""Test that the output of ``biexponential`` is at least ``offset``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
offset = 15
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
offset=offset,
)
assert np.all(np.greater_equal(I, offset))
def test_biexponential_against_exponential():
"""Test that ``biexponential`` reduces to ``exponential`` for appropriate parameters"""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
offset = 2
exp = exponential(t, tzero, amp1, tconst1, offset=offset)
biexp = biexponential(t, tzero, amp1, 0, tconst1, 1, offset=offset)
assert np.allclose(exp, biexp)
def test_with_irf_trivial_constant_spacing():
"""Test with_irf with a trivial IRF, with constant spacing"""
params = (0, 1, 3)
times = np.linspace(-5, 15, 256)
data = exponential(times, *params)
@with_irf(0.00001) # vanishingly small irf
def exponential_with_irf(time, *args, **kwargs):
return exponential(time, *args, **kwargs)
conv = exponential_with_irf(times, *params)
assert np.allclose(data, conv)
def test_with_irf_trivial_nonconstant_spacing():
"""Test with_irf with a trivial IRF, with non-constant spacing"""
# Note that the spacing of the steps is important for this test
# If the array `times` is of even length, then the convolution will result
# in one time-step shift
params = (0, 1, 3)
times = np.concatenate(
(
np.arange(-10, -2, step=1),
np.arange(-2, 2, step=0.04),
np.arange(2, 10, step=1),
)
)
data = exponential(times, *params)
@with_irf(0.00001) # vanishingly small irf
def exponential_with_irf(time, *args, **kwargs):
return exponential(time, *args, **kwargs)
conv = exponential_with_irf(times, *params)
assert np.allclose(data, conv)
| LaurentRDC/scikit-ued | skued/time_series/tests/test_fitting.py | Python | gpl-3.0 | 6,251 |
#!/usr/bin/env python
# - * - mode: python; coding: utf-8 - * -
# Copyright (C) 2013 Andrey Degtyarev <[email protected]>
# This program is distributed licensed under the GNU General Public License v.3
# as published by the Free Software Foundation.
import manage_pressure.constants, manage_pressure.work_device, time
def control(motor_id, pressure_1_id, pressure_2_id):
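    """Poll the pressure devices in an endless loop, applying the required motor action each cycle."""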
devices = manage_pressure.work_device.WorkDevice(motor_id, pressure_1_id, pressure_2_id)
    while True:
devices.check()
devices.action()
time.sleep(manage_pressure.constants.TIME_REQUEST_DEVICE)
| ad-dycost/mindhouse | manage_pressure/watching.py | Python | gpl-3.0 | 576 |
# -*- coding: utf-8 -*-
from queue import Queue
from lxml import etree
import requests
import random
from settings import *
import time
import socket
from pybloom_live import BloomFilter
from settings import log
import os
from settings import REFRESH_BF
from settings import MIN_NUM
import redis
import threading
import traceback
bloom = BloomFilter(capacity=10000000, error_rate=0.001)
def get_pages(url):
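    """Fetch a URL with a randomly chosen User-Agent; return the body, or None on failure."""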
try:
headers["User-Agent"] = random.choice(USER_AGENT_LIST)
r = requests.get(url,headers=headers)
if r.ok:
return r.content
else:
return None
except Exception as e:
log.error("PID:%d error:%s url:%s" % (os.getpid(),traceback.format_exc(),url))
return None
def parse_page(url, page, pattern):
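    """Yield {"ip_port", "type"} dicts scraped from one listing page via the site's XPath patterns."""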
page = etree.HTML(page.lower())
#page = etree.HTML(page.lower().decode('utf-8'))
ips = page.xpath(pattern["ip"])
ports = page.xpath(pattern["port"])
ty = page.xpath(pattern["type"])
if ips == None or ports == None or ty == None:
raise ValueError("current page "+str(ips)+str(ports)+str(ty))
for i in range(len(ips)):
        ret = {}
        ret["ip_port"] = "%s:%s" % (ips[i].text, ports[i].text)
#print(url, ret["ip_port"], ty[i].text)
if ty[i].text.find("http") >= 0:
ret["type"] = 0
elif ty[i].text.find("https") >= 0:
ret["type"] = 1
else:
log.error("PID:%d page:%s can not get proxy type" % (os.getpid(), url))
yield ret
def get_and_check(url,pattern,q):
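    """Fetch one listing page and enqueue each proxy not yet seen by the Bloom filter."""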
try:
page = get_pages(url)
if page == None:
return
lists = parse_page(url, page, pattern)
for ele in lists:
is_existed = ele["ip_port"] in bloom
#log.debug("PID:%d proxy worker ip %s is_existed %d" % (os.getpid(),ele["ip_port"],is_existed))
if is_existed == False:
try:
bloom.add(ele["ip_port"])
except Exception as e:
log.error("PID:%d bloom filter error:%s ip:%s" % (os.getpid(),e,ele["ip_port"]))
#url, ip, is_http, store_cookies, use_default_cookies, check_anonymity,
ele["name"] = "global"
ele["db"] = 0
ele["url"] = TEST_URL
ele["store_cookies"] = STORE_COOKIE
ele["use_default_cookies"] = USE_DEFAULT_COOKIE
ele["check_anonymity"] = True
q.put(ele)
except Exception as e:
log.error("PID:%d parse page error %s " % (os.getpid(), traceback.format_exc()))
def worker(pattern,q):
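    """Crawl every URL in a site pattern, expanding any '%d' page placeholder, pausing between pages."""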
try:
num = pattern["page_range"]
for i in range(len(pattern["url"])):
index = pattern["url"][i].find("%d")
log.debug("PID:%d url:%s" % (os.getpid(), str(pattern)))
if index == -1:
get_and_check(pattern["url"][i],pattern,q)
time.sleep(10)
continue
for j in range(1,num+1):
url = pattern["url"][i] % j
get_and_check(url,pattern,q)
time.sleep(10)
except Exception as e:
log.error("PID:%d proxy url error:%s %s " % (os.getpid(),traceback.format_exc(), str(pattern)))
def db_zcount():
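    """Return the number of proxies tracked in the Redis 'proxy:counts' sorted set."""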
r = redis.StrictRedis(REDIS_SERVER,REDIS_PORT,DB_FOR_IP, decode_responses=True)
return r.zcard("proxy:counts")
def get_proxy(q):
#bloom.clear_all()
times = 0
while True:
try:
num = db_zcount()
log.debug("PID:%d db current ips %d------" % (os.getpid(),num))
while num > MIN_NUM:
time.sleep(REFRESH_WEB_SITE_TIMEER)
times += 1
if times == REFRESH_BF:
#bloom.clear()
bloom = BloomFilter(capacity=100000, error_rate=0.001)
times = 0
log.debug("PID:%d refresh bloom filter" % os.getpid())
t1 = time.time()
threads = []
for key,value in list(URL_PATTERN.items()):
thread = threading.Thread(target=worker,args=(value,q))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
t2 = time.time()
t = REFRESH_WEB_SITE_TIMEER - (t2 - t1)
times += 1
if t > 0:
time.sleep(t)
log.debug("PID:%d proxy sleep end------" % os.getpid())
if times == REFRESH_BF:
#bloom.clear()
bloom = BloomFilter(capacity=100000, error_rate=0.001)
times = 0
log.debug("PID:%d refresh bloom filter" % os.getpid())
except Exception as e:
log.error("PID:%d proxy error:%s" % (os.getpid(), traceback.format_exc()))
if __name__ == "__main__":
q = Queue()
get_proxy(q)
#worker(URL_PATTERN[URL_LIST[0]],q)
| yutian2011/IPProxy | proxy.py | Python | gpl-3.0 | 5,041 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Performing mathematical calculations """
WEEKS = (((19 % 10) + 100) + (2 ** 8)) / 7
| nparrilla/is210-week-03-warmup | task_02.py | Python | mpl-2.0 | 135 |
__all__ = ["sqlite_dump", "sqlite_merge"]
from random import Random
import math
def random_expectations(depth=0, breadth=3, low=1, high=10, random=Random()):
"""
Generate depth x breadth array of random numbers where each row sums to
high, with a minimum of low.
"""
result = []
if depth == 0:
initial = high + 1
for i in range(breadth - 1):
n = random.randint(low, initial - (low * (breadth - i)))
initial -= n
result.append(n)
result.append(initial - low)
random.shuffle(result)
else:
result = [random_expectations(depth - 1, breadth, low, high, random) for x in range(breadth)]
return result
def rescale(new_low, new_high, low, diff, x):
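    """Linearly map x from the interval [low, low + diff] onto [new_low, new_high]."""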
scaled = (new_high-new_low)*(x - low)
scaled /= diff
return scaled + new_low
def weighted_random_choice(choices, weights, random=Random()):
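    """Pick one element of choices with probability proportional to its integer weight."""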
population = [val for val, cnt in zip(choices, weights) for i in range(int(cnt))]
return random.choice(population)
def multinomial(probabilities, draws=1, random=Random()):
"""
Draw from a multinomial distribution
"""
def pick():
draw = random.random()
bracket = 0.
for i in range(len(probabilities)):
bracket += probabilities[i]
if draw < bracket:
return i
return i
result = [0] * len(probabilities)
for i in range(draws):
result[pick()] += 1
return result
def logistic_random(loc, scale, random=Random()):
"""
Return a random number from a specified logistic distribution.
"""
x = random.random()
return loc + scale * math.log(x / (1 - x))
def shuffled(target, random=Random()):
"""
Return a shuffled version of the argument
"""
a = target[:]
random.shuffle(a)
return a
def make_pbs_script(kwargs, hours=60, mins=0, ppn=16, script_name=None):
"""
Generate a PBS run script to be submitted.
"""
from disclosuregame.Util.sqlite_merge import list_matching
from os.path import split
args_dir, name = split(kwargs.kwargs[0])
kwargs_files = list_matching(args_dir, name)
count = len(kwargs_files)
import sys
args = sys.argv[1:]
args = " ".join(args)
args = args.replace("*", "${PBS_ARRAYID}")
args = args.replace(" %s " % kwargs.file_name, " ${PBS_ARRAYID}_%s " % kwargs.file_name)
if kwargs.file_name == "":
args += " -f ${PBS_ARRAYID}"
interpreter = sys.executable
run_script = ["#!/bin/bash -vx", "#PBS -l walltime=%d:%d:00" % (hours, mins), "#PBS -l nodes=1:ppn=%d" % ppn,
"module load python"]
# Doesn't work on multiple nodes, sadly
# Set up the call
run_call = "%s -m disclosuregame.run %s" % (interpreter, args)
run_script.append(run_call)
# Cleanup after all jobs have run
if script_name is not None:
run_script.append("if [$PBS_ARRAYID -eq %d]" % count)
run_script.append("then")
run_script.append("\trm %s" % script_name)
run_script.append("fi")
return '\n'.join(run_script), count
# ${python} Run.py -R 100 -s ${sig} -r ${resp} --pickled-arguments ../experiment_args/sensitivity_${PBS_ARRAYID}.args -f ${PBS_ARRAYID}_sensitivity -i 1000 -d ${dir} -g ${game}
| greenape/risky-aging-model | disclosuregame/Util/__init__.py | Python | mpl-2.0 | 3,317 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import NotOp as NotOp_
from jx_elasticsearch.es52.painless._utils import Painless
from jx_elasticsearch.es52.painless.es_script import EsScript
from jx_elasticsearch.es52.painless.false_op import false_script
from jx_elasticsearch.es52.painless.null_op import null_script
from jx_elasticsearch.es52.painless.true_op import true_script
from mo_json import BOOLEAN
class NotOp(NotOp_):
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
value = self.term.partial_eval(Painless).to_es_script(schema)
if value is false_script:
return true_script
elif value is true_script:
return false_script
elif value is null_script:
return null_script
return EsScript(
type=BOOLEAN, expr="!(" + value.expr + ")", frum=self, schema=schema,
)
| klahnakoski/ActiveData | vendor/jx_elasticsearch/es52/painless/not_op.py | Python | mpl-2.0 | 1,218 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 21:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField(verbose_name='Mensagem')),
('correct', models.BooleanField(default=False, verbose_name='Correto?')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to=settings.AUTH_USER_MODEL, verbose_name='Autor')),
],
options={
'verbose_name': 'Resposta',
'verbose_name_plural': 'Respostas',
'ordering': ['-correct', 'created_at'],
},
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Título')),
('body', models.TextField(verbose_name='Mensagem')),
('views', models.IntegerField(blank=True, default=0, verbose_name='Visualizações')),
('answers', models.IntegerField(blank=True, default=0, verbose_name='Respostas')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to=settings.AUTH_USER_MODEL, verbose_name='Autor')),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'verbose_name': 'Tópico',
'verbose_name_plural': 'Tópicos',
'ordering': ['-updated_at'],
},
),
]
| VWApplications/VWCourses | forum/migrations/0001_initial.py | Python | mpl-2.0 | 2,693 |
"""
sandman_pasta reimplements the behaviour of decaf-masta, but evaluates all calls against deployable heat templates instead
"""
import json
from decaf_storage.json_base import StorageJSONEncoder
from decaf_storage import Endpoint
from decaf_utils_components.base_daemon import daemonize
import yaml
import time
import urllib
from decaf_utils_components import BasePlugin, In, Out
import base64
import sys
import math
import traceback
__author__ = "Banana PG-SANDMAN"
__date__ = "$01-jun-2016$"
TMPDIR = "/tmp/decaf/"
class Pasta(BasePlugin):
__version__ = "0.1-dev01"
datacenters = dict()
config = None
logger = None
def __init__(self, logger=None, config=None):
super(Pasta, self).__init__(logger=logger, config=config)
with open('/etc/decaf/pastad.cfg') as file:
self.config = yaml.safe_load(file)
if self.config is None:
self.logger.error("No configuration file found or not in yaml format.")
sys.exit(1)
try:
self.datacenters = self.config["datacenters"]
except KeyError as e:
self.logger.error("Please check the configuration. There is no datacenter defined.")
sys.exit(1)
self.logger.debug('Configuration seems sane.')
def _before_connect(self, url=None, rpc=None, routing_key=None):
pass
# same behaviour as masta
def _after_connect(self):
self.rpc.set_json_encoder(StorageJSONEncoder)
self.storage = Endpoint(self.rpc, self.logger)
# Check if all the datacenters are also registered in Storage, if not, register them
storage_datacenters = self.storage.get('datacenter', options=[], filters={})
def connect(self, url=None, rpc=None, routing_key=None):
# fake being masta, so we don't have to change other code
super(Pasta, self).connect(self.config["rpc"]["url"], None, "decaf_masta")
@In("datacenter_id", int)
@Out("success_code", int)
def initialize_datacenter(self, datacenter_config):
"""
Reimplemented method of decaf_masta
:param datacenter_config: A DatacenterConfig object describing the datacenter to be added.
:return: The id of the new entry.
"""
self.logger.info("Call to initialize_datacenter")
return 0
@In("keystone_credentials", dict)
@Out("keystone_id", int)
def create_keystone_credentials(self, keystone_credentials):
self.logger.info("Call to create_keystone_credentials")
return 0
@In("keystone_id", int)
@Out("keystone_credentials", dict)
def get_keystone_credentials(self, keystone_id):
"""
Gets a keystone entry from the database.
:param keystone_id: The id of the database entry.
:return: The data of the keystone entry with the given id, or an error code if not found.
"""
return 400
@Out("keystone_list", list)
def get_keystones(self):
"""
Get keystone entries contained in the database.
:return: A list of keystone entries currently existing in the Masta database.
"""
return None
# ----------------------------------------------------------
# DATACENTERS
# Every datacenter has a respective set of keystone credentials and a region.
# Keystone does not have to be installed on the actual datacenter, but could.
# ----------------------------------------------------------
@In("datacenter", dict)
@Out("datacenter_id", int)
def create_datacenter(self, datacenter):
"""
Adds a datacenter entry to the database.
:param datacenter: A Datacenter dictionary containing information of the datacenter.
:return: The id of the new entry in the database.
"""
        return int(datacenter["datacenter_id"])
@Out("datacenter_list", list)
def get_datacenters(self):
"""
Get datacenter entries contained in the database.
:return: A list of datacenter entries currently existing in the Masta database.
"""
        return list(self.datacenters.values())
@In("datacenter_id", int)
@Out("datacenter_stats", dict)
def get_datacenter_stats(self, datacenter_id):
"""
Returns information about the datacenter.
:param datacenter_id: The id of the datacenter.
:return: A list of datacenter entries currently existing in the Masta database
"""
        datacenter_stats = {}  # stub: the pasta reimplementation keeps no live stats
        return datacenter_stats
@In("datacenter_id", int)
@Out("ip_namespace", str)
def get_datacenter_ip_namespace(self, datacenter_id):
"""
Returns the name of the IP namespace of the router on the given datacenter.
:param datacenter_id: The masta id of the datacenter.
:return: IP namespace name.
"""
ip_namespace = "qrouter-1"
return ip_namespace
# ----------------------------------------------------------
# DEPLOY SCENARIO
# A scenario is deployed in two steps: First, the edges are created.
# Secondly, the nodes are created.
# If the process fails at one step, MaSta will rollback the deployment.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def deploy_scenario(self, instance_graph):
"""
Deploy scenario on the infrastructure.
:param instance_graph: An object of type InstanceGraph to be deployed.
:return: The modified instance graph with ips and keynames, if successful.
"""
return instance_graph
# ----------------------------------------------------------
# DESTROY SCENARIO
# Deletes all the nodes and edges and removes
# the scenario from the database.
# ----------------------------------------------------------
@In("scenario_instance_id", str)
@Out("success_code", int)
def destroy_scenario(self, scenario_instance_id):
"""
Destroy scenario by deleting all its nodes and removing from database.
:param scenario_instance_id: The id of the scenario instance.
:return: 200, if successful. 404, if not found.
"""
return 200
@Out("success_code", int)
def destroy_all_scenarios(self):
"""
Destroys all scenarios in the MaSta database.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# ALTER SCENARIO
# Methods to change a running scenario.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def extend_scenario(self, instance_graph):
"""
Method to extend an existing scenario.
:param instance_graph: An InstanceGraph with all the nodes and edges to add.
:return: 200, if successful.
"""
return 200
@In("shrink_graph", dict)
@Out("success_code", int)
def shrink_scenario(self, shrink_graph):
"""
Method to shrink an existing scenario.
:param shrink_graph: An object of type InstanceGraph that lists all the nodes and edges to delete.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# INTERNAL SCENARIO METHODS
# Internal methods for creation and deletion
# of nodes and edges.
# ----------------------------------------------------------
def create_nodes(self, instance_graph, session):
"""
Internal method to create nodes in database and deploy the nodes on the infrastructure.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def create_edges(self, instance_graph, session):
"""
Internal method to create edges in the database and set up the networks in OpenStack.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def rollback(self, instance_graph, session, del_scenario=False):
"""
Internal method to rollback the creation or altering of a scenario.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def delete_nodes(self, vm_instance_id_list, session):
"""
Internal method to delete nodes from a scenario.
:param scenario_instance_id: The id of the scenario.
:param session: The session object.
:return: 200, if successful.
"""
return 200
def delete_edges(self, edge_list, session):
"""
Internal method to delete edges from a scenario.
:param edge_list: A list containing objects of internal edges, management ports and public ports from the db.
:param session: The session object.
:return:
"""
pass
# ----------------------------------------------------------
# ACTIONS
# Perform actions on the VMS.
# ----------------------------------------------------------
@In("vm_action", dict)
@Out("success_code", int)
def action_vm_instance(self, vm_action):
"""
Perform an action on a single vm instance.
:param vm_action: A dictionary of type VMAction containing the vm instance id and the action to perform.
:return: 200, if successful.
"""
return 200
@In("scenario_action", dict)
@Out("success_code", int)
def action_scenario(self, scenario_action):
"""
Perform an action on a scenario.
:param scenario_action: A dictionary of type ScenarioAction containing the scenario instance id and the action to perform.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# FLAVORS
# ----------------------------------------------------------
@In("flavor_data", dict)
@Out("success_code", int)
def create_flavor(self, flavor_data):
"""
Adds a flavor entry to the database and uploads the flavor to OpenStack.
:param flavor_data: A FlavorData object containing data about the flavor.
:return: 201: flavor created. 200: flavor already exists, not created
"""
return 201
@In("flavor_id", str)
@Out("success_code", int)
def delete_flavor(self, flavor_id):
"""
Deletes a flavor from the database and OpenStack.
:param flavor_id: The id of the flavor.
:return: 200, if successful. 404, if not found.
"""
return 200
# ----------------------------------------------------------
# IMAGES
# ----------------------------------------------------------
@In("image_data", dict)
@Out("success_code", int)
def create_image(self, image_data):
"""
Stores an image in OpenStack.
:param image_data: A ImageData object containing data about the image.
:return: 201: image created. 200: image already exists, not created
"""
return 201
@In("image_id", str)
@Out("success_code", int)
def delete_image(self, image_id):
"""
Deletes an image from the database and OpenStack.
:param image_id: The id of the image.
:return: 200, if successful. 404, if not found.
"""
return 200
# ----------------------------------------------------------
# NETWORKS
# ----------------------------------------------------------
@In("vm_instance_id", str)
@Out("instance_ip", str)
def get_vm_mgmt_ip(self, vm_instance_id, session=None):
"""
Retrieves the management IP address of an instance.
:param vm_instance_id: The id of the VM instance.
:return: The ip of the instance.
"""
return "10.0.0.1"
# ----------------------------------------------------------
# MONITORING DATA
# ----------------------------------------------------------
@In("monitoring_request", dict)
@Out("monitoring_response", dict)
def get_monitoring_data(self, monitoring_request):
"""
Retrieves monitoring data for a specific VM.
:param monitoring_request: A MonitoringRequest object.
:return: A MonitoringResponse object.
"""
monitoring_request = monitoring_request["monitoring_request"]
monitoring_response = {
"monitoring_response": {
"type": monitoring_request["type"],
"vm_instance_id": monitoring_request["vm_instance_id"],
"value": {
"current": 10,
"total": 100
}
}
}
return monitoring_response
@In("monitoring_alarm_request", dict)
@Out("subscription_name", str)
def create_monitoring_alarm(self, monitoring_alarm_request):
"""
Sets up an alarm and returns a subscription id to subscribe to the message broker.
:param monitoring_alarm_request: A MonitoringAlarmRequest object containing data about the alarm to be set up.
:return: The name of the subscription
"""
return "test"
@In("subscription_name", str)
@Out("success_code", int)
def delete_monitoring_alarm(self, subscription_name):
"""
Delete monitoring alarm by subscription_name.
:param subscription_name: The name of the Subscription.
:return: 200, if successful. 404, if not found.
"""
return 200
@In("monitoring_alarm_id", int)
@Out("success_code", int)
def delete_monitoring_alarm_by_id(self, monitoring_alarm_id):
"""
Delete monitoring alarm by alarm id.
:param monitoring_alarm_id: The id of the alarm, under which it is registered in the MaSta database.
:return: 200, if successful. 404, if not found.
"""
return 200
@Out("success_code", int)
def delete_all_monitoring_alarms(self):
"""
Deletes all monitoring alarms in the DB.
:return: 200, if successful.
"""
return 200
def invoke_monitoring_alarm(self, data):
"""
Internal method. Called by the MaSta-Server when an alarm message arrives.
:param data: data
:return:
"""
pass
def daemon():
daemonize(Pasta)
if __name__ == '__main__':
daemon()
| CN-UPB/OpenBarista | components/sandman-pasta/sandman_pasta/sandman_pasta.py | Python | mpl-2.0 | 14,698 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from datetime import datetime, timedelta
from flask import current_app
from typing import Iterator, List, Tuple
from werkzeug.exceptions import Conflict, NotFound
from .models import Message, Policy
from .channels import send_notifications
from requests import get
from json import JSONDecodeError
from cli_common import log
from backend_common.auth import auth
import os
import mohawk
logger = log.get_logger(__name__)
AUTHENTICATION_SCOPE_PREFIX = 'project:releng:services/releng_notification_policy/permission/'
def get_policies_in_json_serializable_form(notification_policies: List[Policy]) -> List[dict]:
return [
policy.to_dict()
for policy in notification_policies
]
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'get_message'])
def get_message_by_uid(uid: str) -> dict:
session = current_app.db.session
message = session.query(Message).filter(Message.uid == uid).first()
if message:
notification_policies = session.query(Policy).filter(Policy.uid == message.uid).all()
policies_dicts = get_policies_in_json_serializable_form(notification_policies)
logger.info('Serving {message}'.format(message=message))
return {
'shortMessage': message.shortMessage,
'message': message.message,
'deadline': message.deadline,
'policies': policies_dicts,
}
else:
err_str = 'Message with uid {} not found.'.format(uid)
logger.info(err_str)
raise NotFound(err_str)
def get_policies_as_dict_for_message(message: Message) -> dict:
session = current_app.db.session
policies = session.query(Policy).filter(Policy.uid == message.uid).all()
serialized_policies = get_policies_in_json_serializable_form(policies)
return {
'policies': serialized_policies,
}
def get_active_policies_for_identity(identity_name: str) -> dict:
session = current_app.db.session
now = datetime.now()
active_policies = session.query(Policy).filter(Policy.identity == identity_name)\
.filter(Policy.start_timestamp < now)\
.filter(Policy.stop_timestamp > now)\
.all()
if active_policies:
return {
'policies': get_policies_in_json_serializable_form(active_policies),
}
else:
raise NotFound('No active policies found for {}.'.format(identity_name))
def get_pending_messages() -> dict:
session = current_app.db.session
current_time = datetime.now()
messages = session.query(Message).filter(Message.deadline > current_time).all()
if messages:
return {
'messages': [
{**message.to_dict(), **get_policies_as_dict_for_message(message)}
for message in messages
],
}
else:
raise NotFound('No pending messages found.')
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'put_message'])
def put_message(uid: str, body: dict) -> None:
'''
Add a new message to be delivered into the service.
:param uid: UID of message to track
:param body: Description of message
:return: No content, status code
'''
session = current_app.db.session
# Make sure the message UID doesn't already exist in the DB
existing_message = session.query(Message).filter(Message.uid == uid).first()
if existing_message:
err_str = '{message} already exists'.format(message=existing_message)
logger.info(err_str)
raise Conflict(err_str)
new_message = Message(uid=uid, shortMessage=body['shortMessage'],
message=body['message'], deadline=body['deadline'])
session.add(new_message)
session.flush()
policies = [
# Overwrite the frequency object input from the API with a db compatible timedelta object
Policy(**{**p, 'frequency': timedelta(**p['frequency']), 'uid': new_message.uid})
for p in body['policies']
]
session.add_all(policies)
session.commit()
logger.info('{} created.'.format(new_message))
for new_policy in policies:
logger.info('{} created.'.format(new_policy))
return None
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'delete_message'])
def delete_message(uid: str) -> None:
'''
Delete the message with the specified UID
:param uid: UID of the message to delete.
:return: No content, status code
'''
session = current_app.db.session
message = session.query(Message).filter(Message.uid == uid).first()
if message:
session.delete(message)
session.commit()
logger.info('{} deleted.'.format(message))
return None
else:
err_str = 'Message with uid "{}" not found'.format(uid)
logger.warning(err_str)
raise NotFound(err_str)
def determine_message_action(messages: List[Message]) -> Iterator[Tuple[Message, bool]]:
current_time = datetime.now()
for message in messages:
if current_time > message.deadline:
yield message, True
else:
yield message, False
def create_identity_preference_url(policy: Policy) -> str:
return '{endpoint}/identity/{identity_name}/{urgency}'\
.format(endpoint=current_app.config.get('RELENG_NOTIFICATION_IDENTITY_ENDPOINT'),
identity_name=policy.identity,
urgency=policy.urgency)
def get_identity_url_for_actionable_policies(policies: List[Policy]) -> Iterator[Tuple[Policy, str]]:
current_time = datetime.now()
for policy in policies:
# Check our policy time frame is in effect
if policy.stop_timestamp < current_time or current_time < policy.start_timestamp:
continue
# If we have notified already, only notify according to the frequency
if policy.last_notified and current_time - policy.last_notified < policy.frequency:
continue
identity_preference_url = create_identity_preference_url(policy)
yield policy, identity_preference_url
@auth.require_scopes([AUTHENTICATION_SCOPE_PREFIX + 'ticktock'])
def post_tick_tock() -> dict:
'''
Trigger pending notifications according to their notification policies
:return: Information about notification triggered by this call in JSON format.
'''
session = current_app.db.session
current_time = datetime.now()
pending_messages = session.query(Message).all()
if not pending_messages:
raise NotFound('No pending policies to trigger.')
notifications = []
for message, is_past_deadline in determine_message_action(pending_messages):
if is_past_deadline:
session.delete(message)
continue
policies = session.query(Policy).filter(Policy.uid == message.uid).all()
for policy, identity_preference_url in get_identity_url_for_actionable_policies(policies):
try:
service_credentials = {
'id': current_app.config['TASKCLUSTER_CLIENT_ID'],
'key': current_app.config['TASKCLUSTER_ACCESS_TOKEN'],
'algorithm': 'sha256',
}
hawk = mohawk.Sender(service_credentials, identity_preference_url, 'get',
content='',
content_type='application/json')
# Support dev ssl ca cert
ssl_dev_ca = current_app.config.get('SSL_DEV_CA')
if ssl_dev_ca is not None:
assert os.path.isdir(ssl_dev_ca), \
'SSL_DEV_CA must be a dir with hashed dev ca certs'
headers = {
'Authorization': hawk.request_header,
'Content-Type': 'application/json',
}
                response = get(identity_preference_url, headers=headers, verify=ssl_dev_ca)
                identity_preference = response.json()['preferences'].pop()
notification_info = send_notifications(message, identity_preference)
notifications.append(notification_info)
policy.last_notified = current_time
except JSONDecodeError:
                logger.warning('Invalid JSON response from {}'.format(identity_preference_url))
session.add_all(policies)
session.commit()
return {
'notifications': notifications,
}
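# Example request body for put_message (a sketch: the field names mirror the
# Message/Policy attributes used above; the exact API schema may differ):
#     {
#         "shortMessage": "Tree closure",
#         "message": "mozilla-central is closed for maintenance.",
#         "deadline": "2017-06-01T12:00:00",
#         "policies": [
#             {"identity": "sheriff",
#              "urgency": "high",
#              "start_timestamp": "2017-06-01T00:00:00",
#              "stop_timestamp": "2017-06-01T12:00:00",
#              "frequency": {"minutes": 30}}
#         ]
#     }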
| srfraser/services | src/releng_notification_policy/releng_notification_policy/api.py | Python | mpl-2.0 | 8,694 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <[email protected]>
#
from django.forms import ModelForm
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.forms import UserCreationForm, AdminPasswordChangeForm
from agent.models import AgentProfile, Agent
from agent.function_def import manager_list
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Div
class AgentPasswordChangeForm(AdminPasswordChangeForm):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.form_class = 'well'
self.helper.layout = Layout(
Fieldset('', 'password1', 'password2', css_class='col-md-4')
)
super(AgentPasswordChangeForm, self).__init__(*args, **kwargs)
class AgentCreationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(AgentCreationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.disable_csrf = False
self.helper.form_class = 'well'
self.helper.layout = Layout(
Fieldset('', 'username', 'password1', 'password2', css_class='col-md-6 col-xs-8')
)
class AgentNameChangeForm(UserChangeForm):
"""AgentNameChangeForm is used to change agent username"""
class Meta:
model = Agent
fields = ["username"]
def __init__(self, *args, **kwargs):
super(AgentNameChangeForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['class'] = "form-control"
class AgentProfileForm(ModelForm):
"""AgentProfileForm is used to change agent profile"""
class Meta:
model = AgentProfile
exclude = ('is_agent', )
def __init__(self, *args, **kwargs):
super(AgentProfileForm, self).__init__(*args, **kwargs)
self.fields['manager'].choices = manager_list()
        for i in self.fields:
self.fields[i].widget.attrs['class'] = "form-control"
class AgentChangeDetailExtendForm(ModelForm):
"""A form used to change the detail of a agent in the manager UI."""
class Meta:
model = AgentProfile
fields = ["type", "call_timeout", "contact", "status",
"no_answer_delay_time", "max_no_answer", "wrap_up_time",
"reject_delay_time", "busy_delay_time"]
def __init__(self, user, *args, **kwargs):
self.user = user
super(AgentChangeDetailExtendForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.disable_csrf = False
css_class = 'col-md-6'
self.helper.layout = Layout(
Div(
Div('type', css_class=css_class),
Div('call_timeout', css_class=css_class),
Div('contact', css_class=css_class),
Div('status', css_class=css_class),
Div('no_answer_delay_time', css_class=css_class),
Div('max_no_answer', css_class=css_class),
Div('wrap_up_time', css_class=css_class),
Div('reject_delay_time', css_class=css_class),
Div('busy_delay_time', css_class=css_class),
css_class='row'
),
)
class AgentDetailExtendForm(ModelForm):
"""A form used to change the detail of a agent in the Agent UI."""
class Meta:
model = AgentProfile
# fields = ["address", "city", "state", "country", "zip_code",
# "phone_no", "fax", "company_name", "company_website",
# "language", "note"]
fields = ["address"]
def __init__(self, user, *args, **kwargs):
self.user = user
super(AgentDetailExtendForm, self).__init__(*args, **kwargs)
self.fields['address'].widget.attrs['ng-model'] = "user.address"
"""
self.fields['city'].widget.attrs['ng-model'] = "user.city"
self.fields['state'].widget.attrs['ng-model'] = "user.state"
self.fields['country'].widget.attrs['ng-model'] = "user.country"
self.fields['zip_code'].widget.attrs['ng-model'] = "user.zip_code"
self.fields['phone_no'].widget.attrs['ng-model'] = "user.phone_no"
self.fields['fax'].widget.attrs['ng-model'] = "user.fax"
self.fields['company_name'].widget.attrs['ng-model'] = "user.company_name"
self.fields['company_website'].widget.attrs['ng-model'] = "user.company_website"
self.fields['language'].widget.attrs['ng-model'] = "user.language"
self.fields['note'].widget.attrs['ng-model'] = "user.note"
"""
| tarikgwa/nfd | newfies/agent/forms.py | Python | mpl-2.0 | 5,036 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Audit groups and removes inactive users.
"""
import datetime
from django.contrib.auth.models import Group, User
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django.utils import timezone
from crashstats.authentication.models import PolicyException
VALID_EMAIL_DOMAINS = ("mozilla.com", "mozilla.org")
def get_or_create_auditgroups_user():
try:
return User.objects.get(username="auditgroups")
except User.DoesNotExist:
return User.objects.create_user(
username="auditgroups",
email="[email protected]",
first_name="SYSTEMUSER",
last_name="DONOTDELETE",
is_active=False,
)
def delta_days(since_datetime):
"""Return the delta in days between now and since_datetime"""
return (timezone.now() - since_datetime).days
class Command(BaseCommand):
help = "Audits Django groups and removes inactive users."
def add_arguments(self, parser):
parser.add_argument(
"--dry-run", action="store_true", help="Whether or not to do a dry run."
)
def is_employee_or_exception(self, user):
# If this user has a policy exception, then they're allowed
if PolicyException.objects.filter(user=user).exists():
return True
        # str.endswith() accepts a tuple; anchor on "@" so that addresses like
        # "[email protected]" don't slip through
        if user.email.endswith(tuple("@" + domain for domain in VALID_EMAIL_DOMAINS)):
return True
return False
def audit_hackers_group(self, dryrun=True):
# Figure out the cutoff date for inactivity
cutoff = timezone.now() - datetime.timedelta(days=365)
self.stdout.write("Using cutoff: %s" % cutoff)
# Get all users in the "Hackers" group
try:
hackers_group = Group.objects.get(name="Hackers")
except Group.DoesNotExist:
self.stdout.write('"Hackers" group does not exist.')
return
# Go through the users and mark the ones for removal
users_to_remove = []
for user in hackers_group.user_set.all():
if not user.is_active:
users_to_remove.append((user, "!is_active"))
elif not self.is_employee_or_exception(user):
users_to_remove.append((user, "not employee or exception"))
elif user.last_login and user.last_login < cutoff:
days = delta_days(user.last_login)
# This user is inactive. Check for active API tokens.
active_tokens = [
token for token in user.token_set.all() if not token.is_expired
]
if not active_tokens:
users_to_remove.append((user, "inactive %sd, no tokens" % days))
else:
self.stdout.write(
"SKIP: %s (inactive %sd, but has active tokens: %s)"
% (user.email, days, len(active_tokens))
)
auditgroups_user = get_or_create_auditgroups_user()
# Log or remove the users that have been marked
for user, reason in users_to_remove:
self.stdout.write("Removing: %s (%s)" % (user.email, reason))
if dryrun is False:
hackers_group.user_set.remove(user)
# Toss a LogEntry in so we can keep track of when people get
# de-granted and what did it
LogEntry.objects.log_action(
user_id=auditgroups_user.id,
content_type_id=ContentType.objects.get_for_model(User).pk,
object_id=user.pk,
object_repr=user.email,
action_flag=CHANGE,
change_message="Removed %s from hackers--%s."
% (user.email, reason),
)
self.stdout.write("Total removed: %s" % len(users_to_remove))
def handle(self, **options):
dryrun = options["dry_run"]
if dryrun:
self.stdout.write("Dry run--this is what we think should happen.")
self.audit_hackers_group(dryrun=dryrun)
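# Typical invocations of this management command:
#     python manage.py auditgroups --dry-run   # only report what would be removed
#     python manage.py auditgroups             # actually remove flagged users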
| lonnen/socorro | webapp-django/crashstats/authentication/management/commands/auditgroups.py | Python | mpl-2.0 | 4,373 |
# Copyright (c) 2015-2016 Contributors as noted in the AUTHORS file
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import signal
import sys
from functools import partial
from isac import IsacNode, IsacValue
from isac.tools import green
class DemoNode(object):
def __init__(self):
self.node = IsacNode('demo-zwave-react')
green.signal(signal.SIGTERM, partial(self.sigterm_handler))
self.action_value = IsacValue(self.node, 'zwave://0xdefbc93b.power_strip001/switch_binary/1/switch', survey_last_value=False, survey_static_tags=False)
self.sensor_value = IsacValue(self.node, 'zwave://0xdefbc93b.13/alarm/access_control', survey_last_value=False, survey_static_tags=False)
self.sensor_value.observers += self.value_update
def value_update(self, iv, value, timestamp, tags):
        print('Received update: %s' % value)
if value == 0x16: # Open
self.action_value.value = True
elif value == 0x17: # Close
self.action_value.value = False
def sigterm_handler(self):
self.node.shutdown()
sys.exit(0)
if __name__ == '__main__':
demo = DemoNode()
try:
        print('serving')
demo.node.serve_forever()
except KeyboardInterrupt:
demo.node.shutdown()
| Alidron/demo-zwave-react | demo-zwave-react.py | Python | mpl-2.0 | 1,436 |
try:
import unittest.mock as mock
except ImportError:
import mock
import subprocess
import dags.utils.helpers as helpers
from dags.operators.postgres_to_s3_transfer import PostgresToS3Transfer
class TestPostgresToS3Transfer(object):
def test_its_created_successfully(self):
operator = PostgresToS3Transfer(
task_id='task_id',
postgres_conn_id='postgres_conn_id',
s3_conn_id='s3_conn_id',
s3_url='s3://bucket/key'
)
assert operator
assert operator.task_id == 'task_id'
@mock.patch('subprocess.Popen')
@mock.patch('boto3.resource', autospec=True)
@mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
def test_execute_streams_url_data_to_s3(self, get_connection_mock, boto3_mock, popen_mock):
operator = PostgresToS3Transfer(
task_id='task_id',
postgres_conn_id='postgres_conn_id',
s3_conn_id='s3_conn_id',
s3_url='s3://bucket/key'
)
operator.execute(None)
boto3_mock().Bucket.assert_called_with('bucket')
boto3_mock().Bucket().upload_fileobj.assert_called_with(
popen_mock().stdout.__enter__(), # Needs __enter__() because it's called in a context manager
'key'
)
@mock.patch('subprocess.Popen')
@mock.patch('boto3.resource', autospec=True)
@mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
def test_execute_calls_pg_dump_correctly(self, get_connection_mock, boto3_mock, popen_mock):
operator = PostgresToS3Transfer(
task_id='task_id',
postgres_conn_id='postgres_conn_id',
s3_conn_id='s3_conn_id',
s3_url='s3://bucket/key'
)
operator.execute(None)
expected_command = [
'pg_dump',
'-Fc',
helpers.get_postgres_uri(operator.postgres_conn_id),
]
popen_mock.assert_called_with(expected_command, stdout=subprocess.PIPE)
@mock.patch('subprocess.Popen')
@mock.patch('boto3.resource', autospec=True)
@mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
def test_execute_dumps_only_whitelisted_tables(self, get_connection_mock, boto3_mock, popen_mock):
tables = [
'users',
'log',
]
operator = PostgresToS3Transfer(
task_id='task_id',
postgres_conn_id='postgres_conn_id',
tables=tables,
s3_conn_id='s3_conn_id',
s3_url='s3://bucket/key'
)
operator.execute(None)
popen_command = popen_mock.call_args[0][0]
# Ignore executable and the Postgres URI, as the params need to be
# between these two
pg_dump_params_without_uri = popen_command[1:-1]
for table in tables:
assert '--table={}'.format(table) in pg_dump_params_without_uri
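# A minimal sketch of the operator under test, reconstructed from the
# expectations above (the real implementation in
# dags/operators/postgres_to_s3_transfer.py may differ; parse_s3_url is a
# hypothetical helper splitting "s3://bucket/key" into its two parts):
#     class PostgresToS3Transfer(BaseOperator):
#         def execute(self, context):
#             command = ['pg_dump', '-Fc']
#             for table in (self.tables or []):
#                 command.append('--table={}'.format(table))
#             command.append(helpers.get_postgres_uri(self.postgres_conn_id))
#             bucket, key = parse_s3_url(self.s3_url)
#             process = subprocess.Popen(command, stdout=subprocess.PIPE)
#             with process.stdout as dump_stream:
#                 boto3.resource('s3').Bucket(bucket).upload_fileobj(dump_stream, key)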
| opentrials/opentrials-airflow | tests/dags/operators/test_postgres_to_s3_transfer.py | Python | mpl-2.0 | 2,927 |
# Django settings for tests2 project.
import django
import sys
sys.path.append("../..")
sys.path.append("../../../../..")
from siteconf import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB_W,
'USER': MYSQL_USER_W,
'PASSWORD': MYSQL_PASSWORD_W,
'HOST': MYSQL_HOST_W,
'PORT': MYSQL_PORT_W,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xvds$ppv5ha75qg1yx3aax7ugr_2*fmdrc(lrc%x7kdez-63xn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = ''
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tests2.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# django.contrib.admin is needed because we call django_logout()
    # and it expects some templates to be registered
'django.contrib.admin',
'djangosaml2',
'testprofiles',
)
AUTH_PROFILE_MODULE = 'testprofiles.TestProfile'
if django.VERSION >= (1, 7):
AUTH_USER_MODEL = 'testprofiles.TestUser'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'djangosaml2': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
}
if django.VERSION < (1, 4):
del LOGGING['filters']['require_debug_false']
del LOGGING['handlers']['mail_admins']['filters']
AUTHENTICATION_BACKENDS = (
'djangosaml2.backends.Saml2Backend',
)
| EduPepperPDTesting/pepper2013-testing | lms/djangoapps/djangosaml2/unit_tests/settings.py | Python | agpl-3.0 | 5,559 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
_inherit = "website"
@api.model
def page_search_dependencies(self, page_id=False):
dep = super(Website, self).page_search_dependencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
path = page.url
dom = [
('content', 'ilike', path)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:
page_key = _('Blog Posts')
dep[page_key] = []
for p in posts:
dep[page_key].append({
                'text': _('Blog Post <b>%s</b> seems to have a link to this page!') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
@api.model
def page_search_key_dependencies(self, page_id=False):
dep = super(Website, self).page_search_key_dependencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
key = page.key
dom = [
('content', 'ilike', key)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:
page_key = _('Blog Posts')
dep[page_key] = []
for p in posts:
dep[page_key].append({
                'text': _('Blog Post <b>%s</b> seems to be calling this file!') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
def get_suggested_controllers(self):
suggested_controllers = super(Website, self).get_suggested_controllers()
suggested_controllers.append((_('Blog'), url_for('/blog'), 'website_blog'))
return suggested_controllers
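# Example return value of page_search_dependencies (a sketch; the link comes
# from post.website_url and will look different in a real database):
#     {'Blog Posts': [
#         {'text': 'Blog Post <b>News</b> seems to have a link to this page!',
#          'item': 'News',
#          'link': '/blog/1/post/3'},
#     ]}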
| ygol/odoo | addons/website_blog/models/website.py | Python | agpl-3.0 | 2,036 |
# -*- coding: utf-8 -*-
# Copyright 2016 Eficent Business and IT Consulting Services S.L.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl-3.0).
from openerp.tests import common
from openerp.tools import SUPERUSER_ID
class TestPurchaseRequest(common.TransactionCase):
def setUp(self):
super(TestPurchaseRequest, self).setUp()
self.purchase_request = self.env['purchase.request']
self.purchase_request_line = self.env['purchase.request.line']
def test_purchase_request_status(self):
vals = {
'picking_type_id': self.env.ref('stock.picking_type_in').id,
'requested_by': SUPERUSER_ID,
}
purchase_request = self.purchase_request.create(vals)
vals = {
'request_id': purchase_request.id,
'product_id': self.env.ref('product.product_product_13').id,
'product_uom_id': self.env.ref('product.product_uom_unit').id,
'product_qty': 5.0,
}
self.purchase_request_line.create(vals)
self.assertEqual(
purchase_request.is_editable, True,
'Should be editable')
purchase_request.button_to_approve()
self.assertEqual(
purchase_request.state, 'to_approve',
'Should be in state to_approve')
self.assertEqual(
purchase_request.is_editable, False,
'Should not be editable')
purchase_request.button_draft()
self.assertEqual(
purchase_request.is_editable, True,
'Should be editable')
self.assertEqual(
purchase_request.state, 'draft',
'Should be in state draft')
self.purchase_request_line.unlink()
| SerpentCS/purchase-workflow | purchase_request/tests/test_purchase_request.py | Python | agpl-3.0 | 1,728 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Open Solutions Finland 2013.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Exporting and Intrastat",
"version" : "1.0",
"author" : "Open Solutions Finland",
"description" : """
OpenERP module for exporting goods. Intrastat additions to invoices. Adds country of origin and customs code fields to products.
""",
"website" : "www.opensolutions.fi",
"depends" : ["base","product","sale","stock"],
"category" : "Generic Modules",
"init_xml" : [],
"demo_xml" : [],
"data" : [
'product_extension_view.xml',
'invoice_extension_view.xml'
],
'test': [
],
'installable': True,
'active': False,
'certificate': '',
}
| OpenSolutionsFinland/exporting | __openerp__.py | Python | agpl-3.0 | 1,635 |
from datetime import datetime
from collections import defaultdict
DEFAULT_RELEASE = datetime(1970, 1, 1)
_SORT_KEY = lambda eps: eps[0].released or DEFAULT_RELEASE
class PodcastGrouper(object):
"""Groups episodes of two podcasts based on certain features
The results are sorted by release timestamp"""
DEFAULT_RELEASE = datetime(1970, 1, 1)
def __init__(self, podcasts):
if not podcasts or (None in podcasts):
raise ValueError("podcasts must not be None")
self.podcasts = podcasts
def __get_episodes(self):
episodes = {}
for podcast in self.podcasts:
episodes.update(dict((e.id, e.id) for e in podcast.episode_set.all()))
return episodes
def group(self, get_features):
episodes = self.__get_episodes()
episode_groups = defaultdict(list)
episode_features = map(get_features, episodes.items())
for features, episode_id in episode_features:
episode = episodes[episode_id]
episode_groups[features].append(episode)
# groups = sorted(episode_groups.values(), key=_SORT_KEY)
groups = episode_groups.values()
return enumerate(groups)
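# Example (a sketch): get_features receives each (episode_id, episode_id) item
# and must return a (features, episode_id) pair; grouping on the id itself
# puts every episode into its own group:
#     grouper = PodcastGrouper([podcast_a, podcast_b])
#     for number, group in grouper.group(lambda item: (item[0], item[0])):
#         print(number, group)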
| gpodder/mygpo | mygpo/administration/group.py | Python | agpl-3.0 | 1,212 |
import where_query
| galtys/galtys-addons | account_move_line_where_query/__init__.py | Python | agpl-3.0 | 19 |
# This file is part of django-popularity.
#
# django-popularity: A generic view- and popularity tracking pluggable for Django.
# Copyright (C) 2008-2010 Mathijs de Bruin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from math import log
import django
from django.db import models, connection
from django.db.models import F, Max
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
try:
from django.utils.timezone import now
except ImportError:
    import datetime
    now = datetime.datetime.now
# Settings for popularity:
# - POPULARITY_LISTSIZE; default size of the lists returned by get_most_popular etc.
# - POPULARITY_CHARAGE; characteristic age used for measuring the popularity
from django.conf import settings
POPULARITY_CHARAGE = float(getattr(settings, 'POPULARITY_CHARAGE', 3600))
POPULARITY_LISTSIZE = int(getattr(settings, 'POPULARITY_LISTSIZE', 10))
# Maybe they wrote their own mysql backend that *is* mysql?
if django.VERSION < (1, 2, 0):
COMPATIBLE_DATABASES = getattr(settings, 'POPULARITY_COMPATIBILITY_OVERRIDE', None) or ('mysql',)
else:
COMPATIBLE_DATABASES = getattr(settings, 'POPULARITY_COMPATIBILITY_OVERRIDE', None) or ('django.db.backends.mysql',)
class ViewTrackerQuerySet(models.query.QuerySet):
_LOGSCALING = log(0.5)
def __init__(self, model=None, *args, **kwargs):
        super(ViewTrackerQuerySet, self).__init__(model, *args, **kwargs)
if django.VERSION < (1, 2, 0):
self._DATABASE_ENGINE = getattr(settings, 'DATABASE_ENGINE')
else:
self._DATABASE_ENGINE = settings.DATABASES.get(kwargs.get('using', None) or 'default')['ENGINE']
self._SQL_NOW = "'%s'"
self._SQL_AGE = 'TIMESTAMPDIFF(SECOND, added, %(now)s)'
self._SQL_RELVIEWS = '(views/%(maxviews)d)'
self._SQL_RELAGE = '(%(age)s/%(maxage)d)'
self._SQL_NOVELTY = '(%(factor)s * EXP(%(logscaling)s * %(age)s/%(charage)s) + %(offset)s)'
self._SQL_POPULARITY = '(views/%(age)s)'
self._SQL_RELPOPULARITY = '(%(popularity)s/%(maxpopularity)s)'
self._SQL_RANDOM = connection.ops.random_function_sql()
self._SQL_RELEVANCE = '%(relpopularity)s * %(novelty)s'
self._SQL_ORDERING = '%(relview)f * %(relview_sql)s + \
%(relage)f * %(relage_sql)s + \
%(novelty)f * %(novelty_sql)s + \
%(relpopularity)f * %(relpopularity_sql)s + \
%(random)f * %(random_sql)s + \
%(relevance)f * %(relevance_sql)s + \
%(offset)f'
def _get_db_datetime(self, value=None):
""" Retrieve an SQL-interpretable representation of the datetime value, or
now if no value is specified. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not value:
value = now()
_SQL_NOW = self._SQL_NOW % connection.ops.value_to_db_datetime(value)
return _SQL_NOW
def _add_extra(self, field, sql):
""" Add the extra parameter 'field' with value 'sql' to the queryset (without
        removing previous parameters, as opposed to the normal .extra method). """
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
logging.debug(sql)
clone = self._clone()
clone.query.add_extra({field: sql}, None, None, None, None, None)
return clone
def select_age(self):
""" Adds age with regards to NOW to the QuerySet
fields. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
return self._add_extra('age', _SQL_AGE)
def select_relviews(self, relative_to=None):
""" Adds 'relview', a normalized viewcount, to the QuerySet.
        The normalization occurs relative to the maximum number of views
        in the current QuerySet, unless specified in 'relative_to'.
        The relative number of views should always be in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
maxviews = relative_to.aggregate(models.Max('views'))['views__max']
SQL_RELVIEWS = self._SQL_RELVIEWS % {'maxviews': maxviews}
return self._add_extra('relviews', SQL_RELVIEWS)
def select_relage(self, relative_to=None):
""" Adds 'relage', a normalized age, relative to the QuerySet.
        The normalization occurs relative to the maximum age
        in the current QuerySet, unless specified in 'relative_to'.
        The relative age should always be in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
maxage = relative_to.extra(select={'maxage': 'MAX(%s)' % _SQL_AGE}).values('maxage')[0]['maxage']
SQL_RELAGE = self._SQL_RELAGE % {'age': _SQL_AGE,
'maxage': maxage}
return self._add_extra('relage', SQL_RELAGE)
def select_novelty(self, minimum=0.0, charage=None):
""" Compute novelty - this is the age muliplied by a characteristic time.
After a this characteristic age, the novelty will be half its original
value (if the minimum is 0). The minimum is needed when this value
is used in multiplication.
The novelty value is always in the range [0, 1]. """
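        # Worked example of the formula below: with offset 0 and factor 1 it
        # reduces to exp(ln(0.5) * age / charage), so an item exactly charage
        # seconds old scores 0.5 and one of 2 * charage seconds scores 0.25;
        # with the default POPULARITY_CHARAGE of 3600s, novelty halves hourly.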
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
offset = minimum
factor = 1 / (1 - offset)
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage:
charage = POPULARITY_CHARAGE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage,
'offset': offset,
'factor': factor}
return self._add_extra('novelty', SQL_NOVELTY)
def select_popularity(self):
""" Compute popularity, which is defined as: views/age. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
return self._add_extra('popularity', SQL_POPULARITY)
def select_relpopularity(self, relative_to=None):
""" Compute relative popularity, which is defined as: (views/age)/MAX(views/age).
The relpopularity value should always be in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(models.Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
        return self._add_extra('relpopularity', SQL_RELPOPULARITY)
def select_random(self):
""" Returns the original QuerySet with an extra field 'random' containing a random
value in the range [0,1] to use for ordering.
"""
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
        SQL_RANDOM = self._SQL_RANDOM
return self._add_extra('random', SQL_RANDOM)
def select_relevance(self, relative_to=None, minimum_novelty=0.1, charage_novelty=None):
""" This adds the multiplication of novelty and relpopularity to the QuerySet, as 'relevance'. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(models.Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage_novelty:
charage_novelty = POPULARITY_CHARAGE
offset = minimum_novelty
factor = 1 / (1 - offset)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage_novelty,
'offset': offset,
'factor': factor}
SQL_RELEVANCE = self._SQL_RELEVANCE % {'novelty': SQL_NOVELTY,
'relpopularity': SQL_RELPOPULARITY}
return self._add_extra('relevance', SQL_RELEVANCE)
def select_ordering(self, relview=0.0, relage=0.0, novelty=0.0, relpopularity=0.0, random=0.0, relevance=0.0, offset=0.0, charage_novelty=None, relative_to=None):
""" Creates an 'ordering' field used for sorting the current QuerySet according to
specified criteria, given by the parameters.
All the parameters given here are relative to one another, so if you specify
random=1.0 and relage=3.0 then the relative age is 3 times as important.
Please do note that the relative age is the only value here that INCREASES over time so
you might want to specify a NEGATIVE value here and use an offset, just to compensate.
"""
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, \
            'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
assert abs(relview + relage + novelty + relpopularity + random + relevance) > 0, 'You should at least give me something to order by!'
maxviews = relative_to.aggregate(models.Max('views'))['views__max']
SQL_RELVIEWS = self._SQL_RELVIEWS % {'maxviews': maxviews}
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
maxage = relative_to.extra(select={'age': _SQL_AGE}).aggregate(Max('age'))['age__max']
SQL_RELAGE = self._SQL_RELAGE % {'age': _SQL_AGE,
'maxage': maxage}
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage_novelty:
charage_novelty = POPULARITY_CHARAGE
# Here, because the ordering field is not normalize, we don't have to bother about a minimum for the novelty
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage_novelty,
'offset': 0.0,
'factor': 1.0}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
        SQL_RANDOM = self._SQL_RANDOM
SQL_RELEVANCE = self._SQL_RELEVANCE % {'novelty': SQL_NOVELTY,
'relpopularity': SQL_RELPOPULARITY}
SQL_ORDERING = self._SQL_ORDERING % {'relview': relview,
'relage': relage,
'novelty': novelty,
'relpopularity': relpopularity,
'relevance': relevance,
'random': random,
'relview_sql': SQL_RELVIEWS,
'relage_sql': SQL_RELAGE,
'novelty_sql': SQL_NOVELTY,
'relpopularity_sql': SQL_RELPOPULARITY,
'random_sql': SQL_RANDOM,
'relevance_sql': SQL_RELEVANCE}
return self._add_extra('ordering', SQL_ORDERING)
def get_recently_viewed(self, limit=None):
""" Returns the most recently viewed objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-viewed')[:limit]
def get_recently_added(self, limit=None):
""" Returns the objects with the most rcecent added. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-added')[:limit]
def get_most_popular(self, limit=None):
""" Returns the most popular objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.select_popularity().order_by('-popularity')[:limit]
def get_most_viewed(self, limit=None):
""" Returns the most viewed objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-views')[:limit]
def get_for_model(self, model):
""" Returns the objects and its views for a certain model. """
return self.get_for_models([model])
def get_for_models(self, models):
""" Returns the objects and its views for specified models. """
cts = []
for model in models:
cts.append(ContentType.objects.get_for_model(model))
return self.filter(content_type__in=cts)
def get_for_object(self, content_object, create=False):
""" Gets the viewtracker for specified object, or creates one
if requested. """
ct = ContentType.objects.get_for_model(content_object)
if create:
[viewtracker, created] = self.get_or_create(content_type=ct, object_id=content_object.pk)
else:
viewtracker = self.get(content_type=ct, object_id=content_object.pk)
return viewtracker
def get_for_objects(self, objects):
""" Gets the viewtrackers for specified objects, or creates them
if requested. """
qs = self.none()
for obj in objects:
ct = ContentType.objects.get_for_model(obj.__class__)
qs = qs | self.filter(content_type=ct, object_id=obj.pk)
return self & qs
def get_for_queryset(self, qs):
""" Gets the viewtrackers for the objects in a specified queryset. """
ct = ContentType.objects.get_for_model(qs.model)
return self.filter(content_type=ct, object_id__in=qs.values('pk'))
def get_object_list(self):
""" Gets a list with all the objects tracked in the current queryset. """
obj_list = []
for obj in self:
obj_list.append(obj.content_object)
return obj_list
def get_querysets(self):
""" Gets a list of all the querysets for the objects tracked in the current queryset. """
qs_list = []
for ct_id in self.values('content_type').distinct():
ct = ContentType.objects.get_for_id(ct_id)
qs_inner = self.filter(content_type=ct_id).values('object_id')
qs = ct.model_class().objects.filter(pk__in=qs_inner)
qs_list.append(qs)
return qs_list
class ViewTrackerManager(models.Manager):
""" Manager methods to do stuff like:
ViewTracker.objects.get_views_for_model(MyModel).
For documentation, please refer the ViewTrackerQuerySet object.
"""
def get_query_set(self):
return ViewTrackerQuerySet(self.model)
def select_age(self, *args, **kwargs):
return self.get_query_set().select_age(*args, **kwargs)
def select_relage(self, *args, **kwargs):
return self.get_query_set().select_relage(*args, **kwargs)
def select_relviews(self, *args, **kwargs):
return self.get_query_set().select_relviews(*args, **kwargs)
def select_novelty(self, *args, **kwargs):
return self.get_query_set().select_novelty(*args, **kwargs)
def select_popularity(self, *args, **kwargs):
return self.get_query_set().select_popularity(*args, **kwargs)
def select_relpopularity(self, *args, **kwargs):
return self.get_query_set().select_relpopularity(*args, **kwargs)
def select_random(self, *args, **kwargs):
return self.get_query_set().select_random(*args, **kwargs)
def select_ordering(self, *args, **kwargs):
return self.get_query_set().select_ordering(*args, **kwargs)
def get_recently_added(self, *args, **kwargs):
return self.get_query_set().get_recently_added(*args, **kwargs)
def get_recently_viewed(self, *args, **kwargs):
return self.get_query_set().get_recently_viewed(*args, **kwargs)
def get_most_viewed(self, *args, **kwargs):
return self.get_query_set().get_most_viewed(*args, **kwargs)
def get_most_popular(self, *args, **kwargs):
return self.get_query_set().get_most_popular(*args, **kwargs)
def get_for_model(self, *args, **kwargs):
return self.get_query_set().get_for_model(*args, **kwargs)
def get_for_models(self, *args, **kwargs):
return self.get_query_set().get_for_models(*args, **kwargs)
def get_for_object(self, *args, **kwargs):
return self.get_query_set().get_for_object(*args, **kwargs)
def get_for_objects(self, *args, **kwargs):
return self.get_query_set().get_for_objects(*args, **kwargs)
def get_for_queryset(self, *args, **kwargs):
return self.get_query_set().get_for_queryset(*args, **kwargs)
def get_object_list(self, *args, **kwargs):
return self.get_query_set().get_object_list(*args, **kwargs)
class ViewTracker(models.Model):
""" The ViewTracker object does exactly what it's supposed to do:
track the amount of views for an object in order to create make
a popularity rating."""
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
added = models.DateTimeField(auto_now_add=True)
viewed = models.DateTimeField(auto_now=True)
views = models.PositiveIntegerField(default=0)
objects = ViewTrackerManager()
class Meta:
get_latest_by = 'viewed'
ordering = ['-views', '-viewed', 'added']
unique_together = ('content_type', 'object_id')
def __unicode__(self):
return u"%s, %d views" % (self.content_object, self.views)
@classmethod
def add_view_for(cls, content_object):
""" This increments the viewcount for a given object. """
ct = ContentType.objects.get_for_model(content_object)
assert ct != ContentType.objects.get_for_model(cls), 'Cannot add ViewTracker for ViewTracker.'
qs = cls.objects.filter(content_type=ct, object_id=content_object.pk)
assert qs.count() == 0 or qs.count() == 1, 'More than one ViewTracker for object %s' % content_object
rows = qs.update(views=F('views') + 1, viewed=now())
        # update() touched no rows, so no tracker exists yet: create one
if not rows:
qs.create(content_type=ct, object_id=content_object.pk, views=1, viewed=now())
logging.debug('ViewTracker created for object %s' % content_object)
else:
logging.debug('Views updated to %d for %s' % (qs[0].views, content_object))
return qs[0]
@classmethod
def get_views_for(cls, content_object):
""" Gets the total number of views for content_object. """
""" If we don't have any views, return 0. """
try:
viewtracker = cls.objects.get_for_object(content_object)
except ViewTracker.DoesNotExist:
return 0
return viewtracker.views
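# Typical usage (a sketch; `article` stands for any tracked model instance):
#     ViewTracker.add_view_for(article)         # bump (or create) the counter
#     ViewTracker.get_views_for(article)        # total views, 0 if untracked
#     ViewTracker.objects.get_most_popular(5)   # five most popular trackers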
| carschar/django-popularity | popularity/models.py | Python | agpl-3.0 | 22,870 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 Jens Hoffmann (hoffmaje)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from django.db import models
from django.contrib.auth.models import User
| hoffmaje/layla | layla/vocabularymanager/models.py | Python | agpl-3.0 | 240 |
# PAPAS Access Point Administration System
# Copyright (c) 2010 Revolution Linux inc. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: UTF-8 -*-
from django.utils.translation import ugettext as _
from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group#,User #possible accès User
from django.conf import settings
INSTALLED_APPS = settings.INSTALLED_APPS
LDAP_CONNECTOR=False
if 'apmanager.ldapconnector' in INSTALLED_APPS:
from apmanager.ldapconnector.models import LDAPGroup
LDAP_CONNECTOR=True
from decimal import Decimal
DATABASE_CHOICE = (
('MY', 'MySQL'),
('PG', 'PostgreSQL'),
)
ENCODING_CHOICE = (
('8859', 'ISO-8859-1'),
('UTF8', 'Unicode'),
)
class DataSource(models.Model):
name = models.CharField(max_length = 100, unique=True,
help_text=u"A name to easily identify this data source in the administration interface.")
database_name = models.CharField(max_length = 30,
help_text=u"The name of the database containing the data for the report.")
database_type = models.CharField(max_length = 2, choices = DATABASE_CHOICE)
host = models.CharField(max_length = 100)
port = models.PositiveIntegerField(
help_text=u"Leave blank to use default port.", blank=True, null=True)
user = models.CharField(max_length = 100)
password = models.CharField(max_length = 100,
help_text=u"<strong>Warning</strong> : the password will appear in clear text at the screen.")
data_encoding = models.CharField(max_length = 4, choices = ENCODING_CHOICE,
help_text=u"""Indicates the native data format of the database.
Change this setting if accents are incorrectly displayed for this data source.""")
def __unicode__(self):
return self.name
class Report(models.Model):
title = models.CharField(max_length = 300)
data_source = models.ForeignKey(DataSource)
owner = models.CharField(max_length = 150, blank=True, null=True,
help_text=u"The name of the person maintaining this report.")
sql = models.TextField()
data_filter = models.CharField(max_length = 150, blank=True, null=True,
help_text=u"""A Python function to call to alter the data.
The parameters to the function are : sql_column_name, data.
You must include the full module path to the function, ex:
apmanager.contrib.datafilters.rt_ticket_link""")
if LDAP_CONNECTOR:
        allowed_groups = models.ManyToManyField(LDAPGroup, filter_interface=True, null=True, blank=True)
default_order_by = models.CharField(max_length = 150, null=True, blank=True,
help_text=u"""The default sorting for the results.
        The keywords ORDER BY are NOT to be included in this field.
ex: for ORDER BY 1,2 DESC, the value would be "1,2 DESC" """)
def has_sort(self):
if self.default_order_by not in [None,""]:
#there is an order by, verify the query formation
import re
            # Query must end with ORDER BY, optionally followed by a
            # LIMIT [OFFSET] clause, a semicolon and trailing whitespace
            return re.compile(r"ORDER BY\s*(LIMIT\s+\d+(\s+OFFSET\s+\d+)?\s*)?;?\s*$").search(self.sql) is not None
return False
def get_args(self,request):
"""
Gets sql args from:
1) the default report parameter values
2) the GET request
"""
sel_args = {}
for param in self.reportparameter_set.all():
#first get all the default parameter values
sel_args[param.name] = param.defaultvalue
for key,val in request.GET.items():
#now get all parameters supplied from the GET
sel_args[key] = val
return sel_args
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("apmanager.genericsql.views.display_report", args=(self.id,))
def has_footers(self):
return self.reportfooter_set.count()>0
def get_multireports(self):
#First, get all reportparameters
rparams=self.reportparameter_set.all()
ret = []
for rp in rparams:
#Report has multireports
if hasattr(rp,'multireport_set'):
ret.extend(rp.multireport_set.all())
return ret
if LDAP_CONNECTOR:
def verify_user_access(self,username):
allowed_groups = self.allowed_groups.all()
if allowed_groups:
#Verify that the user can access the report
if not LDAPGroup.objects.user_in_groups(username,allowed_groups):
#access DENIED!!!!
return False
else:
return True
else:
def verify_user_access(self,username):
return True
class ReportParameter(models.Model):
name = models.CharField(max_length = 50, verbose_name=u"Parameter name")
defaultvalue = models.CharField(max_length = 300, verbose_name=u"Default value")
report = models.ForeignKey(Report)
display_name = models.CharField(null=True,blank=True,max_length = 100, verbose_name=u"Display name")
display = models.BooleanField(default=False, verbose_name=u"Show in parameter panel")
def __unicode__(self):
return u"%s=%s" % (self.name, self.defaultvalue)
def str_report(self):
return "%s: %s" % (self.report.title, self.name)
def option_dict(self):
"""Return this report parameter as a dictionary suitable
for using in the report param panel"""
ret = {'name':str(self.name),
'display_name':str(self.display_name or self.name),
'param_name':str(self.name)}
if self.defaultvalue:
ret['value'] = str(self.defaultvalue)
ret['enabled'] = True
return ret
class Meta:
verbose_name = "Default report parameter"
ordering = ['report','name']
class ColumnName(models.Model):
report = models.ForeignKey(Report, null=True,blank=True)
sql_column_name = models.CharField(max_length = 40)
display_column_name = models.CharField(max_length = 100)
def __unicode__(self):
return self.display_column_name
class ReportFooter(models.Model):
report = models.ForeignKey(Report)
column_name= models.CharField(max_length = 40, )
function = models.PositiveSmallIntegerField(blank=True, verbose_name=_(u'Agregate function to perform'),
choices=((0, 'None'),
(1, 'Addition'),
(2, 'Average'),
(3, 'Count')))
    label = models.CharField(max_length = 100, blank=True)
def render(self,values_iter):
if self.function == 1: # Addition
sum = Decimal()
for i in values_iter:
sum += Decimal(str(i))
#Prepend label+":" if label not blank
if self.label == "":
return sum
else:
return "%s : %s" % (self.label, sum)
        elif self.function == 2: # Average
sum,count=Decimal(),Decimal()
for i in values_iter:
sum += Decimal(str(i))
count += 1
try:
val = sum/count
except ZeroDivisionError:
val = "0"
#Prepend label+":" if label not blank
if self.label == "":
return val
else:
return "%s : %s" % (self.label, val)
elif self.function == 3: # Count
count=Decimal()
for i in values_iter:
count += 1
#Prepend label+":" if label not blank
if self.label == "":
return count
else:
return "%s : %s" % (self.label, count)
else:
return self.label
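# Example (a sketch): a footer with function 2 (Average) and a label, rendered
# over the values [2, 4, 6], yields "Mean : 4":
#     footer = ReportFooter(column_name='total', function=2, label='Mean')
#     footer.render(iter([2, 4, 6]))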
| veloutin/papas | apmanager/genericsql/models.py | Python | agpl-3.0 | 8,678 |
#!/usr/bin/env python3
# noinspection PyUnresolvedReferences
import init_django
from django.db import transaction
from common.utils import utcnow
from main.archive import DataArchiver
from main.delete import DataDeleter
from main.models import Ranking
from main.purge import purge_player_data
from tasks.base import Command
class Main(Command):
def __init__(self):
super().__init__("Delete ranking and all cache data and ranking data linked to it, used for broken "
"rankings.",
pid_file=True, stoppable=False)
self.add_argument('--delete', dest="delete", action='store_true', default=False,
help="If this is not set, deletes a dry run will be performed instead.")
self.add_argument('--keep-rankings', '-r', dest="keep_rankings", default=None,
help="Comma separated list of rankings to keep.")
def run(self, args, logger):
        keep_ids = [int(i) for i in args.keep_rankings.split(",")] if args.keep_rankings else []
with transaction.atomic():
remove_ids = [r.id for r in Ranking.objects.exclude(id__in=keep_ids)]
data_deleter = DataDeleter(dry_run=not args.delete)
data_archiver = DataArchiver(utcnow(), remove=True)
# Remove rankings.
for remove_id in remove_ids:
data_deleter.delete_ranking(remove_id)
# Archive all rankings except the last.
if args.delete:
rankings = Ranking.objects.order_by("-id")[1:]
for ranking in rankings:
logger.info(f"archiving ranking {ranking.id}")
data_archiver.archive_ranking(ranking, self.check_stop)
else:
logger.info("DRY RUN no archiving of rankings")
# Delete ladders that are no longer needed.
keep_season_ids = {r.season_id for r in Ranking.objects.all()}
data_deleter.delete_ladders(tuple(keep_season_ids))
# Delete cache data that is unused.
data_deleter.agressive_delete_cache_data()
# Purge players and teams.
if args.delete:
purge_player_data(check_stop=self.check_stop)
else:
logger.info("DRY RUN no purge player data")
return 0
if __name__ == '__main__':
Main()()
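# Example invocations:
#     ./tasks/delete_data_for_sample.py --keep-rankings=10,11            # dry run
#     ./tasks/delete_data_for_sample.py --keep-rankings=10,11 --delete   # really delete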
| andersroos/rankedftw | tasks/delete_data_for_sample.py | Python | agpl-3.0 | 2,383 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:'ArroyoEtAl2010SInter'
"""
import numpy as np
from scipy.constants import g
from scipy.special import exp1
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
def _compute_mean(C, g, ctx):
"""
Compute mean according to equation 8a, page 773.
"""
mag = ctx.mag
dis = ctx.rrup
# computing r02 parameter and the average distance to the fault surface
ro2 = 1.4447e-5 * np.exp(2.3026 * mag)
avg = np.sqrt(dis ** 2 + ro2)
# computing fourth term of Eq. 8a, page 773.
trm4 = (exp1(C['c4'] * dis) - exp1(C['c4'] * avg)) / ro2
# computing the mean
mean = C['c1'] + C['c2'] * mag + C['c3'] * np.log(trm4)
# convert from cm/s**2 to 'g'
mean = np.log(np.exp(mean) * 1e-2 / g)
return mean
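# Note: 2.3026 is ln(10), so ro2 = 1.4447e-5 * 10**mag, i.e. the squared
# near-source saturation distance grows tenfold per magnitude unit.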
def _get_stddevs(C):
"""
Return standard deviations as defined in table 2, page 776.
"""
stds = np.array([C['s_t'], C['s_e'], C['s_r']])
return stds
class ArroyoEtAl2010SInter(GMPE):
"""
Implements GMPE developed by Arroyo et al. (2010) for Mexican
subduction interface events and published as:
Arroyo D., García D., Ordaz M., Mora M. A., and Singh S. K. (2010)
"Strong ground-motion relations for Mexican interplate earhquakes",
J. Seismol., 14:769-785.
    The original formulation predicts peak ground acceleration (PGA), in
    cm/s**2, and 5% damped pseudo-acceleration response spectra (PSA) in
    cm/s**2, for the geometric average of the maximum of the two
    horizontal components of ground motion.
    The GMPE predicts values for Mexican interplate events at rock sites
(NEHRP B site condition) in the forearc region.
"""
#: Supported tectonic region type is subduction interface,
#: given that the equations have been derived using Mexican interface
#: events.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTERFACE
#: Supported intensity measure types are spectral acceleration,
#: and peak ground acceleration. See Table 2 in page 776.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}
#: Supported intensity measure component is the geometric average of
    #: the maximum of the two horizontal components.
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GEOMETRIC_MEAN
#: Supported standard deviation types are inter-event, intra-event
#: and total. See Table 2, page 776.
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL,
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT}
    #: Vs30 is declared as a required site parameter, although it does not
    #: enter the functional form
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameter is the magnitude
REQUIRES_RUPTURE_PARAMETERS = {'mag'}
#: Required distance measure is Rrup (closest distance to fault surface)
REQUIRES_DISTANCES = {'rrup'}
def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.compute>`
for spec of input and result values.
"""
for m, imt in enumerate(imts):
C = self.COEFFS[imt]
mean[m] = _compute_mean(C, g, ctx)
sig[m], tau[m], phi[m] = _get_stddevs(C)
#: Equation coefficients for geometric average of the maximum of the two
#: horizontal components, as described in Table 2 on page 776.
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 g_e bias s_t s_e s_r
0.040 3.8123 0.8636 0.5578 0.0150 0.3962 -0.0254 0.8228 0.5179 0.6394
0.045 4.0440 0.8489 0.5645 0.0150 0.3874 -0.0285 0.8429 0.5246 0.6597
0.050 4.1429 0.8580 0.5725 0.0150 0.3731 -0.0181 0.8512 0.5199 0.6740
0.055 4.3092 0.8424 0.5765 0.0150 0.3746 0.0004 0.8583 0.5253 0.6788
0.060 4.3770 0.8458 0.5798 0.0150 0.4192 -0.0120 0.8591 0.5563 0.6547
0.065 4.5185 0.8273 0.5796 0.0150 0.3888 -0.0226 0.8452 0.5270 0.6607
0.070 4.4591 0.8394 0.5762 0.0150 0.3872 -0.0346 0.8423 0.5241 0.6594
0.075 4.5939 0.8313 0.5804 0.0150 0.3775 -0.0241 0.8473 0.5205 0.6685
0.080 4.4832 0.8541 0.5792 0.0150 0.3737 -0.0241 0.8421 0.5148 0.6664
0.085 4.5062 0.8481 0.5771 0.0150 0.3757 -0.0138 0.8344 0.5115 0.6593
0.090 4.4648 0.8536 0.5742 0.0150 0.4031 -0.0248 0.8304 0.5273 0.6415
0.095 4.3940 0.8580 0.5712 0.0150 0.4097 0.0040 0.8294 0.5309 0.6373
0.100 4.3391 0.8620 0.5666 0.0150 0.3841 -0.0045 0.8254 0.5116 0.6477
0.120 4.0505 0.8933 0.5546 0.0150 0.3589 -0.0202 0.7960 0.4768 0.6374
0.140 3.5599 0.9379 0.5350 0.0150 0.3528 -0.0293 0.7828 0.4650 0.6298
0.160 3.1311 0.9736 0.5175 0.0150 0.3324 -0.0246 0.7845 0.4523 0.6409
0.180 2.7012 1.0030 0.4985 0.0150 0.3291 -0.0196 0.7717 0.4427 0.6321
0.200 2.5485 0.9988 0.4850 0.0150 0.3439 -0.0250 0.7551 0.4428 0.6116
0.220 2.2699 1.0125 0.4710 0.0150 0.3240 -0.0205 0.7431 0.4229 0.6109
0.240 1.9130 1.0450 0.4591 0.0150 0.3285 -0.0246 0.7369 0.4223 0.6039
0.260 1.7181 1.0418 0.4450 0.0150 0.3595 -0.0220 0.7264 0.4356 0.5814
0.280 1.4039 1.0782 0.4391 0.0150 0.3381 -0.0260 0.7209 0.4191 0.5865
0.300 1.1080 1.1038 0.4287 0.0150 0.3537 -0.0368 0.7198 0.4281 0.5787
0.320 1.0652 1.0868 0.4208 0.0150 0.3702 -0.0345 0.7206 0.4384 0.5719
0.340 0.8319 1.1088 0.4142 0.0150 0.3423 -0.0381 0.7264 0.4250 0.5891
0.360 0.4965 1.1408 0.4044 0.0150 0.3591 -0.0383 0.7255 0.4348 0.5808
0.380 0.3173 1.1388 0.3930 0.0150 0.3673 -0.0264 0.7292 0.4419 0.5800
0.400 0.2735 1.1533 0.4067 0.0134 0.3956 -0.0317 0.7272 0.4574 0.5653
0.450 0.0990 1.1662 0.4127 0.0117 0.3466 -0.0267 0.7216 0.4249 0.5833
0.500 -0.0379 1.2206 0.4523 0.0084 0.3519 -0.0338 0.7189 0.4265 0.5788
0.550 -0.3512 1.2445 0.4493 0.0076 0.3529 -0.0298 0.7095 0.4215 0.5707
0.600 -0.6897 1.2522 0.4421 0.0067 0.3691 -0.0127 0.7084 0.4304 0.5627
0.650 -0.6673 1.2995 0.4785 0.0051 0.3361 -0.0192 0.7065 0.4096 0.5756
0.700 -0.7154 1.3263 0.5068 0.0034 0.3200 -0.0243 0.7070 0.3999 0.5830
0.750 -0.7015 1.2994 0.5056 0.0029 0.3364 -0.0122 0.7092 0.4113 0.5778
0.800 -0.8581 1.3205 0.5103 0.0023 0.3164 -0.0337 0.6974 0.3923 0.5766
0.850 -0.9712 1.3375 0.5201 0.0018 0.3435 -0.0244 0.6906 0.4047 0.5596
0.900 -1.0970 1.3532 0.5278 0.0012 0.3306 -0.0275 0.6923 0.3980 0.5665
0.950 -1.2346 1.3687 0.5345 0.0007 0.3264 -0.0306 0.6863 0.3921 0.5632
1.000 -1.2600 1.3652 0.5426 0.0001 0.3194 -0.0183 0.6798 0.3842 0.5608
1.100 -1.7687 1.4146 0.5342 0.0001 0.3336 -0.0229 0.6701 0.3871 0.5471
1.200 -2.1339 1.4417 0.5263 0.0001 0.3445 -0.0232 0.6697 0.3931 0.5422
1.300 -2.4122 1.4577 0.5201 0.0001 0.3355 -0.0231 0.6801 0.3939 0.5544
1.400 -2.5442 1.4618 0.5242 0.0001 0.3759 -0.0039 0.6763 0.4146 0.5343
1.500 -2.8509 1.4920 0.5220 0.0001 0.3780 -0.0122 0.6765 0.4159 0.5335
1.600 -3.0887 1.5157 0.5215 0.0001 0.3937 -0.0204 0.6674 0.4187 0.5197
1.700 -3.4884 1.5750 0.5261 0.0001 0.4130 -0.0208 0.6480 0.4164 0.4965
1.800 -3.7195 1.5966 0.5255 0.0001 0.3967 -0.0196 0.6327 0.3985 0.4914
1.900 -4.0141 1.6162 0.5187 0.0001 0.4248 -0.0107 0.6231 0.4062 0.4726
2.000 -4.1908 1.6314 0.5199 0.0001 0.3967 -0.0133 0.6078 0.3828 0.4721
2.500 -5.1104 1.7269 0.5277 0.0001 0.4302 -0.0192 0.6001 0.3936 0.4530
3.000 -5.5926 1.7515 0.5298 0.0001 0.4735 -0.0319 0.6029 0.4148 0.4375
3.500 -6.1202 1.8077 0.5402 0.0001 0.4848 -0.0277 0.6137 0.4273 0.4405
4.000 -6.5318 1.8353 0.5394 0.0001 0.5020 -0.0368 0.6201 0.4393 0.4376
4.500 -6.9744 1.8685 0.5328 0.0001 0.5085 -0.0539 0.6419 0.4577 0.4500
5.000 -7.1389 1.8721 0.5376 0.0001 0.5592 -0.0534 0.6701 0.5011 0.4449
pga 2.4862 0.9392 0.5061 0.0150 0.3850 -0.0181 0.7500 0.4654 0.5882
""")
| gem/oq-engine | openquake/hazardlib/gsim/arroyo_2010.py | Python | agpl-3.0 | 9,135 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bangkok Rubber Module',
'version': '0.1',
'category': 'Tools',
'description': """
""",
'author': 'Mr.Tititab Srisookco',
'website': 'http://www.ineco.co.th',
'summary': '',
'depends': ['account','purchase','sale','stock','product'],
'data': [ ],
'update_xml': [
'security.xml',
'stock_view.xml',
'adempier_view.xml',
],
'images': [],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| titasakgm/brc-stock | openerp/addons/bangkok_rubber/__openerp__.py | Python | agpl-3.0 | 1,546 |
import re
from django.contrib.auth.models import User
from django.db import models
from common.models import ExtraBase
CHALLENGES_TYPE = (
('p', 'playable player'),
('np', 'not playable player'),
)
class Challenge(models.Model, ExtraBase):
name = models.CharField(max_length=200, blank=True, null=True)
desc = models.TextField(max_length=1024, blank=True, null=True)
solution = models.TextField(max_length=1024, blank=True, null=True)
ctype = models.CharField(max_length=8, choices=CHALLENGES_TYPE, default='p')
extra = models.TextField(max_length=1024, blank=True, null=True)
# options in extra:
# {"options":
# [
# {"type": "text", "question": "who is the killer?"},
# {"type": "option", "question": "with which weapon?",
# "answers": ["knife", "rope", "gun", "bare hands", "venom"]},
# ...
# ]
# }
depends = models.ManyToManyField('Challenge', related_name="requiedby",
blank=True)
# challenges to give when resolve this challenge, only if solution is
# not null and we resolve this
child_challenges = models.ManyToManyField('Challenge',
related_name="parents",
blank=True)
def mainclues(self):
return self.clues.filter(main=True)
def depends_on(self):
return ", ".join(i.name for i in self.depends.all())
def get_desc_html(self):
        # find each #[NUM][solution] marker in ``desc`` and replace it with
        # the bolded solution text
        qregex = re.compile(r"#\[[\d]+\]\[([^#]*)\]")
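        # e.g. a desc containing "#[1][the butler]" is rendered as
        # "<b>the butler</b>" (hypothetical marker, shown for illustration)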
desc_html = self.desc[:]
for sre in qregex.finditer(self.desc):
ini_pos, end_pos = sre.span()
rex = self.desc[ini_pos:end_pos]
solution = sre.group(1)
desc_html = desc_html.replace(rex, "<b>{}</b>".format(solution))
return desc_html
def __str__(self):
desc = self.desc[:10] if self.desc else "-"
return "{} - {}...".format(self.name, desc)
class Meta:
ordering = ['pk']
class Game(models.Model, ExtraBase):
name = models.CharField(max_length=200, blank=True, null=True)
desc = models.TextField(max_length=1024, blank=True, null=True)
solution = models.TextField(max_length=1024, blank=True, null=True)
challenges = models.ManyToManyField(Challenge, related_name="games")
author = models.ForeignKey(User, related_name="games", blank=True, null=True)
auto_assign_clue = models.BooleanField(default=True)
visible_players = models.BooleanField(default=True)
extra = models.TextField(max_length=1024, blank=True, null=True)
# options in extra:
# {"options":
# [
# {"type": "text", "question": "who is the killer?"},
# {"type": "option", "question": "with which weapon?",
# "answers": ["knife", "rope", "gun", "bare hands", "venom"]},
# ...
# ]
# }
def get_desc_html(self):
        # find each #[NUM][type][question] marker in ``desc`` and replace it
        # with the bolded question text
        qregex = re.compile(r"#\[[\d]+\]\[(?:option|text)\]\[([^#]*)\]")
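        # e.g. a desc containing "#[1][text][who is the killer?]" is rendered
        # as "<b>who is the killer?</b>" (hypothetical marker, for illustration)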
desc_html = self.desc[:]
for sre in qregex.finditer(self.desc):
ini_pos, end_pos = sre.span()
rex = self.desc[ini_pos:end_pos]
question = sre.group(1)
desc_html = desc_html.replace(rex, "<b>{}</b>".format(question))
return desc_html
def __str__(self):
return self.name
| wadobo/socializa | backend/game/models.py | Python | agpl-3.0 | 3,494 |
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
## NOTE: content.widgets provides a custom TinyMCE widget that handles
## initializing tinymce instances on stacked and tabular inlines;
## for flatpages the widget packaged with django-tinymce is sufficient.
#from content.widgets import TinyMCE
from tinymce.widgets import TinyMCE
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': TinyMCE(attrs={'cols': 100, 'rows': 15}),
}
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
| voer-platform/vp.web | vpw/admin.py | Python | agpl-3.0 | 724 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015-2017 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import account_analytic_attribution
from . import account_analytic_distribution_line
| ecino/compassion-accounting | account_analytic_attribution/models/__init__.py | Python | agpl-3.0 | 499 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
import colander
from pyramid.view import view_config
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.form import FormView
from pontus.schema import Schema, select
from pontus.widget import RadioChoiceWidget
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from lac.views.widget import EmailInputWidget
from lac.content.processes.lac_view_manager.behaviors import (
Improve)
from lac.content.lac_application import CreationCulturelleApplication
from lac import _
class ImproveStudyReport(BasicView):
title = 'Alert improve'
name = 'alertimprove'
template = 'lac:views/lac_view_manager/questionnaire/templates/improve_info.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class Improve1Schema(Schema):
id = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="ID",
missing="improve"
)
url = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="URL",
missing="None"
)
improvement = colander.SchemaNode(
colander.String(),
widget=deform.widget.TextAreaWidget(rows=3, cols=30),
title=_('Vos suggestions')
)
email = colander.SchemaNode(
colander.String(),
widget=EmailInputWidget(),
validator=colander.All(
colander.Email(),
colander.Length(max=100)
),
title=_('Email')
)
class ImproveFormView(FormView):
title = _('Votre avis')
schema = select(Improve1Schema(),
['id', 'url', 'improvement', 'email'])
behaviors = [Improve]
formid = 'formimprove'
name = 'improveform'
def before_update(self):
user = get_current()
if getattr(user, 'email', ''):
self.schema.get('email').widget = deform.widget.HiddenWidget()
def default_data(self):
user = get_current()
return {'email': getattr(user, 'email', '')}
@view_config(
name='improve',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class ImproveView(MultipleView):
title = _('Votre avis')
name = 'improve'
viewid = 'improve'
template = 'daceui:templates/simple_mergedmultipleview.pt'
views = (ImproveStudyReport, ImproveFormView)
validators = [Improve.get_validator()]
requirements = {'css_links': [],
'js_links': ['lac:static/js/questionnaire.js']}
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{Improve: ImproveView})
| ecreall/lagendacommun | lac/views/lac_view_manager/questionnaire/improve.py | Python | agpl-3.0 | 3,097 |
# -*- coding: utf-8 -*-
'''
Created on Jan 18, 2013
@author: brian
'''
import openid
from openid.fetchers import HTTPFetcher, HTTPResponse
from urlparse import parse_qs, urlparse
from django.conf import settings
from django.test import TestCase, LiveServerTestCase
from django.core.cache import cache
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from unittest import skipUnless
from student.tests.factories import UserFactory
from openedx.core.djangoapps.external_auth.views import provider_login
class MyFetcher(HTTPFetcher):
"""A fetcher that uses server-internal calls for performing HTTP
requests.
"""
def __init__(self, client):
"""@param client: A test client object"""
super(MyFetcher, self).__init__()
self.client = client
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by Django
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
# method = 'POST'
# undo the URL encoding of the POST arguments
data = parse_qs(body)
response = self.client.post(url, data)
else:
# method = 'GET'
data = {}
if headers and 'Accept' in headers:
data['CONTENT_TYPE'] = headers['Accept']
response = self.client.get(url, data)
# Translate the test client response to the fetcher's HTTP response abstraction
content = response.content
final_url = url
response_headers = {}
if 'Content-Type' in response:
response_headers['content-type'] = response['Content-Type']
if 'X-XRDS-Location' in response:
response_headers['x-xrds-location'] = response['X-XRDS-Location']
status = response.status_code
return HTTPResponse(
body=content,
final_url=final_url,
headers=response_headers,
status=status,
)
class OpenIdProviderTest(TestCase):
"""
Tests of the OpenId login
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_xrds_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_login_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-login')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
for expected_input in (
'<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />',
'<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />',
'<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />',
'<input type="submit" value="Continue" />',
'<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />',
'<input name="openid.ax.type.lastname" '
'type="hidden" value="http://axschema.org/namePerson/last" />',
'<input name="openid.ax.type.firstname" '
'type="hidden" value="http://axschema.org/namePerson/first" />',
'<input name="openid.ax.required" type="hidden" '
'value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />',
'<input name="openid.ax.type.nickname" '
'type="hidden" value="http://axschema.org/namePerson/friendly" />',
'<input name="openid.ax.type.old_email" '
'type="hidden" value="http://schema.openid.net/contact/email" />',
'<input name="openid.ax.type.old_nickname" '
'type="hidden" value="http://schema.openid.net/namePerson/friendly" />',
'<input name="openid.ax.type.old_fullname" '
'type="hidden" value="http://schema.openid.net/namePerson" />',
'<input name="openid.identity" '
'type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />',
'<input name="openid.claimed_id" '
'type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />',
# should work on the test server as well
'<input name="openid.realm" '
'type="hidden" value="http://testserver/" />',
):
self.assertContains(resp, expected_input, html=True)
# not included here are elements that will vary from run to run:
# <input name="openid.return_to" type="hidden"
# value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" />
# <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" />
def attempt_login(self, expected_code, login_method='POST', **kwargs):
""" Attempt to log in through the open id provider login """
url = reverse('openid-provider-login')
args = {
"openid.mode": "checkid_setup",
"openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H",
"openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}",
"openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.realm": "http://testserver/",
"openid.identity": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
"openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname",
"openid.ax.type.fullname": "http://axschema.org/namePerson",
"openid.ax.type.lastname": "http://axschema.org/namePerson/last",
"openid.ax.type.firstname": "http://axschema.org/namePerson/first",
"openid.ax.type.nickname": "http://axschema.org/namePerson/friendly",
"openid.ax.type.email": "http://axschema.org/contact/email",
"openid.ax.type.old_email": "http://schema.openid.net/contact/email",
"openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly",
"openid.ax.type.old_fullname": "http://schema.openid.net/namePerson",
}
# override the default args with any given arguments
for key in kwargs:
args["openid." + key] = kwargs[key]
if login_method == 'POST':
resp = self.client.post(url, args)
elif login_method == 'GET':
resp = self.client.get(url, args)
else:
self.fail('Invalid login method')
code = expected_code
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
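    # The keyword arguments are prefixed with "openid." before being sent,
    # so e.g. attempt_login(403, ns="invalid") overrides the "openid.ns"
    # value above to exercise an error path (see the tests below).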
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_open_id_setup(self):
""" Attempt a standard successful login """
self.attempt_login(200)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_namespace(self):
""" Test for 403 error code when the namespace of the request is invalid"""
self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0")
@override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org'])
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_return_url(self):
""" Test for 403 error code when the url"""
self.attempt_login(403, return_to="http://apps.cs50.edx.or")
def _send_bad_redirection_login(self):
"""
Attempt to log in to the provider with setup parameters
Intentionally fail the login to force a redirect
"""
user = UserFactory()
factory = RequestFactory()
post_params = {'email': user.email, 'password': 'password'}
fake_url = 'fake url'
request = factory.post(reverse('openid-provider-login'), post_params)
openid_setup = {
'request': factory.request(),
'url': fake_url,
'post_params': {}
}
request.session = {
'openid_setup': openid_setup
}
response = provider_login(request)
return response
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection(self):
""" Test to see that we can handle login redirection properly"""
response = self._send_bad_redirection_login()
self.assertEquals(response.status_code, 302)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# log in attempts before the rate gets limited
for _ in xrange(30):
self._send_bad_redirection_login()
response = self._send_bad_redirection_login()
# verify that we are not returning the default 403
self.assertEquals(response.status_code, 302)
# clear the ratelimit cache so that we don't fail other logins
cache.clear()
def _attempt_login_and_perform_final_response(self, user, profile_name):
"""
        Performs the full procedure of a successful OpenID provider login for
        a user; all required data is taken from the ``user`` argument, which
        is an instance of the ``User`` model. As a convenience this method
        will also set ``profile.name`` for the user.
"""
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = profile_name
user.profile.save()
        # It is assumed that user's password is test (default for UserFactory)
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
post_args = {
'email': user.email,
'password': 'test'
}
# call url again, this time with username and password
return self.client.post(url, post_args)
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email(self):
user = UserFactory(email=u"user.ąęł@gmail.com")
resp = self._attempt_login_and_perform_final_response(user, u"Jan ĄĘŁ")
location = resp['Location']
parsed_url = urlparse(location)
parsed_qs = parse_qs(parsed_url.query)
self.assertEquals(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
self.assertEquals(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
self.assertEquals(parsed_qs['openid.ax.value.ext0.1'][0],
user.profile.name.encode('utf-8')) # pylint: disable=no-member
self.assertEquals(parsed_qs['openid.ax.value.ext1.1'][0],
user.email.encode('utf-8')) # pylint: disable=no-member
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email_invalid_password(self):
user = UserFactory(email=u"user.ąęł@gmail.com")
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = u"Jan ĄĘ"
user.profile.save()
        # It is assumed that user's password is test (default for UserFactory)
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
# We trigger situation where user password is invalid at last phase
# of openid login
post_args = {
'email': user.email,
'password': 'invalid-password'
}
# call url again, this time with username and password
return self.client.post(url, post_args)
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email_inactive_account(self):
user = UserFactory(email=u"user.ąęł@gmail.com")
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = u'Jan ĄĘ'
user.profile.save() # pylint: disable=no-member
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
# We trigger situation where user is not active at final phase of
# OpenId login.
user.is_active = False
user.save() # pylint: disable=no-member
post_args = {
'email': user.email,
'password': 'test'
}
# call url again, this time with username and password
self.client.post(url, post_args)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_openid_final_response(self):
user = UserFactory()
# login to the client so that we can persist session information
for name in ['Robot 33', '☃']:
resp = self._attempt_login_and_perform_final_response(user, name)
# all information is embedded in the redirect url
location = resp['Location']
# parse the url
parsed_url = urlparse(location)
parsed_qs = parse_qs(parsed_url.query)
self.assertEquals(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
self.assertEquals(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
self.assertEquals(parsed_qs['openid.ax.value.ext1.1'][0], user.email)
self.assertEquals(parsed_qs['openid.ax.value.ext0.1'][0], user.profile.name)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_openid_invalid_password(self):
url = reverse('openid-provider-login')
user = UserFactory()
# login to the client so that we can persist session information
for method in ['POST', 'GET']:
self.client.login(username=user.username, password='test')
self.attempt_login(200, method)
openid_setup = self.client.session['openid_setup']
self.assertIn('post_params', openid_setup)
post_args = {
'email': user.email,
'password': 'bad_password',
}
# call url again, this time with username and password
resp = self.client.post(url, post_args)
self.assertEquals(resp.status_code, 302)
redirect_url = resp['Location']
parsed_url = urlparse(redirect_url)
query_params = parse_qs(parsed_url[4])
self.assertIn('openid.return_to', query_params)
self.assertTrue(
query_params['openid.return_to'][0].startswith('http://testserver/openid/complete/')
)
class OpenIdProviderLiveServerTest(LiveServerTestCase):
"""
In order for this absolute URL to work (i.e. to get xrds, then authentication)
in the test environment, we either need a live server that works with the default
fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
Here we do the former.
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@classmethod
def tearDownClass(cls):
"""
Workaround for a runtime error that occurs
intermittently when the server thread doesn't shut down
within 2 seconds.
Since the server is running in a Django thread and will
be terminated when the test suite terminates,
this shouldn't cause a resource allocation issue.
"""
try:
super(OpenIdProviderLiveServerTest, cls).tearDownClass()
except RuntimeError:
print "Warning: Could not shut down test server."
| caesar2164/edx-platform | openedx/core/djangoapps/external_auth/tests/test_openid_provider.py | Python | agpl-3.0 | 21,198 |
# -*- coding: utf-8 -*-
# Copyright© 2016 ICTSTUDIO <http://www.ictstudio.eu>
# License: AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
"name": 'Operating Unit - Changeable Invoice Line',
"version": "8.0.1.0.0",
"summary": "Default use operating unit of Invoice but be able to change per line",
"author": "ICTSTUDIO",
"website": "http://www.ictstudio.eu",
"category": "Accounting & Finance",
"depends": ['account_operating_unit', 'account_anglo_saxon_stock_move'],
"license": "AGPL-3",
"data": [
"views/account_invoice.xml",
],
}
| ICTSTUDIO/accounting-addons | account_operating_unit_invoice_line/__openerp__.py | Python | agpl-3.0 | 588 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-16 16:39
from __future__ import unicode_literals
import base.models.learning_unit_year
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0259_auto_20180416_1404'),
]
operations = [
migrations.RemoveField(
model_name='learningunit',
name='acronym',
),
migrations.RemoveField(
model_name='learningunit',
name='title',
),
migrations.AlterField(
model_name='learningcontaineryear',
name='common_title',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='common_official_title'),
),
migrations.AlterField(
model_name='learningcontaineryear',
name='common_title_english',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='common_official_english_title'),
),
migrations.AlterField(
model_name='learningcontaineryear',
name='container_type',
field=models.CharField(choices=[('COURSE', 'COURSE'), ('INTERNSHIP', 'INTERNSHIP'), ('DISSERTATION', 'DISSERTATION'), ('OTHER_COLLECTIVE', 'OTHER_COLLECTIVE'), ('OTHER_INDIVIDUAL', 'OTHER_INDIVIDUAL'), ('MASTER_THESIS', 'MASTER_THESIS'), ('EXTERNAL', 'EXTERNAL')], max_length=20, verbose_name='type'),
),
migrations.AlterField(
model_name='learningcontaineryear',
name='language',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.Language', verbose_name='language'),
),
migrations.AlterField(
model_name='learningunit',
name='faculty_remark',
field=models.TextField(blank=True, null=True, verbose_name='faculty_remark'),
),
migrations.AlterField(
model_name='learningunit',
name='other_remark',
field=models.TextField(blank=True, null=True, verbose_name='other_remark'),
),
migrations.AlterField(
model_name='learningunit',
name='periodicity',
field=models.CharField(choices=[('ANNUAL', 'ANNUAL'), ('BIENNIAL_EVEN', 'BIENNIAL_EVEN'), ('BIENNIAL_ODD', 'BIENNIAL_ODD')], default='ANNUAL', max_length=20, verbose_name='periodicity'),
),
migrations.AlterField(
model_name='learningunityear',
name='academic_year',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.AcademicYear', validators=[base.models.learning_unit_year.academic_year_validator], verbose_name='academic_year'),
),
migrations.AlterField(
model_name='learningunityear',
name='acronym',
field=models.CharField(db_index=True, max_length=15, validators=[django.core.validators.RegexValidator('^[BLMW][A-Z]{2,4}\\d{4}[A-Z0-9]{0,1}$')], verbose_name='code'),
),
migrations.AlterField(
model_name='learningunityear',
name='internship_subtype',
field=models.CharField(blank=True, choices=[('TEACHING_INTERNSHIP', 'TEACHING_INTERNSHIP'), ('CLINICAL_INTERNSHIP', 'CLINICAL_INTERNSHIP'), ('PROFESSIONAL_INTERNSHIP', 'PROFESSIONAL_INTERNSHIP'), ('RESEARCH_INTERNSHIP', 'RESEARCH_INTERNSHIP')], max_length=250, null=True, verbose_name='internship_subtype'),
),
migrations.AlterField(
model_name='learningunityear',
name='quadrimester',
field=models.CharField(blank=True, choices=[('Q1', 'Q1'), ('Q2', 'Q2'), ('Q1&2', 'Q1&2'), ('Q1|2', 'Q1|2'), ('Q3', 'Q3')], max_length=4, null=True, verbose_name='quadrimester'),
),
migrations.AlterField(
model_name='learningunityear',
name='specific_title',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='official_title_proper_to_UE'),
),
migrations.AlterField(
model_name='learningunityear',
name='specific_title_english',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='official_english_title_proper_to_UE'),
),
migrations.AlterField(
model_name='learningunityear',
name='status',
field=models.BooleanField(default=False, verbose_name='active_title'),
),
migrations.AlterField(
model_name='learningunityear',
name='subtype',
field=models.CharField(choices=[('FULL', 'FULL'), ('PARTIM', 'PARTIM')], default='FULL', max_length=50),
),
]
| uclouvain/osis_louvain | base/migrations/0260_auto_20180416_1839.py | Python | agpl-3.0 | 4,812 |
#!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
# from pyproj import Proj, pj_list, pj_ellps
import cv2
def geo_ref_tracks(tracks, frame, uav, debug=False):
"""
    Geo-references tracks' points
:param tracks: list of drifters' trajectories
:param frame: CV2 frame
:param uav: UAV class object
:return: geo-referenced tracks in degrees and tracks relative to center point in meters
"""
# Meter per pixel ratio
# TODO: Lens correction could be needed here
diagLength = 2.0 * np.tan(np.deg2rad(uav.FOV/2.0)) * uav.altitude
nx = float(frame.shape[1])
ny = float(frame.shape[0])
phi = np.arctan(ny / nx)
horiMpP = diagLength * np.cos(phi) / nx # horizontal meters per pixel ratio
vertiMpP = diagLength * np.sin(phi) / ny # vertical meters per pixel ratio.
if uav.yaw < 0.0: # UAV convention
alibi = True
else:
alibi = False
yaw = np.abs(np.deg2rad(uav.yaw))
    # Pre-build the output list structure (the incoming points are tuples)
tracksInDeg = []
tracksInRelativeM = []
for tr in tracks:
tracksInDeg.append([])
tracksInRelativeM.append([])
# Relative distance
for tr, TR in zip(tracks, tracksInRelativeM):
for pt in tr:
pt = list(pt)
x = (pt[0] - (nx/2.0)) * horiMpP
y = ((ny - pt[1]) - (ny/2.0)) * vertiMpP # Origin frame is top left corner
if alibi:
# Correction with Active (aka Alibi) transformation
xr = x * np.cos(yaw) - y * np.sin(yaw)
yr = x * np.sin(yaw) + y * np.cos(yaw)
else:
# Correction with Passive (aka Alias) transformation
xr = x*np.cos(yaw) + y*np.sin(yaw)
yr = y*np.cos(yaw) - x*np.sin(yaw)
TR.append([xr, yr])
    # Conversion m. to deg. / Version 2.0
y2lat = 1.0 / (110.54 * 1000.0)
x2lon = 1.0 / (111.320 * 1000.0 * np.cos(np.deg2rad(uav.centreCoordinates[1])))
lonC, latC = uav.centreCoordinates[0], uav.centreCoordinates[1]
for tr, trM in zip(tracksInDeg, tracksInRelativeM):
for ptM in trM:
lon, lat = lonC + (ptM[0] * x2lon), latC + (ptM[1] * y2lat)
tr.append([lon, lat])
# Conversion deg. to m. / version 1.0
# proj = raw_input("Use default projection UTM/WGS84 (yes/no)?: ").upper()
# if proj in "YES":
# myproj = Proj(proj='utm', ellps='WGS84') # LatLon with WGS84 datum used by GPS units
# else:
# print "Choose a coordinate projection from the following list:"
# for key in pj_list:
# print key + ": " + pj_list[key]
# proj = raw_input("Type in the coordinate projection: ")
# print "Choose a coordinate ellipse from the following list:"
# for key in pj_list:
# print key + ": " + pj_list[key]
# ellps = raw_input("Type in the coordinate ellipse: ")
# myproj = Proj(proj=proj, ellps=ellps)
# xc, yc = myproj(uav.centreCoordinates[0], uav.centreCoordinates[1])
# # Absolute distance and conversion m. to deg.
# for tr, trM in zip(tracksInDeg, tracksInRelativeM):
# for ptM in trM:
# x, y = xc + ptM[0], yc + ptM[1]
# lon, lat = myproj(x, y, inverse=True)
# tr.append([lon, lat])
# # Recompute relative distance in new referential
# tracksInRelativeM = []
# for tr in tracks:
# tracksInRelativeM.append([])
# lat2m = 110.54 * 1000.0
# lon2m = 111.320 * 1000.0 * np.cos(np.deg2rad(uav.centreCoordinates[1]))
# for tr, trM in zip(tracksInDeg, tracksInRelativeM):
# for pt in tr:
# x = lon2m * (pt[0] - uav.centreCoordinates[0])
# y = lat2m * (pt[1] - uav.centreCoordinates[1])
# trM.append([x, y])
return tracksInDeg, tracksInRelativeM
# TODO: def geo_ref_contours
def geo_ref_contours(surfTurbArea, uav, debug=False):
"""
Geo-references surface turbulence areas
:param surfTurbArea: frame of surface turbulence areas
:param uav: UAV object
:return: geo-referenced contours
"""
# Find contours from white areas
imgray = cv2.cvtColor(surfTurbArea,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray,127,255,0)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if debug:
im = cv2.drawContours(surfTurbArea, contours, -1, (0,255,0), 3)
cv2.namedWindow('Areas & contours', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Areas & contours', 1200, 1200)
cv2.imshow('Areas & contours', im)
# Reformating
contoursList = []
for cnt in contours:
coordsList = []
for coords in cnt:
coordsList.append(tuple(coords[0]))
contoursList.append(coordsList)
# Georeference contours
contoursInDeg, contoursInM = geo_ref_tracks(contoursList, surfTurbArea, uav, debug=debug)
return contoursInDeg
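
# --- Illustrative sketch (added for exposition; not part of the original
# module). The stub's attribute names follow their use in geo_ref_tracks
# above; all values are assumptions chosen only for demonstration.
if __name__ == '__main__':
    class _StubUAV(object):
        FOV = 94.0                            # diagonal field of view (deg)
        altitude = 50.0                       # metres above ground
        yaw = -15.0                           # negative yaw -> alibi rotation
        centreCoordinates = (-66.35, 44.27)   # (lon, lat) of frame centre
    blank_frame = np.zeros((2160, 4096, 3), dtype=np.uint8)
    pixel_tracks = [[(2048, 1080), (2100, 1050)]]  # one two-point track
    deg_tracks, rel_tracks = geo_ref_tracks(pixel_tracks, blank_frame,
                                            _StubUAV())
    print(deg_tracks[0])  # [[lon, lat], [lon, lat]]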
| theelectricbrain/Drones-Drifters | drones_n_drifters/georef/geo_referencing.py | Python | agpl-3.0 | 4,964 |
from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import APITestCase, SerializationMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.models import LevelType
from course_discovery.apps.course_metadata.tests.factories import LevelTypeFactory
class LevelTypeViewSetTests(SerializationMixin, APITestCase):
list_path = reverse('api:v1:level_type-list')
def setUp(self):
super().setUp()
self.user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=self.user.username, password=USER_PASSWORD)
def test_authentication(self):
""" Verify the endpoint requires the user to be authenticated. """
response = self.client.get(self.list_path)
assert response.status_code == 200
self.client.logout()
response = self.client.get(self.list_path)
assert response.status_code == 401
def test_list(self):
""" Verify the endpoint returns a list of all program types. """
LevelTypeFactory.create_batch(4)
expected = LevelType.objects.all()
with self.assertNumQueries(6):
response = self.client.get(self.list_path)
assert response.status_code == 200
assert response.data['results'] == self.serialize_level_type(expected, many=True)
def test_retrieve(self):
""" The request should return details for a single level type. """
level_type = LevelTypeFactory()
level_type.set_current_language('en')
level_type.name_t = level_type.name
level_type.save()
url = reverse('api:v1:level_type-detail', kwargs={'name': level_type.name})
with self.assertNumQueries(5):
response = self.client.get(url)
assert response.status_code == 200
assert response.data == self.serialize_level_type(level_type)
| edx/course-discovery | course_discovery/apps/api/v1/tests/test_views/test_level_types.py | Python | agpl-3.0 | 1,985 |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Greeting(models.Model):
when = models.DateTimeField('date created', auto_now_add=True)
class Material(models.Model):
name = models.CharField(max_length=200)
price = models.FloatField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Bunch(models.Model):
user = models.ForeignKey(User)
material = models.ForeignKey(Material)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
image = models.ImageField(upload_to="bunches")
pounds = models.FloatField(blank=True, null=True)
contact = models.CharField(max_length=255)
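# Illustrative (assumed) usage from a shell session or view; `some_user` and
# `photo` are placeholders for an authenticated User and an uploaded image:
#
#   material = Material.objects.create(name="Copper", price=3.5)
#   Bunch.objects.create(user=some_user, material=material, image=photo,
#                        pounds=2.5, contact="555-0100")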
| Emrals/matter | hello/models.py | Python | agpl-3.0 | 900 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import pygtk
pygtk.require("2.0")
import gtk
from GenericObject import ObjectPageAbstract
from GenericObject import PageFrame
from GenericObject import FrameLabel
from skarphedadmin.gui import IconStock
from skarphedadmin.glue.lng import _
class RolePage(ObjectPageAbstract):
def __init__(self,parent,role):
ObjectPageAbstract.__init__(self,parent,role)
self.roleId = role.getLocalId()
role.fetchPermissions()
self.headline = gtk.Label()
self.pack_start(self.headline,False)
self.info = PageFrame(self,_("Information"), IconStock.ROLE)
self.infobox = gtk.VBox()
self.info.add(self.infobox)
self.pack_start(self.info,False)
self.perm = PageFrame(self,_("Permissions"), IconStock.PERMISSION)
self.permbox = gtk.Table(1,2,False)
self.permbox.set_row_spacings(10)
self.permbox.set_col_spacings(10)
self.permbox.set_border_width(10)
        self.perm_permlabel = FrameLabel(self,_("Please choose the Permissions you want to assign to the role here:"), IconStock.PERMISSION)
self.perm_permlistview = gtk.TreeView()
self.perm_permlist = gtk.ListStore(int, str,str)
self.perm_permlistview.set_model(self.perm_permlist)
self.perm_permlist_col_checkbox = gtk.TreeViewColumn('')
self.perm_permlist_col_identifier = gtk.TreeViewColumn(_('Permission Identifier'))
self.perm_permlist_col_name = gtk.TreeViewColumn(_('Permission Name'))
self.perm_permlistview.append_column(self.perm_permlist_col_checkbox)
self.perm_permlistview.append_column(self.perm_permlist_col_identifier)
self.perm_permlistview.append_column(self.perm_permlist_col_name)
self.perm_permlist_renderer_checkbox= gtk.CellRendererToggle()
self.perm_permlist_renderer_identifier = gtk.CellRendererText()
self.perm_permlist_renderer_name = gtk.CellRendererText()
self.perm_permlist_col_checkbox.pack_start(self.perm_permlist_renderer_checkbox)
self.perm_permlist_col_identifier.pack_start(self.perm_permlist_renderer_identifier)
self.perm_permlist_col_name.pack_start(self.perm_permlist_renderer_name)
self.perm_permlist_col_checkbox.add_attribute(self.perm_permlist_renderer_checkbox,'active',0)
self.perm_permlist_col_identifier.add_attribute(self.perm_permlist_renderer_identifier,'text',1)
self.perm_permlist_col_name.add_attribute(self.perm_permlist_renderer_name,'text',2)
self.perm_permlist_renderer_checkbox.set_activatable(True)
self.perm_permlist_renderer_checkbox.connect("toggled",self.toggledRight)
self.permbox.attach(self.perm_permlabel,0,1,0,1)
self.permbox.attach(self.perm_permlistview,0,1,1,2)
self.perm.add(self.permbox)
self.pack_start(self.perm,False)
self.show_all()
self.render()
def render(self):
role = self.getMyObject()
if not role:
return
self.headline.set_markup(_("<b>Edit Role: "+role.getName()+"</b>"))
if role.permissiondata is not None:
self.perm_permlist.clear()
for permission in role.permissiondata:
self.perm_permlist.append((int(permission['granted']),str(permission['right']),''))
def toggledRight(self,renderer = None, path = None):
rowiter = self.perm_permlist.get_iter(path)
perm = self.perm_permlist.get_value(rowiter,1)
val = 1-self.perm_permlist.get_value(rowiter,0)
role = self.getApplication().getLocalObjectById(self.roleId)
if val == 1:
role.assignPermission(perm)
else:
role.removePermission(perm)
| skarphed/skarphed | admin/src/skarphedadmin/gui/skarphed/Role.py | Python | agpl-3.0 | 4,713 |
"""Utilities for working with ID tokens."""
import json
from time import time
from django.conf import settings
from django.utils.functional import cached_property
from jwkest import jwk
from jwkest.jws import JWS
from student.models import UserProfile, anonymous_id_for_user
class JwtBuilder(object):
"""Utility for building JWTs.
Unifies diverse approaches to JWT creation in a single class. This utility defaults to using the system's
JWT configuration.
NOTE: This utility class will allow you to override the signing key and audience claim to support those
clients which still require this. This approach to JWT creation is DEPRECATED. Avoid doing this for new clients.
Arguments:
user (User): User for which to generate the JWT.
Keyword Arguments:
asymmetric (Boolean): Whether the JWT should be signed with this app's private key.
secret (string): Overrides configured JWT secret (signing) key. Unused if an asymmetric signature is requested.
issuer (string): Overrides configured JWT issuer.
"""
def __init__(self, user, asymmetric=False, secret=None, issuer=None):
self.user = user
self.asymmetric = asymmetric
self.secret = secret
self.issuer = issuer
self.jwt_auth = settings.JWT_AUTH
def build_token(self, scopes, expires_in=None, aud=None, additional_claims=None):
"""Returns a JWT access token.
Arguments:
scopes (list): Scopes controlling which optional claims are included in the token.
Keyword Arguments:
expires_in (int): Time to token expiry, specified in seconds.
aud (string): Overrides configured JWT audience claim.
additional_claims (dict): Additional claims to include in the token.
Returns:
str: Encoded JWT
"""
now = int(time())
expires_in = expires_in or self.jwt_auth['JWT_EXPIRATION']
payload = {
# TODO Consider getting rid of this claim since we don't use it.
'aud': aud if aud else self.jwt_auth['JWT_AUDIENCE'],
'exp': now + expires_in,
'iat': now,
'iss': self.issuer if self.issuer else self.jwt_auth['JWT_ISSUER'],
'preferred_username': self.user.username,
'scopes': scopes,
'version': self.jwt_auth['JWT_SUPPORTED_VERSION'],
'sub': anonymous_id_for_user(self.user, None),
}
if additional_claims:
payload.update(additional_claims)
for scope in scopes:
handler = self.claim_handlers.get(scope)
if handler:
handler(payload)
return self.encode(payload)
@cached_property
def claim_handlers(self):
"""Returns a dictionary mapping scopes to methods that will add claims to the JWT payload."""
return {
'email': self.attach_email_claim,
'profile': self.attach_profile_claim
}
def attach_email_claim(self, payload):
"""Add the email claim details to the JWT payload."""
payload['email'] = self.user.email
def attach_profile_claim(self, payload):
"""Add the profile claim details to the JWT payload."""
try:
# Some users (e.g., service users) may not have user profiles.
name = UserProfile.objects.get(user=self.user).name
except UserProfile.DoesNotExist:
name = None
payload.update({
'name': name,
'family_name': self.user.last_name,
'given_name': self.user.first_name,
'administrator': self.user.is_staff,
})
def encode(self, payload):
"""Encode the provided payload."""
keys = jwk.KEYS()
if self.asymmetric:
serialized_keypair = json.loads(self.jwt_auth['JWT_PRIVATE_SIGNING_JWK'])
keys.add(serialized_keypair)
algorithm = self.jwt_auth['JWT_SIGNING_ALGORITHM']
else:
key = self.secret if self.secret else self.jwt_auth['JWT_SECRET_KEY']
keys.add({'key': key, 'kty': 'oct'})
algorithm = self.jwt_auth['JWT_ALGORITHM']
data = json.dumps(payload)
jws = JWS(data, alg=algorithm)
return jws.sign_compact(keys=keys)
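# Illustrative (assumed) call shape; requires the system's JWT_AUTH settings
# to be configured, and 'custom_claim' is a hypothetical extra claim:
#
#   token = JwtBuilder(user).build_token(
#       scopes=['email', 'profile'],
#       expires_in=300,
#       additional_claims={'custom_claim': 'value'},
#   )
#
# The 'email' and 'profile' scopes trigger the claim handlers defined above.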
| ahmedaljazzar/edx-platform | openedx/core/lib/token_utils.py | Python | agpl-3.0 | 4,328 |
from telecommand import Telecommand
class GetCompileInfoTelecommand(Telecommand):
def __init__(self):
Telecommand.__init__(self)
def apid(self):
return 0x27
def payload(self):
return [] | PW-Sat2/PWSat2OBC | integration_tests/telecommand/compile_info.py | Python | agpl-3.0 | 236 |
"""
The latest version of this package is available at:
<http://github.com/jantman/webhook2lambda2sqs>
################################################################################
Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com>
This file is part of webhook2lambda2sqs, also known as webhook2lambda2sqs.
webhook2lambda2sqs is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
webhook2lambda2sqs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with webhook2lambda2sqs. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/webhook2lambda2sqs> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
VERSION = '0.2.0'
PROJECT_URL = 'https://github.com/jantman/webhook2lambda2sqs'
| jantman/webhook2lambda2sqs | webhook2lambda2sqs/version.py | Python | agpl-3.0 | 1,938 |
from odoo import api, fields, models, tools
class UnrelatedDocumentsReport(models.Model):
_name = "sicon.unrelated_documents.report"
_description = 'Documents not related yet to any concession'
_auto = False
dependence_id = fields.Many2one(comodel_name='tmc.dependence',
readonly=True)
document_type_id = fields.Many2one(comodel_name='tmc.document_type',
readonly=True)
number = fields.Integer(readonly=True)
period = fields.Integer(readonly=True)
document_object = fields.Char(readonly=True)
name = fields.Char(string='Document', readonly=True)
_depends = {
'tmc.document': ['name', 'document_object', 'main_topic_ids'],
'sicon.event': ['document_id']
}
def init(self):
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""
CREATE OR REPLACE VIEW sicon_unrelated_documents_report AS (
SELECT
doc.id,
doc.document_object,
doc.name
FROM (
tmc_document doc
LEFT JOIN document_main_topic_rel rel
ON (rel.tmc_document_id = doc.id)
LEFT JOIN tmc_document_topic doc_topic
ON (rel.tmc_document_topic_id = doc_topic.id)
LEFT JOIN tmc_dependence dep
ON doc.dependence_id = dep.id
LEFT JOIN tmc_document_type doc_type
ON doc.document_type_id = doc_type.id
)
WHERE doc_topic.name = 'Concesiones Generales'
AND doc_type.abbreviation = 'DEC'
AND doc.id NOT IN (
SELECT
document_id
FROM sicon_event e WHERE document_id IS NOT NULL)
ORDER BY doc.period, doc.number
)
""")
| tmcrosario/odoo-sicon | sicon/report/unrelated_documents.py | Python | agpl-3.0 | 2,065 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
class doctor_disability(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(doctor_disability, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'select_type': self.select_type,
'select_age': self.select_age,
'select_diseases': self.select_diseases,
'select_diseases_type': self.select_diseases_type,
'return_street_home': self.return_street_home,
'return_number_phone': self.return_number_phone,
'return_sex': self.return_sex
})
def return_street_home(self, country, state, city):
street = ""
if country:
street += country.title() + " - "
if state:
street += state.title() + " - "
if city:
street += city.title() + " - "
        return street[:-3]  # drop the trailing " - " separator
def return_number_phone(self, phone, mobile):
return_phone = ""
if phone:
return_phone += phone + " - "
if mobile:
return_phone += mobile + " - "
        return return_phone[:-3]  # drop the trailing " - " separator
def return_sex(self, sex):
if sex == 'm':
return "Masculino"
return "Femenino"
def select_type(self, tipo_usuario):
if tipo_usuario:
tipo = self.pool.get('doctor.tipousuario.regimen').browse(self.cr, self.uid, tipo_usuario).name
else:
tipo= None
return tipo
    def select_age(self, age):
        """Return the translated label for the ``age_unit`` selection value."""
        lang = self.pool.get('res.users').browse(self.cr, self.uid, self.uid).lang
        fields = self.pool.get('doctor.attentions').fields_get(
            self.cr, self.uid, ['age_unit'], context={'lang': lang})
        return dict(fields['age_unit']['selection']).get(str(age))
    def select_diseases(self, status):
        return {
            'presumptive': "Impresión Diagnóstica",
            'confirm': "Confirmado",
            'recurrent': "Recurrente",
        }.get(status, "")
    def select_diseases_type(self, diseases_type):
        return {
            'main': "Principal",
            'related': "Relacionado",
        }.get(diseases_type, "")
report_sxw.report_sxw('report.doctor_disability_half', 'doctor.attentions',
'addons/l10n_co_doctor/report/doctor_disability_half.rml',
parser=doctor_disability, header=False)
| hivam/l10n_co_doctor | report/doctor_disability_half.py | Python | agpl-3.0 | 3,267 |
from annoying.functions import get_object_or_None
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
# Class to permit the authentication using email or username, with case sensitive and insensitive matches.
class CustomBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
UserModel = get_user_model()
        # Exact (case-sensitive) matches take priority; the original queryset
        # used email__iexact here, contradicting its own name.
        case_sensitive = UserModel.objects.filter(Q(username__exact=username) | Q(email__exact=username)).distinct()
        case_insensitive = UserModel.objects.filter(Q(username__iexact=username) | Q(email__iexact=username)).distinct()
user = None
if case_sensitive.exists():
user = case_sensitive.first()
elif case_insensitive.exists():
count = case_insensitive.count()
if count == 1:
user = case_insensitive.first()
if user and user.check_password(password):
return user
return None
def get_user(self, user_id):
return get_object_or_None(get_user_model(), pk=user_id)
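# To enable this backend (illustrative; the dotted path assumes this module is
# astrobin/auth.py, as it is in this repository):
#     AUTHENTICATION_BACKENDS = [
#         'astrobin.auth.CustomBackend',
#         'django.contrib.auth.backends.ModelBackend',
#     ]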
| astrobin/astrobin | astrobin/auth.py | Python | agpl-3.0 | 1,134 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('ccx', '0017_auto_20170721_0437'),
]
operations = [
migrations.AlterField(
model_name='customcourseforedx',
name='time',
field=models.DateTimeField(default=datetime.datetime(2017, 7, 21, 6, 10, 51, 471098)),
),
]
| mbareta/edx-platform-ft | lms/djangoapps/ccx/migrations/0018_auto_20170721_0611.py | Python | agpl-3.0 | 472 |
"""Auth pipeline definitions.
Auth pipelines handle the process of authenticating a user. They involve a
consumer system and a provider service. The general pattern is:
1. The consumer system exposes a URL endpoint that starts the process.
2. When a user visits that URL, the client system redirects the user to a
page served by the provider. The user authenticates with the provider.
The provider handles authentication failure however it wants.
3. On success, the provider POSTs to a URL endpoint on the consumer to
invoke the pipeline. It sends back an arbitrary payload of data about
the user.
4. The pipeline begins, executing each function in its stack. The stack is
defined on django's settings object's SOCIAL_AUTH_PIPELINE. This is done
in settings._set_global_settings.
5. Each pipeline function is variadic. Most pipeline functions are part of
the pythons-social-auth library; our extensions are defined below. The
pipeline is the same no matter what provider is used.
6. Pipeline functions can return a dict to add arguments to the function
invoked next. They can return None if this is not necessary.
7. Pipeline functions may be decorated with @partial.partial. This pauses
the pipeline and serializes its state onto the request's session. When
this is done they may redirect to other edX handlers to execute edX
account registration/sign in code.
8. In that code, redirecting to get_complete_url() resumes the pipeline.
This happens by hitting a handler exposed by the consumer system.
9. In this way, execution moves between the provider, the pipeline, and
arbitrary consumer system code.
Gotcha alert!:
Bear in mind that when pausing and resuming a pipeline function decorated with
@partial.partial, execution resumes by re-invoking the decorated function
instead of invoking the next function in the pipeline stack. For example, if
you have a pipeline of
A
B
C
with an implementation of
@partial.partial
def B(*args, **kwargs):
[...]
B will be invoked twice: once when initially proceeding through the pipeline
before it is paused, and once when other code finishes and the pipeline
resumes. Consequently, many decorated functions will first invoke a predicate
to determine if they are in their first or second execution (usually by
checking side-effects from the first run).
This is surprising but important behavior, since it allows a single function in
the pipeline to consolidate all the operations needed to establish invariants
rather than spreading them across two functions in the pipeline.
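A minimal illustrative step (not part of this module's actual stack) showing
convention 6 above — returning a dict to pass arguments to later steps:
    def remember_backend_name(strategy, backend=None, *args, **kwargs):
        # The returned dict is merged into the kwargs of subsequent steps.
        return {'seen_backend_name': backend.name if backend else None}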
See http://python-social-auth.readthedocs.io/en/latest/pipeline.html for more docs.
"""
import base64
import hashlib
import hmac
import json
import urllib
from collections import OrderedDict
from logging import getLogger
from smtplib import SMTPException
import analytics
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail.message import EmailMessage
from django.urls import reverse
from django.http import HttpResponseBadRequest
from django.shortcuts import redirect
import social_django
from social_core.exceptions import AuthException
from social_core.pipeline import partial
from social_core.pipeline.social_auth import associate_by_email
import student
from edxmako.shortcuts import render_to_string
from eventtracking import tracker
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from third_party_auth.utils import user_exists
from lms.djangoapps.verify_student.models import SSOVerification
from lms.djangoapps.verify_student.utils import earliest_allowed_verification_date
from . import provider
# These are the query string params you can pass
# to the URL that starts the authentication process.
#
# `AUTH_ENTRY_KEY` is required and indicates how the user
# enters the authentication process.
#
# `AUTH_REDIRECT_KEY` provides an optional URL to redirect
# to upon successful authentication
# (if not provided, defaults to `_SOCIAL_AUTH_LOGIN_REDIRECT_URL`)
AUTH_ENTRY_KEY = 'auth_entry'
AUTH_REDIRECT_KEY = 'next'
# The following are various possible values for the AUTH_ENTRY_KEY.
AUTH_ENTRY_LOGIN = 'login'
AUTH_ENTRY_REGISTER = 'register'
AUTH_ENTRY_ACCOUNT_SETTINGS = 'account_settings'
# Entry modes into the authentication process by a remote API call (as opposed to a browser session).
AUTH_ENTRY_LOGIN_API = 'login_api'
AUTH_ENTRY_REGISTER_API = 'register_api'
# AUTH_ENTRY_CUSTOM: Custom auth entry point for post-auth integrations.
# This should be a dict where the key is a word passed via ?auth_entry=, and the
# value is a dict with an arbitrary 'secret_key' and a 'url'.
# This can be used as an extension point to inject custom behavior into the auth
# process, replacing the registration/login form that would normally be seen
# immediately after the user has authenticated with the third party provider.
# If a custom 'auth_entry' query parameter is used, then once the user has
# authenticated with a specific backend/provider, they will be redirected to the
# URL specified with this setting, rather than to the built-in
# registration/login form/logic.
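# An illustrative value (the key, secret, and URL below are hypothetical):
#     THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = {
#         'my-custom-form': {
#             'secret_key': 'shared-secret-with-the-external-site',
#             'url': '/custom/auth/form',
#         },
#     }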
AUTH_ENTRY_CUSTOM = getattr(settings, 'THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS', {})
def is_api(auth_entry):
"""Returns whether the auth entry point is via an API call."""
return (auth_entry == AUTH_ENTRY_LOGIN_API) or (auth_entry == AUTH_ENTRY_REGISTER_API)
# URLs associated with auth entry points
# These are used to request additional user information
# (for example, account credentials when logging in),
# and when the user cancels the auth process
# (e.g., refusing to grant permission on the provider's login page).
# We don't use "reverse" here because doing so may cause modules
# to load that depend on this module.
AUTH_DISPATCH_URLS = {
AUTH_ENTRY_LOGIN: '/login',
AUTH_ENTRY_REGISTER: '/register',
AUTH_ENTRY_ACCOUNT_SETTINGS: '/account/settings',
}
_AUTH_ENTRY_CHOICES = frozenset([
AUTH_ENTRY_LOGIN,
AUTH_ENTRY_REGISTER,
AUTH_ENTRY_ACCOUNT_SETTINGS,
AUTH_ENTRY_LOGIN_API,
AUTH_ENTRY_REGISTER_API,
] + list(AUTH_ENTRY_CUSTOM.keys()))
logger = getLogger(__name__)
class AuthEntryError(AuthException):
"""Raised when auth_entry is invalid on URLs.
auth_entry tells us whether the auth flow was initiated to register a new
user (in which case it has the value of AUTH_ENTRY_REGISTER) or log in an
existing user (in which case it has the value of AUTH_ENTRY_LOGIN).
This is necessary because the edX code we hook into the pipeline to
    redirect to the existing auth flows needs to know which case we are in, in
order to format its output correctly (for example, the register code is
invoked earlier than the login code, and it needs to know if the login flow
was requested to dispatch correctly).
"""
class ProviderUserState(object):
"""Object representing the provider state (attached or not) for a user.
This is intended only for use when rendering templates. See for example
lms/templates/dashboard.html.
"""
def __init__(self, enabled_provider, user, association):
# Boolean. Whether the user has an account associated with the provider
self.has_account = association is not None
if self.has_account:
# UserSocialAuth row ID
self.association_id = association.id
# Identifier of this user according to the remote provider:
self.remote_id = enabled_provider.get_remote_id_from_social_auth(association)
else:
self.association_id = None
self.remote_id = None
# provider.BaseProvider child. Callers must verify that the provider is
# enabled.
self.provider = enabled_provider
# django.contrib.auth.models.User.
self.user = user
def get_unlink_form_name(self):
"""Gets the name used in HTML forms that unlink a provider account."""
return self.provider.provider_id + '_unlink_form'
def get(request):
"""Gets the running pipeline's data from the passed request."""
strategy = social_django.utils.load_strategy(request)
token = strategy.session_get('partial_pipeline_token')
partial_object = strategy.partial_load(token)
pipeline_data = None
if partial_object:
pipeline_data = {'kwargs': partial_object.kwargs, 'backend': partial_object.backend}
return pipeline_data
def get_real_social_auth_object(request):
"""
At times, the pipeline will have a "social" kwarg that contains a dictionary
rather than an actual DB-backed UserSocialAuth object. We need the real thing,
so this method allows us to get that by passing in the relevant request.
"""
running_pipeline = get(request)
if running_pipeline and 'social' in running_pipeline['kwargs']:
social = running_pipeline['kwargs']['social']
if isinstance(social, dict):
social = social_django.models.UserSocialAuth.objects.get(**social)
return social
def quarantine_session(request, locations):
"""
Set a session variable indicating that the session is restricted
to being used in views contained in the modules listed by string
in the `locations` argument.
Example: ``quarantine_session(request, ('enterprise.views',))``
"""
request.session['third_party_auth_quarantined_modules'] = locations
def lift_quarantine(request):
"""
Remove the session quarantine variable.
"""
request.session.pop('third_party_auth_quarantined_modules', None)
def get_authenticated_user(auth_provider, username, uid):
"""Gets a saved user authenticated by a particular backend.
Between pipeline steps User objects are not saved. We need to reconstitute
the user and set its .backend, which is ordinarily monkey-patched on by
Django during authenticate(), so it will function like a user returned by
authenticate().
Args:
auth_provider: the third_party_auth provider in use for the current pipeline.
username: string. Username of user to get.
uid: string. The user ID according to the third party.
Returns:
User if user is found and has a social auth from the passed
provider.
Raises:
User.DoesNotExist: if no user matching user is found, or the matching
user has no social auth associated with the given backend.
AssertionError: if the user is not authenticated.
"""
match = social_django.models.DjangoStorage.user.get_social_auth(provider=auth_provider.backend_name, uid=uid)
if not match or match.user.username != username:
raise User.DoesNotExist
user = match.user
user.backend = auth_provider.get_authentication_backend()
return user
def _get_enabled_provider(provider_id):
"""Gets an enabled provider by its provider_id member or throws."""
enabled_provider = provider.Registry.get(provider_id)
if not enabled_provider:
raise ValueError('Provider %s not enabled' % provider_id)
return enabled_provider
def _get_url(view_name, backend_name, auth_entry=None, redirect_url=None,
extra_params=None, url_params=None):
"""Creates a URL to hook into social auth endpoints."""
url_params = url_params or {}
url_params['backend'] = backend_name
url = reverse(view_name, kwargs=url_params)
query_params = OrderedDict()
if auth_entry:
query_params[AUTH_ENTRY_KEY] = auth_entry
if redirect_url:
query_params[AUTH_REDIRECT_KEY] = redirect_url
if extra_params:
query_params.update(extra_params)
return u"{url}?{params}".format(
url=url,
params=urllib.urlencode(query_params)
)
def get_complete_url(backend_name):
"""Gets URL for the endpoint that returns control to the auth pipeline.
Args:
backend_name: string. Name of the python-social-auth backend from the
currently-running pipeline.
Returns:
String. URL that finishes the auth pipeline for a provider.
Raises:
ValueError: if no provider is enabled with the given backend_name.
"""
if not any(provider.Registry.get_enabled_by_backend_name(backend_name)):
raise ValueError('Provider with backend %s not enabled' % backend_name)
return _get_url('social:complete', backend_name)
def get_disconnect_url(provider_id, association_id):
"""Gets URL for the endpoint that starts the disconnect pipeline.
Args:
provider_id: string identifier of the social_django.models.ProviderConfig child you want
to disconnect from.
association_id: int. Optional ID of a specific row in the UserSocialAuth
table to disconnect (useful if multiple providers use a common backend)
Returns:
String. URL that starts the disconnection pipeline.
Raises:
ValueError: if no provider is enabled with the given ID.
"""
backend_name = _get_enabled_provider(provider_id).backend_name
if association_id:
return _get_url('social:disconnect_individual', backend_name, url_params={'association_id': association_id})
else:
return _get_url('social:disconnect', backend_name)
def get_login_url(provider_id, auth_entry, redirect_url=None):
"""Gets the login URL for the endpoint that kicks off auth with a provider.
Args:
provider_id: string identifier of the social_django.models.ProviderConfig child you want
to disconnect from.
auth_entry: string. Query argument specifying the desired entry point
for the auth pipeline. Used by the pipeline for later branching.
Must be one of _AUTH_ENTRY_CHOICES.
Keyword Args:
redirect_url (string): If provided, redirect to this URL at the end
of the authentication process.
Returns:
String. URL that starts the auth pipeline for a provider.
Raises:
ValueError: if no provider is enabled with the given provider_id.
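    Example (illustrative; the provider_id value is hypothetical):
        get_login_url('oa2-google-oauth2', AUTH_ENTRY_LOGIN,
                      redirect_url='/dashboard')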
"""
assert auth_entry in _AUTH_ENTRY_CHOICES
enabled_provider = _get_enabled_provider(provider_id)
return _get_url(
'social:begin',
enabled_provider.backend_name,
auth_entry=auth_entry,
redirect_url=redirect_url,
extra_params=enabled_provider.get_url_params(),
)
def get_duplicate_provider(messages):
"""Gets provider from message about social account already in use.
python-social-auth's exception middleware uses the messages module to
    record details about duplicate account associations. It records exactly one
    message when there is a request to associate a social account S with an edX
account E if S is already associated with an edX account E'.
This messaging approach is stringly-typed and the particular string is
unfortunately not in a reusable constant.
Returns:
string name of the python-social-auth backend that has the duplicate
account, or None if there is no duplicate (and hence no error).
"""
social_auth_messages = [m for m in messages if m.message.endswith('is already in use.')]
if not social_auth_messages:
return
assert len(social_auth_messages) == 1
backend_name = social_auth_messages[0].extra_tags.split()[1]
return backend_name
def get_provider_user_states(user):
"""Gets list of states of provider-user combinations.
Args:
django.contrib.auth.User. The user to get states for.
Returns:
List of ProviderUserState. The list of states of a user's account with
each enabled provider.
"""
states = []
found_user_auths = list(social_django.models.DjangoStorage.user.get_social_auth_for_user(user))
for enabled_provider in provider.Registry.enabled():
association = None
for auth in found_user_auths:
if enabled_provider.match_social_auth(auth):
association = auth
break
if enabled_provider.accepts_logins or association:
states.append(
ProviderUserState(enabled_provider, user, association)
)
return states
def running(request):
"""Returns True iff request is running a third-party auth pipeline."""
return get(request) is not None # Avoid False for {}.
# Pipeline functions.
# Signatures are set by python-social-auth; prepending 'unused_' causes
# TypeError on dispatch to the auth backend's authenticate().
# pylint: disable=unused-argument
def parse_query_params(strategy, response, *args, **kwargs):
"""Reads whitelisted query params, transforms them into pipeline args."""
# If auth_entry is not in the session, we got here by a non-standard workflow.
# We simply assume 'login' in that case.
auth_entry = strategy.request.session.get(AUTH_ENTRY_KEY, AUTH_ENTRY_LOGIN)
if auth_entry not in _AUTH_ENTRY_CHOICES:
raise AuthEntryError(strategy.request.backend, 'auth_entry invalid')
return {'auth_entry': auth_entry}
def set_pipeline_timeout(strategy, user, *args, **kwargs):
"""
Set a short session timeout while the pipeline runs, to improve security.
Consider the following attack:
1. Attacker on a public computer visits edX and initiates the third-party login flow
2. Attacker logs into their own third-party account
3. Attacker closes the window and does not complete the login flow
4. Victim on the same computer logs into edX with username/password
5. edX links attacker's third-party account with victim's edX account
6. Attacker logs into victim's edX account using attacker's own third-party account
We have two features of the pipeline designed to prevent this attack:
* This method shortens the Django session timeout during the pipeline. This should mean that
if there is a reasonable delay between steps 3 and 4, the session and pipeline will be
reset, and the attack foiled.
Configure the timeout with the SOCIAL_AUTH_PIPELINE_TIMEOUT setting (Default: 600 seconds)
* On step 4, the login page displays an obvious message to the user, saying "You've
successfully signed into (Google), but your (Google) account isn't linked with an edX
account. To link your accounts, login now using your edX password.".
"""
if strategy.request and not user: # If user is set, we're currently logged in (and/or linked) so it doesn't matter.
strategy.request.session.set_expiry(strategy.setting('PIPELINE_TIMEOUT', 600))
# We don't need to reset this timeout later. Because the user is not logged in and this
# account is not yet linked to an edX account, either the normal 'login' or 'register'
# code must occur during the subsequent ensure_user_information step, and those methods
# will change the session timeout to the "normal" value according to the "Remember Me"
# choice of the user.
def redirect_to_custom_form(request, auth_entry, details, kwargs):
"""
If auth_entry is found in AUTH_ENTRY_CUSTOM, this is used to send provider
data to an external server's registration/login page.
The data is sent as a base64-encoded values in a POST request and includes
a cryptographic checksum in case the integrity of the data is important.
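    A receiving endpoint can verify integrity by recomputing the digest
    (sketch only; the receiving code lives outside this module, though the
    POSTed field names match what this function sends):
        data = base64.b64decode(request.POST['data'])
        expected = hmac.new(secret_key, msg=data, digestmod=hashlib.sha256).digest()
        assert hmac.compare_digest(expected, base64.b64decode(request.POST['hmac']))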
"""
backend_name = request.backend.name
provider_id = provider.Registry.get_from_pipeline({'backend': backend_name, 'kwargs': kwargs}).provider_id
form_info = AUTH_ENTRY_CUSTOM[auth_entry]
secret_key = form_info['secret_key']
if isinstance(secret_key, unicode):
secret_key = secret_key.encode('utf-8')
custom_form_url = form_info['url']
data_str = json.dumps({
"auth_entry": auth_entry,
"backend_name": backend_name,
"provider_id": provider_id,
"user_details": details,
})
digest = hmac.new(secret_key, msg=data_str, digestmod=hashlib.sha256).digest()
# Store the data in the session temporarily, then redirect to a page that will POST it to
# the custom login/register page.
request.session['tpa_custom_auth_entry_data'] = {
'data': base64.b64encode(data_str),
'hmac': base64.b64encode(digest),
'post_url': custom_form_url,
}
return redirect(reverse('tpa_post_to_custom_auth_form'))
@partial.partial
def ensure_user_information(strategy, auth_entry, backend=None, user=None, social=None, current_partial=None,
allow_inactive_user=False, details=None, *args, **kwargs):
"""
Ensure that we have the necessary information about a user (either an
existing account or registration data) to proceed with the pipeline.
"""
# We're deliberately verbose here to make it clear what the intended
# dispatch behavior is for the various pipeline entry points, given the
# current state of the pipeline. Keep in mind the pipeline is re-entrant
# and values will change on repeated invocations (for example, the first
# time through the login flow the user will be None so we dispatch to the
# login form; the second time it will have a value so we continue to the
# next pipeline step directly).
#
# It is important that we always execute the entire pipeline. Even if
# behavior appears correct without executing a step, it means important
# invariants have been violated and future misbehavior is likely.
def dispatch_to_login():
"""Redirects to the login page."""
return redirect(AUTH_DISPATCH_URLS[AUTH_ENTRY_LOGIN])
def dispatch_to_register():
"""Redirects to the registration page."""
return redirect(AUTH_DISPATCH_URLS[AUTH_ENTRY_REGISTER])
def should_force_account_creation():
""" For some third party providers, we auto-create user accounts """
current_provider = provider.Registry.get_from_pipeline({'backend': current_partial.backend, 'kwargs': kwargs})
return (current_provider and
(current_provider.skip_email_verification or current_provider.send_to_registration_first))
if not user:
if user_exists(details or {}):
# User has not already authenticated and the details sent over from
# identity provider belong to an existing user.
return dispatch_to_login()
if is_api(auth_entry):
return HttpResponseBadRequest()
elif auth_entry == AUTH_ENTRY_LOGIN:
# User has authenticated with the third party provider but we don't know which edX
# account corresponds to them yet, if any.
if should_force_account_creation():
return dispatch_to_register()
return dispatch_to_login()
elif auth_entry == AUTH_ENTRY_REGISTER:
# User has authenticated with the third party provider and now wants to finish
# creating their edX account.
return dispatch_to_register()
elif auth_entry == AUTH_ENTRY_ACCOUNT_SETTINGS:
raise AuthEntryError(backend, 'auth_entry is wrong. Settings requires a user.')
elif auth_entry in AUTH_ENTRY_CUSTOM:
# Pass the username, email, etc. via query params to the custom entry page:
return redirect_to_custom_form(strategy.request, auth_entry, details or {}, kwargs)
else:
raise AuthEntryError(backend, 'auth_entry invalid')
if not user.is_active:
# The user account has not been verified yet.
if allow_inactive_user:
# This parameter is used by the auth_exchange app, which always allows users to
# login, whether or not their account is validated.
pass
elif social is None:
# The user has just registered a new account as part of this pipeline. Their account
# is inactive but we allow the login to continue, because if we pause again to force
# the user to activate their account via email, the pipeline may get lost (e.g.
# email takes too long to arrive, user opens the activation email on a different
# device, etc.). This is consistent with first party auth and ensures that the
# pipeline completes fully, which is critical.
pass
else:
# This is an existing account, linked to a third party provider but not activated.
# Double-check these criteria:
assert user is not None
assert social is not None
# We now also allow them to login again, because if they had entered their email
# incorrectly then there would be no way for them to recover the account, nor
# register anew via SSO. See SOL-1324 in JIRA.
# However, we will log a warning for this case:
logger.warning(
'User "%s" is using third_party_auth to login but has not yet activated their account. ',
user.username
)
@partial.partial
def set_logged_in_cookies(backend=None, user=None, strategy=None, auth_entry=None, current_partial=None,
*args, **kwargs):
"""This pipeline step sets the "logged in" cookie for authenticated users.
Some installations have a marketing site front-end separate from
edx-platform. Those installations sometimes display different
information for logged in versus anonymous users (e.g. a link
to the student dashboard instead of the login page.)
Since social auth uses Django's native `login()` method, it bypasses
our usual login view that sets this cookie. For this reason, we need
to set the cookie ourselves within the pipeline.
The procedure for doing this is a little strange. On the one hand,
we need to send a response to the user in order to set the cookie.
On the other hand, we don't want to drop the user out of the pipeline.
For this reason, we send a redirect back to the "complete" URL,
so users immediately re-enter the pipeline. The redirect response
contains a header that sets the logged in cookie.
If the user is not logged in, or the logged in cookie is already set,
the function returns `None`, indicating that control should pass
to the next pipeline step.
"""
if not is_api(auth_entry) and user is not None and user.is_authenticated:
request = strategy.request if strategy else None
# n.b. for new users, user.is_active may be False at this point; set the cookie anyways.
if request is not None:
# Check that the cookie isn't already set.
# This ensures that we allow the user to continue to the next
# pipeline step once he/she has the cookie set by this step.
has_cookie = student.cookies.is_logged_in_cookie_set(request)
if not has_cookie:
try:
redirect_url = get_complete_url(current_partial.backend)
except ValueError:
# If for some reason we can't get the URL, just skip this step
# This may be overly paranoid, but it's far more important that
# the user log in successfully than that the cookie is set.
pass
else:
response = redirect(redirect_url)
return student.cookies.set_logged_in_cookies(request, response, user)
@partial.partial
def login_analytics(strategy, auth_entry, current_partial=None, *args, **kwargs):
""" Sends login info to Segment """
event_name = None
if auth_entry == AUTH_ENTRY_LOGIN:
event_name = 'edx.bi.user.account.authenticated'
elif auth_entry in [AUTH_ENTRY_ACCOUNT_SETTINGS]:
event_name = 'edx.bi.user.account.linked'
if event_name is not None and hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
kwargs['user'].id,
event_name,
{
'category': "conversion",
'label': None,
'provider': kwargs['backend'].name
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
@partial.partial
def associate_by_email_if_login_api(auth_entry, backend, details, user, current_partial=None, *args, **kwargs):
"""
This pipeline step associates the current social auth with the user with the
same email address in the database. It defers to the social library's associate_by_email
implementation, which verifies that only a single database user is associated with the email.
This association is done ONLY if the user entered the pipeline through a LOGIN API.
"""
    # Match the behavior promised in the docstring: only associate by email
    # when the pipeline was entered through the login API.
    if auth_entry == AUTH_ENTRY_LOGIN_API:
        association_response = associate_by_email(backend, details, user, *args, **kwargs)
        if (
            association_response and
            association_response.get('user') and
            association_response['user'].is_active
        ):
            # Only return the user matched by email if their email has been activated.
            # Otherwise, an illegitimate user can create an account with another user's
            # email address and the legitimate user would now login to the illegitimate
            # account.
            return association_response
def user_details_force_sync(auth_entry, strategy, details, user=None, *args, **kwargs):
"""
Update normally protected user details using data from provider.
This step in the pipeline is akin to `social_core.pipeline.user.user_details`, which updates
the user details but has an unconfigurable protection over updating the username & email, and
is unable to update information such as the user's full name which isn't on the user model, but
rather on the user profile model.
Additionally, because the email field is normally used to log in, if the email is changed by this
forced synchronization, we send an email to both the old and new emails, letting the user know.
This step is controlled by the `sync_learner_profile_data` flag on the provider's configuration.
"""
current_provider = provider.Registry.get_from_pipeline({'backend': strategy.request.backend.name, 'kwargs': kwargs})
if user and current_provider.sync_learner_profile_data:
# Keep track of which incoming values get applied.
changed = {}
# Map each incoming field from the provider to the name on the user model (by default, they always match).
field_mapping = {field: (user, field) for field in details.keys() if hasattr(user, field)}
# This is a special case where the field mapping should go to the user profile object and not the user object,
# in some cases with differing field names (i.e. 'fullname' vs. 'name').
field_mapping.update({
'fullname': (user.profile, 'name'),
'country': (user.profile, 'country'),
})
# Remove username from list of fields for update
field_mapping.pop('username', None)
# Track any fields that would raise an integrity error if there was a conflict.
integrity_conflict_fields = {'email': user.email, 'username': user.username}
for provider_field, (model, field) in field_mapping.items():
provider_value = details.get(provider_field)
current_value = getattr(model, field)
if provider_value is not None and current_value != provider_value:
if field in integrity_conflict_fields and User.objects.filter(**{field: provider_value}).exists():
logger.warning('User with ID [%s] tried to synchronize profile data through [%s] '
'but there was a conflict with an existing [%s]: [%s].',
user.id, current_provider.name, field, provider_value)
continue
changed[provider_field] = current_value
setattr(model, field, provider_value)
if changed:
logger.info(
"User [%s] performed SSO through [%s] who synchronizes profile data, and the "
"following fields were changed: %s", user.username, current_provider.name, changed.keys(),
)
# Save changes to user and user.profile models.
strategy.storage.user.changed(user)
user.profile.save()
# Send an email to the old and new email to alert the user that their login email changed.
if changed.get('email'):
old_email = changed['email']
new_email = user.email
email_context = {'old_email': old_email, 'new_email': new_email}
# Subjects shouldn't have new lines.
subject = ''.join(render_to_string(
'emails/sync_learner_profile_data_email_change_subject.txt',
email_context
).splitlines())
body = render_to_string('emails/sync_learner_profile_data_email_change_body.txt', email_context)
from_email = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
email = EmailMessage(subject=subject, body=body, from_email=from_email, to=[old_email, new_email])
email.content_subtype = "html"
try:
email.send()
except SMTPException:
logger.exception('Error sending IdP learner data sync-initiated email change '
'notification email for user [%s].', user.username)
def set_id_verification_status(auth_entry, strategy, details, user=None, *args, **kwargs):
"""
Use the user's authentication with the provider, if configured, as evidence of their identity being verified.
"""
current_provider = provider.Registry.get_from_pipeline({'backend': strategy.request.backend.name, 'kwargs': kwargs})
if user and current_provider.enable_sso_id_verification:
# Get previous valid, non expired verification attempts for this SSO Provider and user
verifications = SSOVerification.objects.filter(
user=user,
status="approved",
created_at__gte=earliest_allowed_verification_date(),
identity_provider_type=current_provider.full_class_name,
identity_provider_slug=current_provider.slug,
)
        # If there is none, create a new approved verification for the user.
        if not verifications.exists():
SSOVerification.objects.create(
user=user,
status="approved",
name=user.profile.name,
identity_provider_type=current_provider.full_class_name,
identity_provider_slug=current_provider.slug,
)
| CredoReference/edx-platform | common/djangoapps/third_party_auth/pipeline.py | Python | agpl-3.0 | 35,110 |
#!python3.3
# -*- coding: utf-8 -*-
"""
.. module:: examples.benchmarks
:platform: Agnostic, Windows
:synopsis: Full suite of benchmarks
Created on 10/08/2013
"""
def standard_iges_setup(system, filename):
system.StartSection.Prolog = " "
system.GlobalSection.IntegerBits = int(32)
system.GlobalSection.SPMagnitude = int(38)
system.GlobalSection.SPSignificance = int(6)
system.GlobalSection.DPMagnitude = int(38)
system.GlobalSection.DPSignificance = int(15)
system.GlobalSection.MaxNumberLineWeightGrads = int(8)
system.GlobalSection.WidthMaxLineWeightUnits = float(0.016)
system.GlobalSection.MaxCoordValue = float(71)
    # Everything before the last '.' becomes the product identification;
    # rsplit also copes with file names containing no '.' at all, where
    # filename.index('.') would raise ValueError.
    system.GlobalSection.ProductIdentificationFromSender = filename.rsplit('.', 1)[0]
system.GlobalSection.FileName = filename
system.GlobalSection.ProductIdentificationForReceiver = \
system.GlobalSection.ProductIdentificationFromSender
system.GlobalSection.AuthorOrg = "Queensland Uni. of Tech."
system.GlobalSection.NameOfAuthor = "Rodney Persky"
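# Usage sketch (illustrative): ``system`` can be any object exposing the
# StartSection / GlobalSection attributes assigned above — e.g. the IGES
# system object created elsewhere in the pyIGES benchmark examples:
#     standard_iges_setup(system, "benchmark_model.igs")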
| mdecourse/2016fallcadp | data/pyIGES/docs/examples/benchmarks/__init__.py | Python | agpl-3.0 | 1,103 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-06 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NewsItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('content', models.TextField()),
],
),
]
| nmunro/azathoth | news/migrations/0001_initial.py | Python | agpl-3.0 | 603 |