repo_name (string, 5 to 100 chars) | path (string, 4 to 299 chars) | copies (string, 990 distinct values) | size (string, 4 to 7 chars) | content (string, 666 to 1.03M chars) | license (string, 15 distinct values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
mjenrungrot/algorithm | Kattis/collatz.py | 2 | 1179 | """
Problem: collatz
Link: https://open.kattis.com/problems/collatz
Source: Kattis / CTU Open 2011
"""
while True:
    A, B = list(map(int, input().split()))
    if A == 0 and B == 0:
        break
    if A == B:
        print("{:} needs 0 steps, {:} needs 0 steps, they meet at {:}".format(A, B, A))
        continue

    origA = A
    origB = B
    # Map every visited value of each sequence to the step count needed to reach it.
    mapA = dict()
    mapB = dict()
    mapA[A] = 0
    mapB[B] = 0
    stepA = 0
    stepB = 0

    while True:
        # Advance A by one Collatz step (stop once it reaches 1).
        if A != 1:
            if A % 2 == 0:
                A = A // 2
            else:
                A = 3 * A + 1
            if A not in mapA:
                stepA += 1
                mapA[A] = stepA
        # Advance B by one Collatz step (stop once it reaches 1).
        if B != 1:
            if B % 2 == 0:
                B = B // 2
            else:
                B = 3 * B + 1
            if B not in mapB:
                stepB += 1
                mapB[B] = stepB
        # The sequences have met once one current value was already visited by the other.
        if A in mapB:
            print("{:} needs {:} steps, {:} needs {:} steps, they meet at {:}".format(origA, stepA, origB, mapB[A], A))
            break
        if B in mapA:
            print("{:} needs {:} steps, {:} needs {:} steps, they meet at {:}".format(origA, mapA[B], origB, stepB, B))
            break
| mit | 1,773,556,188,656,078,800 | 22.117647 | 119 | 0.412214 | false |
SimenB/thefuck | tests/rules/test_terraform_init.py | 3 | 1455 | import pytest
from thefuck.rules.terraform_init import match, get_new_command
from thefuck.types import Command

@pytest.mark.parametrize('script, output', [
    ('terraform plan', 'Error: Initialization required. '
                       'Please see the error message above.'),
    ('terraform plan', 'This module is not yet installed. Run "terraform init" '
                       'to install all modules required by this configuration.'),
    ('terraform apply', 'Error: Initialization required. '
                        'Please see the error message above.'),
    ('terraform apply', 'This module is not yet installed. Run "terraform init" '
                        'to install all modules required by this configuration.')])
def test_match(script, output):
    assert match(Command(script, output))


@pytest.mark.parametrize('script, output', [
    ('terraform --version', 'Terraform v0.12.2'),
    ('terraform plan', 'No changes. Infrastructure is up-to-date.'),
    ('terraform apply', 'Apply complete! Resources: 0 added, 0 changed, 0 destroyed.'),
])
def test_not_match(script, output):
    assert not match(Command(script, output=output))


@pytest.mark.parametrize('command, new_command', [
    (Command('terraform plan', ''), 'terraform init && terraform plan'),
    (Command('terraform apply', ''), 'terraform init && terraform apply'),
])
def test_get_new_command(command, new_command):
    assert get_new_command(command) == new_command
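

# A minimal sketch (not the shipped implementation) of the terraform_init rule
# these tests exercise, assuming thefuck's usual match/get_new_command rule
# interface: match() looks for the "Initialization required" / "module is not
# yet installed" messages on a terraform command, and get_new_command()
# prefixes the failed command with "terraform init &&".
#
#   def match(command):
#       return command.script.startswith('terraform') and (
#           'initialization required' in command.output.lower()
#           or 'this module is not yet installed' in command.output.lower())
#
#   def get_new_command(command):
#       return 'terraform init && ' + command.script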
| mit | -2,430,255,138,444,243,000 | 43.090909 | 87 | 0.66323 | false |
vankesteren/jasp-desktop | Tools/modScripts/translationSplitter.py | 6 | 7635 | #!/usr/bin/python
import os
import sys
import regex
from collections import namedtuple
from enum import Enum

if len(sys.argv) < 4:
    print("Usage: python translationSplitter.py toBeTranslated.po alreadyTranslated.po { po-file-folder | po-file+ }")
    exit(1)

keepTalking = False
toBeTranslatedFilename = sys.argv[1]
alreadyTranslatedFilename = sys.argv[2]
poFiles = []

# collect the po files we need to check
if len(sys.argv) == 4:  # Maybe we've got a po-file-folder here
    possibleFolder = sys.argv[3]
    if os.path.isfile(possibleFolder):
        poFiles.append(possibleFolder)
    elif os.path.isdir(possibleFolder):
        # print("possibleFolder: " + possibleFolder)
        for entry in os.listdir(possibleFolder):
            entryFull = possibleFolder + "/" + entry
            if os.path.isfile(entryFull):
                poFiles.append(entryFull)
else:
    for i in range(3, len(sys.argv)):
        poFiles.append(sys.argv[i])

print(poFiles)

checkUs = poFiles
poFiles = []
for poFile in checkUs:
    if not poFile.endswith(".po"):
        print("poFile '" + poFile + "' does not end in .po, we will not use it!")
    else:
        poFiles.append(poFile)

if len(poFiles) == 0:
    print("You didn't specify any *.po files (or the folder you specified didn't contain any)")
    exit(2)

class parseState(Enum):
    LIMBO = 1
    MSGID = 2
    MSGID_PLURAL = 3
    MSGSTRS = 4
    MSGCTXT = 5

msgFactory = namedtuple('msg', ['msgid', 'msgid_plural', 'msgstrs', 'msgctxt', 'comments'])
msgsToDo = dict()
msgsDone = dict()
msgid = ""
msgid_plural = ""
msgstrs = []
msgstate = parseState.LIMBO
msgctxt = ""
comments = []

def resetMsgVars():
    if keepTalking:
        print("Looking for new msg")
    global msgid
    global msgid_plural
    global msgstrs
    global msgstate
    global msgctxt
    msgctxt = ""
    msgid = ""
    msgid_plural = ""
    msgstrs = []
    msgstate = parseState.LIMBO

parseMsgstr = regex.compile(r"""\s*
msgstr(\[\d+\])? #msgstr possibly with [#] behind it
\s+(\"[^\"]*\")
""", regex.VERBOSE | regex.MULTILINE)
toDoDone = 0

def storeMsg():
    global msgsDone
    global msgsToDo
    global toDoDone
    global msgid
    global msgid_plural
    global msgstrs
    global msgctxt
    global comments
    translatorFilledAll = True
    curMsg = msgFactory(msgid=msgid, msgid_plural=msgid_plural, msgstrs=msgstrs, msgctxt=msgctxt, comments=comments)
    comments = []  # This is cleared here to make sure we get any comments leading up to the next one
    if keepTalking:
        print("---------------------------------------------------\nStore msg: " + str(curMsg) + "---------------------------------------------------")
    for msgstr in msgstrs:
        m = parseMsgstr.match(msgstr)
        if m:
            cap = m.captures(2)[0]
            if keepTalking:
                print("For msgstr '" + msgstr + "' I find cap: '" + str(cap) + "' and length = " + str(len(cap)))
            if len(cap) == 2:  # apparently this one is empty
                translatorFilledAll = False
        else:
            print("Couldn't parse msgstr '" + msgstr + "' for msgid '" + msgid + "' aborting!")
            exit(3)
    if translatorFilledAll:
        if msgid in msgsDone:
            print("msg was filled in twice, msgid doubled:" + msgid + " overwriting it (ctxt:" + msgsDone[msgid].msgctxt + " and msgstrs:" + str(msgsDone[msgid].msgstrs) + ") and keeping the last one (ctxt:" + msgctxt + " and msgstrs: " + str(msgstrs) + ").")
            oldComments = msgsDone[msgid].comments
            for comment in comments:
                oldComments.append(comment)
            comments = oldComments
        if msgid in msgsToDo:  # Ok, it is also in the ToDo list, so we can remove it from there now, but keep the comments
            oldComments = msgsToDo[msgid].comments
            for comment in comments:
                oldComments.append(comment)
            comments = oldComments
            # We should keep the context for the one that is not filled in, because it probably is better!
            if msgsToDo[msgid].msgctxt != "":
                if msgctxt == "":
                    comments.append("#Context for translated wasn't present")
                else:
                    comments.append("#Context for translated: " + msgctxt)
                msgctxt = msgsToDo[msgid].msgctxt
                print("Using context from empty one (" + msgctxt + ")")
            del msgsToDo[msgid]
            toDoDone = toDoDone + 1
        msgsDone[msgid] = curMsg
    else:
        if keepTalking:
            print("Not filled in...")
        if msgid in msgsDone:
            toDoDone = toDoDone + 1
        else:
            msgsToDo[msgid] = curMsg

def printParseLine(line):
    if keepTalking:
        print("State: " + str(msgstate) + " and line: " + line)

def parseLineLimbo(line):
    global msgid
    global msgstate
    global msgctxt
    printParseLine(line)
    if line == "":  # Boring but fine I guess?
        return
    if line.startswith("msgctxt"):  # Great!
        resetMsgVars()
        msgctxt = line
        msgstate = parseState.MSGCTXT
    elif line.startswith("msgid"):  # Also great!
        resetMsgVars()
        msgid = line
        msgstate = parseState.MSGID

def parseLineMsgCtxt(line):
    global msgstate
    global msgid
    global msgctxt
    printParseLine(line)
    if line.startswith('"'):
        msgctxt += "\n"
        msgctxt += line
    elif line.startswith("msgid"):  # Great!
        msgid = line
        msgstate = parseState.MSGID
    else:
        print("Expected a msgid after msgctxt, but didn't get it!")
        exit(5)

def parseLineMsgid(line):
    global msgstate
    global msgid
    global msgid_plural
    printParseLine(line)
    if line.startswith('"'):
        msgid += "\n"
        msgid += line
    elif line.startswith("msgid_plural"):
        msgid_plural = line
        msgstate = parseState.MSGID_PLURAL
    elif line.startswith("msgstr"):
        msgstrs.append(line)
        msgstate = parseState.MSGSTRS

def parseLineMsgidPlural(line):
    global msgstate
    global msgid_plural
    printParseLine(line)
    if line.startswith('"'):
        msgid_plural += "\n"
        msgid_plural += line
    elif line.startswith("msgstr"):
        msgstrs.append(line)
        msgstate = parseState.MSGSTRS

def parseLineMsgStrs(line):
    global msgstate
    global msgstrs
    printParseLine(line)
    if line.startswith('"'):
        msgstrs[len(msgstrs) - 1] += "\n"
        msgstrs[len(msgstrs) - 1] += line
    elif line.startswith("msgstr"):
        msgstrs.append(line)
    elif line.startswith("msgid") or line.startswith("msgctxt"):
        msgstate = parseState.LIMBO
        storeMsg()
        parseLineLimbo(line)
    elif line == "":  # I guess empty line means we are done with this msg?
        msgstate = parseState.LIMBO
        storeMsg()
    else:
        print("I am totally confused with this file, I was expecting something like 'msgstr' but got: " + line)
        exit(4)

parseSwitch = {
    parseState.LIMBO: parseLineLimbo,
    parseState.MSGID: parseLineMsgid,
    parseState.MSGID_PLURAL: parseLineMsgidPlural,
    parseState.MSGSTRS: parseLineMsgStrs,
    parseState.MSGCTXT: parseLineMsgCtxt
}
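
# The parser is a small state machine: parsePoFile() feeds each stripped line
# to the handler registered for the current msgstate, and the handlers above
# advance msgstate (LIMBO -> MSGCTXT/MSGID -> MSGID_PLURAL/MSGSTRS -> LIMBO)
# and call storeMsg() once a complete entry has been read.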

def parsePoFile(poFilename):
    poFile = open(poFilename)
    poLines = poFile.readlines()
    for poLine in poLines:
        stripped = poLine.strip()
        if poLine.startswith("#"):
            comments.append(poLine)
        else:
            parseSwitch[msgstate](stripped)

print("Start parsing")
for poFileName in poFiles:
    parsePoFile(poFileName)
print("Im done parsing!")

def writeMsgsToFile(msgs, fileName):
    outFile = open(fileName, "w")
    for msgKey in msgs:
        msg = msgs[msgKey]
        msgid = msg.msgid
        msgid_plural = msg.msgid_plural
        msgstrs = msg.msgstrs
        msgctxt = msg.msgctxt
        comments = msg.comments
        for comment in comments:
            outFile.write(comment + "\n")
        if msgctxt != "":
            outFile.write(msgctxt + "\n")
        outFile.write(msgid + "\n")
        if msgid_plural != "":
            outFile.write(msgid_plural)
            outFile.write("\n")
        for msgstr in msgstrs:
            outFile.write(msgstr)
            outFile.write("\n")
        outFile.write("\n")
writeMsgsToFile(msgsDone, alreadyTranslatedFilename)
writeMsgsToFile(msgsToDo, toBeTranslatedFilename)
print("Files written")
print("Of the non-translated msgs there were #" + str(toDoDone) + " that turned out to be translated somewhere already!") | agpl-3.0 | -3,025,433,825,465,988,000 | 23.24127 | 246 | 0.682515 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_5_0/device_group_defn_broker.py | 16 | 123011 | from ..broker import Broker
class DeviceGroupDefnBroker(Broker):
    controller = "device_group_defns"

    def index(self, **kwargs):
        """Lists the available device group defns. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupName: The device group name, as specified by the user.
:type GroupName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupName: The device group name, as specified by the user.
:type GroupName: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` GroupID
:param sort: The data field(s) to use for sorting the output. Default is GroupID. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupDefn. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defns: An array of the DeviceGroupDefn objects that match the specified input criteria.
:rtype device_group_defns: Array of DeviceGroupDefn
"""
        return self.api_list_request(self._get_method_fullname("index"), kwargs)
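
    # Illustrative usage sketch (not part of the broker itself): how index()
    # is typically reached through the infoblox_netmri client. The host,
    # credentials, and the exact client entry point used here are assumptions
    # for illustration only.
    #
    #   from infoblox_netmri.client import InfobloxNetMRI
    #
    #   client = InfobloxNetMRI(host="netmri.example.com",
    #                           username="admin",
    #                           password="secret")
    #   broker = client.get_broker("DeviceGroupDefn")
    #   for group in broker.index(limit=10, sort=["GroupName"]):
    #       print(group.GroupID, group.GroupName)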

    def show(self, **kwargs):
        """Shows the details for the specified device group defn.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defn: The device group defn identified by the specified GroupID.
:rtype device_group_defn: DeviceGroupDefn
"""
        return self.api_request(self._get_method_fullname("show"), kwargs)

    def search(self, **kwargs):
        """Lists the available device group defns matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ARPCacheRefreshInd: A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection.
:type ARPCacheRefreshInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ARPCacheRefreshInd: A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection.
:type ARPCacheRefreshInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param AdvancedGroupInd: A flag indicating whether this group is an advanced group.
:type AdvancedGroupInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param AdvancedGroupInd: A flag indicating whether this group is an advanced group.
:type AdvancedGroupInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BlackoutDuration: The blackout duration in minutes.
:type BlackoutDuration: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BlackoutDuration: The blackout duration in minutes.
:type BlackoutDuration: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CCSCollection: A flag indicating whether job execution is enabled against this group.
:type CCSCollection: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CCSCollection: A flag indicating whether job execution is enabled against this group.
:type CCSCollection: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CLIPolling: A flag indicating whether this group should be polled via the command line interface.
:type CLIPolling: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CLIPolling: A flag indicating whether this group should be polled via the command line interface.
:type CLIPolling: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ConfigLocked: Indicates whether configuration changes within this group are considered authorized or unauthorized.
:type ConfigLocked: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ConfigLocked: Indicates whether configuration changes within this group are considered authorized or unauthorized.
:type ConfigLocked: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ConfigPolling: A flag indicating whether configuration file collection is enabled for this group.
:type ConfigPolling: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ConfigPolling: A flag indicating whether configuration file collection is enabled for this group.
:type ConfigPolling: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CredentialGroupID: The unique identifier of the credential group.
:type CredentialGroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CredentialGroupID: The unique identifier of the credential group.
:type CredentialGroupID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Criteria: The criteria used to place members within the group.
:type Criteria: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Criteria: The criteria used to place members within the group.
:type Criteria: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FingerPrint: A flag indicating whether network fingerprinting should be performed on this group.
:type FingerPrint: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FingerPrint: A flag indicating whether network fingerprinting should be performed on this group.
:type FingerPrint: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupName: The device group name, as specified by the user.
:type GroupName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupName: The device group name, as specified by the user.
:type GroupName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IncludeEndHostsInd: A flag indicating whether this group should include end host devices.
:type IncludeEndHostsInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IncludeEndHostsInd: A flag indicating whether this group should include end host devices.
:type IncludeEndHostsInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param MemberCount: Not used.
:type MemberCount: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param MemberCount: Not used.
:type MemberCount: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NetBIOSScanningInd: A flag indicating whether to scan this group for NetBIOS names.
:type NetBIOSScanningInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetBIOSScanningInd: A flag indicating whether to scan this group for NetBIOS names.
:type NetBIOSScanningInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ParentDeviceGroupID: Internal identifier for the parent device group. A value of 0 is used for root level groups.
:type ParentDeviceGroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceGroupID: Internal identifier for the parent device group. A value of 0 is used for root level groups.
:type ParentDeviceGroupID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PerfEnvPollingInd: A flag that indicates if Performance and Environment polling is enabled for the device group members.
:type PerfEnvPollingInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PerfEnvPollingInd: A flag that indicates if Performance and Environment polling is enabled for the device group members.
:type PerfEnvPollingInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolFreqModifier: Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value.
:type PolFreqModifier: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolFreqModifier: Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value.
:type PolFreqModifier: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyScheduleMode: Not used.
:type PolicyScheduleMode: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyScheduleMode: Not used.
:type PolicyScheduleMode: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PortControlBlackoutDuration: Port Control Blackout in minutes.
:type PortControlBlackoutDuration: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PortControlBlackoutDuration: Port Control Blackout in minutes.
:type PortControlBlackoutDuration: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PortScanning: A flag indicating whether port scanning is enabled for this group.
:type PortScanning: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PortScanning: A flag indicating whether port scanning is enabled for this group.
:type PortScanning: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PrivilegedPollingInd: A flag indicating that NetMRI should send the enable command when interacting with the device.
:type PrivilegedPollingInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PrivilegedPollingInd: A flag indicating that NetMRI should send the enable command when interacting with the device.
:type PrivilegedPollingInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Rank: The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used.
:type Rank: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Rank: The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used.
:type Rank: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SAMLicensedInd: A flag indicating whether or not access diff viewer is available for this entry.
:type SAMLicensedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SAMLicensedInd: A flag indicating whether or not access diff viewer is available for this entry.
:type SAMLicensedInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SNMPAnalysis: A flag indicating whether issue analysis should be performed on this group.
:type SNMPAnalysis: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SNMPAnalysis: A flag indicating whether issue analysis should be performed on this group.
:type SNMPAnalysis: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SNMPPolling: A flag indicating whether this group should be polled via SNMP.
:type SNMPPolling: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SNMPPolling: A flag indicating whether this group should be polled via SNMP.
:type SNMPPolling: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SPMCollectionInd: A flag indicating whether Switch Port Management collection applies to this group.
:type SPMCollectionInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SPMCollectionInd: A flag indicating whether Switch Port Management collection applies to this group.
:type SPMCollectionInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StandardsCompliance: A flag indicating whether this group is subject to standards compliance reporting.
:type StandardsCompliance: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StandardsCompliance: A flag indicating whether this group is subject to standards compliance reporting.
:type StandardsCompliance: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartBlackoutSchedule: The blackout start time in cron format.
:type StartBlackoutSchedule: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartBlackoutSchedule: The blackout start time in cron format.
:type StartBlackoutSchedule: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartPortControlBlackoutSchedule: Port Control Blackout in cron format.
:type StartPortControlBlackoutSchedule: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartPortControlBlackoutSchedule: Port Control Blackout in cron format.
:type StartPortControlBlackoutSchedule: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SystemGroupInd: A flag indicating if this device group is system-created
:type SystemGroupInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SystemGroupInd: A flag indicating if this device group is system-created
:type SystemGroupInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UpdatedAt: The date and time this record was last modified.
:type UpdatedAt: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UpdatedAt: The date and time this record was last modified.
:type UpdatedAt: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UseGlobalPolFreq: A flag indicating if Global Polling Frequency should be used instead Device Group Polling Frequency.
:type UseGlobalPolFreq: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UseGlobalPolFreq: A flag indicating if Global Polling Frequency should be used instead Device Group Polling Frequency.
:type UseGlobalPolFreq: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VendorDefaultCollection: A flag indicating whether vendor default credential collection is enabled for this group.
:type VendorDefaultCollection: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VendorDefaultCollection: A flag indicating whether vendor default credential collection is enabled for this group.
:type VendorDefaultCollection: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` GroupID
:param sort: The data field(s) to use for sorting the output. Default is GroupID. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupDefn. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device group defns, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ARPCacheRefreshInd, AdvancedGroupInd, BlackoutDuration, CCSCollection, CLIPolling, ConfigLocked, ConfigPolling, CredentialGroupID, Criteria, FingerPrint, GroupID, GroupName, IncludeEndHostsInd, MemberCount, NetBIOSScanningInd, ParentDeviceGroupID, PerfEnvPollingInd, PolFreqModifier, PolicyScheduleMode, PortControlBlackoutDuration, PortScanning, PrivilegedPollingInd, Rank, SAMLicensedInd, SNMPAnalysis, SNMPPolling, SPMCollectionInd, StandardsCompliance, StartBlackoutSchedule, StartPortControlBlackoutSchedule, SystemGroupInd, UpdatedAt, UseGlobalPolFreq, VendorDefaultCollection.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defns: An array of the DeviceGroupDefn objects that match the specified input criteria.
:rtype device_group_defns: Array of DeviceGroupDefn
"""
        return self.api_list_request(self._get_method_fullname("search"), kwargs)

    def find(self, **kwargs):
        """Lists the available device group defns matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: ARPCacheRefreshInd, AdvancedGroupInd, BlackoutDuration, CCSCollection, CLIPolling, ConfigLocked, ConfigPolling, CredentialGroupID, Criteria, FingerPrint, GroupID, GroupName, IncludeEndHostsInd, MemberCount, NetBIOSScanningInd, ParentDeviceGroupID, PerfEnvPollingInd, PolFreqModifier, PolicyScheduleMode, PortControlBlackoutDuration, PortScanning, PrivilegedPollingInd, Rank, SAMLicensedInd, SNMPAnalysis, SNMPPolling, SPMCollectionInd, StandardsCompliance, StartBlackoutSchedule, StartPortControlBlackoutSchedule, SystemGroupInd, UpdatedAt, UseGlobalPolFreq, VendorDefaultCollection.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ARPCacheRefreshInd: The operator to apply to the field ARPCacheRefreshInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ARPCacheRefreshInd: A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ARPCacheRefreshInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ARPCacheRefreshInd: If op_ARPCacheRefreshInd is specified, the field named in this input will be compared to the value in ARPCacheRefreshInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ARPCacheRefreshInd must be specified if op_ARPCacheRefreshInd is specified.
:type val_f_ARPCacheRefreshInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ARPCacheRefreshInd: If op_ARPCacheRefreshInd is specified, this value will be compared to the value in ARPCacheRefreshInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ARPCacheRefreshInd must be specified if op_ARPCacheRefreshInd is specified.
:type val_c_ARPCacheRefreshInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_AdvancedGroupInd: The operator to apply to the field AdvancedGroupInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. AdvancedGroupInd: A flag indicating whether this group is an advanced group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_AdvancedGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_AdvancedGroupInd: If op_AdvancedGroupInd is specified, the field named in this input will be compared to the value in AdvancedGroupInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_AdvancedGroupInd must be specified if op_AdvancedGroupInd is specified.
:type val_f_AdvancedGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_AdvancedGroupInd: If op_AdvancedGroupInd is specified, this value will be compared to the value in AdvancedGroupInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_AdvancedGroupInd must be specified if op_AdvancedGroupInd is specified.
:type val_c_AdvancedGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BlackoutDuration: The operator to apply to the field BlackoutDuration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BlackoutDuration: The blackout duration in minutes. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BlackoutDuration: If op_BlackoutDuration is specified, the field named in this input will be compared to the value in BlackoutDuration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BlackoutDuration must be specified if op_BlackoutDuration is specified.
:type val_f_BlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BlackoutDuration: If op_BlackoutDuration is specified, this value will be compared to the value in BlackoutDuration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BlackoutDuration must be specified if op_BlackoutDuration is specified.
:type val_c_BlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CCSCollection: The operator to apply to the field CCSCollection. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CCSCollection: A flag indicating whether job execution is enabled against this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CCSCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CCSCollection: If op_CCSCollection is specified, the field named in this input will be compared to the value in CCSCollection using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CCSCollection must be specified if op_CCSCollection is specified.
:type val_f_CCSCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CCSCollection: If op_CCSCollection is specified, this value will be compared to the value in CCSCollection using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CCSCollection must be specified if op_CCSCollection is specified.
:type val_c_CCSCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CLIPolling: The operator to apply to the field CLIPolling. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CLIPolling: A flag indicating whether this group should be polled via the command line interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CLIPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CLIPolling: If op_CLIPolling is specified, the field named in this input will be compared to the value in CLIPolling using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CLIPolling must be specified if op_CLIPolling is specified.
:type val_f_CLIPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CLIPolling: If op_CLIPolling is specified, this value will be compared to the value in CLIPolling using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CLIPolling must be specified if op_CLIPolling is specified.
:type val_c_CLIPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ConfigLocked: The operator to apply to the field ConfigLocked. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ConfigLocked: Indicates whether configuration changes within this group are considered authorized or unauthorized. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ConfigLocked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ConfigLocked: If op_ConfigLocked is specified, the field named in this input will be compared to the value in ConfigLocked using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ConfigLocked must be specified if op_ConfigLocked is specified.
:type val_f_ConfigLocked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ConfigLocked: If op_ConfigLocked is specified, this value will be compared to the value in ConfigLocked using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ConfigLocked must be specified if op_ConfigLocked is specified.
:type val_c_ConfigLocked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ConfigPolling: The operator to apply to the field ConfigPolling. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ConfigPolling: A flag indicating whether configuration file collection is enabled for this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ConfigPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ConfigPolling: If op_ConfigPolling is specified, the field named in this input will be compared to the value in ConfigPolling using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ConfigPolling must be specified if op_ConfigPolling is specified.
:type val_f_ConfigPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ConfigPolling: If op_ConfigPolling is specified, this value will be compared to the value in ConfigPolling using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ConfigPolling must be specified if op_ConfigPolling is specified.
:type val_c_ConfigPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CredentialGroupID: The operator to apply to the field CredentialGroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CredentialGroupID: The unique identifier of the credential group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CredentialGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CredentialGroupID: If op_CredentialGroupID is specified, the field named in this input will be compared to the value in CredentialGroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CredentialGroupID must be specified if op_CredentialGroupID is specified.
:type val_f_CredentialGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CredentialGroupID: If op_CredentialGroupID is specified, this value will be compared to the value in CredentialGroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CredentialGroupID must be specified if op_CredentialGroupID is specified.
:type val_c_CredentialGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Criteria: The operator to apply to the field Criteria. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Criteria: The criteria used to place members within the group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Criteria: If op_Criteria is specified, the field named in this input will be compared to the value in Criteria using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Criteria must be specified if op_Criteria is specified.
:type val_f_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Criteria: If op_Criteria is specified, this value will be compared to the value in Criteria using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Criteria must be specified if op_Criteria is specified.
:type val_c_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FingerPrint: The operator to apply to the field FingerPrint. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FingerPrint: A flag indicating whether network fingerprinting should be performed on this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FingerPrint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FingerPrint: If op_FingerPrint is specified, the field named in this input will be compared to the value in FingerPrint using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FingerPrint must be specified if op_FingerPrint is specified.
:type val_f_FingerPrint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FingerPrint: If op_FingerPrint is specified, this value will be compared to the value in FingerPrint using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FingerPrint must be specified if op_FingerPrint is specified.
:type val_c_FingerPrint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_GroupID: The operator to apply to the field GroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GroupID: The internal NetMRI identifier for this device group definition. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_GroupID: If op_GroupID is specified, the field named in this input will be compared to the value in GroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GroupID must be specified if op_GroupID is specified.
:type val_f_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_GroupID: If op_GroupID is specified, this value will be compared to the value in GroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GroupID must be specified if op_GroupID is specified.
:type val_c_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_GroupName: The operator to apply to the field GroupName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GroupName: The device group name, as specified by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_GroupName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_GroupName: If op_GroupName is specified, the field named in this input will be compared to the value in GroupName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GroupName must be specified if op_GroupName is specified.
:type val_f_GroupName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_GroupName: If op_GroupName is specified, this value will be compared to the value in GroupName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GroupName must be specified if op_GroupName is specified.
:type val_c_GroupName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IncludeEndHostsInd: The operator to apply to the field IncludeEndHostsInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IncludeEndHostsInd: A flag indicating whether this group should include end host devices. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IncludeEndHostsInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IncludeEndHostsInd: If op_IncludeEndHostsInd is specified, the field named in this input will be compared to the value in IncludeEndHostsInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IncludeEndHostsInd must be specified if op_IncludeEndHostsInd is specified.
:type val_f_IncludeEndHostsInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IncludeEndHostsInd: If op_IncludeEndHostsInd is specified, this value will be compared to the value in IncludeEndHostsInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IncludeEndHostsInd must be specified if op_IncludeEndHostsInd is specified.
:type val_c_IncludeEndHostsInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_MemberCount: The operator to apply to the field MemberCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. MemberCount: Not used. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_MemberCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_MemberCount: If op_MemberCount is specified, the field named in this input will be compared to the value in MemberCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_MemberCount must be specified if op_MemberCount is specified.
:type val_f_MemberCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_MemberCount: If op_MemberCount is specified, this value will be compared to the value in MemberCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_MemberCount must be specified if op_MemberCount is specified.
:type val_c_MemberCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_NetBIOSScanningInd: The operator to apply to the field NetBIOSScanningInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NetBIOSScanningInd: A flag indicating whether to scan this group for NetBIOS names. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NetBIOSScanningInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NetBIOSScanningInd: If op_NetBIOSScanningInd is specified, the field named in this input will be compared to the value in NetBIOSScanningInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NetBIOSScanningInd must be specified if op_NetBIOSScanningInd is specified.
:type val_f_NetBIOSScanningInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NetBIOSScanningInd: If op_NetBIOSScanningInd is specified, this value will be compared to the value in NetBIOSScanningInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NetBIOSScanningInd must be specified if op_NetBIOSScanningInd is specified.
:type val_c_NetBIOSScanningInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ParentDeviceGroupID: The operator to apply to the field ParentDeviceGroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ParentDeviceGroupID: Internal identifier for the parent device group. A value of 0 is used for root level groups. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ParentDeviceGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ParentDeviceGroupID: If op_ParentDeviceGroupID is specified, the field named in this input will be compared to the value in ParentDeviceGroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ParentDeviceGroupID must be specified if op_ParentDeviceGroupID is specified.
:type val_f_ParentDeviceGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ParentDeviceGroupID: If op_ParentDeviceGroupID is specified, this value will be compared to the value in ParentDeviceGroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ParentDeviceGroupID must be specified if op_ParentDeviceGroupID is specified.
:type val_c_ParentDeviceGroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PerfEnvPollingInd: The operator to apply to the field PerfEnvPollingInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PerfEnvPollingInd: A flag that indicates if Performance and Environment polling is enabled for the device group members. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PerfEnvPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PerfEnvPollingInd: If op_PerfEnvPollingInd is specified, the field named in this input will be compared to the value in PerfEnvPollingInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PerfEnvPollingInd must be specified if op_PerfEnvPollingInd is specified.
:type val_f_PerfEnvPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PerfEnvPollingInd: If op_PerfEnvPollingInd is specified, this value will be compared to the value in PerfEnvPollingInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PerfEnvPollingInd must be specified if op_PerfEnvPollingInd is specified.
:type val_c_PerfEnvPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolFreqModifier: The operator to apply to the field PolFreqModifier. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolFreqModifier: Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolFreqModifier: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolFreqModifier: If op_PolFreqModifier is specified, the field named in this input will be compared to the value in PolFreqModifier using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolFreqModifier must be specified if op_PolFreqModifier is specified.
:type val_f_PolFreqModifier: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolFreqModifier: If op_PolFreqModifier is specified, this value will be compared to the value in PolFreqModifier using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolFreqModifier must be specified if op_PolFreqModifier is specified.
:type val_c_PolFreqModifier: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyScheduleMode: The operator to apply to the field PolicyScheduleMode. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyScheduleMode: Not used. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyScheduleMode: If op_PolicyScheduleMode is specified, the field named in this input will be compared to the value in PolicyScheduleMode using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyScheduleMode must be specified if op_PolicyScheduleMode is specified.
:type val_f_PolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyScheduleMode: If op_PolicyScheduleMode is specified, this value will be compared to the value in PolicyScheduleMode using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyScheduleMode must be specified if op_PolicyScheduleMode is specified.
:type val_c_PolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PortControlBlackoutDuration: The operator to apply to the field PortControlBlackoutDuration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PortControlBlackoutDuration: Port Control Blackout in minutes. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PortControlBlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PortControlBlackoutDuration: If op_PortControlBlackoutDuration is specified, the field named in this input will be compared to the value in PortControlBlackoutDuration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PortControlBlackoutDuration must be specified if op_PortControlBlackoutDuration is specified.
:type val_f_PortControlBlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PortControlBlackoutDuration: If op_PortControlBlackoutDuration is specified, this value will be compared to the value in PortControlBlackoutDuration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PortControlBlackoutDuration must be specified if op_PortControlBlackoutDuration is specified.
:type val_c_PortControlBlackoutDuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PortScanning: The operator to apply to the field PortScanning. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PortScanning: A flag indicating whether port scanning is enabled for this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PortScanning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PortScanning: If op_PortScanning is specified, the field named in this input will be compared to the value in PortScanning using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PortScanning must be specified if op_PortScanning is specified.
:type val_f_PortScanning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PortScanning: If op_PortScanning is specified, this value will be compared to the value in PortScanning using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PortScanning must be specified if op_PortScanning is specified.
:type val_c_PortScanning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_PrivilegedPollingInd: The operator to apply to the field PrivilegedPollingInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PrivilegedPollingInd: A flag indicating that NetMRI should send the enable command when interacting with the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PrivilegedPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PrivilegedPollingInd: If op_PrivilegedPollingInd is specified, the field named in this input will be compared to the value in PrivilegedPollingInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PrivilegedPollingInd must be specified if op_PrivilegedPollingInd is specified.
:type val_f_PrivilegedPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PrivilegedPollingInd: If op_PrivilegedPollingInd is specified, this value will be compared to the value in PrivilegedPollingInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PrivilegedPollingInd must be specified if op_PrivilegedPollingInd is specified.
:type val_c_PrivilegedPollingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Rank: The operator to apply to the field Rank. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Rank: The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Rank: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Rank: If op_Rank is specified, the field named in this input will be compared to the value in Rank using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Rank must be specified if op_Rank is specified.
:type val_f_Rank: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Rank: If op_Rank is specified, this value will be compared to the value in Rank using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Rank must be specified if op_Rank is specified.
:type val_c_Rank: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_SAMLicensedInd: The operator to apply to the field SAMLicensedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SAMLicensedInd: A flag indicating whether or not the access diff viewer is available for this entry. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SAMLicensedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SAMLicensedInd: If op_SAMLicensedInd is specified, the field named in this input will be compared to the value in SAMLicensedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SAMLicensedInd must be specified if op_SAMLicensedInd is specified.
:type val_f_SAMLicensedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SAMLicensedInd: If op_SAMLicensedInd is specified, this value will be compared to the value in SAMLicensedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SAMLicensedInd must be specified if op_SAMLicensedInd is specified.
:type val_c_SAMLicensedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SNMPAnalysis: The operator to apply to the field SNMPAnalysis. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SNMPAnalysis: A flag indicating whether issue analysis should be performed on this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SNMPAnalysis: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SNMPAnalysis: If op_SNMPAnalysis is specified, the field named in this input will be compared to the value in SNMPAnalysis using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SNMPAnalysis must be specified if op_SNMPAnalysis is specified.
:type val_f_SNMPAnalysis: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SNMPAnalysis: If op_SNMPAnalysis is specified, this value will be compared to the value in SNMPAnalysis using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SNMPAnalysis must be specified if op_SNMPAnalysis is specified.
:type val_c_SNMPAnalysis: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SNMPPolling: The operator to apply to the field SNMPPolling. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SNMPPolling: A flag indicating whether this group should be polled via SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SNMPPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SNMPPolling: If op_SNMPPolling is specified, the field named in this input will be compared to the value in SNMPPolling using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SNMPPolling must be specified if op_SNMPPolling is specified.
:type val_f_SNMPPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SNMPPolling: If op_SNMPPolling is specified, this value will be compared to the value in SNMPPolling using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SNMPPolling must be specified if op_SNMPPolling is specified.
:type val_c_SNMPPolling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SPMCollectionInd: The operator to apply to the field SPMCollectionInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SPMCollectionInd: A flag indicating whether Switch Port Management collection applies to this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SPMCollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SPMCollectionInd: If op_SPMCollectionInd is specified, the field named in this input will be compared to the value in SPMCollectionInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SPMCollectionInd must be specified if op_SPMCollectionInd is specified.
:type val_f_SPMCollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SPMCollectionInd: If op_SPMCollectionInd is specified, this value will be compared to the value in SPMCollectionInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SPMCollectionInd must be specified if op_SPMCollectionInd is specified.
:type val_c_SPMCollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_StandardsCompliance: The operator to apply to the field StandardsCompliance. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StandardsCompliance: A flag indicating whether this group is subject to standards compliance reporting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StandardsCompliance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StandardsCompliance: If op_StandardsCompliance is specified, the field named in this input will be compared to the value in StandardsCompliance using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StandardsCompliance must be specified if op_StandardsCompliance is specified.
:type val_f_StandardsCompliance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StandardsCompliance: If op_StandardsCompliance is specified, this value will be compared to the value in StandardsCompliance using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StandardsCompliance must be specified if op_StandardsCompliance is specified.
:type val_c_StandardsCompliance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartBlackoutSchedule: The operator to apply to the field StartBlackoutSchedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartBlackoutSchedule: The blackout start time in cron format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartBlackoutSchedule: If op_StartBlackoutSchedule is specified, the field named in this input will be compared to the value in StartBlackoutSchedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartBlackoutSchedule must be specified if op_StartBlackoutSchedule is specified.
:type val_f_StartBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartBlackoutSchedule: If op_StartBlackoutSchedule is specified, this value will be compared to the value in StartBlackoutSchedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartBlackoutSchedule must be specified if op_StartBlackoutSchedule is specified.
:type val_c_StartBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartPortControlBlackoutSchedule: The operator to apply to the field StartPortControlBlackoutSchedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartPortControlBlackoutSchedule: Port Control Blackout in cron format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartPortControlBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartPortControlBlackoutSchedule: If op_StartPortControlBlackoutSchedule is specified, the field named in this input will be compared to the value in StartPortControlBlackoutSchedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartPortControlBlackoutSchedule must be specified if op_StartPortControlBlackoutSchedule is specified.
:type val_f_StartPortControlBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartPortControlBlackoutSchedule: If op_StartPortControlBlackoutSchedule is specified, this value will be compared to the value in StartPortControlBlackoutSchedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartPortControlBlackoutSchedule must be specified if op_StartPortControlBlackoutSchedule is specified.
:type val_c_StartPortControlBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_SystemGroupInd: The operator to apply to the field SystemGroupInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SystemGroupInd: A flag indicating if this device group is system-created. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SystemGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SystemGroupInd: If op_SystemGroupInd is specified, the field named in this input will be compared to the value in SystemGroupInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SystemGroupInd must be specified if op_SystemGroupInd is specified.
:type val_f_SystemGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SystemGroupInd: If op_SystemGroupInd is specified, this value will be compared to the value in SystemGroupInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SystemGroupInd must be specified if op_SystemGroupInd is specified.
:type val_c_SystemGroupInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_UpdatedAt: The operator to apply to the field UpdatedAt. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UpdatedAt: The date and time this record was last modified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UpdatedAt: If op_UpdatedAt is specified, the field named in this input will be compared to the value in UpdatedAt using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UpdatedAt must be specified if op_UpdatedAt is specified.
:type val_f_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UpdatedAt: If op_UpdatedAt is specified, this value will be compared to the value in UpdatedAt using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UpdatedAt must be specified if op_UpdatedAt is specified.
:type val_c_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_UseGlobalPolFreq: The operator to apply to the field UseGlobalPolFreq. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UseGlobalPolFreq: A flag indicating if Global Polling Frequency should be used instead of Device Group Polling Frequency. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UseGlobalPolFreq: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UseGlobalPolFreq: If op_UseGlobalPolFreq is specified, the field named in this input will be compared to the value in UseGlobalPolFreq using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UseGlobalPolFreq must be specified if op_UseGlobalPolFreq is specified.
:type val_f_UseGlobalPolFreq: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UseGlobalPolFreq: If op_UseGlobalPolFreq is specified, this value will be compared to the value in UseGlobalPolFreq using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UseGlobalPolFreq must be specified if op_UseGlobalPolFreq is specified.
:type val_c_UseGlobalPolFreq: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VendorDefaultCollection: The operator to apply to the field VendorDefaultCollection. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VendorDefaultCollection: A flag indicating whether vendor default credential collection is enabled for this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VendorDefaultCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VendorDefaultCollection: If op_VendorDefaultCollection is specified, the field named in this input will be compared to the value in VendorDefaultCollection using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VendorDefaultCollection must be specified if op_VendorDefaultCollection is specified.
:type val_f_VendorDefaultCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VendorDefaultCollection: If op_VendorDefaultCollection is specified, this value will be compared to the value in VendorDefaultCollection using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VendorDefaultCollection must be specified if op_VendorDefaultCollection is specified.
:type val_c_VendorDefaultCollection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` GroupID
:param sort: The data field(s) to use for sorting the output. Default is GroupID. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupDefn. Valid values are GroupID, ParentDeviceGroupID, GroupName, Criteria, Rank, SNMPPolling, CLIPolling, SNMPAnalysis, FingerPrint, CCSCollection, VendorDefaultCollection, ConfigPolling, PortScanning, StandardsCompliance, MemberCount, ConfigLocked, PrivilegedPollingInd, UseGlobalPolFreq, PolFreqModifier, PolicyScheduleMode, PerfEnvPollingInd, SPMCollectionInd, NetBIOSScanningInd, ARPCacheRefreshInd, SAMLicensedInd, StartBlackoutSchedule, BlackoutDuration, StartPortControlBlackoutSchedule, PortControlBlackoutDuration, UpdatedAt, AdvancedGroupInd, IncludeEndHostsInd, CredentialGroupID, SystemGroupInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defns: An array of the DeviceGroupDefn objects that match the specified input criteria.
:rtype device_group_defns: Array of DeviceGroupDefn
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
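    # Illustrative usage sketch, not part of the original source: how the broker
    # object is obtained depends on the client wrapper, so `client` and the
    # `get_broker` helper below are assumptions. The keyword arguments map directly
    # onto the op_*/val_c_* inputs documented above, and the attribute access on the
    # returned objects is likewise assumed.
    #
    #   broker = client.get_broker('DeviceGroupDefn')   # hypothetical helper
    #   defns = broker.find(op_GroupName='like',
    #                       val_c_GroupName='%core%',
    #                       select=['GroupID', 'GroupName', 'Rank'],
    #                       sort=['GroupName'], dir=['asc'], limit=100)
    #   for defn in defns:
    #       print(defn.GroupID, defn.GroupName)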
def create(self, **kwargs):
"""Creates a new device group defn.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ARPCacheRefreshInd: A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection.
:type ARPCacheRefreshInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1
:param AdvancedGroupInd: A flag indicating whether this group is an advanced group.
:type AdvancedGroupInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param BlackoutDuration: The blackout duration in minutes.
:type BlackoutDuration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param CCSCollection: A flag indicating whether job execution is enabled against this group.
:type CCSCollection: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param CLIPolling: A flag indicating whether this group should be polled via the command line interface.
:type CLIPolling: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ConfigLocked: Indicates whether configuration changes within this group are considered authorized or unauthorized.
:type ConfigLocked: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ConfigPolling: A flag indicating whether configuration file collection is enabled for this group.
:type ConfigPolling: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CredentialGroupID: The unique identifier of the credential group.
:type CredentialGroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Criteria: The criteria used to place members within the group.
:type Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param FingerPrint: A flag indicating whether network fingerprinting should be performed on this group.
:type FingerPrint: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param GroupName: The device group name, as specified by the user.
:type GroupName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1
:param IncludeEndHostsInd: A flag indicating whether this group should include end host devices.
:type IncludeEndHostsInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
             :param NetBIOSScanningInd: A flag indicating whether to scan this group for NetBIOS names.
:type NetBIOSScanningInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ParentDeviceGroupID: Internal identifier for the parent device group. A value of 0 is used for root level groups.
:type ParentDeviceGroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PerfEnvPollingInd: A flag that indicates if Performance and Environment polling is enabled for the device group members.
:type PerfEnvPollingInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolFreqModifier: Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value.
:type PolFreqModifier: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param PortControlBlackoutDuration: Port Control Blackout in minutes.
:type PortControlBlackoutDuration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param PortScanning: A flag indicating whether port scanning is enabled for this group.
:type PortScanning: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1
             :param PrivilegedPollingInd: A flag indicating that NetMRI should send the enable command when interacting with the device.
:type PrivilegedPollingInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1
:param Rank: The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used.
:type Rank: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
             :param SAMLicensedInd: A flag indicating whether or not the access diff viewer is available for this entry.
:type SAMLicensedInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param SNMPAnalysis: A flag indicating whether issue analysis should be performed on this group.
:type SNMPAnalysis: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param SNMPPolling: A flag indicating whether this group should be polled via SNMP.
:type SNMPPolling: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param SPMCollectionInd: A flag indicating whether Switch Port Management collection applies to this group.
:type SPMCollectionInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
             :param StandardsCompliance: A flag indicating whether this group is subject to standards compliance reporting.
:type StandardsCompliance: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param StartBlackoutSchedule: The blackout start time in cron format.
:type StartBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param StartPortControlBlackoutSchedule: Port Control Blackout in cron format.
:type StartPortControlBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param UseGlobalPolFreq: A flag indicating if Global Polling Frequency should be used instead of Device Group Polling Frequency.
:type UseGlobalPolFreq: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param VendorDefaultCollection: A flag indicating whether vendor default credential collection is enabled for this group.
:type VendorDefaultCollection: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created device group defn.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created device group defn.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created device group defn.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defn: The newly created device group defn.
:rtype device_group_defn: DeviceGroupDefn
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""Updates an existing device group defn.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ARPCacheRefreshInd: A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection. If omitted, this field will not be updated.
:type ARPCacheRefreshInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param AdvancedGroupInd: A flag indicating whether this group is an advanced group. If omitted, this field will not be updated.
:type AdvancedGroupInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BlackoutDuration: The blackout duration in minutes. If omitted, this field will not be updated.
:type BlackoutDuration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CCSCollection: A flag indicating whether job execution is enabled against this group. If omitted, this field will not be updated.
:type CCSCollection: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CLIPolling: A flag indicating whether this group should be polled via the command line interface. If omitted, this field will not be updated.
:type CLIPolling: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ConfigLocked: Indicates whether configuration changes within this group are considered authorized or unauthorized. If omitted, this field will not be updated.
:type ConfigLocked: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ConfigPolling: A flag indicating whether configuration file collection is enabled for this group. If omitted, this field will not be updated.
:type ConfigPolling: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CredentialGroupID: The unique identifier of the credential group. If omitted, this field will not be updated.
:type CredentialGroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Criteria: The criteria used to place members within the group. If omitted, this field will not be updated.
:type Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FingerPrint: A flag indicating whether network fingerprinting should be performed on this group. If omitted, this field will not be updated.
:type FingerPrint: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupName: The device group name, as specified by the user. If omitted, this field will not be updated.
:type GroupName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IncludeEndHostsInd: A flag indicating whether this group should include end host devices. If omitted, this field will not be updated.
:type IncludeEndHostsInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param NetBIOSScanningInd: A flag indicating whether to scan this group for NetBIOS names. If omitted, this field will not be updated.
:type NetBIOSScanningInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceGroupID: Internal identifier for the parent device group. A value of 0 is used for root level groups. If omitted, this field will not be updated.
:type ParentDeviceGroupID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PerfEnvPollingInd: A flag that indicates if Performance and Environment polling is enabled for the device group members. If omitted, this field will not be updated.
:type PerfEnvPollingInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolFreqModifier: Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value. If omitted, this field will not be updated.
:type PolFreqModifier: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyScheduleMode: Not used. If omitted, this field will not be updated.
:type PolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PortControlBlackoutDuration: Port Control Blackout in minutes. If omitted, this field will not be updated.
:type PortControlBlackoutDuration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PortScanning: A flag indicating whether port scanning is enabled for this group. If omitted, this field will not be updated.
:type PortScanning: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param PrivilegedPollingInd: A flag indicating that NetMRI should send the enable command when interacting with the device. If omitted, this field will not be updated.
:type PrivilegedPollingInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Rank: The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used. If omitted, this field will not be updated.
:type Rank: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param SAMLicensedInd: A flag indicating whether or not the access diff viewer is available for this entry. If omitted, this field will not be updated.
:type SAMLicensedInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SNMPAnalysis: A flag indicating whether issue analysis should be performed on this group. If omitted, this field will not be updated.
:type SNMPAnalysis: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SNMPPolling: A flag indicating whether this group should be polled via SNMP. If omitted, this field will not be updated.
:type SNMPPolling: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SPMCollectionInd: A flag indicating whether Switch Port Management collection applies to this group. If omitted, this field will not be updated.
:type SPMCollectionInd: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param StandardsCompliance: A flag indicating whether this group is subject to standards compliance reporting. If omitted, this field will not be updated.
:type StandardsCompliance: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartBlackoutSchedule: The blackout start time in cron format. If omitted, this field will not be updated.
:type StartBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartPortControlBlackoutSchedule: Port Control Blackout in cron format. If omitted, this field will not be updated.
:type StartPortControlBlackoutSchedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param UseGlobalPolFreq: A flag indicating if Global Polling Frequency should be used instead of Device Group Polling Frequency. If omitted, this field will not be updated.
:type UseGlobalPolFreq: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VendorDefaultCollection: A flag indicating whether vendor default credential collection is enabled for this group. If omitted, this field will not be updated.
:type VendorDefaultCollection: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated device group defn.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated device group defn.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated device group defn.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_defn: The updated device group defn.
:rtype device_group_defn: DeviceGroupDefn
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified device group defn from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param GroupID: The internal NetMRI identifier for this device group definition.
:type GroupID: Integer
| ``api version min:`` 3.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param force: Set this to true to delete system-created group
:type force: Boolean
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
| apache-2.0 | -4,876,036,232,465,950,000 | 53.094547 | 999 | 0.612685 | false |
ocaisa/easybuild-framework | easybuild/tools/toolchain/toolchain.py | 3 | 19735 | # #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
The toolchain module with the abstract Toolchain class.
Creating a new toolchain should be as simple as possible.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import os
import re
from vsc.utils import fancylogger
from easybuild.tools.config import build_option, install_path
from easybuild.tools.environment import setvar
from easybuild.tools.modules import get_software_root, get_software_version, modules_tool
from easybuild.tools.toolchain import DUMMY_TOOLCHAIN_NAME, DUMMY_TOOLCHAIN_VERSION
from easybuild.tools.toolchain.options import ToolchainOptions
from easybuild.tools.toolchain.toolchainvariables import ToolchainVariables
_log = fancylogger.getLogger('tools.toolchain', fname=False)
class Toolchain(object):
"""General toolchain class"""
OPTIONS_CLASS = ToolchainOptions
VARIABLES_CLASS = ToolchainVariables
NAME = None
VERSION = None
# class method
def _is_toolchain_for(cls, name):
"""see if this class can provide support for toolchain named name"""
        # TODO: report the found version later, during initialization
if name:
if hasattr(cls, 'NAME') and name == cls.NAME:
return True
else:
return False
else:
            # if no name is supplied, check whether the class can be used as a toolchain
return hasattr(cls, 'NAME') and cls.NAME
_is_toolchain_for = classmethod(_is_toolchain_for)
def __init__(self, name=None, version=None, mns=None):
"""Toolchain constructor."""
self.base_init()
self.dependencies = []
self.toolchain_dep_mods = []
if name is None:
name = self.NAME
if name is None:
self.log.error("Toolchain init: no name provided")
self.name = name
if version is None:
version = self.VERSION
if version is None:
self.log.error("Toolchain init: no version provided")
self.version = version
self.vars = None
self.modules_tool = modules_tool()
self.mns = mns
self.mod_full_name = None
self.mod_short_name = None
self.init_modpaths = None
if self.name != DUMMY_TOOLCHAIN_NAME:
# sometimes no module naming scheme class instance can/will be provided, e.g. with --list-toolchains
if self.mns is not None:
tc_dict = self.as_dict()
self.mod_full_name = self.mns.det_full_module_name(tc_dict)
self.mod_short_name = self.mns.det_short_module_name(tc_dict)
self.init_modpaths = self.mns.det_init_modulepaths(tc_dict)
def base_init(self):
if not hasattr(self, 'log'):
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
if not hasattr(self, 'options'):
self.options = self.OPTIONS_CLASS()
if not hasattr(self, 'variables'):
self.variables = self.VARIABLES_CLASS()
if hasattr(self, 'LINKER_TOGGLE_START_STOP_GROUP'):
self.variables.LINKER_TOGGLE_START_STOP_GROUP = self.LINKER_TOGGLE_START_STOP_GROUP
if hasattr(self, 'LINKER_TOGGLE_STATIC_DYNAMIC'):
self.variables.LINKER_TOGGLE_STATIC_DYNAMIC = self.LINKER_TOGGLE_STATIC_DYNAMIC
def get_variable(self, name, typ=str):
"""Get value for specified variable.
typ: indicates what type of return value is expected"""
if typ == str:
return str(self.variables[name])
elif typ == list:
return self.variables[name].flatten()
else:
self.log.error("get_variable: Don't know how to create value of type %s." % typ)
def set_variables(self):
"""Do nothing? Everything should have been set by others
Needs to be defined for super() relations
"""
if self.options.option('packed-linker-options'):
self.log.debug("set_variables: toolchain variables. packed-linker-options.")
self.variables.try_function_on_element('set_packed_linker_options')
self.log.debug("set_variables: toolchain variables. Do nothing.")
def generate_vars(self):
"""Convert the variables in simple vars"""
self.vars = {}
for k, v in self.variables.items():
self.vars[k] = str(v)
def show_variables(self, offset='', sep='\n', verbose=False):
"""Pretty print the variables"""
if self.vars is None:
self.generate_vars()
var_names = self.variables.keys()
var_names.sort()
res = []
for v in var_names:
res.append("%s=%s" % (v, self.variables[v]))
if verbose:
res.append("# type %s" % (type(self.variables[v])))
res.append("# %s" % (self.variables[v].show_el()))
res.append("# repr %s" % (self.variables[v].__repr__()))
if offset is None:
offset = ''
txt = sep.join(["%s%s" % (offset, x) for x in res])
self.log.debug("show_variables:\n%s" % txt)
return txt
def get_software_root(self, names):
"""Try to get the software root for all names"""
return self._get_software_multiple(names, self._get_software_root)
def get_software_version(self, names):
"""Try to get the software version for all names"""
return self._get_software_multiple(names, self._get_software_version)
def _get_software_multiple(self, names, function):
"""Execute function of each of names"""
if isinstance(names, (str,)):
names = [names]
res = []
for name in names:
res.append(function(name))
return res
def _get_software_root(self, name):
"""Try to get the software root for name"""
root = get_software_root(name)
if root is None:
self.log.error("get_software_root software root for %s was not found in environment" % name)
else:
self.log.debug("get_software_root software root %s for %s was found in environment" % (root, name))
return root
def _get_software_version(self, name):
"""Try to get the software root for name"""
version = get_software_version(name)
if version is None:
self.log.error("get_software_version software version for %s was not found in environment" % name)
else:
self.log.debug("get_software_version software version %s for %s was found in environment" % (version, name))
return version
def as_dict(self, name=None, version=None):
"""Return toolchain specification as a dictionary."""
if name is None:
name = self.name
if version is None:
version = self.version
return {
'name': name,
'version': version,
'toolchain': {'name': DUMMY_TOOLCHAIN_NAME, 'version': DUMMY_TOOLCHAIN_VERSION},
'versionsuffix': '',
'dummy': True,
'parsed': True, # pretend this is a parsed easyconfig file, as may be required by det_short_module_name
'hidden': False,
}
def det_short_module_name(self):
"""Determine module name for this toolchain."""
if self.mod_short_name is None:
self.log.error("Toolchain module name was not set yet (using set_module_info).")
return self.mod_short_name
def _toolchain_exists(self):
"""
Verify if there exists a toolchain by this name and version
"""
# short-circuit to returning module name for this (non-dummy) toolchain
if self.name == DUMMY_TOOLCHAIN_NAME:
self.log.debug("_toolchain_exists: %s toolchain always exists, returning True" % DUMMY_TOOLCHAIN_NAME)
return True
else:
if self.mod_short_name is None:
self.log.error("Toolchain module name was not set yet (using set_module_info).")
# check whether a matching module exists if self.mod_short_name contains a module name
return self.modules_tool.exist([self.mod_full_name])[0]
def set_options(self, options):
""" Process toolchain options """
for opt in options.keys():
# Only process supported opts
if opt in self.options:
self.options[opt] = options[opt]
else:
# used to be warning, but this is a severe error imho
known_opts = ','.join(self.options.keys())
self.log.error("Undefined toolchain option %s specified (known options: %s)" % (opt, known_opts))
def get_dependency_version(self, dependency):
""" Generate a version string for a dependency on a module using this toolchain """
# Add toolchain to version string
toolchain = ''
if self.name != DUMMY_TOOLCHAIN_NAME:
toolchain = '-%s-%s' % (self.name, self.version)
elif self.version != DUMMY_TOOLCHAIN_VERSION:
toolchain = '%s' % (self.version)
# Check if dependency is independent of toolchain
# TODO: assuming dummy here, what about version?
if DUMMY_TOOLCHAIN_NAME in dependency and dependency[DUMMY_TOOLCHAIN_NAME]:
toolchain = ''
suffix = dependency.get('versionsuffix', '')
if 'version' in dependency:
version = "".join([dependency['version'], toolchain, suffix])
self.log.debug("get_dependency_version: version in dependency return %s" % version)
return version
else:
toolchain_suffix = "".join([toolchain, suffix])
matches = self.modules_tool.available(dependency['name'], toolchain_suffix)
# Find the most recent (or default) one
if len(matches) > 0:
version = matches[-1][-1]
self.log.debug("get_dependency_version: version not in dependency return %s" % version)
                return version
else:
tup = (dependency['name'], toolchain_suffix)
self.log.error("No toolchain version for dependency name %s (suffix %s) found" % tup)
def add_dependencies(self, dependencies):
""" Verify if the given dependencies exist and add them """
self.log.debug("add_dependencies: adding toolchain dependencies %s" % dependencies)
dep_mod_names = [dep['full_mod_name'] for dep in dependencies]
deps_exist = self.modules_tool.exist(dep_mod_names)
for dep, dep_mod_name, dep_exists in zip(dependencies, dep_mod_names, deps_exist):
self.log.debug("add_dependencies: MODULEPATH: %s" % os.environ['MODULEPATH'])
if not dep_exists:
tup = (dep_mod_name, dep)
self.log.error("add_dependencies: no module '%s' found for dependency %s" % tup)
else:
self.dependencies.append(dep)
self.log.debug('add_dependencies: added toolchain dependency %s' % str(dep))
def is_required(self, name):
"""Determine whether this is a required toolchain element."""
# default: assume every element is required
return True
def definition(self):
"""
Determine toolchain elements for given Toolchain instance.
"""
var_suff = '_MODULE_NAME'
tc_elems = {}
for var in dir(self):
if var.endswith(var_suff):
tc_elems.update({var[:-len(var_suff)]: getattr(self, var)})
_log.debug("Toolchain definition for %s: %s" % (self.as_dict(), tc_elems))
return tc_elems
def is_dep_in_toolchain_module(self, name):
"""Check whether a specific software name is listed as a dependency in the module for this toolchain."""
return any(map(lambda m: self.mns.is_short_modname_for(m, name), self.toolchain_dep_mods))
def prepare(self, onlymod=None):
"""
Prepare a set of environment parameters based on name/version of toolchain
- load modules for toolchain and dependencies
- generate extra variables and set them in the environment
onlymod: Boolean/string to indicate if the toolchain should only load the environment
with module (True) or also set all other variables (False) like compiler CC etc
(If string: comma separated list of variables that will be ignored).
"""
if self.modules_tool is None:
self.log.error("No modules tool defined in Toolchain instance.")
if not self._toolchain_exists():
self.log.error("No module found for toolchain: %s" % self.mod_short_name)
if self.name == DUMMY_TOOLCHAIN_NAME:
if self.version == DUMMY_TOOLCHAIN_VERSION:
self.log.info('prepare: toolchain dummy mode, dummy version; not loading dependencies')
else:
self.log.info('prepare: toolchain dummy mode and loading dependencies')
self.modules_tool.load([dep['short_mod_name'] for dep in self.dependencies])
return
# Load the toolchain and dependencies modules
self.log.debug("Loading toolchain module and dependencies...")
# make sure toolchain is available using short module name by running 'module use' on module path subdir
if self.init_modpaths:
mod_path_suffix = build_option('suffix_modules_path')
for modpath in self.init_modpaths:
self.modules_tool.prepend_module_path(os.path.join(install_path('mod'), mod_path_suffix, modpath))
self.modules_tool.load([self.det_short_module_name()])
self.modules_tool.load([dep['short_mod_name'] for dep in self.dependencies])
# determine direct toolchain dependencies
mod_name = self.det_short_module_name()
self.toolchain_dep_mods = self.modules_tool.dependencies_for(mod_name, depth=0)
self.log.debug('prepare: list of direct toolchain dependencies: %s' % self.toolchain_dep_mods)
# only retain names of toolchain elements, excluding toolchain name
toolchain_definition = set([e for es in self.definition().values() for e in es if not e == self.name])
# filter out optional toolchain elements if they're not used in the module
for elem_name in toolchain_definition.copy():
if self.is_required(elem_name) or self.is_dep_in_toolchain_module(elem_name):
continue
# not required and missing: remove from toolchain definition
self.log.debug("Removing %s from list of optional toolchain elements." % elem_name)
toolchain_definition.remove(elem_name)
self.log.debug("List of toolchain dependencies from toolchain module: %s" % self.toolchain_dep_mods)
self.log.debug("List of toolchain elements from toolchain definition: %s" % toolchain_definition)
if all(map(self.is_dep_in_toolchain_module, toolchain_definition)):
self.log.info("List of toolchain dependency modules and toolchain definition match!")
else:
self.log.error("List of toolchain dependency modules and toolchain definition do not match " \
"(%s vs %s)" % (self.toolchain_dep_mods, toolchain_definition))
# Generate the variables to be set
self.set_variables()
# set the variables
# onlymod can be comma-separated string of variables not to be set
if onlymod == True:
self.log.debug("prepare: do not set additional variables onlymod=%s" % onlymod)
self.generate_vars()
else:
self.log.debug("prepare: set additional variables onlymod=%s" % onlymod)
# add LDFLAGS and CPPFLAGS from dependencies to self.vars
self._add_dependency_variables()
self.generate_vars()
self._setenv_variables(onlymod)
def _add_dependency_variables(self, names=None, cpp=None, ld=None):
""" Add LDFLAGS and CPPFLAGS to the self.variables based on the dependencies
names should be a list of strings containing the name of the dependency
"""
cpp_paths = ['include']
ld_paths = ['lib']
if not self.options.get('32bit', None):
ld_paths.insert(0, 'lib64')
if cpp is not None:
for p in cpp:
if not p in cpp_paths:
cpp_paths.append(p)
if ld is not None:
for p in ld:
if not p in ld_paths:
ld_paths.append(p)
if not names:
deps = self.dependencies
else:
deps = [{'name': name} for name in names if name is not None]
for root in self.get_software_root([dep['name'] for dep in deps]):
self.variables.append_subdirs("CPPFLAGS", root, subdirs=cpp_paths)
self.variables.append_subdirs("LDFLAGS", root, subdirs=ld_paths)
def _setenv_variables(self, donotset=None):
"""Actually set the environment variables"""
self.log.debug("_setenv_variables: setting variables: donotset=%s" % donotset)
donotsetlist = []
if isinstance(donotset, str):
# TODO : more legacy code that should be using proper type
self.log.error("_setenv_variables: using commas-separated list. should be deprecated.")
donotsetlist = donotset.split(',')
elif isinstance(donotset, list):
donotsetlist = donotset
for key, val in self.vars.items():
if key in donotsetlist:
self.log.debug("_setenv_variables: not setting environment variable %s (value: %s)." % (key, val))
continue
self.log.debug("_setenv_variables: setting environment variable %s to %s" % (key, val))
setvar(key, val)
# also set unique named variables that can be used in Makefiles
# - so you can have 'CFLAGS = $(EBVARCFLAGS)'
# -- 'CLFLAGS = $(CFLAGS)' gives '*** Recursive variable `CFLAGS'
# references itself (eventually). Stop' error
setvar("EBVAR%s" % key, val)
def get_flag(self, name):
"""Get compiler flag for a certain option."""
return "-%s" % self.options.option(name)
def comp_family(self):
""" Return compiler family used in this toolchain (abstract method)."""
raise NotImplementedError
def mpi_family(self):
""" Return type of MPI library used in this toolchain or 'None' if MPI is not
supported.
"""
return None
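# Illustrative sketch (added for clarity; not part of the original module): a
# concrete toolchain subclasses Toolchain, sets NAME/VERSION so that
# _is_toolchain_for() can match it, and declares *_MODULE_NAME attributes,
# which definition() collects. The names below are hypothetical.
#
#   class ExampleToolchain(Toolchain):
#       NAME = 'example'
#       VERSION = '1.0'
#       COMPILER_MODULE_NAME = ['ExampleCompiler']
#       MPI_MODULE_NAME = ['ExampleMPI']
#
#       def comp_family(self):
#           return 'ExampleCompiler'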
| gpl-2.0 | -4,989,489,100,763,440,000 | 41.902174 | 120 | 0.618546 | false |
jMyles/AutobahnPython | examples/wamp/rpc/symmetric/client.py | 18 | 1530 | ###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, time
from twisted.python import log
from twisted.internet import reactor
from autobahn.websocket import connectWS
from autobahn.wamp import exportRpc, WampClientFactory, WampClientProtocol
class MyClientProtocol(WampClientProtocol):
@exportRpc("getTime")
def getTime(self):
return time.strftime("%H:%M:%S", time.localtime())
def onSessionOpen(self):
self.registerForRpc(self, "http://example.com/client#")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampClientFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = MyClientProtocol
connectWS(factory)
reactor.run()
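# Usage note (added for clarity): run this client against the matching server of
# this symmetric-RPC example, which listens on ws://localhost:9000; pass 'debug'
# as the first command-line argument to enable WAMP debug logging. Once the
# session is open, the peer can call this client's getTime() procedure through
# the "http://example.com/client#getTime" URI registered in onSessionOpen().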
| apache-2.0 | 3,721,318,196,799,021,600 | 29 | 79 | 0.636601 | false |
AuScope/vgml | src/main/resources/org/auscope/portal/server/scriptbuilder/templates/escript-magnetic.py | 3 | 3212 |
##############################################################################
#
# Copyright (c) 2009-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################
"""3D magnetic inversion example using netCDF data"""
# Filename for input data
DATASET='${inversion-file}'
# background magnetic flux density (B_north, B_east, B_vertical) in nano Tesla.
B_b = [${bb-north}, ${bb-east}, ${bb-vertical}]
# maximum depth (in meters)
DEPTH = ${max-depth}
# buffer zone above data (in meters; 6-10km recommended)
AIR = ${air-buffer}
# number of mesh elements in vertical direction (~1 element per 2km recommended)
NE_Z = ${vertical-mesh-elements}
# amount of horizontal padding (this affects end result, about 20% recommended)
PAD_X = ${x-padding}
PAD_Y = ${y-padding}
N_THREADS = ${n-threads}
####### Do not change anything below this line #######
import os
import subprocess
import sys
try:
from esys.downunder import *
from esys.escript import unitsSI as U
from esys.weipa import saveSilo
except ImportError:
line=["/opt/escript/bin/run-escript","-t" + str(N_THREADS)]+sys.argv
ret=subprocess.call(line)
sys.exit(ret)
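# Note (added for clarity): when the esys modules cannot be imported, the block
# above re-launches this same script through the escript wrapper with the
# requested number of threads and exits with the child's return code; the
# re-launched interpreter then imports the modules successfully and continues.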
def saveAndUpload(fn, **args):
saveSilo(fn, **args)
subprocess.call(["cloud", "upload", fn, fn, "--set-acl=public-read"])
#Convert entered nano Tesla to Tesla
B_b=[b*U.Nano*U.Tesla for b in B_b]
DATA_UNITS = U.Nano * U.Tesla
source=NetCdfData(DataSource.MAGNETIC, DATASET, scale_factor=DATA_UNITS)
db=DomainBuilder()
db.addSource(source)
db.setVerticalExtents(depth=DEPTH, air_layer=AIR, num_cells=NE_Z)
db.setFractionalPadding(PAD_X, PAD_Y)
db.setBackgroundMagneticFluxDensity(B_b)
db.fixSusceptibilityBelow(depth=DEPTH)
inv=MagneticInversion()
inv.setup(db)
B, w = db.getMagneticSurveys()[0]
susceptibility=inv.run()
saveAndUpload('result.silo', magnetic_anomaly=B, magnetic_weight=w, susceptibility=susceptibility)
print("Results saved in result.silo")
# Visualise result.silo using VisIt
import visit
visit.LaunchNowin()
saveatts = visit.SaveWindowAttributes()
saveatts.fileName = 'result-visit.png'
saveatts.family = 0
saveatts.width = 1024
saveatts.height = 768
saveatts.resConstraint = saveatts.NoConstraint
saveatts.outputToCurrentDirectory = 1
visit.SetSaveWindowAttributes(saveatts)
visit.OpenDatabase('result.silo')
visit.AddPlot('Contour', 'susceptibility')
c=visit.ContourAttributes()
c.colorType=c.ColorByColorTable
c.colorTableName = "hot"
visit.SetPlotOptions(c)
visit.DrawPlots()
v=visit.GetView3D()
v.viewNormal=(-0.554924, 0.703901, 0.443377)
v.viewUp=(0.272066, -0.3501, 0.896331)
visit.SetView3D(v)
visit.SaveWindow()
subprocess.call(["cloud", "upload", "result-visit.png", "result-visit.png", "--set-acl=public-read"])
visit.DeleteAllPlots()
visit.CloseDatabase('result.silo')
| gpl-3.0 | -304,191,300,855,227,140 | 31.113402 | 101 | 0.687111 | false |
fenglu-g/incubator-airflow | airflow/config_templates/default_celery.py | 4 | 4104 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ssl
from airflow import configuration
from airflow.exceptions import AirflowConfigException, AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
def _broker_supports_visibility_timeout(url):
return url.startswith("redis://") or url.startswith("sqs://")
log = LoggingMixin().log
broker_url = configuration.conf.get('celery', 'BROKER_URL')
broker_transport_options = configuration.conf.getsection(
'celery_broker_transport_options'
)
if 'visibility_timeout' not in broker_transport_options:
if _broker_supports_visibility_timeout(broker_url):
broker_transport_options['visibility_timeout'] = 21600
DEFAULT_CELERY_CONFIG = {
'accept_content': ['json', 'pickle'],
'event_serializer': 'json',
'worker_prefetch_multiplier': 1,
'task_acks_late': True,
'task_default_queue': configuration.conf.get('celery', 'DEFAULT_QUEUE'),
'task_default_exchange': configuration.conf.get('celery', 'DEFAULT_QUEUE'),
'broker_url': broker_url,
'broker_transport_options': broker_transport_options,
'result_backend': configuration.conf.get('celery', 'RESULT_BACKEND'),
'worker_concurrency': configuration.conf.getint('celery', 'WORKER_CONCURRENCY'),
}
celery_ssl_active = False
try:
celery_ssl_active = configuration.conf.getboolean('celery', 'SSL_ACTIVE')
except AirflowConfigException:
log.warning("Celery Executor will run without SSL")
try:
if celery_ssl_active:
if 'amqp://' in broker_url:
broker_use_ssl = {'keyfile': configuration.conf.get('celery', 'SSL_KEY'),
'certfile': configuration.conf.get('celery', 'SSL_CERT'),
'ca_certs': configuration.conf.get('celery', 'SSL_CACERT'),
'cert_reqs': ssl.CERT_REQUIRED}
elif 'redis://' in broker_url:
broker_use_ssl = {'ssl_keyfile': configuration.conf.get('celery', 'SSL_KEY'),
'ssl_certfile': configuration.conf.get('celery', 'SSL_CERT'),
'ssl_ca_certs': configuration.conf.get('celery', 'SSL_CACERT'),
'ssl_cert_reqs': ssl.CERT_REQUIRED}
else:
raise AirflowException('The broker you configured does not support SSL_ACTIVE to be True. '
'Please use RabbitMQ or Redis if you would like to use SSL for broker.')
DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl
except AirflowConfigException:
raise AirflowException('AirflowConfigException: SSL_ACTIVE is True, '
'please ensure SSL_KEY, '
'SSL_CERT and SSL_CACERT are set')
except Exception as e:
raise AirflowException('Exception: There was an unknown Celery SSL Error. '
'Please ensure you want to use '
'SSL and/or have all necessary certs and key ({}).'.format(e))
result_backend = DEFAULT_CELERY_CONFIG['result_backend']
if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:
log.warning("You have configured a result_backend of %s, it is highly recommended "
"to use an alternative result_backend (i.e. a database).", result_backend)
| apache-2.0 | 1,846,901,440,203,672,600 | 44.6 | 107 | 0.662768 | false |
joachimwolff/minHashNearestNeighbors | sparse_neighbors_search/cluster/minHashDBSCAN.py | 1 | 3421 | # Copyright 2016, 2017, 2018, 2019, 2020 Joachim Wolff
# PhD Thesis
#
# Copyright 2015, 2016 Joachim Wolff
# Master Thesis
# Tutor: Fabrizio Costa
# Winter semester 2015/2016
#
# Chair of Bioinformatics
# Department of Computer Science
# Faculty of Engineering
# Albert-Ludwigs-University Freiburg im Breisgau
from sklearn.cluster import DBSCAN
from ..neighbors import MinHash
import numpy as np
class MinHashDBSCAN():
def __init__(self, eps=0.5, min_samples=5,
algorithm='auto', leaf_size=30, p=None, random_state=None,
fast=False, n_neighbors=5, radius=1.0,
number_of_hash_functions=400,
max_bin_size=50, minimal_blocks_in_common=1,
shingle_size=4, excess_factor=5,
number_of_cores=None, chunk_size=None):
self.eps = eps
self.min_samples = min_samples
# self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
self.radius = radius
self.fast = fast
self.number_of_hash_functions = number_of_hash_functions
self.max_bin_size = max_bin_size
self.minimal_blocks_in_common = minimal_blocks_in_common
self.shingle_size = shingle_size
self.excess_factor = excess_factor
self.number_of_cores = number_of_cores
self.chunk_size = chunk_size
self.n_neighbors = n_neighbors
self._dbscan = DBSCAN(eps=self.eps, min_samples=min_samples, metric='precomputed',
algorithm=self.algorithm, leaf_size=self.leaf_size, p=self.p)
self.labels_ = None
self._precomputed_graph = None
    # only for compatibility reasons
def fit(self, X, y=None, pSaveMemory=None):
minHashNeighbors = MinHash(n_neighbors=self.n_neighbors,
radius=self.radius, fast=self.fast,
number_of_hash_functions=self.number_of_hash_functions,
max_bin_size=self.max_bin_size,
minimal_blocks_in_common=self.minimal_blocks_in_common,
shingle_size=self.shingle_size,
excess_factor=self.excess_factor,
number_of_cores=self.number_of_cores,
chunk_size=self.chunk_size, similarity=False)
if pSaveMemory is not None and pSaveMemory > 0:
if pSaveMemory > 1:
pSaveMemory = 1
number_of_elements = X.shape[0]
batch_size = int(np.floor(number_of_elements * pSaveMemory))
if batch_size < 1:
batch_size = 1
minHashNeighbors.fit(X[0:batch_size, :])
if batch_size < number_of_elements:
for i in range(batch_size, X.shape[0], batch_size):
minHashNeighbors.partial_fit(X[i:i + batch_size, :])
else:
minHashNeighbors.fit(X)
# minHashNeighbors.fit(X, y)
self._precomputed_graph = minHashNeighbors.kneighbors_graph(mode='distance')
self._dbscan.fit(self._precomputed_graph)
self.labels_ = self._dbscan.labels_
def fit_predict(self, X, y=None, pSaveMemory=None):
        self.fit(X, y, pSaveMemory=pSaveMemory)
return self.labels_
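    # Usage sketch (illustrative, not part of the original module): the wrapped
    # MinHash neighbors work on sparse binary feature matrices, so a
    # hypothetical call could look like this (parameter values are arbitrary):
    #
    #   import numpy as np
    #   from scipy.sparse import csr_matrix
    #   X = csr_matrix(np.random.binomial(1, 0.1, size=(100, 50)))
    #   labels = MinHashDBSCAN(eps=0.5, min_samples=5, n_neighbors=5).fit_predict(X)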
| mit | 1,588,282,949,375,221,200 | 39.247059 | 91 | 0.579655 | false |
youssef-emad/shogun | examples/meta/generator/tests/test_translate_java.py | 3 | 6770 | from translate import Translator
import json
import unittest
class TestJavaTranslator(unittest.TestCase):
def setUp(self):
with open("targets/java.json", "r") as targetFile:
self.translator = Translator(json.load(targetFile))
def test_translateProgram(self):
"""
CSVFile trainf("train.dat")
RealFeatures feats_train(trainf)
CSVFile testf("test.dat")
Translates to:
import org.shogun.*;
import org.jblas.*;
class MyExample {
static {
System.loadLibrary("modshogun");
}
public static void main(String argv[]) {
modshogun.init_shogun_with_defaults();
CSVFile trainf = new CSVFile("train.dat");
RealFeatures feats_train = new RealFeatures(trainf);
CSVFile testf = new CSVFile("test.dat");
}
}
"""
programAST = [
{"Statement": {"Init": [{"ObjectType": "CSVFile"}, {"Identifier": "trainf"},{"ArgumentList": {"Expr": {"StringLiteral": "train.dat"}}}]}},
{"Statement": {"Init": [{"ObjectType": "RealFeatures"}, {"Identifier": "feats_train"}, {"ArgumentList": {"Expr": {"Identifier": "trainf"}}}]}},
{"Statement": {"Init": [{"ObjectType": "CSVFile"}, {"Identifier": "testf"}, {"ArgumentList": {"Expr": {"StringLiteral": "test.dat"}}}]}}
]
translation = self.translator.translateProgram(programAST, "MyExample")
self.assertEqual(translation, u"import org.shogun.*;\nimport org.jblas.*;\n\nclass MyExample {\nstatic {\nSystem.loadLibrary(\"modshogun\");\n}\n\npublic static void main(String argv[]) {\nmodshogun.init_shogun_with_defaults();\n\nCSVFile trainf = new CSVFile(\"train.dat\");\nRealFeatures feats_train = new RealFeatures(trainf);\nCSVFile testf = new CSVFile(\"test.dat\");\n\n}\n}\n")
def test_translateProgramWithNewlines(self):
programAST = [
{"Statement": {"Init": [{"ObjectType": "CSVFile"}, {"Identifier": "trainf"},{"ArgumentList": {"Expr": {"StringLiteral": "train.dat"}}}]}},
{"Statement": "\n"},
{"Statement": {"Init": [{"ObjectType": "RealFeatures"}, {"Identifier": "feats_train"}, {"ArgumentList": {"Expr": {"Identifier": "trainf"}}}]}},
{"Statement": "\n"},
{"Statement": {"Init": [{"ObjectType": "CSVFile"}, {"Identifier": "testf"}, {"ArgumentList": {"Expr": {"StringLiteral": "test.dat"}}}]}}
]
translation = self.translator.translateProgram(programAST, "MyExample")
self.assertEqual(translation, u"import org.shogun.*;\nimport org.jblas.*;\n\nclass MyExample {\nstatic {\nSystem.loadLibrary(\"modshogun\");\n}\n\npublic static void main(String argv[]) {\nmodshogun.init_shogun_with_defaults();\n\nCSVFile trainf = new CSVFile(\"train.dat\");\n\nRealFeatures feats_train = new RealFeatures(trainf);\n\nCSVFile testf = new CSVFile(\"test.dat\");\n\n}\n}\n")
def test_translateInitCopy(self):
initAST = [
{"ObjectType": "IntMatrix"},
{"Identifier": "multiple_k"},
{"Expr": {"MethodCall": [
{"Identifier": "knn"},
{"Identifier": "classify_for_multiple_k"}
]}}
]
translation = self.translator.translateInit(initAST)
self.assertEqual(translation, u"DoubleMatrix multiple_k = knn.classify_for_multiple_k()")
def test_translateInitConstruct(self):
initAST = [
{"ObjectType": "MulticlassLabels"},
{"Identifier": "labels"},
{"ArgumentList": {
"Expr": {"Identifier": "train_labels"}
}}
]
translation = self.translator.translateInit(initAST)
self.assertEqual(translation, u"MulticlassLabels labels = new MulticlassLabels(train_labels)")
def test_translateInitConstructMultiple(self):
initAST = [
{"ObjectType": "EuclideanDistance"},
{"Identifier": "distance"},
{"ArgumentList": [
{"Expr": {"Identifier": "feats_train"}},
{"Expr": {"Identifier": "feats_test"}}
]}
]
translation = self.translator.translateInit(initAST)
self.assertEqual(translation, u"EuclideanDistance distance = new EuclideanDistance(feats_train, feats_test)")
def test_translateStatementAssign(self):
stmtAST = {
"Assign": [
{"Identifier": "knn_train"},
{"Expr":
{"BoolLiteral": "False"}
}
]
}
translation = self.translator.translateStatement(stmtAST)
self.assertEqual(translation, u"knn_train = false;\n")
def test_translateStatementExpr(self):
stmtAST = {
"Expr": {
"MethodCall": [
{"Identifier": "knn"},
{"Identifier": "train"}
]
}
}
translation = self.translator.translateStatement(stmtAST)
self.assertEqual(translation, u"knn.train();\n")
def test_translateStatementNewLine(self):
stmtAST = "\n"
translation = self.translator.translateStatement(stmtAST)
self.assertEqual(translation, u"\n")
def test_translateStatementPrint(self):
stmtAST = {
"Print": {"Expr": {"Identifier": "multiple_k"}}
}
translation = self.translator.translateStatement(stmtAST)
self.assertEqual(translation, u"System.out.println(multiple_k);\n")
def test_translateType(self):
typeAST = {
"ObjectType": "IntMatrix"
}
translation = self.translator.translateType(typeAST)
self.assertEqual(translation, u"DoubleMatrix")
def test_translateExprEnum(self):
enumAST = {
"Enum": [{"Identifier":"LIBLINEAR_SOLVER_TYPE"}, {"Identifier": "L2R_L2LOSS_SVC_DUAL"}]
}
translation = self.translator.translateExpr(enumAST)
self.assertEqual(translation, u"L2R_L2LOSS_SVC_DUAL")
self.assertTrue((u"LIBLINEAR_SOLVER_TYPE", u"L2R_L2LOSS_SVC_DUAL") in self.translator.dependencies["Enums"])
def test_translateProgramComment(self):
programAST = [
{"Comment": " This is a comment"}
]
translation = self.translator.translateProgram(programAST, "MyExample")
trueTranslation = u"import org.shogun.*;\nimport org.jblas.*;\n\nclass MyExample {\nstatic {\nSystem.loadLibrary(\"modshogun\");\n}\n\npublic static void main(String argv[]) {\nmodshogun.init_shogun_with_defaults();\n\n// This is a comment\n\n}\n}\n"
self.assertEqual(translation, trueTranslation)
if __name__ == '__main__':
unittest.main()
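# Note (added for clarity): setUp() loads "targets/java.json" through a relative
# path, so the tests must be started from the directory that contains targets/,
# e.g. by running this file directly or via `python -m unittest` from there.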
| gpl-3.0 | 2,583,687,988,764,884,000 | 41.3125 | 397 | 0.588183 | false |
RealKinetic/locust_k8s | docker/locust-tasks/locustfile.py | 2 | 1096 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
self.client.post("/login", {"username":"ellen_key", "password":"education"})
@task(2)
def index(self):
self.client.get("/")
@task(1)
def profile(self):
self.client.get("/profile")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 9000
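# Usage note (added for clarity; the command line below is recalled from Locust's
# CLI rather than taken from this file): a locustfile like this is typically
# started with something like
#   locust -f locustfile.py --host=http://target.example.com
# and the swarm is then controlled from the Locust web UI.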
| apache-2.0 | 1,910,149,797,277,030,400 | 28.621622 | 84 | 0.688869 | false |
tswast/google-cloud-python | bigquery/samples/table_insert_rows_explicit_none_insert_ids.py | 1 | 1343 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def table_insert_rows_explicit_none_insert_ids(table_id):
# [START bigquery_table_insert_rows_explicit_none_insert_ids]
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
    # TODO(developer): Set table_id to the ID of the table to fetch.
# table_id = "your-project.your_dataset.your_table"
table = client.get_table(table_id) # Make an API request.
rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)]
errors = client.insert_rows(
table, rows_to_insert, row_ids=[None] * len(rows_to_insert)
) # Make an API request.
if errors == []:
print("New rows have been added.")
# [END bigquery_table_insert_rows_explicit_none_insert_ids]
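# Example invocation (illustrative; requires Google Cloud credentials and an
# existing table whose two-column schema matches the rows built above):
#
#   table_insert_rows_explicit_none_insert_ids("your-project.your_dataset.your_table")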
| apache-2.0 | 7,757,110,027,831,912,000 | 36.305556 | 77 | 0.707372 | false |
slinderman/pyhawkes | pyhawkes/internals/weights.py | 1 | 24982 | import numpy as np
from scipy.special import gammaln, psi
from scipy.special import logsumexp
from joblib import Parallel, delayed
from pybasicbayes.abstractions import GibbsSampling, MeanField, MeanFieldSVI
from pyhawkes.internals.distributions import Bernoulli, Gamma
from pyhawkes.utils.utils import logistic, logit
class SpikeAndSlabGammaWeights(GibbsSampling):
"""
Encapsulates the KxK Bernoulli adjacency matrix and the
KxK gamma weight matrix. Implements Gibbs sampling given
the parent variables.
"""
def __init__(self, model, parallel_resampling=True):
"""
Initialize the spike-and-slab gamma weight model with either a
network object containing the prior or rho, alpha, and beta to
define an independent model.
"""
self.model = model
self.K = model.K
# assert isinstance(network, GibbsNetwork), "network must be a GibbsNetwork object"
self.network = model.network
# Specify whether or not to resample the columns of A in parallel
self.parallel_resampling = parallel_resampling
# Initialize parameters A and W
self.A = np.ones((self.K, self.K))
self.W = np.zeros((self.K, self.K))
self.resample()
@property
def W_effective(self):
return self.A * self.W
def log_likelihood(self, x):
"""
Compute the log likelihood of the given A and W
:param x: an (A,W) tuple
:return:
"""
A,W = x
assert isinstance(A, np.ndarray) and A.shape == (self.K,self.K), \
"A must be a KxK adjacency matrix"
assert isinstance(W, np.ndarray) and W.shape == (self.K,self.K), \
"W must be a KxK weight matrix"
# LL of A
rho = np.clip(self.network.P, 1e-32, 1-1e-32)
ll = (A * np.log(rho) + (1-A) * np.log(1-rho)).sum()
ll = np.nan_to_num(ll)
# Get the shape and scale parameters from the network model
kappa = self.network.kappa
v = self.network.V
# Add the LL of the gamma weights
lp_W = kappa * np.log(v) - gammaln(kappa) + \
(kappa-1) * np.log(W) - v * W
ll += (A*lp_W).sum()
return ll
def log_probability(self):
return self.log_likelihood((self.A, self.W))
def rvs(self,size=[]):
A = np.random.rand(self.K, self.K) < self.network.P
W = np.random.gamma(self.network.kappa, 1.0/self.network.V,
                            size=(self.K, self.K))
return A,W
def _joint_resample_A_W(self):
"""
Not sure how to do this yet, but it would be nice to resample A
from its marginal distribution after integrating out W, and then
sample W | A.
:return:
"""
raise NotImplementedError()
def _joblib_resample_A_given_W(self, data):
"""
Resample A given W. This must be immediately followed by an
update of z | A, W. This version uses joblib to parallelize
over columns of A.
:return:
"""
# Use the module trick to avoid copying globals
import pyhawkes.internals.parallel_adjacency_resampling as par
par.model = self.model
par.data = data
par.K = self.model.K
if len(data) == 0:
self.A = np.random.rand(self.K, self.K) < self.network.P
return
# We can naively parallelize over receiving neurons, k2
# To avoid serializing and copying the data object, we
# manually extract the required arrays Sk, Fk, etc.
A_cols = Parallel(n_jobs=-1, backend="multiprocessing")(
            delayed(par._resample_column_of_A)(k2) for k2 in range(self.K))
self.A = np.array(A_cols).T
def _resample_A_given_W(self, data):
"""
Resample A given W. This must be immediately followed by an
update of z | A, W.
:return:
"""
p = self.network.P
for k1 in range(self.K):
for k2 in range(self.K):
if self.model is None:
ll0 = 0
ll1 = 0
else:
# Compute the log likelihood of the events given W and A=0
self.A[k1,k2] = 0
ll0 = sum([d.log_likelihood_single_process(k2) for d in data])
# Compute the log likelihood of the events given W and A=1
self.A[k1,k2] = 1
ll1 = sum([d.log_likelihood_single_process(k2) for d in data])
# Sample A given conditional probability
lp0 = ll0 + np.log(1.0 - p[k1,k2])
lp1 = ll1 + np.log(p[k1,k2])
Z = logsumexp([lp0, lp1])
# ln p(A=1) = ln (exp(lp1) / (exp(lp0) + exp(lp1)))
# = lp1 - ln(exp(lp0) + exp(lp1))
# = lp1 - Z
self.A[k1,k2] = np.log(np.random.rand()) < lp1 - Z
def resample_W_given_A_and_z(self, data=[]):
"""
Resample the weights given A and z.
:return:
"""
ss = np.zeros((2, self.K, self.K)) + \
sum([d.compute_weight_ss() for d in data])
# Account for whether or not a connection is present in N
ss[1] *= self.A
kappa_post = self.network.kappa + ss[0]
        v_post = self.network.V + ss[1]
self.W = np.atleast_1d(np.random.gamma(kappa_post, 1.0/v_post)).reshape((self.K, self.K))
def resample(self, data=[]):
"""
Resample A and W given the parents
:param N: A length-K vector specifying how many events occurred
on each of the K processes
:param Z: A TxKxKxB array of parent assignment counts
"""
# Resample W | A
self.resample_W_given_A_and_z(data)
# Resample A given W
if self.parallel_resampling:
self._joblib_resample_A_given_W(data)
else:
self._resample_A_given_W(data)
class GammaMixtureWeights(GibbsSampling, MeanField, MeanFieldSVI):
"""
For variational inference we approximate the spike at zero with a smooth
Gamma distribution that has infinite density at zero.
"""
def __init__(self, model, kappa_0=0.1, nu_0=10.0):
"""
Initialize the spike-and-slab gamma weight model with either a
network object containing the prior or rho, alpha, and beta to
define an independent model.
:param K: Number of processes
:param network: Pointer to a network object exposing rho, alpha, and beta
:param kappa_1: Shape for weight distribution
:param kappa_0: Shape for gamma spike (small)
:param nu_0: Scale for gamma spike (large)
"""
self.model = model
self.network = model.network
self.K = model.K
self.network = model.network
assert model.network is not None, "A network object must be given"
# Save gamma parameters
self.kappa_0 = kappa_0
self.nu_0 = nu_0
# Initialize the variational parameters to the prior mean
# Variational probability of edge
# self.mf_p = network.P * np.ones((self.K, self.K))
self.mf_p = np.ones((self.K, self.K)) - 1e-3
# Variational weight distribution given that there is no edge
self.mf_kappa_0 = self.kappa_0 * np.ones((self.K, self.K))
self.mf_v_0 = self.nu_0 * np.ones((self.K, self.K))
# Variational weight distribution given that there is an edge
self.mf_kappa_1 = self.network.kappa * np.ones((self.K, self.K))
# self.mf_v_1 = network.alpha / network.beta * np.ones((self.K, self.K))
self.mf_v_1 = self.network.V * np.ones((self.K, self.K))
# Initialize parameters A and W
self.A = np.ones((self.K, self.K))
self.W = np.zeros((self.K, self.K))
self.resample()
@property
def W_effective(self):
return self.W
def log_likelihood(self, x):
"""
Compute the log likelihood of the given A and W
:param x: an (A,W) tuple
:return:
"""
A,W = x
assert isinstance(A, np.ndarray) and A.shape == (self.K,self.K), \
"A must be a KxK adjacency matrix"
assert isinstance(W, np.ndarray) and W.shape == (self.K,self.K), \
"W must be a KxK weight matrix"
# LL of A
rho = self.network.P
lp_A = (A * np.log(rho) + (1-A) * np.log(1-rho))
# Get the shape and scale parameters from the network model
kappa = self.network.kappa
v = self.network.V
# Add the LL of the gamma weights
# lp_W = np.zeros((self.K, self.K))
# lp_W = A * (kappa * np.log(v) - gammaln(kappa)
# + (kappa-1) * np.log(W) - v * W)
lp_W0 = (self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
+ (self.kappa_0-1) * np.log(W) - self.nu_0 * W)[A==0]
lp_W1 = (kappa * np.log(v) - gammaln(kappa)
+ (kappa-1) * np.log(W) - v * W)[A==1]
# lp_W = A * (kappa * np.log(v) - gammaln(kappa)
# + (kappa-1) * np.log(W) - v * W) + \
# (1-A) * (self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
# + (self.kappa_0-1) * np.log(W) - self.nu_0 * W)
ll = lp_A.sum() + lp_W0.sum() + lp_W1.sum()
return ll
def log_probability(self):
return self.log_likelihood((self.A, self.W))
def rvs(self,size=[]):
raise NotImplementedError()
def expected_A(self):
return self.mf_p
def expected_W(self):
"""
Compute the expected W under the variational approximation
"""
p_A = self.expected_A()
E_W = p_A * self.expected_W_given_A(1.0) + (1-p_A) * self.expected_W_given_A(0.0)
if not self.network.allow_self_connections:
np.fill_diagonal(E_W, 0.0)
return E_W
def expected_W_given_A(self, A):
"""
Compute the expected W given A under the variational approximation
:param A: Either zero or 1
"""
return A * (self.mf_kappa_1 / self.mf_v_1) + \
(1.0 - A) * (self.mf_kappa_0 / self.mf_v_0)
def std_A(self):
"""
Compute the standard deviation of A
:return:
"""
return np.sqrt(self.mf_p * (1-self.mf_p))
def expected_log_W(self):
"""
Compute the expected log W under the variational approximation
"""
p_A = self.expected_A()
E_ln_W = p_A * self.expected_log_W_given_A(1.0) + \
(1-p_A) * self.expected_log_W_given_A(0.0)
if not self.network.allow_self_connections:
np.fill_diagonal(E_ln_W, -np.inf)
return E_ln_W
def expected_log_W_given_A(self, A):
"""
Compute the expected log W given A under the variational approximation
"""
return A * (psi(self.mf_kappa_1) - np.log(self.mf_v_1)) + \
(1.0 - A) * (psi(self.mf_kappa_0) - np.log(self.mf_v_0))
def expected_log_likelihood(self,x):
raise NotImplementedError()
def meanfieldupdate_p(self, stepsize=1.0):
"""
Update p given the network parameters and the current variational
parameters of the weight distributions.
:return:
"""
logit_p = self.network.expected_log_p() - self.network.expected_log_notp()
logit_p += self.network.kappa * self.network.expected_log_v() - gammaln(self.network.kappa)
logit_p += gammaln(self.mf_kappa_1) - self.mf_kappa_1 * np.log(self.mf_v_1)
logit_p += gammaln(self.kappa_0) - self.kappa_0 * np.log(self.nu_0)
logit_p += self.mf_kappa_0 * np.log(self.mf_v_0) - gammaln(self.mf_kappa_0)
# p_hat = logistic(logit_p)
# self.mf_p = (1.0 - stepsize) * self.mf_p + stepsize * p_hat
logit_p_hat = (1-stepsize) * logit(self.mf_p) + \
stepsize * logit_p
# self.mf_p = logistic(logit_p_hat)
self.mf_p = np.clip(logistic(logit_p_hat), 1e-8, 1-1e-8)
def meanfieldupdate_kappa_v(self, data=[], minibatchfrac=1.0, stepsize=1.0):
"""
Update the variational weight distributions
:return:
"""
exp_ss = sum([d.compute_exp_weight_ss() for d in data])
# kappa' = kappa + \sum_t \sum_b z[t,k,k',b]
kappa0_hat = self.kappa_0 + exp_ss[0] / minibatchfrac
kappa1_hat = self.network.kappa + exp_ss[0] / minibatchfrac
self.mf_kappa_0 = (1.0 - stepsize) * self.mf_kappa_0 + stepsize * kappa0_hat
self.mf_kappa_1 = (1.0 - stepsize) * self.mf_kappa_1 + stepsize * kappa1_hat
# v_0'[k,k'] = self.nu_0 + N[k]
v0_hat = self.nu_0 * np.ones((self.K, self.K)) + exp_ss[1] / minibatchfrac
self.mf_v_0 = (1.0 - stepsize) * self.mf_v_0 + stepsize * v0_hat
# v_1'[k,k'] = E[v[k,k']] + N[k]
v1_hat = self.network.expected_v() + exp_ss[1] / minibatchfrac
self.mf_v_1 = (1.0 - stepsize) * self.mf_v_1 + stepsize * v1_hat
def meanfieldupdate(self, data=[]):
self.meanfieldupdate_kappa_v(data)
self.meanfieldupdate_p()
def meanfield_sgdstep(self, data, minibatchfrac,stepsize):
self.meanfieldupdate_kappa_v(data, minibatchfrac=minibatchfrac, stepsize=stepsize)
self.meanfieldupdate_p(stepsize=stepsize)
def get_vlb(self):
"""
Variational lower bound for A_kk' and W_kk'
E[LN p(A_kk', W_kk' | p, kappa, v)] -
E[LN q(A_kk', W_kk' | mf_p, mf_kappa, mf_v)]
:return:
"""
vlb = 0
# First term:
# E[LN p(A | p)]
E_A = self.expected_A()
E_notA = 1.0 - E_A
E_ln_p = self.network.expected_log_p()
E_ln_notp = self.network.expected_log_notp()
vlb += Bernoulli().negentropy(E_x=E_A, E_notx=E_notA,
E_ln_p=E_ln_p, E_ln_notp=E_ln_notp).sum()
# E[LN p(W | A=1, kappa, v)]
kappa = self.network.kappa
E_v = self.network.expected_v()
E_ln_v = self.network.expected_log_v()
E_W1 = self.expected_W_given_A(A=1)
E_ln_W1 = self.expected_log_W_given_A(A=1)
vlb += (E_A * Gamma(kappa).negentropy(E_beta=E_v, E_ln_beta=E_ln_v,
E_lambda=E_W1, E_ln_lambda=E_ln_W1)).sum()
# E[LN p(W | A=0, kappa0, v0)]
kappa0 = self.kappa_0
v0 = self.nu_0
E_W0 = self.expected_W_given_A(A=0)
E_ln_W0 = self.expected_log_W_given_A(A=0)
vlb += (E_notA * Gamma(kappa0, v0).negentropy(E_lambda=E_W0, E_ln_lambda=E_ln_W0)).sum()
# Second term
# E[LN q(A)]
vlb -= Bernoulli(self.mf_p).negentropy().sum()
# E[LN q(W | A=1)]
vlb -= (E_A * Gamma(self.mf_kappa_1, self.mf_v_1).negentropy()).sum()
vlb -= (E_notA * Gamma(self.mf_kappa_0, self.mf_v_0).negentropy()).sum()
return vlb
def resample_from_mf(self):
"""
Resample from the mean field distribution
:return:
"""
self.A = np.random.rand(self.K, self.K) < self.mf_p
self.W = (1-self.A) * np.random.gamma(self.mf_kappa_0, 1.0/self.mf_v_0)
self.W += self.A * np.random.gamma(self.mf_kappa_1, 1.0/self.mf_v_1)
def resample(self, data=[]):
ss = np.zeros((2, self.K, self.K)) + \
sum([d.compute_weight_ss() for d in data])
# First resample A from its marginal distribution after integrating out W
self._resample_A(ss)
# Then resample W given A
self._resample_W_given_A(ss)
def _resample_A(self, ss):
"""
Resample A from the marginal distribution after integrating out W
:param ss:
:return:
"""
p = self.network.P
v = self.network.V
kappa0_post = self.kappa_0 + ss[0,:,:]
v0_post = self.nu_0 + ss[1,:,:]
kappa1_post = self.network.kappa + ss[0,:,:]
v1_post = v + ss[1,:,:]
# Compute the marginal likelihood of A=1 and of A=0
# The result of the integral is a ratio of gamma distribution normalizing constants
lp0 = self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
lp0 += gammaln(kappa0_post) - kappa0_post * np.log(v0_post)
lp1 = self.network.kappa * np.log(v) - gammaln(self.network.kappa)
lp1 += gammaln(kappa1_post) - kappa1_post * np.log(v1_post)
# Add the prior and normalize
lp0 = lp0 + np.log(1.0 - p)
lp1 = lp1 + np.log(p)
Z = logsumexp(np.concatenate((lp0[:,:,None], lp1[:,:,None]),
axis=2),
axis=2)
# ln p(A=1) = ln (exp(lp1) / (exp(lp0) + exp(lp1)))
# = lp1 - ln(exp(lp0) + exp(lp1))
# = lp1 - Z
self.A = np.log(np.random.rand(self.K, self.K)) < lp1 - Z
def _resample_W_given_A(self, ss):
# import pdb; pdb.set_trace()
kappa_prior = self.kappa_0 * (1-self.A) + self.network.kappa * self.A
kappa_cond = kappa_prior + ss[0,:,:]
v_prior = self.nu_0 * (1-self.A) + self.network.V * self.A
v_cond = v_prior + ss[1,:,:]
# Resample W from its gamma conditional
self.W = np.array(np.random.gamma(kappa_cond, 1.0/v_cond)).\
reshape((self.K, self.K))
self.W = np.clip(self.W, 1e-32, np.inf)
def initialize_from_gibbs(self, A, W, scale=100):
"""
Initialize from a Gibbs sample
:param A: Given adjacency matrix
:param W: Given weight matrix
:return:
"""
# Set mean field probability of connection to conf if A==1
# and (1-conf) if A == 0
conf = 0.95
self.mf_p = conf * A + (1-conf) * (1-A)
# Set variational weight distribution
self.mf_kappa_0 = self.kappa_0
self.mf_v_0 = self.nu_0
self.mf_kappa_1 = scale * W
self.mf_v_1 = scale
class SpikeAndSlabContinuousTimeGammaWeights(GibbsSampling):
"""
Implementation of spike and slab gamma weights from L&A 2014
"""
def __init__(self, model, parallel_resampling=True):
self.model = model
self.network = model.network
self.K = self.model.K
# Specify whether or not to resample the columns of A in parallel
self.parallel_resampling = parallel_resampling
# Initialize parameters A and W
self.A = np.ones((self.K, self.K))
self.W = np.zeros((self.K, self.K))
self.resample()
def log_likelihood(self,x):
raise NotImplementedError
def log_probability(self):
return 0
def rvs(self,size=[]):
raise NotImplementedError
@property
def W_effective(self):
return self.A * self.W
def _compute_weighted_impulses_at_events_manual(self, data):
# Compute the instantaneous rate at the individual events
# Sum over potential parents.
# TODO: Call cython function to evaluate instantaneous rate
N, S, C, Z, dt_max = data.N, data.S, data.C, data.Z, self.model.dt_max
W = self.W
# Initialize matrix of weighted impulses from each process
lmbda = np.zeros((self.K, N))
for n in range(N):
# First parent is just the background rate of this process
# lmbda[self.K, n] += lambda0[C[n]]
# Iterate backward from the most recent to compute probabilities of each parent spike
for par in range(n-1, -1, -1):
dt = S[n] - S[par]
if dt == 0:
continue
# Since the spikes are sorted, we can stop if we reach a potential
# parent that occurred greater than dt_max in the past
if dt >= dt_max:
break
lmbda[C[par], n] += W[C[par], C[n]] * self.model.impulse_model.impulse(dt, C[par], C[n])
return lmbda
def _compute_weighted_impulses_at_events(self, data):
from pyhawkes.internals.continuous_time_helpers import \
compute_weighted_impulses_at_events
N, S, C, Z, dt_max = data.N, data.S, data.C, data.Z, self.model.dt_max
W = self.W
mu, tau = self.model.impulse_model.mu, self.model.impulse_model.tau
lmbda = np.zeros((N, self.K))
compute_weighted_impulses_at_events(S, C, Z, dt_max, W, mu, tau, lmbda)
return lmbda
def _resample_A_given_W(self, data):
"""
Resample A given W. This must be immediately followed by an
update of z | A, W.
:return:
"""
# Precompute weightedi impulse responses for each event
lmbda_irs = [self._compute_weighted_impulses_at_events(d) for d in data]
# lmbda_irs_manual = [self._compute_weighted_impulses_at_events_manual(d) for d in data]
# for l1,l2 in zip(lmbda_irs_manual, lmbda_irs):
# assert np.allclose(l1,l2)
lmbda0 = self.model.lambda0
def _log_likelihood_single_process(k):
ll = 0
for lmbda_ir, d in zip(lmbda_irs, data):
Ns, C, T = d.Ns, d.C, d.T
# - \int lambda_k(t) dt
ll -= lmbda0[k] * T
ll -= self.W_effective[:,k].dot(Ns)
# + \sum_n log(lambda(s_n))
ll += np.log(lmbda0[k] + np.sum(self.A[:,k] * lmbda_ir[C==k,:], axis=1)).sum()
return ll
# TODO: Write a Cython function to sample this more efficiently
p = self.network.P
for k1 in range(self.K):
# sys.stdout.write('.')
# sys.stdout.flush()
for k2 in range(self.K):
# Handle deterministic cases
if p[k1,k2] == 0.:
self.A[k1,k2] = 0
continue
if p[k1,k2] == 1.:
self.A[k1,k2] = 1
continue
# Compute the log likelihood of the events given W and A=0
self.A[k1,k2] = 0
ll0 = _log_likelihood_single_process(k2)
# Compute the log likelihood of the events given W and A=1
self.A[k1,k2] = 1
ll1 = _log_likelihood_single_process(k2)
# Sample A given conditional probability
lp0 = ll0 + np.log(1.0 - p[k1,k2])
lp1 = ll1 + np.log(p[k1,k2])
Z = logsumexp([lp0, lp1])
self.A[k1,k2] = np.log(np.random.rand()) < lp1 - Z
# sys.stdout.write('\n')
# sys.stdout.flush()
def _joblib_resample_A_given_W(self, data):
"""
Resample A given W. This must be immediately followed by an
update of z | A, W. This version uses joblib to parallelize
over columns of A.
:return:
"""
# Use the module trick to avoid copying globals
import pyhawkes.internals.parallel_adjacency_resampling as par
par.model = self.model
par.data = data
par.lambda_irs = [par._compute_weighted_impulses_at_events(d) for d in data]
if len(data) == 0:
self.A = np.random.rand(self.K, self.K) < self.network.P
return
# We can naively parallelize over receiving neurons, k2
# To avoid serializing and copying the data object, we
# manually extract the required arrays Sk, Fk, etc.
A_cols = Parallel(n_jobs=-1, backend="multiprocessing")(
delayed(par._ct_resample_column_of_A)(k2) for k2 in range(self.K))
self.A = np.array(A_cols).T
def resample_W_given_A_and_z(self, N, Zsum):
"""
Resample the weights given A and z.
:return:
"""
kappa_post = self.network.kappa + Zsum
v_post = self.network.V + N[:,None] * self.A
self.W = np.array(np.random.gamma(kappa_post, 1.0/v_post)).reshape((self.K, self.K))
def resample(self, data=[]):
"""
Resample A and W given the parents
:param N: A length-K vector specifying how many events occurred
on each of the K processes
:param Z: A TxKxKxB array of parent assignment counts
"""
assert isinstance(data, list)
# Compute sufficient statistics
N = np.zeros((self.K,))
Zsum = np.zeros((self.K, self.K))
for d in data:
Zsum += d.weight_ss
N += d.Ns
# Resample W | A, Z
self.resample_W_given_A_and_z(N, Zsum)
# Resample A | W
if self.parallel_resampling:
self._joblib_resample_A_given_W(data)
else:
self._resample_A_given_W(data)
| mit | -8,214,192,277,978,752,000 | 35.153401 | 104 | 0.544872 | false |
gratefulfrog/ArduGuitar | Ardu2/design/POC-3_MAX395/pyboard/V1_WithHMI/pyboard_no_debug/csv.py | 2 | 5862 | # csv.py
# my implementation of csv reading and writing
"""
READING interface:
try:
with open(self.filePath, 'r') as csvfile:
#print "opened file: " + self.filePath
reader = csv.Reader(csvfile)
self.header = next(reader)
for row in reader:
self.rowDict2confDict(row)
WRITING Interface:
with open(self.filePath, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames = self.conf.Vocab.headings,
delimiter=',')
#quotechar="'",
#quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.header)
for p in self.presets.keys():
writer.writerow(self.confDict2RowDict(p,self.presets[p]))
"""
class CSV():
class Reader():
"""
usage:
f=open('aFile.csv', 'r')
reader = Reader(f)
header = next(reader)
lines = [l for l in reader]
f.close()
or
with open('aFile.csv', 'r') as f:
reader = Reader(f)
header = next(reader)
lines = [l for l in reader]
"""
def __init__(self,openFile,delimeter=',',quote='"'):
# read rows, return list of cells, quoting if needed,
# all cells are read and returned as strings
self.openFile = openFile
self.delimeter = delimeter
self.quote = quote
def __iter__(self):
return self
def __next__(self):
line = self.openFile.readline()
if line == '':
raise StopIteration
return self.parse(line)
def parse(self,line):
#print(line)
return self.parseIter(line,current='',mode=0,res=[])
def parseIter(self,line,current,mode,res):
"""
mode=0
current = ''
0: looking for any character or eol
: if c==delimeter, append current, loop
: if c==quote, mode<-1, loop
: if eol, append current, return res
: else current+=c, mode<-2, loop
1: looking for closing quote
: if eol: raise missing closing quote error
: if c==quote, mode<-3, loop
: else, current+=c, loop
2: reading chars, looking for delimeter or eol
: if eol: append current, return
: if c==delimeter, append current, current<-'', mode<-0, loop
: else: current+=c, loop
3: finished a quoted expr, need delimeter
: if c != delimeter: raise missing delimeter error
: else, append current, current<-'', mode<-0,loop
"""
for c in line:
if c=='\r':
continue
if mode==0:
if c=='\n':
res.append(current)
return res
if c == self.delimeter:
res.append(current)
current=''
continue
elif c==self.quote:
mode = 1
continue
else:
current+=c
mode=2
continue
elif mode==1:
if c=='\n':
raise Exception('Missing Closing Quote!')
if c==self.quote:
mode = 3
continue
else:
current+=c
continue
elif mode==2:
if c=='\n':
res.append(current)
return res
if c==self.delimeter:
res.append(current)
current = ''
mode=0
continue
else:
current+=c
continue
elif mode==3:
if c=='\n':
res.append(current)
return res
elif c==self.delimeter:
res.append(current)
current = ''
mode=0
continue
else:
raise Exception('Found character after quote before delimeter!')
return res
class Writer():
"""
writer = csv.DictWriter(csvfile)
writer.writeRow(self.header)
for p in self.presets.keys():
writer.writerow(self.confDict2RowDict(p,self.presets[p]))
"""
def __init__(self,openFile,delimeter=',',quote='"'):
self.openFile = openFile
self.delimeter = delimeter
self.quote = quote
def writeRow(self,row):
"""
            row is a list of elements, potentially containing a delimeter!
if the elt has a delimeter, then quote it,
"""
#import gc
#print(gc.mem_free())
#print(row)
#print(row[-1])
tooFile = ''
for elt in row[:-1]:
tooFile += self.fix(elt) + self.delimeter
tooFile += self.fix(row[-1]) + '\n'
self.openFile.write(tooFile)
def fix(self,elt):
res = str(elt)
if res.find(self.delimeter) >= 0:
return '%c%s%c'%(self.quote,res,self.quote)
else:
return res
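if __name__ == '__main__':
    # Small usage example (added for illustration; the file name is a placeholder).
    with open('presets.csv', 'w') as f:
        w = CSV.Writer(f)
        w.writeRow(['name', 'volume', 'tone'])
        w.writeRow(['clean, bright', 7, 3])   # the cell containing a comma gets quoted
    with open('presets.csv', 'r') as f:
        r = CSV.Reader(f)
        print(next(r))       # header row
        for row in r:
            print(row)       # every cell is returned as a string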
| gpl-2.0 | -5,950,035,282,991,769,000 | 33.686391 | 88 | 0.418287 | false |
mgit-at/ansible | lib/ansible/modules/network/ios/ios_user.py | 9 | 15497 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
        Please note that this option is not the same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
        password needs to be provided in clear text and it will be encrypted
        on the device.
        Please note that this option is not the same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies the SSH public key to configure
for the given username. This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
        resource definition absolute. It will remove any previously
        configured usernames on the device that are not part of the currently
        defined set of users, with the exception of the `admin` user.
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
from copy import deepcopy
import re
import base64
import hashlib
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
def validate_privilege(value, module):
if value and not 1 <= value <= 15:
module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
return {
'command': 'no username %s' % username,
'prompt': 'This operation will remove all username related configurations with same name',
'answer': 'y',
'newline': False,
}
def sshkey_fingerprint(sshkey):
# IOS will accept a MD5 fingerprint of the public key
# and is easier to configure in a single line
# we calculate this fingerprint here
if not sshkey:
return None
if ' ' in sshkey:
# ssh-rsa AAA...== comment
keyparts = sshkey.split(' ')
keyparts[1] = hashlib.md5(base64.b64decode(keyparts[1])).hexdigest().upper()
return ' '.join(keyparts)
else:
# just the key, assume rsa type
return 'ssh-rsa %s' % hashlib.md5(base64.b64decode(sshkey)).hexdigest().upper()
def map_obj_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
password_type = module.params['password_type']
def needs_update(want, have, x):
return want.get(x) and (want.get(x) != have.get(x))
def add(command, want, x):
command.append('username %s %s' % (want['name'], x))
def add_ssh(command, want, x=None):
command.append('ip ssh pubkey-chain')
if x:
command.append('username %s' % want['name'])
command.append('key-hash %s' % x)
command.append('exit')
else:
command.append('no username %s' % want['name'])
command.append('exit')
for update in updates:
want, have = update
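        # 'want' is the desired user definition from the task, 'have' is what
        # map_config_to_obj() parsed from the running config; commands are
        # only generated for the fields that actually differ.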
if want['state'] == 'absent':
if have['sshkey']:
add_ssh(commands, want)
else:
commands.append(user_del_cmd(want['name']))
if needs_update(want, have, 'view'):
add(commands, want, 'view %s' % want['view'])
if needs_update(want, have, 'privilege'):
add(commands, want, 'privilege %s' % want['privilege'])
if needs_update(want, have, 'sshkey'):
add_ssh(commands, want, want['sshkey'])
if needs_update(want, have, 'configured_password'):
if update_password == 'always' or not have:
if have and password_type != have['password_type']:
module.fail_json(msg='Can not have both a user password and a user secret.' +
' Please choose one or the other.')
add(commands, want, '%s %s' % (password_type, want['configured_password']))
if needs_update(want, have, 'nopassword'):
if want['nopassword']:
add(commands, want, 'nopassword')
else:
add(commands, want, user_del_cmd(want['name']))
return commands
def parse_view(data):
match = re.search(r'view (\S+)', data, re.M)
if match:
return match.group(1)
def parse_sshkey(data):
match = re.search(r'key-hash (\S+ \S+(?: .+)?)$', data, re.M)
if match:
return match.group(1)
def parse_privilege(data):
match = re.search(r'privilege (\S+)', data, re.M)
if match:
return int(match.group(1))
def parse_password_type(data):
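    # A credential line looks like "username bob privilege 15 secret 5 $1$...",
    # so the keyword naming the credential type ('password' or 'secret') sits
    # three tokens from the end, just before the encryption level and the hash.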
type = None
if data and data.split()[-3] in ['password', 'secret']:
type = data.split()[-3]
return type
def map_config_to_obj(module):
data = get_config(module, flags=['| section username'])
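    # Match "username <name>" both at the start of a line and indented by two
    # spaces, as it appears inside "ip ssh pubkey-chain" blocks.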
match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
regex = r'username %s .+$' % user
cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
sshregex = r'username %s\n\s+key-hash .+$' % user
sshcfg = re.findall(sshregex, data, re.M)
sshcfg = '\n'.join(sshcfg)
obj = {
'name': user,
'state': 'present',
'nopassword': 'nopassword' in cfg,
'configured_password': None,
'password_type': parse_password_type(cfg),
'sshkey': parse_sshkey(sshcfg),
'privilege': parse_privilege(cfg),
'view': parse_view(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
users = module.params['aggregate']
if not users:
if not module.params['name'] and module.params['purge']:
return list()
elif not module.params['name']:
module.fail_json(msg='username is required')
else:
aggregate = [{'name': module.params['name']}]
else:
aggregate = list()
for item in users:
if not isinstance(item, dict):
aggregate.append({'name': item})
elif 'name' not in item:
module.fail_json(msg='name is required')
else:
aggregate.append(item)
objects = list()
for item in aggregate:
get_value = partial(get_param_value, item=item, module=module)
item['configured_password'] = get_value('configured_password')
item['nopassword'] = get_value('nopassword')
item['privilege'] = get_value('privilege')
item['view'] = get_value('view')
item['sshkey'] = sshkey_fingerprint(get_value('sshkey'))
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
item = next((i for i in have if i['name'] == entry['name']), None)
if all((item is None, entry['state'] == 'present')):
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
configured_password=dict(no_log=True),
nopassword=dict(type='bool'),
update_password=dict(default='always', choices=['on_create', 'always']),
password_type=dict(default='secret', choices=['secret', 'password']),
privilege=dict(type='int'),
view=dict(aliases=['role']),
sshkey=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
purge=dict(type='bool', default=False)
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
mutually_exclusive = [('name', 'aggregate')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
if module.params['password'] and not module.params['configured_password']:
warnings.append(
'The "password" argument is used to authenticate the current connection. ' +
'To set a user password use "configured_password" instead.'
)
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['name'] for x in want]
have_users = [x['name'] for x in have]
for item in set(have_users).difference(want_users):
if item != 'admin':
commands.append(user_del_cmd(item))
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,922,159,421,067,481,000 | 30.691207 | 110 | 0.62554 | false |
arne-cl/discoursegraphs | tests/test_relabel.py | 1 | 7853 | #!/usr/bin/env python
from networkx import (
    convert_node_labels_to_integers, empty_graph, DiGraph, Graph, MultiDiGraph,
    MultiGraph)
import networkx as nx
import pytest
from discoursegraphs.relabel import relabel_nodes
"""
This module contains tests for the relabel module, which was slightly adapted
from the same module in networkx. Tests were 'translated' from nose to py.test.
"""
def assert_edges_equal(edges1, edges2):
# Assumes iterables with u,v nodes as
# edge tuples (u,v), or
# edge tuples with data dicts (u,v,d), or
# edge tuples with keys and data dicts (u,v,k, d)
from collections import defaultdict
d1 = defaultdict(dict)
d2 = defaultdict(dict)
c1 = 0
for c1,e in enumerate(edges1):
u, v = e[0], e[1]
data = e[2:]
d1[u][v] = data
d1[v][u] = data
c2 = 0
for c2, e in enumerate(edges2):
u, v = e[0], e[1]
data = e[2:]
d2[u][v] = data
d2[v][u] = data
assert c1 == c2
assert d1 == d2
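# Hedged example (added for illustration, not part of the original suite):
# assert_edges_equal treats edges as unordered pairs, so neither the order of
# the endpoints nor the order of the edge list matters.
def test_assert_edges_equal_ignores_order():
    assert_edges_equal([(1, 2), (2, 3)], [(3, 2), (2, 1)])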
@pytest.mark.xfail
def test_convert_node_labels_to_integers():
"""test stopped working after converting it from nose -> pytest
TypeError: 'int' object is not iterable
"""
# test that empty graph converts fine for all options
G = empty_graph()
H = convert_node_labels_to_integers(G, 100)
assert H.name == '(empty_graph(0))_with_int_labels'
assert list(H.nodes()) == []
assert list(H.edges()) == []
for opt in ["default", "sorted", "increasing degree",
"decreasing degree"]:
G = empty_graph()
H = convert_node_labels_to_integers(G, 100, ordering=opt)
assert H.name == '(empty_graph(0))_with_int_labels'
assert list(H.nodes()) == []
assert list(H.edges()) == []
G = empty_graph()
G.add_edges_from([('A','B'), ('A','C'), ('B','C'), ('C','D')])
G.name="paw"
H = convert_node_labels_to_integers(G)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
H = convert_node_labels_to_integers(G, 1000)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
assert list(H.nodes()) == [1000, 1001, 1002, 1003]
H = convert_node_labels_to_integers(G, ordering="increasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
    assert H.degree(0) == 1
    assert H.degree(1) == 2
    assert H.degree(2) == 2
    assert H.degree(3) == 3
H = convert_node_labels_to_integers(G,ordering="decreasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
    assert H.degree(0) == 3
    assert H.degree(1) == 2
    assert H.degree(2) == 2
    assert H.degree(3) == 1
H = convert_node_labels_to_integers(G,ordering="increasing degree",
label_attribute='label')
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
    assert H.degree(0) == 1
    assert H.degree(1) == 2
    assert H.degree(2) == 2
    assert H.degree(3) == 3
# check mapping
assert H.node[3]['label'] == 'C'
assert H.node[0]['label'] == 'D'
assert (H.node[1]['label'] == 'A' or H.node[2]['label'] == 'A')
assert (H.node[1]['label'] == 'B' or H.node[2]['label'] == 'B')
@pytest.mark.xfail
def test_convert_to_integers2():
"""test stopped working after converting it from nose -> pytest
TypeError: 'int' object is not iterable
"""
G = empty_graph()
G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
G.name="paw"
H = convert_node_labels_to_integers(G,ordering="sorted")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
H = convert_node_labels_to_integers(G,ordering="sorted",
label_attribute='label')
assert H.node[0]['label'] == 'A'
assert H.node[1]['label'] == 'B'
assert H.node[2]['label'] == 'C'
assert H.node[3]['label'] == 'D'
def test_convert_to_integers_raise():
G = nx.Graph()
with pytest.raises(nx.NetworkXError) as excinfo:
H=convert_node_labels_to_integers(G, ordering="increasing age")
@pytest.mark.xfail
def test_relabel_nodes_copy():
"""failed after switching to dg.relabel_nodes"""
G = empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H = relabel_nodes(G,mapping)
assert sorted(H.nodes()) == ['aardvark', 'bear', 'cat', 'dog']
@pytest.mark.xfail
def test_relabel_nodes_function():
"""failed after switching to dg.relabel_nodes"""
G = empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
# function mapping no longer encouraged but works
def mapping(n):
return ord(n)
H = relabel_nodes(G,mapping)
assert sorted(H.nodes()) == [65, 66, 67, 68]
@pytest.mark.xfail
def test_relabel_nodes_graph():
"""failed after switching to dg.relabel_nodes"""
G = Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping = {'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H = relabel_nodes(G,mapping)
assert sorted(H.nodes()) == ['aardvark', 'bear', 'cat', 'dog']
@pytest.mark.xfail
def test_relabel_nodes_digraph():
"""failed after switching to dg.relabel_nodes"""
G = DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping = {'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H = relabel_nodes(G,mapping,copy=False)
assert sorted(H.nodes()) == ['aardvark', 'bear', 'cat', 'dog']
@pytest.mark.xfail
def test_relabel_nodes_multigraph():
"""failed after switching to dg.relabel_nodes"""
G = MultiGraph([('a','b'),('a','b')])
mapping = {'a':'aardvark','b':'bear'}
G = relabel_nodes(G,mapping,copy=False)
assert sorted(G.nodes()) == ['aardvark', 'bear']
assert_edges_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
@pytest.mark.xfail
def test_relabel_nodes_multidigraph():
"""failed after switching to dg.relabel_nodes"""
G = MultiDiGraph([('a','b'),('a','b')])
mapping = {'a':'aardvark','b':'bear'}
G = relabel_nodes(G,mapping,copy=False)
assert sorted(G.nodes()) == ['aardvark', 'bear']
assert sorted(G.edges()) == [('aardvark', 'bear'), ('aardvark', 'bear')]
@pytest.mark.xfail
def test_relabel_isolated_nodes_to_same():
"""failed after switching to dg.relabel_nodes"""
G = Graph()
G.add_nodes_from(range(4))
mapping = {1:1}
H = relabel_nodes(G, mapping, copy=False)
assert sorted(H.nodes()) == list(range(4))
def test_relabel_nodes_missing():
G = Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping = {0:'aardvark'}
with pytest.raises(KeyError) as excinfo:
G = relabel_nodes(G,mapping,copy=False)
def test_relabel_toposort():
K5 = nx.complete_graph(4)
G = nx.complete_graph(4)
G = nx.relabel_nodes(G, {i: i+1 for i in range(4)}, copy=False)
assert nx.is_isomorphic(K5,G)
G = nx.complete_graph(4)
G = nx.relabel_nodes(G, {i: i-1 for i in range(4)}, copy=False)
assert nx.is_isomorphic(K5,G)
def test_relabel_selfloop():
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert sorted(G.nodes()) == ['One','Three','Two']
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert sorted(G.nodes()) == ['One','Three','Two']
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert sorted(G.nodes()) == [0]
| bsd-3-clause | 8,131,633,362,504,863,000 | 32.135021 | 79 | 0.573539 | false |
mrquim/mrquimrepo | script.module.youtube.dl/lib/youtube_dl/extractor/tinypic.py | 64 | 1896 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class TinyPicIE(InfoExtractor):
IE_NAME = 'tinypic'
IE_DESC = 'tinypic.com videos'
_VALID_URL = r'https?://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
_TESTS = [
{
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
'md5': '609b74432465364e72727ebc6203f044',
'info_dict': {
'id': '6xw7tc',
'ext': 'flv',
'title': 'shadow phenomenon weird',
},
},
{
'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id, 'Downloading page')
mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
r'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
if mobj is None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
file_id = mobj.group('fileid')
server_id = mobj.group('serverid')
KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting'
keywords = self._html_search_meta('keywords', webpage, 'title')
title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else ''
video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id)
thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id)
return {
'id': file_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title
}
| gpl-2.0 | -4,597,872,331,539,985,000 | 32.857143 | 104 | 0.53692 | false |
moyogo/defcon | Lib/defcon/objects/glyph.py | 1 | 51864 | from __future__ import absolute_import
import weakref
from warnings import warn
from fontTools.misc.arrayTools import unionRect
from defcon.objects.base import BaseObject
from defcon.objects.contour import Contour
from defcon.objects.point import Point
from defcon.objects.component import Component
from defcon.objects.anchor import Anchor
from defcon.objects.lib import Lib
from defcon.objects.guideline import Guideline
from defcon.objects.image import Image
from defcon.objects.color import Color
from defcon.tools.representations import glyphAreaRepresentationFactory
from defcon.pens.decomposeComponentPointPen import DecomposeComponentPointPen
def addRepresentationFactory(name, factory):
warn("addRepresentationFactory is deprecated. Use the functions in defcon.__init__.", DeprecationWarning)
Glyph.representationFactories[name] = dict(factory=factory, destructiveNotifications=["Glyph.Changed"])
def removeRepresentationFactory(name):
warn("removeRepresentationFactory is deprecated. Use the functions in defcon.__init__.", DeprecationWarning)
del Glyph.representationFactories[name]
class Glyph(BaseObject):
"""
This object represents a glyph and it contains contour, component, anchor
and other assorted bits data about the glyph.
**This object posts the following notifications:**
- Glyph.Changed
- Glyph.BeginUndo
- Glyph.EndUndo
- Glyph.BeginRedo
- Glyph.EndRedo
- Glyph.NameWillChange
- Glyph.NameChanged
- Glyph.UnicodesChanged
- Glyph.WidthChanged
- Glyph.HeightChanged
- Glyph.LeftMarginWillChange
- Glyph.LeftMarginDidChange
- Glyph.RightMarginWillChange
- Glyph.RightMarginDidChange
- Glyph.TopMarginWillChange
- Glyph.TopMarginDidChange
- Glyph.BottomMarginWillChange
- Glyph.BottomMarginDidChange
- Glyph.NoteChanged
- Glyph.LibChanged
- Glyph.ImageChanged
- Glyph.ImageWillBeDeleted
- Glyph.ContourWillBeAdded
- Glyph.ContourWillBeDeleted
- Glyph.ContoursChanged
- Glyph.ComponentWillBeAdded
- Glyph.ComponentWillBeDeleted
- Glyph.ComponentsChanged
- Glyph.AnchorWillBeAdded
- Glyph.AnchorWillBeDeleted
- Glyph.AnchorsChanged
- Glyph.GuidelineWillBeAdded
- Glyph.GuidelineWillBeDeleted
- Glyph.GuidelinesChanged
- Glyph.MarkColorChanged
- Glyph.VerticalOriginChanged
    The Glyph object has list-like behavior. This behavior allows you to interact
with contour data directly. For example, to get a particular contour::
contour = glyph[0]
To iterate over all contours::
for contour in glyph:
To get the number of contours::
contourCount = len(glyph)
To interact with components or anchors in a similar way,
use the ``components`` and ``anchors`` attributes.
"""
changeNotificationName = "Glyph.Changed"
beginUndoNotificationName = "Glyph.BeginUndo"
endUndoNotificationName = "Glyph.EndUndo"
beginRedoNotificationName = "Glyph.BeginRedo"
endRedoNotificationName = "Glyph.EndRedo"
representationFactories = {
"defcon.glyph.area" : dict(
factory=glyphAreaRepresentationFactory,
destructiveNotifications=("Glyph.ContoursChanged", "Glyph.ComponentsChanged", "Glyph.ComponentBaseGlyphDataChanged")
)
}
def __init__(self, layer=None,
contourClass=None, pointClass=None, componentClass=None, anchorClass=None,
guidelineClass=None, libClass=None, imageClass=None):
layerSet = font = None
if layer is not None:
layerSet = layer.layerSet
if layerSet is not None:
font = weakref.ref(layer.layerSet.font)
layerSet = weakref.ref(layer.layerSet)
layer = weakref.ref(layer)
self._font = font
self._layerSet = layerSet
self._layer = layer
super(Glyph, self).__init__()
self.beginSelfNotificationObservation()
self._isLoading = False
self._dirty = False
self._name = None
self._unicodes = []
self._width = 0
self._height = 0
self._note = None
self._image = None
self._identifiers = set()
self._shallowLoadedContours = None
self._contours = []
self._components = []
self._anchors = []
self._guidelines = []
self._lib = None
if contourClass is None:
contourClass = Contour
if pointClass is None:
pointClass = Point
if componentClass is None:
componentClass = Component
if anchorClass is None:
anchorClass = Anchor
if guidelineClass is None:
guidelineClass = Guideline
if libClass is None:
libClass = Lib
if imageClass is None:
imageClass = Image
self._contourClass = contourClass
self._pointClass = pointClass
self._componentClass = componentClass
self._anchorClass = anchorClass
self._guidelineClass = guidelineClass
self._libClass = libClass
self._imageClass = imageClass
def __del__(self):
super(Glyph, self).__del__()
self._contours = None
self._components = None
self._anchors = None
self._guidelines = None
self._lib = None
self._image = None
# --------------
# Parent Objects
# --------------
def getParent(self):
return self.font
def _get_font(self):
if self._font is None:
return None
return self._font()
font = property(_get_font, doc="The :class:`Font` that this glyph belongs to.")
def _get_layerSet(self):
if self._layerSet is None:
return None
return self._layerSet()
layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this glyph belongs to.")
def _get_layer(self):
if self._layer is None:
return None
return self._layer()
layer = property(_get_layer, doc="The :class:`Layer` that this glyph belongs to.")
# ----------------
# Basic Attributes
# ----------------
# identifiers
def _get_identifiers(self):
return self._identifiers
identifiers = property(_get_identifiers, doc="Set of identifiers for the glyph. This is primarily for internal use.")
# name
def _set_name(self, value):
oldName = self._name
if oldName != value:
self.postNotification(notification="Glyph.NameWillChange", data=dict(oldValue=oldName, newValue=value))
self._name = value
self.postNotification(notification="Glyph.NameChanged", data=dict(oldValue=oldName, newValue=value))
self.dirty = True
def _get_name(self):
return self._name
    name = property(_get_name, _set_name, doc="The name of the glyph. Setting this posts *Glyph.NameChanged* and *Glyph.Changed* notifications.")
# unicodes
def _get_unicodes(self):
return list(self._unicodes)
def _set_unicodes(self, value):
oldValue = self.unicodes
if oldValue != value:
self._unicodes = list(value)
self.postNotification(notification="Glyph.UnicodesChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
unicodes = property(_get_unicodes, _set_unicodes, doc="The list of unicode values assigned to the glyph. Setting this posts *Glyph.UnicodesChanged* and *Glyph.Changed* notifications.")
def _get_unicode(self):
if self._unicodes:
return self._unicodes[0]
return None
def _set_unicode(self, value):
if value is None:
self.unicodes = []
else:
self.unicodes = [value]
unicode = property(_get_unicode, _set_unicode, doc="The primary unicode value for the glyph. This is the equivalent of ``glyph.unicodes[0]``. This is a convenience attribute that works with the ``unicodes`` attribute.")
# -------
# Metrics
# -------
# bounds
def _getContourComponentBounds(self, attr):
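        # Union the requested bounds attribute of every contour and component;
        # returns None when the glyph has no outline.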
bounds = None
subObjects = [contour for contour in self]
subObjects += [component for component in self.components]
for subObject in subObjects:
b = getattr(subObject, attr)
if b is not None:
if bounds is None:
bounds = b
else:
bounds = unionRect(bounds, b)
return bounds
def _get_bounds(self):
return self._getContourComponentBounds("bounds")
bounds = property(_get_bounds, doc="The bounds of the glyph's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")
def _get_controlPointBounds(self):
return self._getContourComponentBounds("controlPointBounds")
controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the glyph. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")
# area
def _get_area(self):
return self.getRepresentation("defcon.glyph.area")
area = property(_get_area, doc="The area of the glyph's outline.")
# margins
def _get_leftMargin(self):
bounds = self.bounds
if bounds is None:
return None
xMin, yMin, xMax, yMax = bounds
return xMin
def _set_leftMargin(self, value):
bounds = self.bounds
if bounds is None:
return
xMin, yMin, xMax, yMax = bounds
oldValue = xMin
diff = value - xMin
if value != oldValue:
self.postNotification(notification="Glyph.LeftMarginWillChange", data=dict(oldValue=oldValue, newValue=value))
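            # Shift the outline, components and anchors so the new left margin
            # is honored, then grow the advance width by the same amount so
            # the right margin is left untouched.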
self.move((diff, 0))
self.width += diff
self.dirty = True
self.postNotification(notification="Glyph.LeftMarginDidChange", data=dict(oldValue=oldValue, newValue=value))
leftMargin = property(_get_leftMargin, _set_leftMargin, doc="The left margin of the glyph. Setting this posts *Glyph.WidthChanged*, *Glyph.LeftMarginWillChange*, *Glyph.LeftMarginDidChange* and *Glyph.Changed* notifications among others.")
def _get_rightMargin(self):
bounds = self.bounds
if bounds is None:
return None
xMin, yMin, xMax, yMax = bounds
return self._width - xMax
def _set_rightMargin(self, value):
bounds = self.bounds
if bounds is None:
return
xMin, yMin, xMax, yMax = bounds
oldValue = self._width - xMax
if oldValue != value:
self.postNotification(notification="Glyph.RightMarginWillChange", data=dict(oldValue=oldValue, newValue=value))
self.width = xMax + value
self.dirty = True
self.postNotification(notification="Glyph.RightMarginDidChange", data=dict(oldValue=oldValue, newValue=value))
rightMargin = property(_get_rightMargin, _set_rightMargin, doc="The right margin of the glyph. Setting this posts *Glyph.WidthChanged*, *Glyph.RightMarginWillChange*, *Glyph.RightMarginDidChange* and *Glyph.Changed* notifications among others.")
def _get_bottomMargin(self):
bounds = self.bounds
if bounds is None:
return None
xMin, yMin, xMax, yMax = bounds
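        # Without an explicit vertical origin the bottom margin is simply yMin;
        # otherwise it is measured from the bottom of the vertical extent,
        # which sits at verticalOrigin - height.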
if self.verticalOrigin is None:
return yMin
else:
return yMin - (self.verticalOrigin - self.height)
def _set_bottomMargin(self, value):
bounds = self.bounds
if bounds is None:
return
xMin, yMin, xMax, yMax = bounds
if self.verticalOrigin is None:
oldValue = yMin
self.verticalOrigin = self.height
else:
oldValue = yMin - (self.verticalOrigin - self.height)
diff = value - oldValue
if value != oldValue:
self.postNotification(notification="Glyph.BottomMarginWillChange", data=dict(oldValue=oldValue, newValue=value))
self.height += diff
self.dirty = True
self.postNotification(notification="Glyph.BottomMarginDidChange", data=dict(oldValue=oldValue, newValue=value))
bottomMargin = property(_get_bottomMargin, _set_bottomMargin, doc="The bottom margin of the glyph. Setting this posts *Glyph.HeightChanged*, *Glyph.BottomMarginWillChange*, *Glyph.BottomMarginDidChange* and *Glyph.Changed* notifications among others.")
def _get_topMargin(self):
bounds = self.bounds
if bounds is None:
return None
xMin, yMin, xMax, yMax = bounds
if self.verticalOrigin is None:
return self._height - yMax
else:
return self.verticalOrigin - yMax
def _set_topMargin(self, value):
bounds = self.bounds
if bounds is None:
return
xMin, yMin, xMax, yMax = bounds
if self.verticalOrigin is None:
oldValue = self._height - yMax
else:
oldValue = self.verticalOrigin - yMax
diff = value - oldValue
if oldValue != value:
self.postNotification(notification="Glyph.TopMarginWillChange", data=dict(oldValue=oldValue, newValue=value))
self.verticalOrigin = yMax + value
self.height += diff
self.dirty = True
            self.postNotification(notification="Glyph.TopMarginDidChange", data=dict(oldValue=oldValue, newValue=value))
topMargin = property(_get_topMargin, _set_topMargin, doc="The top margin of the glyph. Setting this posts *Glyph.HeightChanged*, *Glyph.VerticalOriginChanged*, *Glyph.TopMarginWillChange*, *Glyph.TopMarginDidChange* and *Glyph.Changed* notifications among others.")
# width
def _get_width(self):
return self._width
def _set_width(self, value):
oldValue = self._width
if oldValue != value:
self._width = value
self.postNotification(notification="Glyph.WidthChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
width = property(_get_width, _set_width, doc="The width of the glyph. Setting this posts *Glyph.WidthChanged* and *Glyph.Changed* notifications.")
# height
def _get_height(self):
return self._height
def _set_height(self, value):
oldValue = self._height
if oldValue != value:
self._height = value
self.postNotification(notification="Glyph.HeightChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
height = property(_get_height, _set_height, doc="The height of the glyph. Setting this posts *Glyph.HeightChanged* and *Glyph.Changed* notifications.")
# ----------------------
# Lib Wrapped Attributes
# ----------------------
# mark color
def _get_markColor(self):
value = self.lib.get("public.markColor")
if value is not None:
value = Color(value)
return value
def _set_markColor(self, value):
# convert to a color object
if value is not None:
value = Color(value)
# don't write if there is no change
oldValue = self.lib.get("public.markColor")
if oldValue is not None:
oldValue = Color(oldValue)
if value == oldValue:
return
# remove
if value is None:
if "public.markColor" in self.lib:
del self.lib["public.markColor"]
# store
else:
self.lib["public.markColor"] = value
self.postNotification(notification="Glyph.MarkColorChanged", data=dict(oldValue=oldValue, newValue=value))
markColor = property(_get_markColor, _set_markColor, doc="The glyph's mark color. When setting, the value can be a UFO color string, a sequence of (r, g, b, a) or a :class:`Color` object. Setting this posts *Glyph.MarkColorChanged* and *Glyph.Changed* notifications.")
# vertical origin
def _get_verticalOrigin(self):
value = self.lib.get("public.verticalOrigin")
return value
def _set_verticalOrigin(self, value):
# don't write if there is no change
oldValue = self.lib.get("public.verticalOrigin")
if value == oldValue:
return
# remove
if value is None:
if "public.verticalOrigin" in self.lib:
del self.lib["public.verticalOrigin"]
# store
else:
self.lib["public.verticalOrigin"] = value
self.postNotification(notification="Glyph.VerticalOriginChanged", data=dict(oldValue=oldValue, newValue=value))
verticalOrigin = property(_get_verticalOrigin, _set_verticalOrigin, doc="The glyph's vertical origin. Setting this posts *Glyph.VerticalOriginChanged* and *Glyph.Changed* notifications.")
# -------
# Pen API
# -------
def draw(self, pen):
"""
Draw the glyph with **pen**.
"""
from fontTools.pens.pointPen import PointToSegmentPen
pointPen = PointToSegmentPen(pen)
self.drawPoints(pointPen)
def drawPoints(self, pointPen):
"""
Draw the glyph with **pointPen**.
"""
if self._shallowLoadedContours:
self._drawShallowLoadedContours(pointPen, self._shallowLoadedContours)
else:
for contour in self._contours:
contour.drawPoints(pointPen)
for component in self._components:
component.drawPoints(pointPen)
def _drawShallowLoadedContours(self, pointPen, contours):
for contour in contours:
try:
pointPen.beginPath(identifier=contour.get("identifier"))
except TypeError:
pointPen.beginPath()
warn("The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.", DeprecationWarning)
for args, kwargs in contour["points"]:
pointPen.addPoint(*args, **kwargs)
pointPen.endPath()
def getPen(self):
"""
Get the pen used to draw into this glyph.
"""
from fontTools.pens.pointPen import SegmentToPointPen
return SegmentToPointPen(self.getPointPen())
def getPointPen(self):
"""
Get the point pen used to draw into this glyph.
"""
from defcon.pens.glyphObjectPointPen import GlyphObjectPointPen, GlyphObjectLoadingPointPen
if self._isLoading:
self._shallowLoadedContours = []
return GlyphObjectLoadingPointPen(self)
else:
return GlyphObjectPointPen(self)
# --------
# Contours
# --------
def _get_contourClass(self):
return self._contourClass
contourClass = property(_get_contourClass, doc="The class used for contours.")
def _get_pointClass(self):
return self._pointClass
pointClass = property(_get_pointClass, doc="The class used for points.")
def _fullyLoadShallowLoadedContours(self):
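        # Contours read during loading are kept as raw point data; replay that
        # data through the glyph's own point pen to build real Contour objects,
        # taking care not to mark the glyph dirty or post notifications.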
if not self._shallowLoadedContours:
self._shallowLoadedContours = None
return
self.disableNotifications()
contours = list(self._shallowLoadedContours)
self._shallowLoadedContours = None
dirty = self.dirty
pointPen = self.getPointPen()
self._drawShallowLoadedContours(pointPen, contours)
self.dirty = dirty
self.enableNotifications()
def instantiateContour(self, contourDict=None):
contour = self._contourClass(
glyph=self,
pointClass=self.pointClass
)
if contourDict is not None:
contour.setDataFromSerialization(contourDict)
return contour
def beginSelfContourNotificationObservation(self, contour):
if contour.dispatcher is None:
return
contour.addObserver(observer=self, methodName="_contourChanged", notification="Contour.Changed")
def endSelfContourNotificationObservation(self, contour):
if contour.dispatcher is None:
return
contour.removeObserver(observer=self, notification="Contour.Changed")
contour.endSelfNotificationObservation()
def appendContour(self, contour):
"""
Append **contour** to the glyph. The contour must be a defcon
:class:`Contour` object or a subclass of that object. An error
will be raised if the contour's identifier or a point identifier
conflicts with any of the identifiers within the glyph.
This will post a *Glyph.Changed* notification.
"""
self.insertContour(len(self), contour)
def insertContour(self, index, contour):
"""
Insert **contour** into the glyph at index. The contour
must be a defcon :class:`Contour` object or a subclass
of that object. An error will be raised if the contour's
identifier or a point identifier conflicts with any of
the identifiers within the glyph.
This will post a *Glyph.Changed* notification.
"""
assert contour not in self
assert contour.glyph in (self, None), "This contour belongs to another glyph."
self.postNotification(notification="Glyph.ContourWillBeAdded", data=dict(object=contour))
if contour.glyph is None:
identifiers = self._identifiers
if contour.identifier is not None:
assert contour.identifier not in identifiers
identifiers.add(contour.identifier)
for point in contour:
if point.identifier is not None:
assert point.identifier not in identifiers
identifiers.add(point.identifier)
contour.glyph = self
contour.beginSelfNotificationObservation()
self.beginSelfContourNotificationObservation(contour)
self._contours.insert(index, contour)
self.postNotification(notification="Glyph.ContoursChanged")
self.dirty = True
def removeContour(self, contour):
"""
Remove **contour** from the glyph.
This will post a *Glyph.Changed* notification.
"""
if contour not in self:
raise IndexError("contour not in glyph")
self.postNotification(notification="Glyph.ContourWillBeDeleted", data=dict(object=contour))
identifiers = self._identifiers
if contour.identifier is not None:
identifiers.remove(contour.identifier)
for point in contour:
if point.identifier is not None:
identifiers.remove(point.identifier)
self._contours.remove(contour)
self.endSelfContourNotificationObservation(contour)
self.postNotification(notification="Glyph.ContoursChanged")
self.dirty = True
def contourIndex(self, contour):
"""
Get the index for **contour**.
"""
return self._getContourIndex(contour)
def clearContours(self):
"""
Clear all contours from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications(note="Requested by Glyph.clearContours.")
for contour in reversed(self):
self.removeContour(contour)
self.releaseHeldNotifications()
def correctContourDirection(self, trueType=False, segmentLength=10):
"""
Correct the direction of all contours in the glyph.
This posts a *Glyph.Changed* notification.
"""
# set the contours to the same direction
for contour in self:
contour.clockwise = False
# sort the contours by area in reverse (i.e. largest first)
contours = sorted(self, key=lambda contour: -contour.area)
# build a tree of nested contours
tree = {}
for largeIndex, largeContour in enumerate(contours):
for smallContour in contours[largeIndex + 1:]:
if largeContour.contourInside(smallContour, segmentLength=segmentLength):
if largeContour not in tree:
tree[largeContour] = []
tree[largeContour].append(smallContour)
# run through the tree, largest to smallest, flipping
# the direction of each contour nested within another contour
for largeContour in contours:
if largeContour in tree:
for smallContour in tree[largeContour]:
smallContour.reverse()
# set to the opposite if needed
if trueType:
for contour in self:
contour.reverse()
# ----------
# Components
# ----------
def _get_componentClass(self):
return self._componentClass
componentClass = property(_get_componentClass, doc="The class used for components.")
def _get_components(self):
return list(self._components)
components = property(_get_components, doc="An ordered list of :class:`Component` objects stored in the glyph.")
def instantiateComponent(self, componentDict=None):
component = self._componentClass(
glyph=self
)
if componentDict is not None:
component.setDataFromSerialization(componentDict)
return component
def beginSelfComponentNotificationObservation(self, component):
if component.dispatcher is None:
return
component.addObserver(observer=self, methodName="_componentChanged", notification="Component.Changed")
component.addObserver(observer=self, methodName="_componentBaseGlyphDataChanged", notification="Component.BaseGlyphDataChanged")
def endSelfComponentNotificationObservation(self, component):
if component.dispatcher is None:
return
component.removeObserver(observer=self, notification="Component.Changed")
component.removeObserver(observer=self, notification="Component.BaseGlyphDataChanged")
component.endSelfNotificationObservation()
def appendComponent(self, component):
"""
Append **component** to the glyph. The component must be a defcon
:class:`Component` object or a subclass of that object. An error
will be raised if the component's identifier conflicts with any of
the identifiers within the glyph.
This will post a *Glyph.Changed* notification.
"""
self.insertComponent(len(self._components), component)
def insertComponent(self, index, component):
"""
Insert **component** into the glyph at index. The component
must be a defcon :class:`Component` object or a subclass
of that object. An error will be raised if the component's
identifier conflicts with any of the identifiers within
the glyph.
This will post a *Glyph.Changed* notification.
"""
assert component not in self._components
assert component.glyph in (self, None), "This component belongs to another glyph."
self.postNotification(notification="Glyph.ComponentWillBeAdded", data=dict(object=component))
if component.glyph is None:
if component.identifier is not None:
identifiers = self._identifiers
assert component.identifier not in identifiers
identifiers.add(component.identifier)
component.glyph = self
component.beginSelfNotificationObservation()
self.beginSelfComponentNotificationObservation(component)
self._components.insert(index, component)
self.postNotification(notification="Glyph.ComponentsChanged")
self.dirty = True
def removeComponent(self, component):
"""
Remove **component** from the glyph.
This will post a *Glyph.Changed* notification.
"""
self.postNotification(notification="Glyph.ComponentWillBeDeleted", data=dict(object=component))
if component.identifier is not None:
self._identifiers.remove(component.identifier)
self._components.remove(component)
self.endSelfComponentNotificationObservation(component)
self.postNotification(notification="Glyph.ComponentsChanged")
self.dirty = True
def componentIndex(self, component):
"""
Get the index for **component**.
"""
return self._components.index(component)
def clearComponents(self):
"""
Clear all components from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications(note="Requested by Glyph.clearComponents.")
for component in reversed(self._components):
self.removeComponent(component)
self.releaseHeldNotifications()
def decomposeComponent(self, component):
"""
Decompose **component**. This will preserve the identifiers
in the incoming contours and points unless there is a conflict.
In that case, the conflicting incoming identifier will be discarded.
This posts *Glyph.ComponentsChanged*, *Glyph.ContoursChanged*
and *Glyph.Changed* notifications.
"""
self.holdNotifications(note="Requested by Glyph.decomposeComponent.")
layer = self.layer
pointPen = DecomposeComponentPointPen(self, layer)
self._decomposeComponent(component, layer, pointPen)
self.releaseHeldNotifications()
self.postNotification(notification="Glyph.ContoursChanged")
def decomposeAllComponents(self):
"""
Decompose all components in this glyph. This will preserve the
identifiers in the incoming contours and points unless there is a
conflict. In that case, the conflicting incoming identifier will
be discarded.
This posts *Glyph.ComponentsChanged*, *Glyph.ContoursChanged*
and *Glyph.Changed* notifications.
"""
if not self.components:
return
self.holdNotifications(note="Requested by Glyph.decomposeAllComponents.")
layer = self.layer
pointPen = DecomposeComponentPointPen(self, layer)
for component in self.components:
self._decomposeComponent(component, layer, pointPen)
self.releaseHeldNotifications()
self.postNotification(notification="Glyph.ContoursChanged")
def _decomposeComponent(self, component, layer, pointPen):
pointPen.skipConflictingIdentifiers = True
component.drawPoints(pointPen)
self.removeComponent(component)
# -------
# Anchors
# -------
def _get_anchorClass(self):
return self._anchorClass
anchorClass = property(_get_anchorClass, doc="The class used for anchors.")
def _get_anchors(self):
return list(self._anchors)
def _set_anchors(self, value):
self.clearAnchors()
self.holdNotifications(note="Requested by Glyph._set_anchors.")
for anchor in value:
self.appendAnchor(anchor)
self.releaseHeldNotifications()
anchors = property(_get_anchors, _set_anchors, doc="An ordered list of :class:`Anchor` objects stored in the glyph.")
def instantiateAnchor(self, anchorDict=None):
anchor = self._anchorClass(
glyph=self,
anchorDict=anchorDict)
return anchor
def beginSelfAnchorNotificationObservation(self, anchor):
if anchor.dispatcher is None:
return
anchor.addObserver(observer=self, methodName="_anchorChanged", notification="Anchor.Changed")
def endSelfAnchorNotificationObservation(self, anchor):
if anchor.dispatcher is None:
return
anchor.removeObserver(observer=self, notification="Anchor.Changed")
anchor.endSelfNotificationObservation()
def appendAnchor(self, anchor):
"""
Append **anchor** to the glyph. The anchor must be a defcon
:class:`Anchor` object or a subclass of that object. An error
will be raised if the anchor's identifier conflicts with any of
the identifiers within the glyph.
This will post a *Glyph.Changed* notification.
"""
self.insertAnchor(len(self._anchors), anchor)
def insertAnchor(self, index, anchor):
"""
Insert **anchor** into the glyph at index. The anchor
must be a defcon :class:`Anchor` object or a subclass
of that object. An error will be raised if the anchor's
identifier conflicts with any of the identifiers within
the glyph.
This will post a *Glyph.Changed* notification.
"""
if not isinstance(anchor, self._anchorClass):
anchor = self.instantiateAnchor(anchorDict=anchor)
assert anchor not in self._anchors
assert anchor.glyph in (self, None), "This anchor belongs to another glyph."
self.postNotification(notification="Glyph.AnchorWillBeAdded", data=dict(object=anchor))
if anchor.glyph is None:
if anchor.identifier is not None:
identifiers = self._identifiers
assert anchor.identifier not in identifiers
identifiers.add(anchor.identifier)
anchor.glyph = self
anchor.beginSelfNotificationObservation()
self.beginSelfAnchorNotificationObservation(anchor)
self._anchors.insert(index, anchor)
self.postNotification(notification="Glyph.AnchorsChanged")
self.dirty = True
def removeAnchor(self, anchor):
"""
Remove **anchor** from the glyph.
This will post a *Glyph.Changed* notification.
"""
self.postNotification(notification="Glyph.AnchorWillBeDeleted", data=dict(object=anchor))
if anchor.identifier is not None:
self._identifiers.remove(anchor.identifier)
self._anchors.remove(anchor)
self.endSelfAnchorNotificationObservation(anchor)
self.postNotification(notification="Glyph.AnchorsChanged")
self.dirty = True
def anchorIndex(self, anchor):
"""
Get the index for **anchor**.
"""
return self._anchors.index(anchor)
def clearAnchors(self):
"""
Clear all anchors from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications(note="Requested by Glyph.clearAnchors.")
for anchor in reversed(self._anchors):
self.removeAnchor(anchor)
self.releaseHeldNotifications()
# ----------
# Guidelines
# ----------
def _get_guidelineClass(self):
return self._guidelineClass
guidelineClass = property(_get_guidelineClass, doc="The class used for guidelines.")
def _get_guidelines(self):
return list(self._guidelines)
def _set_guidelines(self, value):
self.clearGuidelines()
self.holdNotifications(note="Requested by Glyph._set_guidelines.")
for guideline in value:
self.appendGuideline(guideline)
self.releaseHeldNotifications()
guidelines = property(_get_guidelines, _set_guidelines, doc="An ordered list of :class:`Guideline` objects stored in the glyph. Setting this will post a *Glyph.Changed* notification along with any notifications posted by the :py:meth:`Glyph.appendGuideline` and :py:meth:`Glyph.clearGuidelines` methods.")
def instantiateGuideline(self, guidelineDict=None):
guideline = self._guidelineClass(
glyph=self,
guidelineDict=guidelineDict
)
return guideline
def beginSelfGuidelineNotificationObservation(self, guideline):
if guideline.dispatcher is None:
return
guideline.addObserver(observer=self, methodName="_guidelineChanged", notification="Guideline.Changed")
def endSelfGuidelineNotificationObservation(self, guideline):
if guideline.dispatcher is None:
return
guideline.removeObserver(observer=self, notification="Guideline.Changed")
guideline.endSelfNotificationObservation()
def appendGuideline(self, guideline):
"""
Append **guideline** to the glyph. The guideline must be a defcon
:class:`Guideline` object or a subclass of that object. An error
will be raised if the guideline's identifier conflicts with any of
the identifiers within the glyph.
This will post a *Glyph.Changed* notification.
"""
self.insertGuideline(len(self._guidelines), guideline)
def insertGuideline(self, index, guideline):
"""
Insert **guideline** into the glyph at index. The guideline
must be a defcon :class:`Guideline` object or a subclass
of that object. An error will be raised if the guideline's
identifier conflicts with any of the identifiers within
the glyph.
This will post a *Glyph.Changed* notification.
"""
assert guideline not in self.guidelines
if not isinstance(guideline, self._guidelineClass):
guideline = self.instantiateGuideline(guidelineDict=guideline)
assert guideline.glyph in (self, None), "This guideline belongs to another glyph."
if guideline.glyph is None:
assert guideline.font is None, "This guideline belongs to a font."
self.postNotification(notification="Glyph.GuidelineWillBeAdded", data=dict(object=guideline))
if guideline.glyph is None:
if guideline.identifier is not None:
identifiers = self._identifiers
assert guideline.identifier not in identifiers
if guideline.identifier is not None:
identifiers.add(guideline.identifier)
guideline.glyph = self
guideline.beginSelfNotificationObservation()
self.beginSelfGuidelineNotificationObservation(guideline)
self._guidelines.insert(index, guideline)
self.postNotification(notification="Glyph.GuidelinesChanged")
self.dirty = True
def removeGuideline(self, guideline):
"""
Remove **guideline** from the glyph.
This will post a *Glyph.Changed* notification.
"""
self.postNotification(notification="Glyph.GuidelineWillBeDeleted", data=dict(object=guideline))
if guideline.identifier is not None:
self._identifiers.remove(guideline.identifier)
self._guidelines.remove(guideline)
self.endSelfGuidelineNotificationObservation(guideline)
self.postNotification(notification="Glyph.GuidelinesChanged")
self.dirty = True
def guidelineIndex(self, guideline):
"""
Get the index for **guideline**.
"""
return self._guidelines.index(guideline)
def clearGuidelines(self):
"""
Clear all guidelines from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications(note="Requested by Glyph.clearGuidelines.")
for guideline in reversed(self._guidelines):
self.removeGuideline(guideline)
self.releaseHeldNotifications()
# ----
# Note
# ----
def _get_note(self):
return self._note
def _set_note(self, value):
if value is not None:
assert isinstance(value, str)
oldValue = self._note
if oldValue != value:
self._note = value
self.postNotification(notification="Glyph.NoteChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
note = property(_get_note, _set_note, doc="An arbitrary note for the glyph. Setting this will post a *Glyph.Changed* notification.")
# ---
# Lib
# ---
def _get_libClass(self):
return self._libClass
libClass = property(_get_libClass, doc="The class used for the lib.")
def instantiateLib(self):
lib = self._libClass(
glyph=self
)
return lib
def _get_lib(self):
if self._lib is None:
self._lib = self.instantiateLib()
self.beginSelfLibNotificationObservation()
return self._lib
def _set_lib(self, value):
lib = self.lib
lib.clear()
lib.update(value)
self.dirty = True
lib = property(_get_lib, _set_lib, doc="The glyph's :class:`Lib` object. Setting this will clear any existing lib data and post a *Glyph.Changed* notification if data was replaced.")
def beginSelfLibNotificationObservation(self):
if self._lib.dispatcher is None:
return
self._lib.addObserver(observer=self, methodName="_libContentChanged", notification="Lib.Changed")
def endSelfLibNotificationObservation(self):
if self._lib is None:
return
if self._lib.dispatcher is None:
return
self._lib.removeObserver(observer=self, notification="Lib.Changed")
self._lib.endSelfNotificationObservation()
# -----
# Image
# -----
def _get_imageClass(self):
return self._imageClass
imageClass = property(_get_imageClass, doc="The class used for the image.")
def instantiateImage(self, imageDict=None):
image = self._imageClass(
glyph=self,
imageDict=imageDict
)
return image
def _get_image(self):
if self._image is None:
self._image = self.instantiateImage()
self.beginSelfImageNotificationObservation()
return self._image
def _set_image(self, image):
# removing image
if image is None:
if self._image is not None:
self.postNotification(notification="Glyph.ImageWillBeDeleted")
self.endSelfImageNotificationObservation()
self._image = None
self.postNotification(notification="Glyph.ImageChanged")
self.dirty = True
# adding image
else:
if self._image is None:
# create the image object
i = self.image
if set(self._image.items()) != set(image.items()):
self._image.fileName = image["fileName"]
self._image.transformation = (image["xScale"], image["xyScale"], image["yxScale"], image["yScale"], image["xOffset"], image["yOffset"])
self._image.color = image.get("color")
self.postNotification(notification="Glyph.ImageChanged")
self.dirty = True
image = property(_get_image, _set_image, doc="The glyph's :class:`Image` object. Setting this posts *Glyph.ImageChanged* and *Glyph.Changed* notifications.")
def clearImage(self):
self.image = None
def beginSelfImageNotificationObservation(self):
if self._image.dispatcher is None:
return
self._image.addObserver(observer=self, methodName="_imageChanged", notification="Image.Changed")
self._image.addObserver(observer=self, methodName="_imageDataChanged", notification="Image.ImageDataChanged")
def endSelfImageNotificationObservation(self):
if self._image is None:
return
if self._image.dispatcher is None:
return
self._image.removeObserver(observer=self, notification="Image.Changed")
self._image.removeObserver(observer=self, notification="Image.ImageDataChanged")
self._image.endSelfNotificationObservation()
# -------------
# List Behavior
# -------------
def __contains__(self, contour):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return contour in self._contours
def __len__(self):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return len(self._contours)
def __iter__(self):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return iter(self._contours)
def __getitem__(self, index):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return self._contours[index]
def _getContourIndex(self, contour):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return self._contours.index(contour)
# ----------------
# Glyph Absorption
# ----------------
def copyDataFromGlyph(self, glyph):
"""
Copy data from **glyph**. This copies the following data:
==========
width
height
unicodes
note
image
contours
components
anchors
guidelines
lib
==========
The name attribute is purposefully omitted.
"""
from copy import deepcopy
self.width = glyph.width
self.height = glyph.height
self.unicodes = list(glyph.unicodes)
self.note = glyph.note
self.guidelines = [self.instantiateGuideline(g) for g in glyph.guidelines]
self.anchors = [self.instantiateAnchor(a) for a in glyph.anchors]
self.image = glyph.image
pointPen = self.getPointPen()
glyph.drawPoints(pointPen)
self.lib = deepcopy(glyph.lib)
# -----
# Clear
# -----
def clear(self):
"""
Clear all contours, components, anchors and guidelines from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications(note="Requested by Glyph.clear.")
self.clearContours()
self.clearComponents()
self.clearAnchors()
self.clearGuidelines()
self.clearImage()
self.releaseHeldNotifications()
# ----
# Move
# ----
def move(self, values):
"""
Move all contours, components and anchors in the glyph
by **(x, y)**.
This posts a *Glyph.Changed* notification.
"""
(x, y) = values
for contour in self:
contour.move((x, y))
for component in self._components:
component.move((x, y))
for anchor in self._anchors:
anchor.move((x, y))
# ------------
# Point Inside
# ------------
def pointInside(self, coordinates, evenOdd=False):
"""
Returns a boolean indicating if **(x, y)** is in the
"black" area of the glyph.
"""
(x, y) = coordinates
from fontTools.pens.pointInsidePen import PointInsidePen
pen = PointInsidePen(glyphSet=None, testPoint=(x, y), evenOdd=evenOdd)
self.draw(pen)
return pen.getResult()
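    # A small usage sketch (coordinates are illustrative): for a glyph whose
    # outline encloses (50, 50),
    #
    #   if glyph.pointInside((50, 50)):
    #       ...  # point is in the "black" area
    #
    # The test is delegated to fontTools' PointInsidePen, so evenOdd switches
    # between the even-odd and non-zero winding rules.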
# ----------------------
# Notification Callbacks
# ----------------------
def endSelfNotificationObservation(self):
if self.dispatcher is None:
return
if self._contours:
for contour in self:
self.endSelfContourNotificationObservation(contour)
for component in self.components:
self.endSelfComponentNotificationObservation(component)
for anchor in self.anchors:
self.endSelfAnchorNotificationObservation(anchor)
for guideline in self.guidelines:
self.endSelfGuidelineNotificationObservation(guideline)
self.endSelfLibNotificationObservation()
self.endSelfImageNotificationObservation()
super(Glyph, self).endSelfNotificationObservation()
self._font = None
self._layerSet = None
self._layer = None
def _imageDataChanged(self, notification):
self.postNotification(notification="Glyph.ImageChanged")
self.postNotification(notification=self.changeNotificationName)
def _imageChanged(self, notification):
self.postNotification(notification="Glyph.ImageChanged")
self.dirty = True
def _contourChanged(self, notification):
self.postNotification(notification="Glyph.ContoursChanged")
self.dirty = True
def _componentChanged(self, notification):
self.postNotification(notification="Glyph.ComponentsChanged")
self.dirty = True
def _componentBaseGlyphDataChanged(self, notification):
self.postNotification(notification="Glyph.ComponentsChanged")
self.postNotification(notification=self.changeNotificationName)
def _anchorChanged(self, notification):
self.postNotification(notification="Glyph.AnchorsChanged")
self.dirty = True
def _guidelineChanged(self, notification):
self.postNotification(notification="Glyph.GuidelinesChanged")
self.dirty = True
def _libContentChanged(self, notification):
self.postNotification(notification="Glyph.LibChanged")
self.dirty = True
# -----------------------------
# Serialization/Deserialization
# -----------------------------
def getDataForSerialization(self, **kwargs):
from functools import partial
simple_get = partial(getattr, self)
serialize = lambda item: item.getDataForSerialization()
serialized_get = lambda key: serialize(simple_get(key))
serialized_list_get = lambda key: [serialize(item) for item in simple_get(key)]
getters = [
('name', simple_get),
('unicodes', simple_get),
('width', simple_get),
('height', simple_get),
('note', simple_get),
('components', serialized_list_get),
('anchors', serialized_list_get),
('guidelines', serialized_list_get),
('image', serialized_get),
('lib', serialized_get)
]
if self._shallowLoadedContours is not None:
getters.append(('_shallowLoadedContours', simple_get))
else:
getters.append(('_contours', serialized_list_get))
return self._serialize(getters, **kwargs)
def setDataFromSerialization(self, data):
from functools import partial
set_attr = partial(setattr, self) # key, data
def set_each(setter, drop_key=False):
            # when drop_key is True the wrapped setter only takes the value;
            # otherwise the setter itself is called with both key and value
            _setter = (lambda k, v: setter(v)) if drop_key else setter
def wrapper(key, data):
for d in data:
_setter(key, d)
return wrapper
def single_init(factory, data):
item = factory(data)
return item
def list_init(factory, data):
return [single_init(factory, childData) for childData in data]
def init_set(init, factory, setter):
def wrapper(key, data):
setter(key, init(factory, data))
return wrapper
# Clear all contours, components, anchors and guidelines from the glyph.
self.clear()
setters = (
('name', set_attr),
('unicodes', set_attr),
('width', set_attr),
('height', set_attr),
('note', set_attr),
('lib', set_attr),
('_shallowLoadedContours', set_attr),
('_contours', init_set(list_init, self.instantiateContour, set_each(self.appendContour, True))),
('components', init_set(list_init, self.instantiateComponent, set_each(self.appendComponent, True))),
('guidelines', init_set(list_init, self.instantiateGuideline, set_attr)),
('anchors', init_set(list_init, self.instantiateAnchor, set_attr)),
('image', init_set(single_init, self.instantiateImage, set_attr))
)
for key, setter in setters:
if key not in data:
continue
setter(key, data[key])
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | -3,219,715,125,084,927,000 | 35.575458 | 309 | 0.634563 | false |
moylop260/odoo-dev | addons/mrp_byproduct/mrp_byproduct.py | 30 | 8808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By opposition, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts'),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
qty2 = production.product_uos and production.product_uos_qty or False
product_uos_factor = 0.0
if qty2 and production.bom_id.product_uos.id:
product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id)
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
if production.product_uos_qty:
qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'product_uos_qty': qty2,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'state': 'waiting',
'production_id': production.id
}
self.pool.get('stock.move').create(cr, uid, data)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
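    # Worked example (values are illustrative, not from any real BoM): if the
    # BoM produces 10 units of the finished product and lists 2 units of a
    # 'variable' byproduct, the factor above is 2.0 / 10.0 = 0.2, so a
    # production order for 25 finished units yields 25 * 0.2 = 5 byproduct
    # units (see _update_product_to_produce below).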
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,225,047,293,519,693,000 | 53.37037 | 218 | 0.618756 | false |
koparasy/gemfi | configs/example/ruby_direct_test.py | 6 | 5375 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Brad Beckmann
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
parser.add_option("-l", "--requests", metavar="N", default=100,
help="Stop after N requests")
parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
help="Wakeup every N cycles")
parser.add_option("--test-type", type="choice", default="SeriesGetx",
choices = ["SeriesGetx", "SeriesGets", "SeriesGetMixed",
"Invalidate"],
help = "Type of test")
parser.add_option("--percent-writes", type="int", default=100,
help="percentage of accesses that should be writes")
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
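# Example invocation (binary path and option values are illustrative only):
#
#   build/X86/gem5.opt configs/example/ruby_direct_test.py \
#       --test-type=SeriesGetMixed --percent-writes=50 \
#       --requests=200 --num-cpus=4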
#
# Select the direct test generator
#
if options.test_type == "SeriesGetx":
generator = SeriesRequestGenerator(num_cpus = options.num_cpus,
percent_writes = 100)
elif options.test_type == "SeriesGets":
generator = SeriesRequestGenerator(num_cpus = options.num_cpus,
percent_writes = 0)
elif options.test_type == "SeriesGetMixed":
generator = SeriesRequestGenerator(num_cpus = options.num_cpus,
percent_writes = options.percent_writes)
elif options.test_type == "Invalidate":
generator = InvalidateGenerator(num_cpus = options.num_cpus)
else:
print "Error: unknown direct test generator"
sys.exit(1)
#
# Create the M5 system. Note that the Memory Object isn't
# actually used by the rubytester, but is included to support the
# M5 memory size == Ruby memory size checks
#
system = System(physmem = SimpleMemory(),
mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
#
# Create the ruby random tester
#
system.tester = RubyDirectedTester(requests_to_complete = \
options.requests,
generator = generator)
Ruby.create_system(options, system)
# Since Ruby runs at an independent frequency, create a separate clock
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
assert(options.num_cpus == len(system.ruby._cpu_ports))
for ruby_port in system.ruby._cpu_ports:
#
# Tie the ruby tester ports to the ruby cpu ports
#
system.tester.cpuPort = ruby_port.slave
# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
| bsd-3-clause | 1,973,769,458,501,934,300 | 36.852113 | 79 | 0.692651 | false |
matthew-tucker/mne-python | mne/minimum_norm/psf_ctf.py | 21 | 18951 | # Authors: Olaf Hauk <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
from scipy import linalg
from ..io.pick import pick_channels
from ..utils import logger, verbose
from ..forward import convert_forward_solution
from ..evoked import EvokedArray
from ..source_estimate import SourceEstimate
from .inverse import _subject_from_inverse
from . import apply_inverse
def _prepare_info(inverse_operator):
"""Helper to get a usable dict"""
# in order to convert sub-leadfield matrix to evoked data type (pretending
# it's an epoch, see in loop below), uses 'info' from inverse solution
# because this has all the correct projector information
info = deepcopy(inverse_operator['info'])
info['sfreq'] = 1000. # necessary
info['projs'] = inverse_operator['projs']
return info
def _pick_leadfield(leadfield, forward, ch_names):
"""Helper to pick out correct lead field components"""
# NB must pick from fwd['sol']['row_names'], not ['info']['ch_names'],
# because ['sol']['data'] may be ordered differently from functional data
picks_fwd = pick_channels(forward['sol']['row_names'], ch_names)
return leadfield[picks_fwd]
@verbose
def point_spread_function(inverse_operator, forward, labels, method='dSPM',
lambda2=1 / 9., pick_ori=None, mode='mean',
n_svd_comp=1, verbose=None):
"""Compute point-spread functions (PSFs) for linear estimators
Compute point-spread functions (PSF) in labels for a combination of inverse
operator and forward solution. PSFs are computed for test sources that are
perpendicular to cortical surface.
Parameters
----------
inverse_operator : instance of InverseOperator
Inverse operator.
forward : dict
Forward solution. Note: (Bad) channels not included in forward
solution will not be used in PSF computation.
labels : list of Label
Labels for which PSFs shall be computed.
method : 'MNE' | 'dSPM' | 'sLORETA'
Inverse method for which PSFs shall be computed (for apply_inverse).
lambda2 : float
The regularization parameter (for apply_inverse).
pick_ori : None | "normal"
If "normal", rather than pooling the orientations by taking the norm,
only the radial component is kept. This is only implemented
when working with loose orientations (for apply_inverse).
    mode : 'mean' | 'sum' | 'svd'
PSFs can be computed for different summary measures with labels:
'sum' or 'mean': sum or means of sub-leadfields for labels
This corresponds to situations where labels can be assumed to be
homogeneously activated.
'svd': SVD components of sub-leadfields for labels
This is better suited for situations where activation patterns are
assumed to be more variable.
"sub-leadfields" are the parts of the forward solutions that belong to
        vertices within individual labels.
n_svd_comp : integer
Number of SVD components for which PSFs will be computed and output
(irrelevant for 'sum' and 'mean'). Explained variances within
sub-leadfields are shown in screen output.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_psf : SourceEstimate
The PSFs for the specified labels
If mode='svd': n_svd_comp components per label are created
(i.e. n_svd_comp successive time points in mne_analyze)
The last sample is the summed PSF across all labels
Scaling of PSFs is arbitrary, and may differ greatly among methods
(especially for MNE compared to noise-normalized estimates).
evoked_fwd : Evoked
Forward solutions corresponding to PSFs in stc_psf
If mode='svd': n_svd_comp components per label are created
(i.e. n_svd_comp successive time points in mne_analyze)
The last sample is the summed forward solution across all labels
(sum is taken across summary measures).
"""
mode = mode.lower()
if mode not in ['mean', 'sum', 'svd']:
raise ValueError("mode must be 'svd', 'mean' or 'sum'. Got %s."
% mode)
logger.info("About to process %d labels" % len(labels))
forward = convert_forward_solution(forward, force_fixed=False,
surf_ori=True)
info = _prepare_info(inverse_operator)
leadfield = _pick_leadfield(forward['sol']['data'][:, 2::3], forward,
info['ch_names'])
# will contain means of subleadfields for all labels
label_psf_summary = []
# if mode='svd', this will collect all SVD singular values for labels
label_singvals = []
# loop over labels
for ll in labels:
logger.info(ll)
if ll.hemi == 'rh':
# for RH labels, add number of LH vertices
offset = forward['src'][0]['vertno'].shape[0]
# remember whether we are in the LH or RH
this_hemi = 1
elif ll.hemi == 'lh':
offset = 0
this_hemi = 0
# get vertices on cortical surface inside label
idx = np.intersect1d(ll.vertices, forward['src'][this_hemi]['vertno'])
# get vertices in source space inside label
fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
# get sub-leadfield matrix for label vertices
sub_leadfield = leadfield[:, fwd_idx + offset]
# compute summary data for labels
if mode == 'sum': # sum across forward solutions in label
logger.info("Computing sums within labels")
this_label_psf_summary = sub_leadfield.sum(axis=1)[np.newaxis, :]
elif mode == 'mean':
logger.info("Computing means within labels")
this_label_psf_summary = sub_leadfield.mean(axis=1)[np.newaxis, :]
elif mode == 'svd': # takes svd of forward solutions in label
logger.info("Computing SVD within labels, using %d component(s)"
% n_svd_comp)
# compute SVD of sub-leadfield
u_svd, s_svd, _ = linalg.svd(sub_leadfield,
full_matrices=False,
compute_uv=True)
# keep singular values (might be useful to some people)
label_singvals.append(s_svd)
# get first n_svd_comp components, weighted with their
# corresponding singular values
logger.info("First 5 singular values: %s" % s_svd[0:5])
logger.info("(This tells you something about variability of "
"forward solutions in sub-leadfield for label)")
# explained variance by chosen components within sub-leadfield
my_comps = s_svd[:n_svd_comp]
comp_var = (100. * np.sum(my_comps * my_comps) /
np.sum(s_svd * s_svd))
logger.info("Your %d component(s) explain(s) %.1f%% "
"variance in label." % (n_svd_comp, comp_var))
this_label_psf_summary = (u_svd[:, :n_svd_comp] *
s_svd[:n_svd_comp][np.newaxis, :])
# transpose required for conversion to "evoked"
this_label_psf_summary = this_label_psf_summary.T
# initialise or append to existing collection
label_psf_summary.append(this_label_psf_summary)
label_psf_summary = np.concatenate(label_psf_summary, axis=0)
# compute sum across forward solutions for labels, append to end
label_psf_summary = np.r_[label_psf_summary,
label_psf_summary.sum(axis=0)[np.newaxis, :]].T
# convert sub-leadfield matrix to evoked data type (a bit of a hack)
evoked_fwd = EvokedArray(label_psf_summary, info=info, tmin=0.)
# compute PSFs by applying inverse operator to sub-leadfields
logger.info("About to apply inverse operator for method='%s' and "
"lambda2=%s" % (method, lambda2))
stc_psf = apply_inverse(evoked_fwd, inverse_operator, lambda2,
method=method, pick_ori=pick_ori)
return stc_psf, evoked_fwd
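# A minimal usage sketch for point_spread_function. The file names and the
# read_* helpers are assumptions for illustration, not defined in this module:
#
#   from mne import read_forward_solution, read_labels_from_annot
#   from mne.minimum_norm import read_inverse_operator, point_spread_function
#
#   fwd = read_forward_solution('sample-fwd.fif')
#   inv = read_inverse_operator('sample-inv.fif')
#   labels = read_labels_from_annot('sample', parc='aparc')
#   stc_psf, evoked_fwd = point_spread_function(inv, fwd, labels,
#                                               method='dSPM', mode='svd',
#                                               n_svd_comp=2)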
def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
method='dSPM', lambda2=1. / 9.,
mode='mean', n_svd_comp=1):
"""Get inverse matrix from an inverse operator
Currently works only for fixed/loose orientation constraints
For loose orientation constraint, the CTFs are computed for the radial
component (pick_ori='normal').
Parameters
----------
inverse_operator : instance of InverseOperator
The inverse operator.
forward : dict
The forward operator.
method : 'MNE' | 'dSPM' | 'sLORETA'
Inverse methods (for apply_inverse).
labels : list of Label | None
Labels for which CTFs shall be computed. If None, inverse matrix for
all vertices will be returned.
lambda2 : float
The regularization parameter (for apply_inverse).
    pick_ori : None | "normal"
If "normal", rather than pooling the orientations by taking the norm,
only the radial component is kept. This is only implemented
when working with loose orientations (for apply_inverse).
Determines whether whole inverse matrix G will have one or three rows
per vertex. This will also affect summary measures for labels.
mode : 'mean' | 'sum' | 'svd'
CTFs can be computed for different summary measures with labels:
'sum' or 'mean': sum or means of sub-inverse for labels
This corresponds to situations where labels can be assumed to be
homogeneously activated.
'svd': SVD components of sub-inverse for labels
This is better suited for situations where activation patterns are
assumed to be more variable.
"sub-inverse" is the part of the inverse matrix that belongs to
        vertices within individual labels.
n_svd_comp : int
Number of SVD components for which CTFs will be computed and output
(irrelevant for 'sum' and 'mean'). Explained variances within
sub-inverses are shown in screen output.
Returns
-------
invmat : ndarray
Inverse matrix associated with inverse operator and specified
parameters.
label_singvals : list of ndarray
Singular values of svd for sub-inverses.
Provides information about how well labels are represented by chosen
components. Explained variances within sub-inverses are shown in
screen output.
"""
mode = mode.lower()
if not forward['surf_ori']:
raise RuntimeError('Forward has to be surface oriented and '
'force_fixed=True.')
if not (forward['source_ori'] == 1):
raise RuntimeError('Forward has to be surface oriented and '
'force_fixed=True.')
if labels:
logger.info("About to process %d labels" % len(labels))
else:
logger.info("Computing whole inverse operator.")
info = _prepare_info(inverse_operator)
# create identity matrix as input for inverse operator
id_mat = np.eye(len(info['ch_names']))
# convert identity matrix to evoked data type (pretending it's an epoch)
ev_id = EvokedArray(id_mat, info=info, tmin=0.)
snr = 3.0
lambda2 = 1.0 / snr ** 2
# apply inverse operator to identity matrix in order to get inverse matrix
# free orientation constraint not possible because apply_inverse would
# combined components
invmat_mat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
method=method, pick_ori='normal')
logger.info("Dimension of inverse matrix: %s" % str(invmat_mat_op.shape))
    # turn source estimate into numpy array
invmat_mat = invmat_mat_op.data
invmat_summary = []
# if mode='svd', label_singvals will collect all SVD singular values for
# labels
label_singvals = []
if labels:
for ll in labels:
if ll.hemi == 'rh':
# for RH labels, add number of LH vertices
offset = forward['src'][0]['vertno'].shape[0]
# remember whether we are in the LH or RH
this_hemi = 1
elif ll.hemi == 'lh':
offset = 0
this_hemi = 0
else:
raise RuntimeError("Cannot determine hemisphere of label.")
# get vertices on cortical surface inside label
idx = np.intersect1d(ll.vertices,
forward['src'][this_hemi]['vertno'])
# get vertices in source space inside label
fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
# get sub-inverse for label vertices, one row per vertex
invmat_lbl = invmat_mat[fwd_idx + offset, :]
# compute summary data for labels
if mode == 'sum': # takes sum across estimators in label
logger.info("Computing sums within labels")
this_invmat_summary = invmat_lbl.sum(axis=0)
this_invmat_summary = np.vstack(this_invmat_summary).T
elif mode == 'mean':
logger.info("Computing means within labels")
this_invmat_summary = invmat_lbl.mean(axis=0)
this_invmat_summary = np.vstack(this_invmat_summary).T
elif mode == 'svd': # takes svd of sub-inverse in label
logger.info("Computing SVD within labels, using %d "
"component(s)" % n_svd_comp)
# compute SVD of sub-inverse
u_svd, s_svd, _ = linalg.svd(invmat_lbl.T,
full_matrices=False,
compute_uv=True)
# keep singular values (might be useful to some people)
label_singvals.append(s_svd)
# get first n_svd_comp components, weighted with their
# corresponding singular values
logger.info("First 5 singular values: %s" % s_svd[:5])
logger.info("(This tells you something about variability of "
"estimators in sub-inverse for label)")
# explained variance by chosen components within sub-inverse
my_comps = s_svd[:n_svd_comp]
comp_var = ((100 * np.sum(my_comps * my_comps)) /
np.sum(s_svd * s_svd))
logger.info("Your %d component(s) explain(s) %.1f%% "
"variance in label." % (n_svd_comp, comp_var))
this_invmat_summary = (u_svd[:, :n_svd_comp].T *
s_svd[:n_svd_comp][:, np.newaxis])
invmat_summary.append(this_invmat_summary)
invmat = np.concatenate(invmat_summary, axis=0)
else: # no labels provided: return whole matrix
invmat = invmat_mat
return invmat, label_singvals
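# Shape sketch for the helper above (sizes are illustrative): with 60 channels
# and three labels summarised using mode='svd' and n_svd_comp=2, invmat has
# shape (3 * 2, 60); with labels=None it is simply (n_sources, n_channels).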
@verbose
def cross_talk_function(inverse_operator, forward, labels,
method='dSPM', lambda2=1 / 9., signed=False,
mode='mean', n_svd_comp=1, verbose=None):
"""Compute cross-talk functions (CTFs) for linear estimators
Compute cross-talk functions (CTF) in labels for a combination of inverse
operator and forward solution. CTFs are computed for test sources that are
perpendicular to cortical surface.
Parameters
----------
inverse_operator : instance of InverseOperator
Inverse operator.
forward : dict
Forward solution. Note: (Bad) channels not included in forward
solution will not be used in CTF computation.
labels : list of Label
Labels for which CTFs shall be computed.
method : 'MNE' | 'dSPM' | 'sLORETA'
Inverse method for which CTFs shall be computed.
lambda2 : float
The regularization parameter.
signed : bool
If True, CTFs will be written as signed source estimates. If False,
absolute (unsigned) values will be written
mode : 'mean' | 'sum' | 'svd'
CTFs can be computed for different summary measures with labels:
'sum' or 'mean': sum or means of sub-inverses for labels
This corresponds to situations where labels can be assumed to be
homogeneously activated.
'svd': SVD components of sub-inverses for labels
This is better suited for situations where activation patterns are
assumed to be more variable. "sub-inverse" is the part of the inverse
        matrix that belongs to vertices within individual labels.
n_svd_comp : int
Number of SVD components for which CTFs will be computed and output
(irrelevant for 'sum' and 'mean'). Explained variances within
sub-inverses are shown in screen output.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_ctf : SourceEstimate
The CTFs for the specified labels.
If mode='svd': n_svd_comp components per label are created
(i.e. n_svd_comp successive time points in mne_analyze)
The last sample is the summed CTF across all labels.
"""
forward = convert_forward_solution(forward, force_fixed=True,
surf_ori=True)
# get the inverse matrix corresponding to inverse operator
out = _get_matrix_from_inverse_operator(inverse_operator, forward,
labels=labels, method=method,
lambda2=lambda2, mode=mode,
n_svd_comp=n_svd_comp)
invmat, label_singvals = out
# get the leadfield matrix from forward solution
leadfield = _pick_leadfield(forward['sol']['data'], forward,
inverse_operator['info']['ch_names'])
# compute cross-talk functions (CTFs)
ctfs = np.dot(invmat, leadfield)
# compute sum across forward solutions for labels, append to end
ctfs = np.vstack((ctfs, ctfs.sum(axis=0)))
# if unsigned output requested, take absolute values
if not signed:
ctfs = np.abs(ctfs, out=ctfs)
# create source estimate object
vertno = [ss['vertno'] for ss in inverse_operator['src']]
stc_ctf = SourceEstimate(ctfs.T, vertno, tmin=0., tstep=1.)
stc_ctf.subject = _subject_from_inverse(inverse_operator)
return stc_ctf
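# cross_talk_function is used the same way as point_spread_function above,
# e.g. (inv, fwd and labels as in the sketch following point_spread_function):
#
#   stc_ctf = cross_talk_function(inv, fwd, labels, method='MNE',
#                                 mode='svd', n_svd_comp=2, signed=False)
#   stc_ctf.save('sample-ctf')  # the file name is only an illustration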
| bsd-3-clause | 2,768,933,391,410,751,000 | 42.465596 | 79 | 0.61316 | false |
hkawasaki/kawasaki-aio8-1 | lms/djangoapps/courseware/tests/test_access.py | 23 | 6706 | import courseware.access as access
import datetime
from mock import Mock
from django.test import TestCase
from django.test.utils import override_settings
from courseware.tests.factories import UserFactory, CourseEnrollmentAllowedFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory
from xmodule.modulestore import Location
from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE
import pytz
# pylint: disable=protected-access
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class AccessTestCase(TestCase):
"""
Tests for the various access controls on the student dashboard
"""
def setUp(self):
self.course = Location('i4x://edX/toy/course/2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course=self.course)
self.course_instructor = InstructorFactory(course=self.course)
def test__has_access_to_location(self):
self.assertFalse(access._has_access_to_location(None, self.course, 'staff', None))
self.assertFalse(access._has_access_to_location(self.anonymous_user, self.course, 'staff', None))
self.assertFalse(access._has_access_to_location(self.anonymous_user, self.course, 'instructor', None))
self.assertTrue(access._has_access_to_location(self.global_staff, self.course, 'staff', None))
self.assertTrue(access._has_access_to_location(self.global_staff, self.course, 'instructor', None))
# A user has staff access if they are in the staff group
self.assertTrue(access._has_access_to_location(self.course_staff, self.course, 'staff', None))
self.assertFalse(access._has_access_to_location(self.course_staff, self.course, 'instructor', None))
# A user has staff and instructor access if they are in the instructor group
self.assertTrue(access._has_access_to_location(self.course_instructor, self.course, 'staff', None))
self.assertTrue(access._has_access_to_location(self.course_instructor, self.course, 'instructor', None))
# A user does not have staff or instructor access if they are
        # not in either the staff or the instructor group
self.assertFalse(access._has_access_to_location(self.student, self.course, 'staff', None))
self.assertFalse(access._has_access_to_location(self.student, self.course, 'instructor', None))
def test__has_access_string(self):
u = Mock(is_staff=True)
self.assertFalse(access._has_access_string(u, 'not_global', 'staff', None))
u._has_global_staff_access.return_value = True
self.assertTrue(access._has_access_string(u, 'global', 'staff', None))
self.assertRaises(ValueError, access._has_access_string, u, 'global', 'not_staff', None)
def test__has_access_descriptor(self):
# TODO: override DISABLE_START_DATES and test the start date branch of the method
u = Mock()
d = Mock()
d.start = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1) # make sure the start time is in the past
# Always returns true because DISABLE_START_DATES is set in test.py
self.assertTrue(access._has_access_descriptor(u, d, 'load'))
self.assertRaises(ValueError, access._has_access_descriptor, u, d, 'not_load_or_staff')
def test__has_access_course_desc_can_enroll(self):
u = Mock()
yesterday = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)
tomorrow = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1)
c = Mock(enrollment_start=yesterday, enrollment_end=tomorrow, enrollment_domain='')
# User can enroll if it is between the start and end dates
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# User can enroll if authenticated and specifically allowed for that course
# even outside the open enrollment period
u = Mock(email='[email protected]', is_staff=False)
u.is_authenticated.return_value = True
c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/2012_Fall', enrollment_domain='')
allowed = CourseEnrollmentAllowedFactory(email=u.email, course_id=c.id)
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# Staff can always enroll even outside the open enrollment period
u = Mock(email='[email protected]', is_staff=True)
u.is_authenticated.return_value = True
c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/Whenever', enrollment_domain='')
self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
# TODO:
# Non-staff cannot enroll outside the open enrollment period if not specifically allowed
def test__user_passed_as_none(self):
"""Ensure has_access handles a user being passed as null"""
access.has_access(None, 'global', 'staff', None)
class UserRoleTestCase(TestCase):
"""
Tests for user roles.
"""
def setUp(self):
self.course = Location('i4x://edX/toy/course/2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course=self.course)
self.course_instructor = InstructorFactory(course=self.course)
def test_user_role_staff(self):
"""Ensure that user role is student for staff masqueraded as student."""
self.assertEqual(
'staff',
access.get_user_role(self.course_staff, self.course.course_id)
)
# Masquerade staff
self.course_staff.masquerade_as_student = True
self.assertEqual(
'student',
access.get_user_role(self.course_staff, self.course.course_id)
)
def test_user_role_instructor(self):
"""Ensure that user role is student for instructor masqueraded as student."""
self.assertEqual(
'instructor',
access.get_user_role(self.course_instructor, self.course.course_id)
)
# Masquerade instructor
self.course_instructor.masquerade_as_student = True
self.assertEqual(
'student',
access.get_user_role(self.course_instructor, self.course.course_id)
)
def test_user_role_anonymous(self):
"""Ensure that user role is student for anonymous user."""
self.assertEqual(
'student',
access.get_user_role(self.anonymous_user, self.course.course_id)
)
| agpl-3.0 | 2,838,229,190,782,936,000 | 44.006711 | 121 | 0.679839 | false |
adamdoupe/enemy-of-the-state | link.py | 1 | 4653 | import re
import output
from lazyproperty import lazyproperty
from collections import namedtuple
class Link(object):
LinkIdx = namedtuple("LinkIdx", "type path params")
xpathsimplifier = re.compile(r"\[[^\]]*\]")
def __init__(self, internal, reqresp):
assert internal
assert reqresp
self.internal = internal
self.reqresp = reqresp
self.to = []
self.skip = False
@lazyproperty
def dompath(self):
return Link.xpathsimplifier.sub("", self.internal.getCanonicalXPath())
@lazyproperty
def _str(self):
raise NotImplementedError
def __str__(self):
return self._str
def __repr__(self):
return str(self)
import logging
import pdb
from constants import Constants
from recursive_dict import RecursiveDict
from randgen import RandGen
class Links(object):
Type = Constants("ANCHOR", "FORM", "REDIRECT")
rng = RandGen()
def __init__(self, anchors=[], forms=[], redirects=[]):
self.logger = logging.getLogger(self.__class__.__name__)
        # leaves in linkstree are counters of how many times that url occurred
        # therefore use that counter when computing the number of urls with "nleaves"
linkstree = RecursiveDict(lambda x: len(x))
for ltype, links in [(Links.Type.ANCHOR, anchors),
(Links.Type.FORM, forms),
(Links.Type.REDIRECT, redirects)]:
for l in links:
urlv = [ltype]
urlv += [l.dompath] if l.dompath else []
urlv += list(l.linkvector)
linkstree.applypath(urlv, lambda x: self.addlink(x, l))
if not linkstree:
# all pages with no links will end up in the same special bin
linkstree.setapplypathvalue(("<EMPTY>", ), [None], lambda x: x+[None])
self.linkstree = linkstree
def addlink(self, v, l):
keys = [i for i in v.keys() if isinstance(i, int)] if v else []
if v and keys:
nextk = max(keys) + 1
else:
nextk = 0
# call setpath to fix the leaves count
v.setpath([nextk], [l])
return v
def nAnchors(self):
if Links.Type.ANCHOR in self.linkstree:
return self.linkstree[Links.Type.ANCHOR].nleaves
else:
return 0
def nForms(self):
if Links.Type.FORM in self.linkstree:
return self.linkstree[Links.Type.FORM].nleaves
else:
return 0
def nRedirects(self):
if Links.Type.REDIRECT in self.linkstree:
return self.linkstree[Links.Type.REDIRECT].nleaves
else:
return 0
def __len__(self):
return self.nAnchors() + self.nForms() + self.nRedirects()
def __nonzero__(self):
return self.nAnchors() != 0 or self.nForms() != 0 or self.nRedirects() != 0
@lazyproperty
def _str(self):
return "Links(%s, %s, %s)" % (self.nAnchors(), self.nForms(), self.nRedirects())
def __str__(self):
return self._str
def __getitem__(self, linkidx):
idx = [linkidx.type] + list(linkidx.path)
val = self.linkstree.getpath(idx)
assert val.nleaves == len(list(val.iterleaves()))
if val.nleaves > 1:
ret = Links.rng.choice([i for i in val.iterleaves()])
ret = val.iterleaves().next()
assert not val.value or val.value == ret
assert isinstance(ret, list)
return ret[0]
def __iter__(self):
for l in self.linkstree.iterleaves():
assert isinstance(l, list), l
for i in l:
yield i
def iteritems(self):
for p, l in self.linkstree.iteridxleaves():
assert isinstance(l, list), l
yield (Link.LinkIdx(p[0], p[1:], None), l[0])
from utils import DebugDict
class AbstractLink(object):
def __init__(self, links):
# map from state to AbstractRequest
self.skip = any(i.skip for i in links)
self.links = links
self.parentpage = links[0].reqresp.response.page.abspage
assert all(i.reqresp.response.page.abspage == self.parentpage
for i in links)
self.targets = DebugDict(self.parentpage.instance)
@lazyproperty
def _str(self):
raise NotImplementedError
def __str__(self):
return self._str
def __repr__(self):
return str(self)
@lazyproperty
def dompath(self):
dompaths = set(l.dompath for l in self.links)
# XXX multiple dompaths not supported yet
assert len(dompaths) == 1
return iter(dompaths).next()
| gpl-2.0 | -4,311,830,849,712,518,700 | 28.636943 | 88 | 0.585429 | false |
zstackorg/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/operations/monitor_operations.py | 2 | 6149 | '''
All monitor operations for test.
@author: Songtao
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import zstackwoodpecker.operations.account_operations as account_operations
import apibinding.inventory as inventory
def get_monitor_item(resourceType, session_uuid=None):
action = api_actions.GetMonitorItemAction()
action.timeout = 3000
action.resourceType = resourceType
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.action_logger('Get %s Monitor Item ' % action.resourceType)
return evt.inventories
def create_monitor_trigger(resource_uuid, duration, expression, session_uuid=None):
action = api_actions.CreateMonitorTriggerAction()
action.targetResourceUuid = resource_uuid
action.duration = duration
action.name = resource_uuid
action.expression = expression
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def query_monitor_trigger(uuid=None, session_uuid=None):
action = api_actions.QueryMonitorTriggerAction()
action.uuid = uuid
action.conditions = []
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def update_monitor_trigger(uuid, infoType, infoValue, session_uuid=None):
action = api_actions.UpdateMonitorTriggerAction()
action.uuid = uuid
if infoType == 'name':
action.name = infoValue
elif infoType == 'description':
action.description = infoValue
elif infoType == 'expression':
action.expression = infoValue
elif infoType == 'duration':
action.duration = infoValue
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def delete_monitor_trigger(uuid, session_uuid=None):
action = api_actions.DeleteMonitorTriggerAction()
action.uuid = uuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def change_monitor_trigger_state(uuid, state, session_uuid=None):
action = api_actions.ChangeMonitorTriggerStateAction()
action.uuid = uuid
action.stateEvent = state
action.timeout = 6000
test_util.action_logger('Change monitor trigger [uuid:] %s to [state:] %s' % (uuid, state))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def create_email_media(name, smtpport, smtpserver, username, password, session_uuid=None):
action = api_actions.CreateEmailMediaAction()
action.name = name
action.smtpPort = smtpport
action.smtpServer = smtpserver
action.username = username
action.password = password
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def query_email_media(uuid=None, session_uuid=None):
action = api_actions.QueryMediaAction()
action.uuid = uuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def change_email_media_state(uuid, state, session_uuid=None):
action = api_actions.ChangeMediaStateAction()
action.uuid = uuid
action.stateEvent = state
action.timeout = 6000
test_util.action_logger('Change email media [uuid:] %s to [state:] %s' % (uuid, state))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def delete_email_media(uuid, session_uuid=None):
action = api_actions.DeleteMediaAction()
action.uuid = uuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def create_email_monitor_trigger_action(name, mediaUuid, triggerUuids, email,session_uuid=None):
action = api_actions.CreateEmailMonitorTriggerActionAction()
action.name = name
action.mediaUuid = mediaUuid
action.triggerUuids = triggerUuids
action.email = email
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def query_monitor_trigger_action(uuid=None, session_uuid=None):
action = api_actions.QueryMonitorTriggerAction()
action.uuid = uuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def change_monitor_trigger_action_state(uuid, state, session_uuid=None):
action = api_actions.ChangeMonitorTriggerActionStateAction()
action.uuid = uuid
action.stateEvent = state
action.timeout = 6000
test_util.action_logger('Change monitor trigger action [uuid:] %s to [state:] %s' % (uuid, state))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def delete_monitor_trigger_action(uuid, session_uuid=None):
action = api_actions.DeleteMonitorTriggerAction()
action.uuid = uuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def attach_monitor_trigger_action_to_trigger(actionUuid, triggerUuid,session_uuid=None):
action = api_actions.AttachMonitorTriggerActionToTriggerAction()
action.actionUuid = actionUuid
action.triggerUuid = triggerUuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def detach_monitor_trigger_action_to_trigger(actionUuid, triggerUuid,session_uuid=None):
action = api_actions.DetachMonitorTriggerActionFromTriggerAction()
action.actionUuid = actionUuid
action.triggerUuid = triggerUuid
action.timeout = 6000
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
| apache-2.0 | -3,006,976,577,302,114,300 | 36.917722 | 102 | 0.721581 | false |
sinhrks/scikit-learn | sklearn/metrics/ranking.py | 2 | 27187 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
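# A small worked example for _binary_clf_curve (added as a sketch; it is not
# part of the original source, and the values were checked by hand under the
# default pos_label handling):
#   y_true  = np.array([0, 0, 1, 1])
#   y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
#   # fps        -> [0, 1, 1, 2]
#   # tps        -> [1, 1, 2, 2]
#   # thresholds -> [0.8, 0.4, 0.35, 0.1]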
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
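# Worked example for coverage_error (a sketch, not part of the original
# source):
#   y_true  = [[1, 0, 0], [0, 0, 1]]
#   y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]]
# Sample 1 needs the 2 top-ranked labels to cover its true label (scores 1
# and 0.75 are >= 0.75); sample 2 needs all 3, so coverage_error is
# (2 + 3) / 2 = 2.5.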
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
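# Worked example for label_ranking_loss (a sketch, not part of the original
# source):
#   y_true  = [[1, 0, 0], [0, 0, 1]]
#   y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]]
# Sample 1 has one wrongly ordered pair (irrelevant score 1 > relevant 0.75)
# out of 1 * 2 possible pairs -> 0.5; sample 2 has two out of two -> 1.0.
# The returned average is (0.5 + 1.0) / 2 = 0.75.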
| bsd-3-clause | 6,740,976,122,526,023,000 | 34.96164 | 79 | 0.624122 | false |
isivisi/pybot | pybot/web/uimodules.py | 1 | 1638 | import tornado
import pybot.globals as globals
from pybot.pybotextra import allFilters
import json
class Raffle(tornado.web.UIModule):
def render(self):
return self.render_string("templates/rafflemodule.html", data=globals.data)
class UserPoints(tornado.web.UIModule):
def render(self, top=0):
return self.render_string("templates/userpointsmodule.html", data=globals.data, top=top)
class Logs(tornado.web.UIModule):
def render(self):
return self.render_string("templates/logmodule.html", data=globals.data)
class Links(tornado.web.UIModule):
def render(self, link=False):
return self.render_string("templates/linksmodule.html", settings=globals.settings, data=globals.data, link=link)
class Filters(tornado.web.UIModule):
def render(self):
activeFilters = json.loads(globals.settings.config['filters']['activeFilters'])
return self.render_string("templates/filtersmodule.html", data=globals.data, activeFilters=activeFilters,
allfilters=allFilters())
# values takes a list of dictionaries of the form
# {"value": #, "color": "#F7464A", "highlight": "#FF5A5E", "label": ""}
# settings is a dictionary with settings for the chart
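# Usage sketch for templates (not from the original source): once Chart is
# registered in the application's ui_modules mapping, a template can render it
# with something like
#   {% module Chart(values=my_values, settings={"title": "points"}) %}
# where "title" is only an illustrative settings key, not one defined here.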
class Chart(tornado.web.UIModule):
def render(self, values=[], settings={}):
# test values
values.append({"value": "25", "color": "#F7464A", "highlight": "#FF5A5E", "label": "test1"})
values.append({"value": "75", "color": "#ffffff", "highlight": "#FF5A5E", "label": "test2"})
return self.render_string("templates/chartmodule.html", values=values, settings=settings)
| gpl-3.0 | -8,570,704,250,861,037,000 | 38 | 120 | 0.694139 | false |
adrianschroeter/kiwi | test/unit/tasks_system_build_test.py | 1 | 9818 | import sys
import mock
import os
from mock import patch, call
import kiwi
from .test_helper import argv_kiwi_tests
from kiwi.tasks.system_build import SystemBuildTask
class TestSystemBuildTask(object):
def setup(self):
sys.argv = [
sys.argv[0], '--profile', 'vmxFlavour', 'system', 'build',
'--description', '../data/description',
'--target-dir', 'some-target'
]
self.abs_target_dir = os.path.abspath('some-target')
kiwi.tasks.system_build.Privileges = mock.Mock()
kiwi.tasks.system_build.Path = mock.Mock()
kiwi.tasks.system_build.Help = mock.Mock(
return_value=mock.Mock()
)
self.manager = mock.Mock()
self.system_prepare = mock.Mock()
self.system_prepare.setup_repositories = mock.Mock(
return_value=self.manager
)
self.runtime_checker = mock.Mock()
kiwi.tasks.base.RuntimeChecker = mock.Mock(
return_value=self.runtime_checker
)
self.runtime_config = mock.Mock()
kiwi.tasks.base.RuntimeConfig = mock.Mock(
return_value=self.runtime_config
)
kiwi.tasks.system_build.SystemPrepare = mock.Mock(
return_value=self.system_prepare
)
self.setup = mock.Mock()
kiwi.tasks.system_build.SystemSetup = mock.Mock(
return_value=self.setup
)
self.profile = mock.Mock()
self.profile.dot_profile = dict()
kiwi.tasks.system_build.Profile = mock.Mock(
return_value=self.profile
)
self.result = mock.Mock()
self.builder = mock.MagicMock()
self.builder.create = mock.Mock(
return_value=self.result
)
kiwi.tasks.system_build.ImageBuilder = mock.Mock(
return_value=self.builder
)
self.task = SystemBuildTask()
def teardown(self):
sys.argv = argv_kiwi_tests
def _init_command_args(self):
self.task.command_args = {}
self.task.command_args['help'] = False
self.task.command_args['build'] = False
self.task.command_args['--allow-existing-root'] = True
self.task.command_args['--description'] = '../data/description'
self.task.command_args['--target-dir'] = 'some-target'
self.task.command_args['--set-repo'] = None
self.task.command_args['--add-repo'] = []
self.task.command_args['--add-package'] = []
self.task.command_args['--delete-package'] = []
self.task.command_args['--ignore-repos'] = False
self.task.command_args['--ignore-repos-used-for-build'] = False
self.task.command_args['--set-container-derived-from'] = None
self.task.command_args['--set-container-tag'] = None
self.task.command_args['--clear-cache'] = False
self.task.command_args['--signing-key'] = None
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build(self, mock_log):
self._init_command_args()
self.task.command_args['build'] = True
self.task.process()
self.runtime_checker.check_boot_description_exists.assert_called_once_with()
self.runtime_checker.check_consistent_kernel_in_boot_and_system_image.assert_called_once_with()
self.runtime_checker.check_docker_tool_chain_installed.assert_called_once_with()
self.runtime_checker.check_volume_setup_has_no_root_definition.assert_called_once_with()
self.runtime_checker.check_xen_uniquely_setup_as_server_or_guest.assert_called_once_with()
self.runtime_checker.check_target_directory_not_in_shared_cache.assert_called_once_with(self.abs_target_dir)
self.runtime_checker.check_mediacheck_only_for_x86_arch.assert_called_once_with()
self.runtime_checker.check_dracut_module_for_live_iso_in_package_list.assert_called_once_with()
self.runtime_checker.check_repositories_configured.assert_called_once_with()
self.runtime_checker.check_dracut_module_for_disk_overlay_in_package_list.assert_called_once_with()
self.runtime_checker.check_dracut_module_for_disk_oem_in_package_list.assert_called_once_with()
self.runtime_checker.check_dracut_module_for_oem_install_in_package_list.assert_called_once_with()
self.runtime_checker.check_efi_mode_for_disk_overlay_correctly_setup.assert_called_once_with()
self.system_prepare.setup_repositories.assert_called_once_with(False, None)
self.system_prepare.install_bootstrap.assert_called_once_with(
self.manager
)
self.system_prepare.install_system.assert_called_once_with(
self.manager
)
self.setup.import_shell_environment.assert_called_once_with(
self.profile
)
self.setup.import_description.assert_called_once_with()
self.setup.import_overlay_files.assert_called_once_with()
self.setup.import_repositories_marked_as_imageinclude.assert_called_once_with()
self.setup.call_config_script.assert_called_once_with()
self.setup.import_image_identifier.assert_called_once_with()
self.setup.setup_groups.assert_called_once_with()
self.setup.setup_users.assert_called_once_with()
self.setup.setup_keyboard_map.assert_called_once_with()
self.setup.setup_locale.assert_called_once_with()
self.setup.setup_plymouth_splash.assert_called_once_with()
self.setup.setup_timezone.assert_called_once_with()
self.system_prepare.pinch_system.assert_has_calls(
[call(force=False), call(force=True)]
)
self.setup.call_image_script.assert_called_once_with()
self.builder.create.assert_called_once_with()
self.result.print_results.assert_called_once_with()
self.result.dump.assert_called_once_with(
os.sep.join([self.abs_target_dir, 'kiwi.result'])
)
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build_add_package(self, mock_log):
self._init_command_args()
self.task.command_args['--add-package'] = ['vim']
self.task.process()
self.system_prepare.setup_repositories.assert_called_once_with(False, None)
self.system_prepare.install_packages.assert_called_once_with(
self.manager, ['vim']
)
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_update_delete_package(self, mock_log):
self._init_command_args()
self.task.command_args['--delete-package'] = ['vim']
self.task.process()
self.system_prepare.setup_repositories.assert_called_once_with(False, None)
self.system_prepare.delete_packages.assert_called_once_with(
self.manager, ['vim']
)
@patch('kiwi.xml_state.XMLState.set_container_config_tag')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build_prepare_stage_set_container_tag(
self, mock_log, mock_set_container_tag
):
self._init_command_args()
self.task.command_args['--set-container-tag'] = 'new_tag'
self.task.process()
mock_set_container_tag.assert_called_once_with(
'new_tag'
)
@patch('kiwi.xml_state.XMLState.set_derived_from_image_uri')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build_prepare_stage_set_derived_from_uri(
self, mock_log, mock_set_derived_from_uri
):
self._init_command_args()
self.task.command_args['--set-container-derived-from'] = 'file:///new'
self.task.process()
mock_set_derived_from_uri.assert_called_once_with(
'file:///new'
)
@patch('kiwi.xml_state.XMLState.set_repository')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build_prepare_stage_set_repo(
self, mock_log, mock_set_repo
):
self._init_command_args()
self.task.command_args['--set-repo'] = 'http://example.com,yast2,alias'
self.task.process()
mock_set_repo.assert_called_once_with(
'http://example.com', 'yast2', 'alias', None, None, None
)
@patch('kiwi.xml_state.XMLState.add_repository')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_build_prepare_stage_add_repo(
self, mock_log, mock_add_repo
):
self._init_command_args()
self.task.command_args['--add-repo'] = [
'http://example.com,yast2,alias,99,false,true'
]
self.task.process()
mock_add_repo.assert_called_once_with(
'http://example.com', 'yast2', 'alias', '99', False, True
)
def test_process_system_build_help(self):
self._init_command_args()
self.task.command_args['help'] = True
self.task.command_args['build'] = True
self.task.process()
self.task.manual.show.assert_called_once_with(
'kiwi::system::build'
)
@patch('kiwi.xml_state.XMLState.delete_repository_sections')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_prepare_ignore_repos(
self, mock_log, mock_delete_repos
):
self._init_command_args()
self.task.command_args['--ignore-repos'] = True
self.task.process()
mock_delete_repos.assert_called_once_with()
@patch('kiwi.xml_state.XMLState.delete_repository_sections_used_for_build')
@patch('kiwi.logger.Logger.set_logfile')
def test_process_system_prepare_ignore_repos_used_for_build(
self, mock_log, mock_delete_repos
):
self._init_command_args()
self.task.command_args['--ignore-repos-used-for-build'] = True
self.task.process()
mock_delete_repos.assert_called_once_with()
| gpl-3.0 | -5,475,405,908,561,977,000 | 40.079498 | 116 | 0.638928 | false |
lonnen/socorro | socorro/lib/threaded_task_manager.py | 1 | 14201 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This module defines classes that implements a threaded
producer/consumer system. A single iterator thread pushes jobs into an
internal queue while a flock of consumer/worker threads do the jobs. A job
consists of a function and the data applied to the function."""
import logging
import queue
import threading
import time
from configman import Namespace
from socorro.lib.task_manager import (
default_task_func,
default_iterator,
TaskManager
)
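# A minimal usage sketch (not part of the original module; the names below are
# illustrative and ``config`` is assumed to be a configman-style object
# providing the options declared in ``required_config``):
#
#     def jobs():
#         for i in range(10):
#             yield ((i,), {})              # (args tuple, kwargs mapping)
#
#     def work(i):
#         print('working on %d' % i)
#
#     manager = ThreadedTaskManager(config, job_source_iterator=jobs,
#                                   task_func=work)
#     manager.blocking_start()              # blocks until interrupted or,
#                                           # with quit_on_empty_queue set,
#                                           # until the iterator is exhausted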
class ThreadedTaskManager(TaskManager):
"""Given an iterator over a sequence of job parameters and a function,
this class will execute the function in a set of threads."""
required_config = Namespace()
required_config.add_option(
'idle_delay',
default=7,
doc='the delay in seconds if no job is found'
)
# how does one choose how many threads to use? Keep the number low if your
# application is compute bound. You can raise it if your app is i/o
# bound. The best thing to do is to test the through put of your app with
# several values. For Socorro, we've found that setting this value to the
# number of processor cores in the system gives the best throughput.
required_config.add_option(
'number_of_threads',
default=4,
doc='the number of threads'
)
    # there is wisdom in setting the maximum queue size to be no more than
    # twice the number of threads. By keeping the threads starved, the
    # queuing thread will be blocked more frequently. Once an item
    # is in the queue, there may be no way to fetch it again if disaster
    # strikes and this app quits or fails. Potentially anything left in
    # the queue could be lost. Limiting the queue size ensures minimal
    # damage in a worst case scenario.
required_config.add_option(
'maximum_queue_size',
default=8,
doc='the maximum size of the internal queue'
)
def __init__(self, config,
job_source_iterator=default_iterator,
task_func=default_task_func):
"""the constructor accepts the function that will serve as the data
source iterator and the function that the threads will execute on
consuming the data.
parameters:
job_source_iterator - an iterator to serve as the source of data.
it can be of the form of a generator or
iterator; a function that returns an
iterator; a instance of an iterable object;
or a class that when instantiated with a
config object can be iterated. The iterator
must yield a tuple consisting of a
function's tuple of args and, optionally, a
mapping of kwargs.
Ex: (('a', 17), {'x': 23})
task_func - a function that will accept the args and kwargs yielded
by the job_source_iterator"""
super().__init__(config, job_source_iterator, task_func)
self.thread_list = [] # the thread object storage
self.number_of_threads = config.number_of_threads
self.task_queue = queue.Queue(config.maximum_queue_size)
def start(self):
"""this function will start the queing thread that executes the
iterator and feeds jobs into the queue. It also starts the worker
threads that just sit and wait for items to appear on the queue. This
is a non blocking call, so the executing thread is free to do other
things while the other threads work."""
self.logger.debug('start')
# start each of the task threads.
for x in range(self.number_of_threads):
# each thread is given the config object as well as a reference to
# this manager class. The manager class is where the queue lives
# and the task threads will refer to it to get their next jobs.
new_thread = TaskThread(self.config, self.task_queue)
self.thread_list.append(new_thread)
new_thread.start()
self.queuing_thread = threading.Thread(
name="QueuingThread",
target=self._queuing_thread_func
)
self.queuing_thread.start()
def wait_for_completion(self, waiting_func=None):
"""This is a blocking function call that will wait for the queuing
thread to complete.
parameters:
waiting_func - this function will be called every one second while
waiting for the queuing thread to quit. This allows
for logging timers, status indicators, etc."""
self.logger.debug("waiting to join queuingThread")
self._responsive_join(self.queuing_thread, waiting_func)
def stop(self):
"""This function will tell all threads to quit. All threads
periodically look at the value of quit. If they detect quit is True,
then they commit ritual suicide. After setting the quit flag, this
function will wait for the queuing thread to quit."""
self.quit = True
self.wait_for_completion()
def blocking_start(self, waiting_func=None):
"""this function is just a wrapper around the start and
wait_for_completion methods. It starts the queuing thread and then
waits for it to complete. If run by the main thread, it will detect
the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP
have been translated to) and will order the threads to die."""
try:
self.start()
self.wait_for_completion(waiting_func)
# it only ends if someone hits ^C or sends SIGHUP or SIGTERM -
# any of which will get translated into a KeyboardInterrupt
except KeyboardInterrupt:
while True:
try:
self.stop()
break
except KeyboardInterrupt:
self.logger.warning('We heard you the first time. There '
'is no need for further keyboard or signal '
'interrupts. We are waiting for the '
'worker threads to stop. If this app '
'does not halt soon, you may have to send '
'SIGKILL (kill -9)')
def wait_for_empty_queue(self, wait_log_interval=0, wait_reason=''):
"""Sit around and wait for the queue to become empty
parameters:
wait_log_interval - while sleeping, it is helpful if the thread
periodically announces itself so that we
know that it is still alive. This number is
the time in seconds between log entries.
            wait_reason - this is for the explanation of why the thread is
sleeping. This is likely to be a message like:
'there is no work to do'."""
seconds = 0
while True:
if self.task_queue.empty():
break
self.quit_check()
if wait_log_interval and not seconds % wait_log_interval:
self.logger.info('%s: %dsec so far',
wait_reason,
seconds)
self.quit_check()
seconds += 1
time.sleep(1.0)
def _responsive_join(self, thread, waiting_func=None):
"""similar to the responsive sleep, a join function blocks a thread
until some other thread dies. If that takes a long time, we'd like to
        have some indication as to what the waiting thread is doing. This
method will wait for another thread while calling the waiting_func
once every second.
parameters:
thread - an instance of the TaskThread class representing the
thread to wait for
waiting_func - a function to call every second while waiting for
the thread to die"""
while True:
try:
thread.join(1.0)
                if not thread.is_alive():
break
if waiting_func:
waiting_func()
except KeyboardInterrupt:
self.logger.debug('quit detected by _responsive_join')
self.quit = True
def _kill_worker_threads(self):
"""This function coerces the consumer/worker threads to kill
themselves. When called by the queuing thread, one death token will
be placed on the queue for each thread. Each worker thread is always
looking for the death token. When it encounters it, it immediately
runs to completion without drawing anything more off the queue.
This is a blocking call. The thread using this function will wait for
all the worker threads to die."""
for x in range(self.number_of_threads):
self.task_queue.put((None, None))
self.logger.debug("waiting for standard worker threads to stop")
for t in self.thread_list:
t.join()
def _queuing_thread_func(self):
"""This is the function responsible for reading the iterator and
putting contents into the queue. It loops as long as there are items
in the iterator. Should something go wrong with this thread, or it
detects the quit flag, it will calmly kill its workers and then
quit itself."""
self.logger.debug('_queuing_thread_func start')
try:
# May never exhaust
for job_params in self._get_iterator():
self.logger.debug('received %r', job_params)
if job_params is None:
if self.config.quit_on_empty_queue:
self.wait_for_empty_queue(
wait_log_interval=10,
wait_reason='waiting for queue to drain'
)
raise KeyboardInterrupt
self.logger.info("there is nothing to do. Sleeping "
"for %d seconds" %
self.config.idle_delay)
self._responsive_sleep(self.config.idle_delay)
continue
self.quit_check()
# self.logger.debug("queuing job %s", job_params)
self.task_queue.put((self.task_func, job_params))
except Exception:
self.logger.error('queuing jobs has failed', exc_info=True)
except KeyboardInterrupt:
self.logger.debug('queuingThread gets quit request')
finally:
self.logger.debug("we're quitting queuingThread")
self._kill_worker_threads()
self.logger.debug("all worker threads stopped")
# now that we've killed all the workers, we can set the quit flag
# to True. This will cause any other threads to die and shut down
# the application. Originally, the setting of this flag was at the
# start of this "finally" block. However, that meant that the
# workers would abort their currently running jobs. In the case of
# of the natural ending of an application where an iterater ran to
# exhaustion, the workers would die before completing their tasks.
# Moving the setting of the flag to this location allows the
# workers to finish and then the app shuts down.
self.quit = True
class TaskThread(threading.Thread):
"""This class represents a worker thread for the TaskManager class"""
def __init__(self, config, task_queue):
"""Initialize a new thread.
parameters:
config - the configuration from configman
task_queue - a reference to the queue from which to fetch jobs
"""
super().__init__()
self.task_queue = task_queue
self.config = config
self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
def _get_name(self):
return threading.currentThread().getName()
def run(self):
"""The main routine for a thread's work.
The thread pulls tasks from the task queue and executes them until it
encounters a death token. The death token is a tuple of two Nones.
"""
try:
quit_request_detected = False
while True:
function, arguments = self.task_queue.get()
if function is None:
# this allows us to watch the threads die and identify
# threads that may be hanging or deadlocked
self.logger.info('quits')
break
if quit_request_detected:
continue
try:
try:
args, kwargs = arguments
except ValueError:
args = arguments
kwargs = {}
function(*args, **kwargs) # execute the task
except Exception:
self.logger.error("Error in processing a job", exc_info=True)
except KeyboardInterrupt: # TODO: can probably go away
self.logger.info('quit request detected')
quit_request_detected = True
# Only needed if signal handler is not registered
# thread.interrupt_main()
except Exception:
self.logger.critical("Failure in task_queue", exc_info=True)
| mpl-2.0 | 5,145,759,100,393,674,000 | 45.867987 | 84 | 0.580734 | false |
cmbclh/vnpy1.7 | archive/datayes/api.py | 11 | 43801 | #encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifes http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
		  authorization information.
"""
head = 'my config'
toke_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
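# Usage sketch (not part of the original source; my_token stands for a
# personal Datayes access token):
#   cfg = Config()
#   cfg.body['header']['Authorization'] = 'Bearer ' + my_token
#   # note that Config(token=...) only updates cfg.token; body['header'] is
#   # built from the class-level token, so it has to be refreshed as above.
#   api = PyApi(cfg)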
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
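# Construction sketch (the response shape mirrors the docstring example above;
# it is not a live API payload):
#   h = History({'data': [{'closePrice': 15.88, 'date': 20150701},
#                         {'closePrice': 15.99, 'date': 20150702}]})
#   h.body    # -> two-row pandas DataFrame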
class Bar(History):
"""
Historical Bar data container. Inherits from History()
DataFrame-like two dimensional data containers for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
When requesting 1 ticker, json['data'] layer has only one element;
		we expect that this is for data collection for multiple tickers,
which is currently impossible nevertheless.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
	consistently referred to whenever requests are made.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
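	(A sketch, assuming a valid token in Config and network access; this
	example is not part of the original docstring.)
	>>> api = PyApi(Config())
	>>> data = api.get_equity_D1(ticker='000001',
	...                          start='20150101', end='20150301')
	>>> data.body.head()    # History.body is a pandas DataFrame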
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError,e:
			raise VNPAST_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
Basic drudgery function.
This method loops over a list of tasks (tickers) and gets data for all of
those tickers using the target api.get_# method.
A new feature 'date' or 'dateTime' (for intraday) is automatically added
into every json-like document; it holds a datetime.datetime() formatted
date(time) mark. With the MongoDB setup used in this module, this feature
should be the unique index for all collections.
By programmatically creating and assigning tasks to drudgery functions,
multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* indexType: string(enum): 'date' or 'datetime', specifies how
the collection index is formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
* target: method; the api.get_# method that is to be called by
drudgery function.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
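# Editorial note (added comments, not original code): the get_*_D1_drudgery
# wrappers below simply bind __drudgery to one api.get_* method, while
# __overlord / get_*_mongod further down split the full ticker list into
# chunks and start one drudgery thread per chunk, roughly:
#   chunks = [allTickers[k:k + chunkSize]
#             for k in range(0, len(allTickers), chunkSize)]
#   for i, chunk in enumerate(chunks):
#       Thread(target=self.get_equity_D1_drudgery,
#              args=(i, db, start, end, chunk)).start()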
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreaded requests.
Generates a list of all tickers, creates threads and distributes
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* dName: string; the path of the file where all tickers'
information is stored.
* target1: method; targetting api method that overlord calls
to get tasks list.
* target2: method; the corresponding drudgery function.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
This method loops over a list of tasks (tickers) and gets D1 bars
for all these tickers. A new feature 'date' is automatically
added into every json-like document; it holds a datetime.datetime()
formatted date mark. With the default setting of MongoDB
in this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery
functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_M1 bars.
This method loops over a list of tasks (tickers) and gets M1 bars
for all these tickers. A new feature 'dateTime', combining a Y-m-d
formatted date part and an H:M time part, is automatically added into
every json-like document. It is a datetime.datetime() timestamp
object. In this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery
functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars. Note that to ensure the
success of every request, the range between start and end should be
no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
* startYr, endYr: integer; the start and ending year between which the
1-minute bar data is fetched month by month using the
get_equity_M1_drudgery() function.
Default values are two years before now and the current year.
the complete time range will be sub-divided into months. And threads
are deployed for each of these months.
- example
-------
Suppose .now() is August 15th 2015. (20150815)
startYr, endYr = 2014, 2015.
then two list of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
splitNum=10):
"""
"""
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
| mit | 9,052,507,582,145,055,000 | 27.078205 | 77 | 0.61679 | false |
MYaseen208/Calculator | Documentation/source/gui.py | 1 | 3815 | # By Geetha, Ali, Yaseen, Majid and Mortaza
# import Tkinter as Tk # Python2
import tkinter as Tk # Python3
import subprocess
class Calculator:
# Constructor for adding buttons
def __init__(self, window):
window.title('Calculator By Geetha, Ali, Yaseen, Majid and Mortaza')
window.geometry()
self.text_box = Tk.Entry(window, width=40, font="Noto 20 bold")
self.text_box.grid(row=0, column=0, columnspan=6)
self.text_box.focus_set()
# Buttons
Tk.Button(window,text="+",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('+')).grid(row=4, column=3)
Tk.Button(window,text="*",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('*')).grid(row=2, column=3)
Tk.Button(window,text="-",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('-')).grid(row=3, column=3)
Tk.Button(window,text="/",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('/')).grid(row=1, column=3)
Tk.Button(window,text="7",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('7')).grid(row=1, column=0)
Tk.Button(window,text="8",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(8)).grid(row=1, column=1)
Tk.Button(window,text="9",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(9)).grid(row=1, column=2)
Tk.Button(window,text="4",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(4)).grid(row=2, column=0)
Tk.Button(window,text="5",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(5)).grid(row=2, column=1)
Tk.Button(window,text="6",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(6)).grid(row=2, column=2)
Tk.Button(window,text="1",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(1)).grid(row=3, column=0)
Tk.Button(window,text="2",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(2)).grid(row=3, column=1)
Tk.Button(window,text="3",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(3)).grid(row=3, column=2)
Tk.Button(window,text="0",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(0)).grid(row=4, column=0)
Tk.Button(window,text=".",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('.')).grid(row=4, column=1)
Tk.Button(window,text="(",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('(')).grid(row=1, column=4)
Tk.Button(window,text=")",font="Noto 10 bold",width=14,height=6,command=lambda:self.action(')')).grid(row=2, column=4)
Tk.Button(window,text="=",font="Noto 10 bold",width=14,height=6,command=lambda:self.equals()).grid(row=4, column=2)
Tk.Button(window,text="^",font="Noto 10 bold",width=14,height=6,command=lambda:self.action('^')).grid(row=3, column=4)
Tk.Button(window,text='Clear',font="Noto 10 bold",width=14,height=6,command=lambda:self.clearall()).grid(row=4, column=4)
def action(self, arg):
"""Attaching button's value to end of the text box"""
self.text_box.insert(Tk.END, arg)
def get(self):
"""Getting expression from c++ code"""
self.expression = self.text_box.get()
def equals(self):
self.get()
self.expression=self.expression.replace('(','\(') # Because of echo!
self.expression=self.expression.replace(')','\)') # Because of echo!
self.value= subprocess.check_output("echo {} | ./main.x".format(self.expression), shell=True)
self.text_box.delete(0, Tk.END)
self.text_box.insert(0, self.value)
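# Editorial note (added comments): for an entry such as (1+2)*3 the call
# above effectively runs the shell pipeline
#   echo \(1+2\)*3 | ./main.x
# and displays whatever the external evaluator prints back in the text box.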
def clearall(self):
"""Clearing the text box"""
self.text_box.delete(0, Tk.END)
window = Tk.Tk()
ob = Calculator(window)
window.mainloop()
| gpl-3.0 | -2,637,277,800,477,473,300 | 62.583333 | 129 | 0.658453 | false |
hfp/tensorflow-xsmm | tensorflow/python/autograph/converters/call_trees.py | 5 | 12529 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.util import tf_inspect
class FunctionInfo(collections.namedtuple('FunctionInfo', ('dtype',))):
pass
# TODO(mdan): Move this to a separate transformer.
KNOWN_NUMPY_FUNCTIONS = {
('numpy', 'random', 'binomial'): FunctionInfo(dtype='tf.int64'),
}
# TODO(mdan): Get rid of these interfaces. Can now depend directly on Namer.
class FunctionNamer(object):
"""Describes the interface for CallTreeTransformer's namer."""
def compiled_function_name(self,
original_fqn,
live_entity=None,
owner_type=None):
"""Generate the name corresponding to the compiled version of a function.
Args:
original_fqn: string or tuple(string)
live_entity: Callable, the actual target function, if known.
owner_type: Optional object. If present, it indicates that the function is
a member of the given type.
Returns:
string, bool
"""
raise NotImplementedError()
def compiled_class_name(self, original_fqn, live_entity=None):
"""Generate the name corresponding to the compiled version of a class.
Args:
original_fqn: string or tuple(string)
live_entity: The actual target class, if known.
Returns:
string
"""
raise NotImplementedError()
# TODO(mdan): Rename to CallsTransformer.
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def _resolve_decorator_name(self, node):
"""Used to resolve decorator info."""
if isinstance(node, gast.Call):
return self._resolve_decorator_name(node.func)
if isinstance(node, gast.Name):
# TODO(mdan): Add test coverage for this branch.
return self.ctx.info.namespace.get(node.id)
if isinstance(node, gast.Attribute):
parent = self._resolve_decorator_name(node.value)
if parent is not None:
return getattr(parent, node.attr)
return None
raise ValueError(node)
def _try_resolve_target(self, node):
"""Works for methods of objects of known type."""
if anno.hasanno(node, 'live_val'):
return anno.getanno(node, 'live_val')
if isinstance(node, gast.Attribute) and anno.hasanno(node, 'type'):
owner_type = anno.getanno(node, 'type')
if hasattr(owner_type, node.attr):
return getattr(owner_type, node.attr)
else:
# TODO(mdan): We should probably return None here rather than an error.
raise ValueError('Type "%s" has no attribute "%s". Is it dynamic?' %
(owner_type, node.attr))
return None
def _function_is_compilable(self, target_entity):
"""Determines whether an entity can be compiled at all."""
# TODO(mdan): Expand.
if target_entity.__module__ is None:
# Functions like builtins and NumPy don't expose a module.
# Those in general should not be compiled.
return False
if inspect_utils.isbuiltin(target_entity):
return False
if inspect_utils.isnamedtuple(target_entity):
# namedtuple doesn't expose its source code, making it uncompilable.
return False
return True
def _should_compile(self, node, fqn):
"""Determines whether an entity should be compiled in the context."""
# TODO(mdan): Needs cleanup. We should remove the use of fqn altogether.
module_name = fqn[0]
for mod in self.ctx.program.uncompiled_modules:
if module_name.startswith(mod[0] + '.'):
return False
for i in range(1, len(fqn)):
if fqn[:i] in self.ctx.program.uncompiled_modules:
return False
target_entity = self._try_resolve_target(node.func)
if target_entity is not None:
# Currently, lambdas are always converted.
# TODO(mdan): Allow markers of the kind f = ag.do_not_convert(lambda: ...)
if inspect_utils.islambda(target_entity):
return True
# This may be reached when "calling" a callable attribute of an object.
# For example:
#
# self.fc = tf.keras.layers.Dense()
# self.fc()
#
for mod in self.ctx.program.uncompiled_modules:
if target_entity.__module__.startswith(mod[0] + '.'):
return False
# Inspect the target function decorators. If any include a @convert
# or @do_not_convert annotation, then they must be called as they are.
# TODO(mdan): This may be quite heavy. Perhaps always dynamically convert?
# To parse and re-analyze each function for every call site could be quite
# wasteful. Maybe we could cache the parsed AST?
try:
target_node, _ = parser.parse_entity(target_entity)
target_node = target_node.body[0]
except TypeError:
# Functions whose source we cannot access are compilable (e.g. wrapped
# to py_func).
return True
# This attribute is set when the decorator was applied before the
# function was parsed. See api.py.
if hasattr(target_entity, '__ag_compiled'):
return False
for dec in target_node.decorator_list:
decorator_fn = self._resolve_decorator_name(dec)
if (decorator_fn is not None and
self.ctx.program.options.should_strip(decorator_fn)):
return False
return True
def _rename_compilable_function(self, node):
assert anno.hasanno(node.func, 'live_val')
assert anno.hasanno(node.func, 'fqn')
target_entity = anno.getanno(node.func, 'live_val')
target_fqn = anno.getanno(node.func, 'fqn')
if anno.hasanno(node, 'is_constructor'):
new_name = self.ctx.namer.compiled_class_name(
target_fqn, live_entity=target_entity)
do_rename = True
else:
if anno.hasanno(node.func, 'parent_type'):
owner_type = anno.getanno(node.func, 'parent_type')
else:
# Fallback - not reliable.
owner_type = inspect_utils.getmethodclass(target_entity)
new_name, do_rename = self.ctx.namer.compiled_function_name(
target_fqn, live_entity=target_entity, owner_type=owner_type)
if do_rename:
if target_entity is not None:
if tf_inspect.ismethod(target_entity):
# The renaming process will transform it into a regular function.
# TODO(mdan): Is this complete? How does it work with nested members?
node.args = [node.func.value] + node.args
node.func = templates.replace_as_expression(
'func_name', func_name=new_name)
return node
def _wrap_to_py_func_single_return(self, node, dtype):
# TODO(mdan): Properly handle varargs, etc.
template = """
ag__.utils.wrap_py_func(func, dtype, (args,), kwargs, False)
"""
return templates.replace_as_expression(
template,
func=node.func,
dtype=parser.parse_expression(dtype),
args=node.args,
kwargs=ast_util.keywords_to_dict(node.keywords))
def _insert_dynamic_conversion(self, node):
"""Inlines a dynamic conversion for a dynamic function."""
# TODO(mdan): Pass information on the statically compiled functions.
# Having access to the statically compiled functions can help avoid
# unnecessary compilation.
# For example, this would lead to function `a` being compiled twice:
#
# def a():
# v = b
# b()
# def b():
# a()
#
# This is really a problem with recursive calls, which currently can
# only be gated by a static condition, and should be rare.
# TODO(mdan): It probably makes sense to use dynamic conversion every time.
# Before we could convert all the time though, we'd need a reasonable
# caching mechanism.
template = """
ag__.converted_call(func, owner, options, args)
"""
if isinstance(node.func, gast.Attribute):
func = gast.Str(node.func.attr)
owner = node.func.value
else:
func = node.func
owner = parser.parse_expression('None')
new_call = templates.replace_as_expression(
template,
func=func,
owner=owner,
options=self.ctx.program.options.to_ast(
self.ctx,
internal_convert_user_code=self.ctx.program.options.recursive),
args=node.args)
# TODO(mdan): Improve the template mechanism to better support this.
new_call.keywords = node.keywords
return new_call
def _visit_decorators(self, decorator_list):
if not self.ctx.program.options.uses(converter.Feature.DECORATORS):
# When not processing decorators, strip everything that is encountered.
return []
return self.visit_block(decorator_list)
def visit_FunctionDef(self, node):
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
node.decorator_list = self._visit_decorators(node.decorator_list)
node.returns = self.visit_block(node.returns)
return node
def visit_Call(self, node):
if anno.hasanno(node.func, 'live_val'):
target_entity = anno.getanno(node.func, 'live_val')
if anno.hasanno(node.func, 'fqn'):
target_fqn = anno.getanno(node.func, 'fqn')
else:
target_fqn = None
if self._function_is_compilable(target_entity):
if self._should_compile(node, target_fqn):
node = self._rename_compilable_function(node)
else:
node = self.generic_visit(node)
return node
elif target_fqn and target_fqn in KNOWN_NUMPY_FUNCTIONS:
# TODO(mdan): Should we replace these with equivalent TF ops instead?
node = self._wrap_to_py_func_single_return(
node, KNOWN_NUMPY_FUNCTIONS[target_fqn].dtype)
elif inspect_utils.isbuiltin(target_entity):
# Note: Any builtin that passed the builtins converter is assumed to be
# safe for graph mode.
return node
elif inspect_utils.isnamedtuple(target_entity):
# Although not compilable, we assume they are safe for graph mode.
node = self.generic_visit(node)
return node
else:
# TODO(mdan): Insert dynamic conversion here instead.
raise NotImplementedError(
'py_func with return values (unknown function)')
else:
# Special cases
# TODO(mdan): These need a systematic review - there may be more.
# 1. super() calls - these are preserved. The class conversion mechanism
# will ensure that they return the correct value.
if ast_util.matches(node, parser.parse_expression('super(_)')):
return node
# 2. super().method calls - these are preserved as well, when the
# conversion processes the entire class.
if (ast_util.matches(node, parser.parse_expression('super(_)._(_)')) and
self.ctx.info.owner_type is not None):
return node
node = self._insert_dynamic_conversion(node)
return node
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
node: The transformed AST
"""
return CallTreeTransformer(ctx).visit(node)
| apache-2.0 | -7,970,651,286,638,464,000 | 34.797143 | 80 | 0.660308 | false |
brython-dev/brython | scripts/javascript_minifier.py | 1 | 2627 | """Javascript minifier"""
import re
def minify(src):
_res, pos = '', 0
while pos < len(src):
if src[pos] in ('"', "'", '`') or \
(src[pos] == '/' and src[pos - 1] == '('):
# the end of the string is the next quote if it is not
# after an odd number of backslashes
start = pos
while True:
end = src.find(src[pos], start + 1)
if end == -1:
line = src[:pos].count('\n')
raise SyntaxError('string not closed in line %s : %s' %
(line, src[pos:pos + 20]))
else:
# count number of backslashes before the quote
nb = 0
while src[end - nb - 1] == '\\':
nb += 1
if not nb % 2:
break
else:
start = end
_res += src[pos:end + 1]
pos = end + 1
elif src[pos] == '\r':
pos += 1
elif src[pos] == ' ':
if _res and _res[-1] in '({=[)}];:+-*/|\n':
pos += 1
continue
_res += ' '
while pos < len(src) and src[pos] == ' ':
pos += 1
elif src[pos:pos + 2] == '//':
end = src.find('\n', pos)
if end == -1:
break
pos = end
elif src[pos:pos + 2] == '/*':
end = src.find('*/', pos)
if end == -1:
break
pos = end+2
elif src[pos] in '={[(' and _res and _res[-1] == ' ':
_res = _res[:-1]+src[pos]
pos += 1
elif src[pos] in '{[,':
_res += src[pos]
while pos < len(src) - 1 and src[pos + 1] in ' \r\n':
pos += 1
pos += 1
elif src[pos] in '+-*/':
while _res[-1] == " ":
_res = _res[:-1]
_res += src[pos]
pos += 1
elif src[pos] == '}':
_res += src[pos]
nxt = pos + 1
while nxt < len(src) and src[nxt] in ' \r\n':
nxt += 1
if nxt < len(src) and src[nxt] == '}':
pos = nxt - 1
pos += 1
else:
_res += src[pos]
pos += 1
# replace consecutive newlines
_res = re.sub('\n+', '\n', _res)
# remove newline followed by }
_res = re.sub('\n}', '}', _res)
return _res
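def _demo_minify():
    # Editorial usage sketch (not part of the original module); the sample
    # JavaScript string below is made up purely for illustration.
    sample = 'var total = 1 + 2;  // running total\nconsole.log( "total" , total );\n'
    return minify(sample)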
if __name__=="__main__":
print(minify(open('test.js').read()))
| bsd-3-clause | 6,306,103,374,661,249,000 | 31.432099 | 75 | 0.347925 | false |
mjafin/bcbio-nextgen | bcbio/galaxy/api.py | 10 | 2835 | """Access Galaxy NGLIMS functionality via the standard API.
"""
import urllib
import urllib2
import json
import time
class GalaxyApiAccess:
"""Simple front end for accessing Galaxy's REST API.
"""
def __init__(self, galaxy_url, api_key):
self._base_url = galaxy_url
self._key = api_key
self._max_tries = 5
def _make_url(self, rel_url, params=None):
if not params:
params = dict()
params['key'] = self._key
vals = urllib.urlencode(params)
return ("%s%s" % (self._base_url, rel_url), vals)
def _get(self, url, params=None):
url, params = self._make_url(url, params)
num_tries = 0
while 1:
response = urllib2.urlopen("%s?%s" % (url, params))
try:
out = json.loads(response.read())
break
except ValueError, msg:
if num_tries > self._max_tries:
raise
time.sleep(3)
num_tries += 1
return out
def _post(self, url, data, params=None, need_return=True):
url, params = self._make_url(url, params)
request = urllib2.Request("%s?%s" % (url, params),
headers = {'Content-Type' : 'application/json'},
data = json.dumps(data))
response = urllib2.urlopen(request)
try:
data = json.loads(response.read())
except ValueError:
if need_return:
raise
else:
data = {}
return data
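# Editorial usage sketch (added comments, not original code): a caller would
# typically do something along the lines of
#   galaxy = GalaxyApiAccess("http://localhost/api", "<api key>")
#   details = galaxy.run_details("RUN_BC", "2014-06-09")
# where the URL, API key and run identifiers above are placeholders.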
def run_details(self, run_bc, run_date=None):
"""Next Gen LIMS specific API functionality.
"""
try:
details = self._get("/nglims/api_run_details", dict(run=run_bc))
except ValueError:
raise ValueError("Could not find information in Galaxy for run: %s" % run_bc)
if details.has_key("error") and run_date is not None:
try:
details = self._get("/nglims/api_run_details", dict(run=run_date))
except ValueError:
raise ValueError("Could not find information in Galaxy for run: %s" % run_date)
return details
def sequencing_projects(self):
"""Next Gen LIMS: retrieve summary information of sequencing projects.
"""
return self._get("/nglims/api_projects")
def sqn_run_summary(self, run_info):
"""Next Gen LIMS: Upload sequencing run summary information.
"""
return self._post("/nglims/api_upload_sqn_run_summary",
data=run_info)
def sqn_report(self, start_date, end_date):
"""Next Gen LIMS: report of items sequenced in a time period.
"""
return self._get("/nglims/api_sqn_report",
dict(start=start_date, end=end_date))
| mit | -1,893,774,085,017,352,200 | 33.156627 | 95 | 0.549559 | false |
pytexas/PyTexasBackend | conference/event/migrations/0007_auto_20190315_0221.py | 1 | 1382 | # Generated by Django 2.0.10 on 2019-03-15 02:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('event', '0006_auto_20190310_2253'),
]
operations = [
migrations.CreateModel(
name='PrizeWinner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('ticket_id', models.CharField(max_length=255, unique=True)),
('prize', models.CharField(blank=True, max_length=255, null=True)),
('claimed', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.Conference')),
],
),
migrations.AlterField(
model_name='session',
name='stype',
field=models.CharField(choices=[('keynote', 'Keynote'), ('lightning', 'Lightning Talk'), ('talk-short', 'Short Talk'), ('talk-long', 'Talk'), ('tutorial', 'Tutorial'), ('non-talk', 'Non Talk')], max_length=25, verbose_name='Session Type'),
),
]
| mit | 8,712,056,727,591,242,000 | 42.1875 | 251 | 0.583936 | false |
treeio/treeio | treeio/finance/csvapi.py | 2 | 1449 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Import/Export Transactions API
"""
import csv
from django.http import HttpResponse
import StringIO
import datetime
class ProcessTransactions(object):
"Import/Export Contacts"
def export_transactions(self, transactions):
"Export transactions into CSV file"
response = HttpResponse(content_type='text/csv')
response[
'Content-Disposition'] = 'attachment; filename=Transactions_%s.csv' % datetime.date.today().isoformat()
writer = csv.writer(response)
headers = ['name', 'source', 'target', 'liability',
'category', 'account', 'datetime', 'value', 'details']
writer.writerow(headers)
for transaction in transactions:
row = [transaction, transaction.source, transaction.target, transaction.liability, transaction.category,
transaction.account, transaction.datetime, transaction.get_relative_value(), transaction.details]
writer.writerow(row)
return response
def import_transactions(self, content):
"Import transactions from CSV file"
f = StringIO.StringIO(content)
transactions = csv.DictReader(f, delimiter=',')
self.parse_transactions(transactions)
def parse_transactions(self, transactions):
"Break down CSV file into transactions"
| mit | -5,004,105,069,990,892,000 | 30.5 | 116 | 0.672878 | false |
freevo/freevo2 | src/core/event.py | 1 | 5940 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# event.py - Global events for Freevo
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, 2003-2011 Dirk Meyer, et al.
#
# First Edition: Dirk Meyer <https://github.com/Dischi>
# Maintainer: Dirk Meyer <https://github.com/Dischi>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
import copy
import kaa
class Event(kaa.Event):
"""
An event is passed to the different event handlers in Freevo to
trigger some action.
"""
def __init__(self, name, *args, **kwargs):
super(Event, self).__init__(name, *args)
self.handler = kwargs.get('handler', None)
self.source = 'system'
def post(self, *args, **kwargs):
"""
Post event into the queue.
"""
event = copy.copy(self)
event.source = kwargs.get('event_source', 'system')
super(Event, self).post(*args)
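# Editorial example (comments only, not original code): plugins define and
# post events the same way the constants below are defined, e.g.
#   MY_EVENT = Event('MY_EVENT', 42)
#   MY_EVENT.post(event_source='user')
# post() copies the event, tags its source and hands the copy to the kaa
# event machinery for dispatch.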
#
# Default actions Freevo knows
#
MIXER_VOLUP = Event('MIXER_VOLUP', 5)
MIXER_VOLDOWN = Event('MIXER_VOLDOWN', 5)
MIXER_MUTE = Event('MIXER_MUTE')
PLAYLIST_NEXT = Event('PLAYLIST_NEXT')
PLAYLIST_PREV = Event('PLAYLIST_PREV')
PLAYLIST_TOGGLE_REPEAT = Event('PLAYLIST_TOGGLE_REPEAT')
EJECT = Event('EJECT')
TOGGLE_APPLICATION = Event('TOGGLE_APPLICATION')
#
# Menu
#
MENU_LEFT = Event('MENU_LEFT')
MENU_RIGHT = Event('MENU_RIGHT')
MENU_UP = Event('MENU_UP')
MENU_DOWN = Event('MENU_DOWN')
MENU_PAGEUP = Event('MENU_PAGEUP')
MENU_PAGEDOWN = Event('MENU_PAGEDOWN')
MENU_GOTO_MAINMENU = Event('MENU_GOTO_MAINMENU')
MENU_GOTO_MEDIA = Event('MENU_GOTO_MEDIA')
MENU_GOTO_MENU = Event('MENU_GOTO_MENU')
MENU_BACK_ONE_MENU = Event('MENU_BACK_ONE_MENU')
MENU_SELECT = Event('MENU_SELECT')
MENU_CHANGE_SELECTION = Event('MENU_CHANGE_SELECTION')
MENU_PLAY_ITEM = Event('MENU_PLAY_ITEM')
MENU_SUBMENU = Event('MENU_SUBMENU')
MENU_CALL_ITEM_ACTION = Event('MENU_CALL_ITEM_ACTION')
MENU_CHANGE_STYLE = Event('MENU_CHANGE_STYLE')
DIRECTORY_CHANGE_MENU_TYPE = Event('DIRECTORY_CHANGE_MENU_TYPE')
DIRECTORY_TOGGLE_HIDE_PLAYED = Event('DIRECTORY_TOGGLE_HIDE_PLAYED')
#
# TV module
#
TV_START_RECORDING = Event('TV_START_RECORDING')
TV_CHANNEL_UP = Event('TV_CHANNEL_UP')
TV_CHANNEL_DOWN = Event('TV_CHANNEL_DOWN')
TV_SHOW_CHANNEL = Event('TV_SHOW_CHANNEL')
#
# Global playing events
#
SEEK = Event('SEEK')
PLAY = Event('PLAY')
PAUSE = Event('PAUSE')
STOP = Event('STOP')
TOGGLE_OSD = Event('TOGGLE_OSD')
#
# Video module
#
VIDEO_MANUAL_SEEK = Event('VIDEO_MANUAL_SEEK')
VIDEO_NEXT_AUDIOLANG = Event('VIDEO_NEXT_AUDIOLANG')
VIDEO_NEXT_SUBTITLE = Event('VIDEO_NEXT_SUBTITLE')
VIDEO_TOGGLE_INTERLACE = Event('VIDEO_TOGGLE_INTERLACE')
VIDEO_NEXT_ANGLE = Event('VIDEO_NEXT_ANGLE')
VIDEO_CHANGE_ASPECT = Event('VIDEO_CHANGE_ASPECT')
STORE_BOOKMARK = Event('STORE_BOOKMARK')
MENU = Event('MENU')
DVDNAV_LEFT = Event('DVDNAV_LEFT')
DVDNAV_RIGHT = Event('DVDNAV_RIGHT')
DVDNAV_UP = Event('DVDNAV_UP')
DVDNAV_DOWN = Event('DVDNAV_DOWN')
DVDNAV_SELECT = Event('DVDNAV_SELECT')
DVDNAV_TITLEMENU = Event('DVDNAV_TITLEMENU')
DVDNAV_MENU = Event('DVDNAV_MENU')
NEXT = Event('NEXT')
PREV = Event('PREV')
#
# Image module
#
ZOOM = Event('ZOOM')
ZOOM_IN = Event('ZOOM_IN')
ZOOM_OUT = Event('ZOOM_OUT')
IMAGE_ROTATE = Event('IMAGE_ROTATE')
IMAGE_SAVE = Event('IMAGE_SAVE')
IMAGE_MOVE = Event('IMAGE_MOVE')
#
# Input boxes
#
INPUT_EXIT = Event('INPUT_EXIT')
INPUT_ENTER = Event('INPUT_ENTER')
INPUT_LEFT = Event('INPUT_LEFT')
INPUT_RIGHT = Event('INPUT_RIGHT')
INPUT_UP = Event('INPUT_UP')
INPUT_DOWN = Event('INPUT_DOWN')
INPUT_1 = Event('INPUT_1', 1)
INPUT_2 = Event('INPUT_2', 2)
INPUT_3 = Event('INPUT_3', 3)
INPUT_4 = Event('INPUT_4', 4)
INPUT_5 = Event('INPUT_5', 5)
INPUT_6 = Event('INPUT_6', 6)
INPUT_7 = Event('INPUT_7', 7)
INPUT_8 = Event('INPUT_8', 8)
INPUT_9 = Event('INPUT_9', 9)
INPUT_0 = Event('INPUT_0', 0)
INPUT_ALL_NUMBERS = (INPUT_0, INPUT_1, INPUT_2, INPUT_3, INPUT_4, INPUT_5,
INPUT_6, INPUT_7, INPUT_8, INPUT_9, INPUT_0 )
#
# Internal events, don't map any button on them
#
PLAY_END = Event('PLAY_END')
PLAY_START = Event('PLAY_START')
OSD_MESSAGE = Event('OSD_MESSAGE')
| gpl-2.0 | 4,403,699,767,020,894,000 | 32 | 79 | 0.560269 | false |
jantman/python-mcollective | tests/unit/connector/test_stomp.py | 2 | 1197 | """
Tests for StompConnector
"""
import pytest
from pymco.connector import stomp
CONFIGSTR = '''
topicprefix = /topic/
collectives = mcollective
main_collective = mcollective
libdir = /path/to/plugins
logfile = /path/to/mcollective.log
loglevel = debug
daemonize = 0
identity = mco1
# Plugins
securityprovider = none
direct_addressing = yes
direct_addressing_threshold = 5
connector = stomp
plugin.stomp.host = localhost
plugin.stomp.password = guest
plugin.stomp.port = 61613
plugin.stomp.user = guest
factsource = yaml
plugin.yaml = /path/to/facts.yaml
'''
@pytest.fixture
def config():
from pymco import config
return config.Config.from_configstr(configstr=CONFIGSTR)
@pytest.fixture
def connector(config, conn_mock):
return stomp.StompConnector(config, connection=conn_mock)
def test_get_target(connector, config):
assert connector.get_target(collective='collective', agent='agent') == (
'{0}collective.agent.command'.format(config['topicprefix'])
)
def test_get_reply_target(connector, config):
assert connector.get_reply_target(collective='collective', agent='agent') == (
'{0}collective.agent.reply'.format(config['topicprefix'])
)
| bsd-3-clause | 5,373,397,545,902,347,000 | 20.763636 | 82 | 0.734336 | false |
StefanoRaggi/Lean | Algorithm.Python/PandasDataFrameHistoryAlgorithm.py | 3 | 7375 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This algorithm demonstrates the various ways to handle History pandas DataFrame
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="history and warm up" />
### <meta name="tag" content="history" />
### <meta name="tag" content="warm up" />
class PandasDataFrameHistoryAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2014, 6, 9) # Set Start Date
self.SetEndDate(2014, 6, 9) # Set End Date
self.spy = self.AddEquity("SPY", Resolution.Daily).Symbol
self.eur = self.AddForex("EURUSD", Resolution.Daily).Symbol
aapl = self.AddEquity("AAPL", Resolution.Minute).Symbol
self.option = Symbol.CreateOption(aapl, Market.USA, OptionStyle.American, OptionRight.Call, 750, datetime(2014, 10, 18))
self.AddOptionContract(self.option)
sp1 = self.AddData(QuandlFuture,"CHRIS/CME_SP1", Resolution.Daily)
sp1.Exchange = EquityExchange()
self.sp1 = sp1.Symbol
self.AddUniverse(self.CoarseSelection)
def CoarseSelection(self, coarse):
if self.Portfolio.Invested:
return Universe.Unchanged
selected = [x.Symbol for x in coarse if x.Symbol.Value in ["AAA", "AIG", "BAC"]]
if len(selected) == 0:
return Universe.Unchanged
universeHistory = self.History(selected, 10, Resolution.Daily)
for symbol in selected:
self.AssertHistoryIndex(universeHistory, "close", 10, "", symbol)
return selected
def OnData(self, data):
if self.Portfolio.Invested:
return
# we can get history in initialize to set up indicators and such
self.spyDailySma = SimpleMovingAverage(14)
# get the last calendar year's worth of SPY data at the configured resolution (daily)
tradeBarHistory = self.History(["SPY"], timedelta(365))
self.AssertHistoryIndex(tradeBarHistory, "close", 251, "SPY", self.spy)
# get the last calendar year's worth of EURUSD data at the configured resolution (daily)
quoteBarHistory = self.History(["EURUSD"], timedelta(298))
self.AssertHistoryIndex(quoteBarHistory, "bidclose", 251, "EURUSD", self.eur)
optionHistory = self.History([self.option], timedelta(3))
optionHistory.index = optionHistory.index.droplevel(level=[0,1,2])
self.AssertHistoryIndex(optionHistory, "bidclose", 390, "", self.option)
# get the last calendar year's worth of quandl data at the configured resolution (daily)
quandlHistory = self.History(QuandlFuture, "CHRIS/CME_SP1", timedelta(365))
self.AssertHistoryIndex(quandlHistory, "settle", 251, "CHRIS/CME_SP1", self.sp1)
# we can loop over the return value from these functions and we get TradeBars
# we can use these TradeBars to initialize indicators or perform other math
self.spyDailySma.Reset()
for index, tradeBar in tradeBarHistory.loc["SPY"].iterrows():
self.spyDailySma.Update(index, tradeBar["close"])
# we can loop over the return values from these functions and we'll get Quandl data
# this can be used in much the same way as the tradeBarHistory above
self.spyDailySma.Reset()
for index, quandl in quandlHistory.loc["CHRIS/CME_SP1"].iterrows():
self.spyDailySma.Update(index, quandl["settle"])
self.SetHoldings(self.eur, 1)
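# Editorial note (added comments): the History() calls above return pandas
# DataFrames with a (symbol, time) MultiIndex (option requests add extra
# contract levels, dropped above via droplevel); AssertHistoryIndex below
# exercises the usual access patterns on that index (df.loc, df.xs, df.at,
# unstack).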
def AssertHistoryIndex(self, df, column, expected, ticker, symbol):
if df.empty:
raise Exception(f"Empty history data frame for {symbol}")
if column not in df:
raise Exception(f"Could not unstack df. Columns: {', '.join(df.columns)} | {column}")
value = df.iat[0,0]
df2 = df.xs(df.index.get_level_values('time')[0], level='time')
df3 = df[column].unstack(level=0)
try:
# str(Symbol.ID)
self.AssertHistoryCount(f"df.iloc[0]", df.iloc[0], len(df.columns))
self.AssertHistoryCount(f"df.loc[str({symbol.ID})]", df.loc[str(symbol.ID)], expected)
self.AssertHistoryCount(f"df.xs(str({symbol.ID}))", df.xs(str(symbol.ID)), expected)
self.AssertHistoryCount(f"df.at[(str({symbol.ID}),), '{column}']", list(df.at[(str(symbol.ID),), column]), expected)
self.AssertHistoryCount(f"df2.loc[str({symbol.ID})]", df2.loc[str(symbol.ID)], len(df2.columns))
self.AssertHistoryCount(f"df3[str({symbol.ID})]", df3[str(symbol.ID)], expected)
self.AssertHistoryCount(f"df3.get(str({symbol.ID}))", df3.get(str(symbol.ID)), expected)
# str(Symbol)
self.AssertHistoryCount(f"df.loc[str({symbol})]", df.loc[str(symbol)], expected)
self.AssertHistoryCount(f"df.xs(str({symbol}))", df.xs(str(symbol)), expected)
self.AssertHistoryCount(f"df.at[(str({symbol}),), '{column}']", list(df.at[(str(symbol),), column]), expected)
self.AssertHistoryCount(f"df2.loc[str({symbol})]", df2.loc[str(symbol)], len(df2.columns))
self.AssertHistoryCount(f"df3[str({symbol})]", df3[str(symbol)], expected)
self.AssertHistoryCount(f"df3.get(str({symbol}))", df3.get(str(symbol)), expected)
# str : Symbol.Value
if len(ticker) == 0:
return
self.AssertHistoryCount(f"df.loc[{ticker}]", df.loc[ticker], expected)
self.AssertHistoryCount(f"df.xs({ticker})", df.xs(ticker), expected)
self.AssertHistoryCount(f"df.at[(ticker,), '{column}']", list(df.at[(ticker,), column]), expected)
self.AssertHistoryCount(f"df2.loc[{ticker}]", df2.loc[ticker], len(df2.columns))
self.AssertHistoryCount(f"df3[{ticker}]", df3[ticker], expected)
self.AssertHistoryCount(f"df3.get({ticker})", df3.get(ticker), expected)
except Exception as e:
symbols = set(df.index.get_level_values(level='symbol'))
raise Exception(f"{symbols}, {symbol.ID}, {symbol}, {ticker}. {e}")
def AssertHistoryCount(self, methodCall, tradeBarHistory, expected):
if isinstance(tradeBarHistory, list):
count = len(tradeBarHistory)
else:
count = len(tradeBarHistory.index)
if count != expected:
raise Exception(f"{methodCall} expected {expected}, but received {count}")
class QuandlFuture(PythonQuandl):
'''Custom quandl data type for setting customized value column name. Value column is used for the primary trading calculations and charting.'''
def __init__(self):
self.ValueColumnName = "Settle"
| apache-2.0 | -252,864,504,842,198,980 | 47.84106 | 147 | 0.654508 | false |
lindycoder/netman | netman/core/objects/vlan.py | 1 | 1476 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netman.core.objects import Model
from netman.core.objects.access_groups import OUT, IN
class Vlan(Model):
def __init__(self, number=None, name=None, ips=None, vrrp_groups=None, vrf_forwarding=None, access_group_in=None,
access_group_out=None, dhcp_relay_servers=None, arp_routing=None, icmp_redirects=None,
unicast_rpf_mode=None, ntp=None, varp_ips=None):
self.number = number
self.name = name
self.access_groups = {IN: access_group_in, OUT: access_group_out}
self.vrf_forwarding = vrf_forwarding
self.ips = ips or []
self.vrrp_groups = vrrp_groups or []
self.dhcp_relay_servers = dhcp_relay_servers or []
self.arp_routing = arp_routing
self.icmp_redirects = icmp_redirects
self.unicast_rpf_mode = unicast_rpf_mode
self.ntp = ntp
self.varp_ips = varp_ips or []
| apache-2.0 | 5,296,037,036,328,892,000 | 42.411765 | 117 | 0.686992 | false |
ligovirgo/seismon | p_and_s/test_p_and_s.py | 2 | 2465 |
import os, sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (13, 8) if False else (10, 6)
from obspy.taup.taup import getTravelTimes
from obspy.core.util.geodetics import gps2DistAzimuth
from obspy.taup import TauPyModel
from seismon.eqmon import ampRf, shoot
degrees = np.linspace(1,180,180)
distances = degrees*(np.pi/180)*6370000
model = TauPyModel(model="iasp91")
#model = TauPyModel(model="1066a")
fwd = 0
back = 0
eqlat, eqlon = 35.6895, 139.6917
GPS = 1000000000
magnitude = 6.0
depth = 20.0
Rf0 = 76.44
Rfs = 1.37
cd = 440.68
rs = 1.57
Rfamp = ampRf(magnitude,distances/1000.0,depth,Rf0,Rfs,cd,rs)
lats = []
lons = []
Ptimes = []
Stimes = []
#Rtimes = []
Rtwotimes = []
RthreePointFivetimes = []
Rfivetimes = []
Rfamps = []
parrivals = np.loadtxt('p.dat')
sarrivals = np.loadtxt('s.dat')
depths = np.linspace(1,100,100)
index = np.argmin(np.abs(depths-depth))
parrivals = parrivals[:,index]
sarrivals = sarrivals[:,index]
for distance, degree, parrival, sarrival in zip(distances, degrees,parrivals,sarrivals):
lon, lat, baz = shoot(eqlon, eqlat, fwd, distance/1000)
lats.append(lat)
lons.append(lon)
print "Calculating arrival for %.5f ..."%distance
#arrivals = model.get_travel_times(source_depth_in_km=depth,distance_in_degree=degree,phase_list=('P','S'))
arrivals = model.get_travel_times(source_depth_in_km=depth,distance_in_degree=degree)
Ptime = -1
Stime = -1
Rtime = -1
for phase in arrivals:
if Ptime == -1 and phase.name.lower()[0] == "p":
Ptime = GPS+phase.time
if Stime == -1 and phase.name.lower()[-1] == "s":
Stime = GPS+phase.time
Ptime = GPS+parrival
Stime = GPS+sarrival
Rtwotime = GPS+distance/2000.0
RthreePointFivetime = GPS+distance/3500.0
Rfivetime = GPS+distance/5000.0
Ptimes.append(Ptime)
Stimes.append(Stime)
Rtwotimes.append(Rtwotime)
RthreePointFivetimes.append(RthreePointFivetime)
Rfivetimes.append(Rfivetime)
print Ptime - parrival
#print Ptime, Stime, Rtwotime, RthreePointFivetime, Rfivetime
plotDir = '.'
plt.figure()
plt.plot(degrees,Ptimes,'kx')
plt.plot(degrees,Stimes,'kx')
plotName = os.path.join(plotDir,'times.png')
plt.savefig(plotName)
plotName = os.path.join(plotDir,'times.eps')
plt.savefig(plotName)
plotName = os.path.join(plotDir,'times.pdf')
plt.savefig(plotName)
plt.close()
| gpl-3.0 | 8,292,354,089,427,684,000 | 23.89899 | 111 | 0.691684 | false |
openstack/manila | manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py | 1 | 34061 | # Copyright (c) 2016 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP data motion library.
This library handles transferring data from a source to a destination. Its
responsibility is to handle this as efficiently as possible given the
location of the data's source and destination. This includes cloning,
SnapMirror, and copy-offload as improvements to brute force data transfer.
"""
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp import options as na_opts
from manila.share.drivers.netapp import utils as na_utils
from manila.share import utils as share_utils
from manila import utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def get_backend_configuration(backend_name):
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration which is required for replication or migration "
"workflows with the source backend. Available stanzas are "
"%(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
}
raise exception.BadConfigurationException(reason=msg % params)
config = configuration.Configuration(driver.share_opts,
config_group=backend_name)
if config.driver_handles_share_servers:
# NOTE(dviroel): avoid using a pre-create vserver on DHSS == True mode
# when retrieving remote backend configuration.
config.netapp_vserver = None
config.append_config_values(na_opts.netapp_cluster_opts)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_support_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_data_motion_opts)
return config
def get_client_for_backend(backend_name, vserver_name=None):
config = get_backend_configuration(backend_name)
client = client_cmode.NetAppCmodeClient(
transport_type=config.netapp_transport_type,
ssl_cert_path=config.netapp_ssl_cert_path,
username=config.netapp_login,
password=config.netapp_password,
hostname=config.netapp_server_hostname,
port=config.netapp_server_port,
vserver=vserver_name or config.netapp_vserver,
trace=na_utils.TRACE_API)
return client
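# Illustrative usage sketch (hypothetical backend stanza and vserver names;
# running it needs a reachable NetApp backend, so it is left as a comment):
#
#     config = get_backend_configuration('cdot_backend_1')
#     client = get_client_for_backend('cdot_backend_1', vserver_name='svm_1')
#     cluster_name = client.get_cluster_name()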
class DataMotionSession(object):
def _get_backend_volume_name(self, config, share_obj):
"""Return the calculated backend name of the share.
Uses the netapp_volume_name_template configuration value for the
backend to calculate the volume name on the array for the share.
"""
volume_name = config.netapp_volume_name_template % {
'share_id': share_obj['id'].replace('-', '_')}
return volume_name
def _get_backend_qos_policy_group_name(self, share):
"""Get QoS policy name according to QoS policy group name template."""
__, config = self.get_backend_name_and_config_obj(share['host'])
return config.netapp_qos_policy_group_name_template % {
'share_id': share['id'].replace('-', '_')}
def _get_backend_snapmirror_policy_name_svm(self, share_server_id,
backend_name):
config = get_backend_configuration(backend_name)
return (config.netapp_snapmirror_policy_name_svm_template
% {'share_server_id': share_server_id.replace('-', '_')})
def get_vserver_from_share_server(self, share_server):
backend_details = share_server.get('backend_details')
if backend_details:
return backend_details.get('vserver_name')
def get_vserver_from_share(self, share_obj):
share_server = share_obj.get('share_server')
if share_server:
return self.get_vserver_from_share_server(share_server)
def get_backend_name_and_config_obj(self, host):
backend_name = share_utils.extract_host(host, level='backend_name')
config = get_backend_configuration(backend_name)
return backend_name, config
def get_backend_info_for_share(self, share_obj):
backend_name, config = self.get_backend_name_and_config_obj(
share_obj['host'])
vserver = (self.get_vserver_from_share(share_obj) or
config.netapp_vserver)
volume_name = self._get_backend_volume_name(config, share_obj)
return volume_name, vserver, backend_name
def get_client_and_vserver_name(self, share_server):
destination_host = share_server.get('host')
vserver = self.get_vserver_from_share_server(share_server)
backend, __ = self.get_backend_name_and_config_obj(destination_host)
client = get_client_for_backend(backend, vserver_name=vserver)
return client, vserver
def get_snapmirrors(self, source_share_obj, dest_share_obj):
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
snapmirrors = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume_name, dest_volume=dest_volume_name,
desired_attributes=['relationship-status',
'mirror-state',
'source-vserver',
'source-volume',
'last-transfer-end-timestamp'])
return snapmirrors
def create_snapmirror(self, source_share_obj, dest_share_obj):
"""Sets up a SnapMirror relationship between two volumes.
1. Create SnapMirror relationship
2. Initialize data transfer asynchronously
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Create SnapMirror relationship
# TODO(ameade): Change the schedule from hourly to a config value
dest_client.create_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
schedule='hourly')
# 2. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def delete_snapmirror(self, source_share_obj, dest_share_obj,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.
1. Abort snapmirror
2. Delete the snapmirror
3. Release snapmirror to cleanup snapmirror metadata and snapshots
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, src_backend = (
self.get_backend_info_for_share(source_share_obj))
# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
clear_checkpoint=False)
except netapp_api.NaApiError:
# Snapmirror is already deleted
pass
# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False
if release:
# If the source is unreachable, do not perform the release
try:
src_client = get_client_for_backend(src_backend,
vserver_name=src_vserver)
except Exception:
src_client = None
# 3. Cleanup SnapMirror relationship on source
try:
if src_client:
src_client.release_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
# Handle the case where the snapmirror is already
# cleaned up
exc_context.reraise = False
def update_snapmirror(self, source_share_obj, dest_share_obj):
"""Schedule a snapmirror update to happen on the backend."""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# Update SnapMirror
dest_client.update_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def quiesce_then_abort_svm(self, source_share_server, dest_share_server):
source_client, source_vserver = self.get_client_and_vserver_name(
source_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror_svm(source_vserver, dest_vserver)
dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason="Snapmirror relationship is not quiesced.")
try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror_svm(source_vserver,
dest_vserver,
clear_checkpoint=False)
def quiesce_then_abort(self, source_share_obj, dest_share_obj):
dest_volume, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume)
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume, dest_volume=dest_volume,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason="Snapmirror relationship is not quiesced.")
try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume,
clear_checkpoint=False)
def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True):
"""Breaks SnapMirror relationship.
1. Quiesce any ongoing snapmirror transfers
2. Wait until snapmirror finishes transfers and enters quiesced state
3. Break snapmirror
4. Mount the destination volume so it is exported as a share
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Attempt to quiesce, then abort
self.quiesce_then_abort(source_share_obj, dest_share_obj)
# 2. Break SnapMirror
dest_client.break_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
# 3. Mount the destination volume and create a junction path
if mount:
dest_client.mount_volume(dest_volume_name)
def resync_snapmirror(self, source_share_obj, dest_share_obj):
"""Resync SnapMirror relationship. """
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
dest_client.resync_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def resume_snapmirror(self, source_share_obj, dest_share_obj):
"""Resume SnapMirror relationship from a quiesced state."""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
dest_client.resume_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def change_snapmirror_source(self, replica,
orig_source_replica,
new_source_replica, replica_list):
"""Creates SnapMirror relationship from the new source to destination.
1. Delete all snapmirrors involving the replica, but maintain
snapmirror metadata and snapshots for efficiency
2. For DHSS=True scenarios, creates a new vserver peer relationship if
           it does not exist
3. Ensure a new source -> replica snapmirror exists
4. Resync new source -> replica snapmirror relationship
"""
replica_volume_name, replica_vserver, replica_backend = (
self.get_backend_info_for_share(replica))
replica_client = get_client_for_backend(replica_backend,
vserver_name=replica_vserver)
new_src_volume_name, new_src_vserver, new_src_backend = (
self.get_backend_info_for_share(new_source_replica))
# 1. delete
for other_replica in replica_list:
if other_replica['id'] == replica['id']:
continue
# We need to delete ALL snapmirror relationships
# involving this replica but do not remove snapmirror metadata
# so that the new snapmirror relationship is efficient.
self.delete_snapmirror(other_replica, replica, release=False)
self.delete_snapmirror(replica, other_replica, release=False)
# 2. vserver operations when driver handles share servers
replica_config = get_backend_configuration(replica_backend)
if (replica_config.driver_handles_share_servers
and replica_vserver != new_src_vserver):
# create vserver peering if does not exists
if not replica_client.get_vserver_peers(replica_vserver,
new_src_vserver):
new_src_client = get_client_for_backend(
new_src_backend, vserver_name=new_src_vserver)
# Cluster name is needed for setting up the vserver peering
new_src_cluster_name = new_src_client.get_cluster_name()
replica_cluster_name = replica_client.get_cluster_name()
replica_client.create_vserver_peer(
replica_vserver, new_src_vserver,
peer_cluster_name=new_src_cluster_name)
if new_src_cluster_name != replica_cluster_name:
new_src_client.accept_vserver_peer(new_src_vserver,
replica_vserver)
# 3. create
# TODO(ameade): Update the schedule if needed.
replica_client.create_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name,
schedule='hourly')
# 4. resync
replica_client.resync_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name)
@na_utils.trace
def remove_qos_on_old_active_replica(self, orig_active_replica):
old_active_replica_qos_policy = (
self._get_backend_qos_policy_group_name(orig_active_replica)
)
replica_volume_name, replica_vserver, replica_backend = (
self.get_backend_info_for_share(orig_active_replica))
replica_client = get_client_for_backend(
replica_backend, vserver_name=replica_vserver)
try:
replica_client.set_qos_policy_group_for_volume(
replica_volume_name, 'none')
replica_client.mark_qos_policy_group_for_deletion(
old_active_replica_qos_policy)
except exception.StorageCommunicationException:
LOG.exception("Could not communicate with the backend "
"for replica %s to unset QoS policy and mark "
"the QoS policy group for deletion.",
orig_active_replica['id'])
def create_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Sets up a SnapMirror relationship between two vServers.
1. Create a SnapMirror policy for SVM DR
2. Create SnapMirror relationship
3. Initialize data transfer asynchronously
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# 1: Create SnapMirror policy for SVM DR
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
policy_name = self._get_backend_snapmirror_policy_name_svm(
dest_share_server['id'],
dest_backend_name,
)
dest_client.create_snapmirror_policy(policy_name)
# 2. Create SnapMirror relationship
dest_client.create_snapmirror_svm(src_vserver,
dest_vserver,
policy=policy_name,
schedule='hourly')
        # 3. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_svm(src_vserver,
dest_vserver)
def get_snapmirrors_svm(self, source_share_server, dest_share_server):
"""Get SnapMirrors between two vServers."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
snapmirrors = dest_client.get_snapmirrors_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status',
'mirror-state',
'last-transfer-end-timestamp'])
return snapmirrors
def get_snapmirror_destinations_svm(self, source_share_server,
dest_share_server):
"""Get SnapMirrors between two vServers."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
snapmirrors = dest_client.get_snapmirror_destinations_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver)
return snapmirrors
def update_snapmirror_svm(self, source_share_server, dest_share_server):
"""Schedule a SnapMirror update to happen on the backend."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# Update SnapMirror
dest_client.update_snapmirror_svm(src_vserver, dest_vserver)
def quiesce_and_break_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Abort and break a SnapMirror relationship between vServers.
1. Quiesce SnapMirror
2. Break SnapMirror
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# 1. Attempt to quiesce, then abort
self.quiesce_then_abort_svm(source_share_server, dest_share_server)
# 2. Break SnapMirror
dest_client.break_snapmirror_svm(src_vserver, dest_vserver)
def cancel_snapmirror_svm(self, source_share_server, dest_share_server):
"""Cancels SnapMirror relationship between vServers."""
dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
dest_config = get_backend_configuration(dest_backend)
server_timeout = (
dest_config.netapp_server_migration_state_change_timeout)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
snapmirrors = self.get_snapmirrors_svm(source_share_server,
dest_share_server)
if snapmirrors:
# 1. Attempt to quiesce and break snapmirror
self.quiesce_and_break_snapmirror_svm(source_share_server,
dest_share_server)
# NOTE(dviroel): Lets wait until the destination vserver be
# promoted to 'default' and state 'running', before starting
# shutting down the source
self.wait_for_vserver_state(dest_vserver, dest_client,
subtype='default', state='running',
operational_state='stopped',
timeout=server_timeout)
# 2. Delete SnapMirror
self.delete_snapmirror_svm(source_share_server, dest_share_server)
else:
dest_info = dest_client.get_vserver_info(dest_vserver)
if dest_info is None:
# NOTE(dviroel): Nothing to cancel since the destination does
# not exist.
return
if dest_info.get('subtype') == 'dp_destination':
# NOTE(dviroel): Can be a corner case where no snapmirror
# relationship was found but the destination vserver is stuck
# in DP mode. We need to convert it to 'default' to release
# its resources later.
self.convert_svm_to_default_subtype(dest_vserver, dest_client,
timeout=server_timeout)
def convert_svm_to_default_subtype(self, vserver_name, client,
is_dest_path=True, timeout=300):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if vserver_info.get('subtype') != 'default':
if is_dest_path:
client.break_snapmirror_svm(dest_vserver=vserver_name)
else:
client.break_snapmirror_svm(source_vserver=vserver_name)
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)
def delete_snapmirror_svm(self, src_share_server, dest_share_server,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.
1. Abort SnapMirror
2. Delete the SnapMirror
3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
"""
src_client, src_vserver = self.get_client_and_vserver_name(
src_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError:
# SnapMirror is already deleted
pass
# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False
# 3. Release SnapMirror
if release:
src_backend = share_utils.extract_host(src_share_server['host'],
level='backend_name')
src_config = get_backend_configuration(src_backend)
release_timeout = (
src_config.netapp_snapmirror_release_timeout)
self.wait_for_snapmirror_release_svm(src_vserver,
dest_vserver,
src_client,
timeout=release_timeout)
def wait_for_vserver_state(self, vserver_name, client, state=None,
operational_state=None, subtype=None,
timeout=300):
interval = 10
retries = (timeout / interval or 1)
expected = {}
if state:
expected['state'] = state
if operational_state:
expected['operational_state'] = operational_state
if subtype:
expected['subtype'] = subtype
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if not all(item in vserver_info.items() for
item in expected.items()):
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)
def wait_for_snapmirror_release_svm(self, source_vserver, dest_vserver,
src_client, timeout=300):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.NetAppException, interval=interval,
retries=retries, backoff_rate=1)
def release_snapmirror():
snapmirrors = src_client.get_snapmirror_destinations_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver)
if not snapmirrors:
LOG.debug("No snapmirrors to be released in source location.")
else:
try:
src_client.release_snapmirror_svm(source_vserver,
dest_vserver)
except netapp_api.NaApiError as e:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
                        LOG.debug('Snapmirror relationship does not exist '
                                  'anymore.')
msg = _('Snapmirror release sent to source vserver. We will '
'wait for it to be released.')
            raise exception.NetAppException(message=msg)
try:
release_snapmirror()
except exception.NetAppException:
msg = _("Unable to release the snapmirror from source vserver %s. "
"Retries exhausted. Aborting") % source_vserver
raise exception.NetAppException(message=msg)
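# Illustrative replication flow (hypothetical share dictionaries; the calls
# need two configured backends, so this is a non-executable sketch of the
# methods defined above):
#
#     session = DataMotionSession()
#     session.create_snapmirror(active_share, replica_share)
#     session.update_snapmirror(active_share, replica_share)  # periodic sync
#     session.break_snapmirror(active_share, replica_share)   # promote replica
#     session.delete_snapmirror(active_share, replica_share)  # tear down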
| apache-2.0 | -8,708,134,808,909,844,000 | 45.341497 | 79 | 0.566836 | false |
benjello/openfisca-france | openfisca_france/model/prelevements_obligatoires/prelevements_sociaux/cotisations_sociales/contrat_professionnalisation.py | 1 | 5969 | # -*- coding: utf-8 -*-
from __future__ import division
from numpy import datetime64, timedelta64
from openfisca_france.model.base import * # noqa analysis:ignore
class professionnalisation(Variable):
column = BoolCol
entity_class = Individus
label = u"L'individu est en contrat de professionnalisation"
url = "http://www.apce.com/pid879/contrat-de-professionnalisation.html?espace=1&tp=1"
def function(self, simulation, period):
period = period.this_month
age = simulation.calculate('age', period)
ass = simulation.calculate_add('ass', period)
rsa = simulation.calculate('rsa', period)
aah = simulation.calculate('aah', period)
age_condition = (16 <= age) * (age < 25)
dummy_ass = ass > 0
dummy_rmi = rsa > 0
dummy_aah = aah > 0
return period, (age_condition + dummy_ass + dummy_aah + dummy_rmi) > 0
class remuneration_professionnalisation(Variable):
column = FloatCol
entity_class = Individus
label = u"Rémunération de l'apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
    # The minimum pay depends on the age and qualification level of the
    # beneficiary of the "contrat de professionnalisation":
    #
    # For people under 21:
    # at least 55% of the SMIC,
    # at least 65% of the SMIC if the young person holds a qualification at
    # least equal to a vocational baccalaureate, or a vocational title or
    # diploma of the same level.
    #
    # For people between 21 and 25:
    # at least 70% of the SMIC,
    # at least 80% of the SMIC if the beneficiary holds a qualification at
    # least equal to a vocational baccalaureate, or a vocational title or
    # diploma of the same level.
    #
    # For people over 26:
    # at least the SMIC,
    # at least 85% of the minimum wage set by the collective or branch
    # agreement that applies to the company.
def function(self, simulation, period):
period = period.this_month
age = simulation.calculate('age', period)
smic = simulation.legislation_at(period.start).cotsoc.gen.smic_h_b * 52 * 35 / 12
professionnalisation = simulation.calculate('professionnalisation', period)
qualifie = simulation.calculate('qualifie')
salaire_en_smic = [
dict(
part_de_smic_by_qualification = {
'non_qualifie': .55,
'qualifie': .65
},
age_min = 16,
age_max = 21,
),
dict(
part_de_smic_by_qualification = {
1: .70,
},
age_min = 21,
age_max = 25,
),
dict(
part_de_smic_by_qualification = {
1: 1.0,
},
age_min = 26,
age_max = 99
)
]
taux_smic = age * 0.0
for age_interval in salaire_en_smic:
age_condition = (age_interval['age_min'] <= age) * (age <= age_interval['age_max'])
taux_smic[age_condition] = sum([
(qualifie[age_condition] == qualification) * part_de_smic
for qualification, part_de_smic in age_interval['part_de_smic_by_qualification'].iteritems()
])
return period, taux_smic * smic * professionnalisation
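    # Worked example for the formula above (figures are assumptions, for
    # illustration only): with a gross hourly SMIC of 9.61 EUR, the monthly
    # reference wage is 9.61 * 52 * 35 / 12, about 1457 EUR, so a 19-year-old
    # without a qualifying diploma is paid at least 0.55 * 1457, about 801 EUR.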
class exoneration_cotisations_employeur_professionnalisation(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonération de cotisations patronales pour l'emploi d'un apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
    # Exemption from employer social security contributions (health/maternity
    # insurance, basic old-age pension, invalidity/death and family allowances)
    # on the pay of job seekers over 45.
    #
    # Employees on a "contrat de professionnalisation" are not counted in the
    # company headcount during the contract if it is fixed-term, or during the
    # training period if the contract is open-ended.
    # Some expenses are reimbursed by the approved joint collecting bodies
    # (OPCA).
    # A lump-sum grant is paid by Pôle emploi for hiring a job seeker aged 26
    # or over: the employer may receive up to 2,000 euros per beneficiary
    # (AFE); for part-time employees, the amount is prorated to the actual
    # working time.
    # A specific grant of 686 euros per person supported and per full year may
    # be awarded, under certain conditions, to employer groups organising such
    # support under "contrats de professionnalisation".
def function(self, simulation, period):
period = period.this_month
age = simulation.calculate('age', period)
mmid_employeur = simulation.calculate('mmid_employeur', period)
famille = simulation.calculate('famille', period)
vieillesse_plafonnee_employeur = simulation.calculate('vieillesse_plafonnee_employeur', period)
        # FIXME: does this really correspond to the basic old-age pension?
cotisations_exonerees = mmid_employeur + famille + vieillesse_plafonnee_employeur
return period, cotisations_exonerees * (age > 45)
        # FIXME: confirm that the exemptions apply only to workers over 45.
        # TODO: check that there is no advantage on the employee side.
| agpl-3.0 | -7,057,880,705,877,680,000 | 43.742424 | 120 | 0.645107 | false |
kingvuplus/ts-gui-3 | lib/python/Components/Pixmap.py | 38 | 3125 | from ConditionalWidget import ConditionalWidget
from GUIComponent import GUIComponent
from enigma import ePixmap, eTimer
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from os import path
from skin import loadPixmap
class Pixmap(GUIComponent):
GUI_WIDGET = ePixmap
def getSize(self):
s = self.instance.size()
return (s.width(), s.height())
class PixmapConditional(ConditionalWidget, Pixmap):
def __init__(self, withTimer = True):
ConditionalWidget.__init__(self)
Pixmap.__init__(self)
class MovingPixmap(Pixmap):
def __init__(self):
Pixmap.__init__(self)
self.moving = False
# TODO: get real values
self.x = 0.0
self.y = 0.0
self.clearPath()
self.moveTimer = eTimer()
self.moveTimer.callback.append(self.doMove)
def clearPath(self, repeated = False):
if (self.moving):
self.moving = False
self.moveTimer.stop()
self.path = []
self.currDest = 0
self.repeated = repeated
def addMovePoint(self, x, y, time = 20):
self.path.append((x, y, time))
def moveTo(self, x, y, time = 20):
self.clearPath()
self.addMovePoint(x, y, time)
def startMoving(self):
if not self.moving:
self.time = self.path[self.currDest][2]
self.stepX = (self.path[self.currDest][0] - self.x) / float(self.time)
self.stepY = (self.path[self.currDest][1] - self.y) / float(self.time)
self.moving = True
self.moveTimer.start(100)
def stopMoving(self):
self.moving = False
self.moveTimer.stop()
def doMove(self):
self.x += self.stepX
self.y += self.stepY
self.time -= 1
try:
self.move(int(self.x), int(self.y))
except: # moving not possible... widget not there any more... stop moving
self.stopMoving()
if (self.time == 0):
self.currDest += 1
self.moveTimer.stop()
self.moving = False
if (self.currDest >= len(self.path)): # end of path
if (self.repeated):
self.currDest = 0
self.moving = False
self.startMoving()
else:
self.moving = False
self.startMoving()
class MultiPixmap(Pixmap):
def __init__(self):
Pixmap.__init__(self)
self.pixmaps = []
def applySkin(self, desktop, screen):
if self.skinAttributes is not None:
skin_path_prefix = getattr(screen, "skin_path", path)
pixmap = None
attribs = [ ]
for (attrib, value) in self.skinAttributes:
if attrib == "pixmaps":
pixmaps = value.split(',')
for p in pixmaps:
self.pixmaps.append(loadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, p, path_prefix=skin_path_prefix), desktop) )
if not pixmap:
pixmap = resolveFilename(SCOPE_CURRENT_SKIN, pixmaps[0], path_prefix=skin_path_prefix)
elif attrib == "pixmap":
pixmap = resolveFilename(SCOPE_CURRENT_SKIN, value, path_prefix=skin_path_prefix)
else:
attribs.append((attrib,value))
if pixmap:
attribs.append(("pixmap", pixmap))
self.skinAttributes = attribs
return GUIComponent.applySkin(self, desktop, screen)
def setPixmapNum(self, x):
if self.instance:
if len(self.pixmaps) > x:
self.instance.setPixmap(self.pixmaps[x])
else:
print "setPixmapNum(%d) failed! defined pixmaps:" %(x), self.pixmaps
| gpl-2.0 | -4,067,766,056,966,849,500 | 25.260504 | 117 | 0.6784 | false |
elgambitero/FreeCAD_sf_master | src/Mod/Ship/shipCapacityCurve/PlotAux.py | 16 | 5190 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import os
from PySide import QtGui, QtCore
import FreeCAD
import FreeCADGui
from FreeCAD import Base
import Spreadsheet
class Plot(object):
def __init__(self, l, z, v, tank):
""" Constructor. performs the plot and shows it.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
"""
self.plot(l, z, v, tank)
self.spreadSheet(l, z, v, tank)
def plot(self, l, z, v, tank):
""" Perform the areas curve plot.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
@return True if error happens.
"""
try:
import Plot
plt = Plot.figure('Capacity curve')
except ImportError:
msg = QtGui.QApplication.translate(
"ship_console",
"Plot module is disabled, so I cannot perform the plot",
None,
QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintWarning(msg + '\n')
return True
# Plot the volume as a function of the level percentage
vols = Plot.plot(l, v, 'Capacity')
vols.line.set_linestyle('-')
vols.line.set_linewidth(2.0)
vols.line.set_color((0.0, 0.0, 0.0))
Plot.xlabel(r'Percentage of filling level')
Plot.ylabel(r'$V \; [\mathrm{m}^3]$')
plt.axes.xaxis.label.set_fontsize(20)
plt.axes.yaxis.label.set_fontsize(20)
Plot.grid(True)
# Now duplicate the axes
ax = Plot.addNewAxes()
# Y axis can be placed at right
ax.yaxis.tick_right()
ax.spines['right'].set_color((0.0, 0.0, 0.0))
ax.spines['left'].set_color('none')
ax.yaxis.set_ticks_position('right')
ax.yaxis.set_label_position('right')
# And X axis can be placed at top
ax.xaxis.tick_top()
ax.spines['top'].set_color((0.0, 0.0, 0.0))
ax.spines['bottom'].set_color('none')
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
# Plot the volume as a function of the level z coordinate
vols = Plot.plot(z, v, 'Capacity')
vols.line.set_linestyle('-')
vols.line.set_linewidth(2.0)
vols.line.set_color((0.0, 0.0, 0.0))
Plot.xlabel(r'$z \; [\mathrm{m}]$')
Plot.ylabel(r'$V \; [\mathrm{m}^3]$')
ax.xaxis.label.set_fontsize(20)
ax.yaxis.label.set_fontsize(20)
Plot.grid(True)
# End
plt.update()
return False
def spreadSheet(self, l, z, v, tank):
""" Write the output data file.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
"""
s = FreeCAD.activeDocument().addObject('Spreadsheet::Sheet',
'Capacity curve')
# Print the header
s.set("A1", "Percentage of filling level")
s.set("B1", "Level [m]")
s.set("C1", "Volume [m^3]")
# Print the data
for i in range(len(l)):
s.set("A{}".format(i + 2), str(l[i]))
s.set("B{}".format(i + 2), str(z[i]))
s.set("C{}".format(i + 2), str(v[i]))
# Recompute
FreeCAD.activeDocument().recompute() | lgpl-2.1 | 160,937,239,777,189,220 | 40.528 | 76 | 0.493256 | false |
guthemberg/yanoama | yanoama/amen/system/runner.py | 1 | 2476 | from yanoama.amen.system.collector import system_info_collector, process_info_collector
from yanoama.amen.core import settings
from yanoama.amen.utils.dates import unix_utc_now
import sys
class Runner(object):
def __init__(self):
self.active_checks = settings.SYSTEM_CHECKS
#self.process_checks = settings.PROCESS_CHECKS
def system(self):
system_info_dict = {}
now = unix_utc_now() # unix time
        # Tables written by the collector (columns marked with * are indices):
        #   node:   ts(*), hostname(*), tx, rx, storage_usage, num_obj
        #   object: oid(*), length, owners
        #   swarm:  ts(*), oid, tx, rx, hostname
if 'node' in self.active_checks and sys.platform != 'darwin':
node = system_info_collector.get_node_info(settings.STORAGE_PATH)
if node != False:
node['time'] = now
system_info_dict['node'] = node
elif 'memory' in self.active_checks:
memory = system_info_collector.get_memory_info()
if memory != False:
memory['time'] = now
system_info_dict['memory'] = memory
elif 'cpu' in self.active_checks:
cpu = system_info_collector.get_cpu_utilization()
if cpu != False:
cpu['time'] = now
system_info_dict['cpu'] = cpu
elif 'loadavg' in self.active_checks:
loadavg = system_info_collector.get_load_average()
if loadavg != False:
loadavg['time'] = now
system_info_dict['loadavg'] = loadavg
elif 'disk' in self.active_checks:
disk = system_info_collector.get_disk_usage()
if disk != False:
disk['time'] = now
system_info_dict['disk'] = disk
return system_info_dict
# empty dictionary, used when stopping the daemon to avoid chart bugs
def empty(self):
empty_dict = {}
now = unix_utc_now()
for check in self.active_checks:
empty_dict[check] = {'time': now, 'last': 1}
return empty_dict
#def processes(self):
# now = unix_utc_now()
# process_info_dict = {}
# for process in self.process_checks:
# process_info_dict[process] = process_info_collector.check_process(process)
# process_info_dict[process]['time'] = now
# return process_info_dict
runner = Runner()
| bsd-3-clause | 6,456,345,211,602,889,000 | 28.129412 | 88 | 0.550485 | false |
citrix-openstack-build/ceilometer | ceilometer/image/notifications.py | 3 | 3971 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc
#
# Author: Eoghan Glynn <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing image metering messages from glance notification
events.
"""
from oslo.config import cfg
from ceilometer import sample
from ceilometer import plugin
OPTS = [
cfg.StrOpt('glance_control_exchange',
default='glance',
help="Exchange name for Glance notifications"),
]
cfg.CONF.register_opts(OPTS)
class ImageBase(plugin.NotificationBase):
"""Base class for image counting."""
@staticmethod
def get_exchange_topics(conf):
"""Return a sequence of ExchangeTopics defining the exchange and
topics to be connected for this plugin.
"""
return [
plugin.ExchangeTopics(
exchange=conf.glance_control_exchange,
topics=set(topic + ".info"
for topic in conf.notification_topics)),
]
class ImageCRUDBase(ImageBase):
event_types = [
'image.update',
'image.upload',
'image.delete',
]
class ImageCRUD(ImageCRUDBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name=message['event_type'],
type=sample.TYPE_DELTA,
unit='image',
volume=1,
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
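# Illustrative sketch (hypothetical payload values): an 'image.upload'
# notification handled by ImageCRUD above yields one delta sample, e.g.
#
#     message = {'event_type': 'image.upload',
#                'payload': {'id': 'fake-image-id',
#                            'owner': 'fake-project',
#                            'size': 1024},
#                ...}
#     # -> Sample(name='image.upload', unit='image', volume=1,
#     #           resource_id='fake-image-id', project_id='fake-project')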
class Image(ImageCRUDBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image',
type=sample.TYPE_GAUGE,
unit='image',
volume=1,
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
class ImageSize(ImageCRUDBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.size',
type=sample.TYPE_GAUGE,
unit='B',
volume=message['payload']['size'],
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
class ImageDownload(ImageBase):
"""Emit image_download sample when an image is downloaded."""
event_types = ['image.send']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.download',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes_sent'],
resource_id=message['payload']['image_id'],
user_id=message['payload']['receiver_user_id'],
project_id=message['payload']['receiver_tenant_id'],
message=message)
class ImageServe(ImageBase):
"""Emit image_serve sample when an image is served out."""
event_types = ['image.send']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.serve',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes_sent'],
resource_id=message['payload']['image_id'],
user_id=None,
project_id=message['payload']['owner_id'],
message=message)
| apache-2.0 | 3,191,904,136,939,929,000 | 29.775194 | 75 | 0.607557 | false |
JasonHanG/tensor-gallery | cnn-text-classification/text_cnn.py | 2 | 3776 | import tensorflow as tf
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
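# Illustrative smoke test (not part of the original model; the hyper-parameters
# below are assumptions): builds the graph once and prints the scores tensor.
if __name__ == "__main__":
    cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=18000,
                  embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128,
                  l2_reg_lambda=0.0)
    print(cnn.scores)  # Tensor of shape [batch_size, num_classes]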
| apache-2.0 | 6,086,297,899,356,951,000 | 43.952381 | 101 | 0.562765 | false |
cogeorg/black_rhino | examples/degroot/networkx/algorithms/tests/test_dag.py | 20 | 5729 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDAG:
def setUp(self):
pass
def test_topological_sort1(self):
DG=nx.DiGraph()
DG.add_edges_from([(1,2),(1,3),(2,3)])
assert_equal(nx.topological_sort(DG),[1, 2, 3])
assert_equal(nx.topological_sort_recursive(DG),[1, 2, 3])
DG.add_edge(3,2)
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
DG.remove_edge(2,3)
assert_equal(nx.topological_sort(DG),[1, 3, 2])
assert_equal(nx.topological_sort_recursive(DG),[1, 3, 2])
def test_is_directed_acyclic_graph(self):
G = nx.generators.complete_graph(2)
assert_false(nx.is_directed_acyclic_graph(G))
assert_false(nx.is_directed_acyclic_graph(G.to_directed()))
assert_false(nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])))
assert_true(nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])))
def test_topological_sort2(self):
DG=nx.DiGraph({1:[2],2:[3],3:[4],
4:[5],5:[1],11:[12],
12:[13],13:[14],14:[15]})
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
assert_false(nx.is_directed_acyclic_graph(DG))
DG.remove_edge(1,2)
assert_equal(nx.topological_sort_recursive(DG),
[11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
assert_equal(nx.topological_sort(DG),
[11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
assert_true(nx.is_directed_acyclic_graph(DG))
def test_topological_sort3(self):
DG=nx.DiGraph()
DG.add_edges_from([(1,i) for i in range(2,5)])
DG.add_edges_from([(2,i) for i in range(5,9)])
DG.add_edges_from([(6,i) for i in range(9,12)])
DG.add_edges_from([(4,i) for i in range(12,15)])
assert_equal(nx.topological_sort_recursive(DG),
[1, 4, 14, 13, 12, 3, 2, 7, 6, 11, 10, 9, 5, 8])
assert_equal(nx.topological_sort(DG),
[1, 2, 8, 5, 6, 9, 10, 11, 7, 3, 4, 12, 13, 14])
DG.add_edge(14,1)
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
def test_topological_sort4(self):
G=nx.Graph()
G.add_edge(1,2)
assert_raises(nx.NetworkXError, nx.topological_sort, G)
assert_raises(nx.NetworkXError, nx.topological_sort_recursive, G)
def test_topological_sort5(self):
G=nx.DiGraph()
G.add_edge(0,1)
assert_equal(nx.topological_sort_recursive(G), [0,1])
assert_equal(nx.topological_sort(G), [0,1])
def test_nbunch_argument(self):
G=nx.DiGraph()
G.add_edges_from([(1,2), (2,3), (1,4), (1,5), (2,6)])
assert_equal(nx.topological_sort(G), [1, 2, 3, 6, 4, 5])
assert_equal(nx.topological_sort_recursive(G), [1, 5, 4, 2, 6, 3])
assert_equal(nx.topological_sort(G,[1]), [1, 2, 3, 6, 4, 5])
assert_equal(nx.topological_sort_recursive(G,[1]), [1, 5, 4, 2, 6, 3])
assert_equal(nx.topological_sort(G,[5]), [5])
assert_equal(nx.topological_sort_recursive(G,[5]), [5])
def test_ancestors(self):
G=nx.DiGraph()
ancestors = nx.algorithms.dag.ancestors
G.add_edges_from([
(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
assert_equal(ancestors(G, 6), set([1, 2, 4, 5]))
assert_equal(ancestors(G, 3), set([1, 4]))
assert_equal(ancestors(G, 1), set())
assert_raises(nx.NetworkXError, ancestors, G, 8)
def test_descendants(self):
G=nx.DiGraph()
descendants = nx.algorithms.dag.descendants
G.add_edges_from([
(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
assert_equal(descendants(G, 1), set([2, 3, 6]))
assert_equal(descendants(G, 4), set([2, 3, 5, 6]))
assert_equal(descendants(G, 3), set())
assert_raises(nx.NetworkXError, descendants, G, 8)
def test_is_aperiodic_cycle():
G=nx.DiGraph()
G.add_cycle([1,2,3,4])
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_cycle2():
G=nx.DiGraph()
G.add_cycle([1,2,3,4])
G.add_cycle([3,4,5,6,7])
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_cycle3():
G=nx.DiGraph()
G.add_cycle([1,2,3,4])
G.add_cycle([3,4,5,6])
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_cycle4():
G = nx.DiGraph()
G.add_cycle([1,2,3,4])
G.add_edge(1,3)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_selfloop():
G = nx.DiGraph()
G.add_cycle([1,2,3,4])
G.add_edge(1,1)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_raise():
G = nx.Graph()
assert_raises(nx.NetworkXError,
nx.is_aperiodic,
G)
def test_is_aperiodic_bipartite():
#Bipartite graph
G = nx.DiGraph(nx.davis_southern_women_graph())
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_rary_tree():
G = nx.full_rary_tree(3,27,create_using=nx.DiGraph())
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_disconnected():
#disconnected graph
G = nx.DiGraph()
G.add_cycle([1,2,3,4])
G.add_cycle([5,6,7,8])
assert_false(nx.is_aperiodic(G))
G.add_edge(1,3)
G.add_edge(5,7)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_disconnected2():
G = nx.DiGraph()
G.add_cycle([0,1,2])
G.add_edge(3,3)
assert_false(nx.is_aperiodic(G))
| gpl-3.0 | -3,263,586,685,965,317,000 | 34.147239 | 79 | 0.575668 | false |
CiscoSystems/nova | nova/virt/xenapi/vm_utils.py | 1 | 103690 | # Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
import os
import time
import urllib
import uuid
from xml.parsers import expat
from eventlet import greenthread
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import exception
from nova.network import model as network_model
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import versionutils
from nova.openstack.common import xmlutils
from nova import utils
from nova.virt import configdrive
from nova.virt import cpu
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('cache_images',
default='all',
deprecated_name='cache_images',
deprecated_group='DEFAULT',
help='Cache glance images locally. `all` will cache all'
' images, `some` will only cache images that have the'
' image_property `cache_in_nova=True`, and `none` turns'
' off caching entirely'),
cfg.IntOpt('image_compression_level',
deprecated_name='xenapi_image_compression_level',
deprecated_group='DEFAULT',
help='Compression level for images, e.g., 9 for gzip -9.'
' Range is 1-9, 9 being most compressed but most CPU'
' intensive on dom0.'),
cfg.StrOpt('default_os_type',
default='linux',
deprecated_name='default_os_type',
deprecated_group='DEFAULT',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
deprecated_name='block_device_creation_timeout',
deprecated_group='DEFAULT',
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * units.Mi,
deprecated_name='max_kernel_ramdisk_size',
deprecated_group='DEFAULT',
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='default-sr:true',
deprecated_name='sr_matching_filter',
deprecated_group='DEFAULT',
help='Filter for finding the SR to be used to install guest '
'instances on. To use the Local Storage in default '
'XenServer/XCP installations set this flag to '
'other-config:i18n-key=local-storage. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('sparse_copy',
default=True,
deprecated_name='xenapi_sparse_copy',
deprecated_group='DEFAULT',
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('num_vbd_unplug_retries',
default=10,
deprecated_name='xenapi_num_vbd_unplug_retries',
deprecated_group='DEFAULT',
help='Maximum number of retries to unplug VBD'),
cfg.StrOpt('torrent_images',
default='none',
deprecated_name='xenapi_torrent_images',
deprecated_group='DEFAULT',
help='Whether or not to download images via Bit Torrent '
'(all|some|none).'),
cfg.StrOpt('ipxe_network_name',
deprecated_name='xenapi_ipxe_network_name',
deprecated_group='DEFAULT',
help='Name of network to use for booting iPXE ISOs'),
cfg.StrOpt('ipxe_boot_menu_url',
deprecated_name='xenapi_ipxe_boot_menu_url',
deprecated_group='DEFAULT',
help='URL to the iPXE boot menu'),
cfg.StrOpt('ipxe_mkisofs_cmd',
default='mkisofs',
deprecated_name='xenapi_ipxe_mkisofs_cmd',
deprecated_group='DEFAULT',
help='Name and optionally path of the tool used for '
'ISO image creation'),
]
CONF = cfg.CONF
# xenapi_vm_utils options in the DEFAULT group were deprecated in Icehouse
CONF.register_opts(xenapi_vm_utils_opts, 'xenserver')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('glance_num_retries', 'nova.image.glance')
CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
MAX_VDI_CHAIN_SIZE = 16
PROGRESS_INTERVAL_SECONDS = 300
# Fudge factor to allow for the VHD chain to be slightly larger than
# the partitioned space. Otherwise, legitimate images near their
# maximum allowed size can fail on build with FlavorDiskTooSmall.
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10
class ImageType(object):
"""Enumeration class for distinguishing different image types
| 0 - kernel image (goes on dom0's filesystem)
| 1 - ramdisk image (goes on dom0's filesystem)
| 2 - disk image (local SR, partitioned by objectstore plugin)
| 3 - raw disk image (local SR, NOT partitioned by plugin)
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
| 6 - config drive
"""
KERNEL = 0
RAMDISK = 1
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
DISK_CONFIGDRIVE = 6
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "root"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
return dict(zip(cls._ids, ImageType._strs)).get(image_type)
@classmethod
def get_role(cls, image_type_id):
"""Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
cls.DISK_ISO: 'iso',
cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
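# Example (illustrative sketch; values follow directly from the class above):
#     ImageType.to_string(ImageType.DISK_VHD)   # -> 'vhd'
#     ImageType.get_role(ImageType.DISK_VHD)    # -> 'root'
#     ImageType.get_role(ImageType.DISK_ISO)    # -> 'iso'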
def get_vm_device_id(session, image_properties):
# NOTE: device_id should be 2 for windows VMs which run new xentools
# (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more
# information.
if image_properties is None:
image_properties = {}
device_id = image_properties.get('xenapi_device_id')
# The device_id is required to be set for hypervisor version 6.1 and above
if device_id:
hypervisor_version = session.product_version
if _hypervisor_supports_device_id(hypervisor_version):
return device_id
else:
msg = _("Device id %(id)s specified is not supported by "
"hypervisor version %(version)s") % {'id': device_id,
'version': hypervisor_version}
raise exception.NovaException(msg)
def _hypervisor_supports_device_id(version):
version_as_string = '.'.join(str(v) for v in version)
    return versionutils.is_compatible('6.1', version_as_string)
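# Example (illustrative sketch): the product_version tuple is joined into a
# dotted string before the comparison, so
#     _hypervisor_supports_device_id((6, 2, 0))   # -> True  ('6.2.0' >= '6.1')
#     _hypervisor_supports_device_id((6, 0, 2))   # -> False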
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False, device_id=None):
"""Create a VM record. Returns new VM reference.
the use_pv_kernel flag indicates whether the guest is HVM or PV
There are 3 scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
flavor = flavors.extract_flavor(instance)
mem = str(long(flavor['memory_mb']) * units.Mi)
vcpus = str(flavor['vcpus'])
vcpu_weight = flavor['vcpu_weight']
vcpu_params = {}
if vcpu_weight is not None:
# NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means
# we need to specify both weight and cap for either to apply
vcpu_params = {"weight": str(vcpu_weight), "cap": "0"}
cpu_mask_list = cpu.get_cpuset_ids()
if cpu_mask_list:
cpu_mask = ",".join(str(cpu_id) for cpu_id in cpu_mask_list)
vcpu_params["mask"] = cpu_mask
viridian = 'true' if instance['os_type'] == 'windows' else 'false'
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
'other_config': {'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': viridian, 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': vcpu_params,
'xenstore_data': {'vm-data/allowvssprovider': 'false'}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance['kernel_id']:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
if device_id:
rec['platform']['device_id'] = device_id
vm_ref = session.VM.create(rec)
LOG.debug(_('Created VM'), instance=instance)
return vm_ref
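# Example (illustrative sketch): which of the three scenarios above is taken
# depends on use_pv_kernel and instance['kernel_id']:
#     use_pv_kernel=True,  kernel_id set    -> PV_kernel/PV_ramdisk populated
#     use_pv_kernel=True,  kernel_id unset  -> PV_bootloader = 'pygrub'
#     use_pv_kernel=False                   -> HVM, 'BIOS order' boot policy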
def destroy_vm(session, instance, vm_ref):
"""Destroys a VM record."""
try:
session.VM.destroy(vm_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
return
LOG.debug(_("VM destroyed"), instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug(_("Shutting down VM (cleanly)"), instance=instance)
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
return False
return True
def hard_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug(_("Shutting down VM (hard)"), instance=instance)
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
return False
return True
def is_vm_shutdown(session, vm_ref):
state = get_power_state(session, vm_ref)
if state == power_state.SHUTDOWN:
return True
return False
def is_enough_free_mem(session, instance):
flavor = flavors.extract_flavor(instance)
mem = long(flavor['memory_mb']) * units.Mi
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
session.host_ref))
return host_free_mem >= mem
def find_vbd_by_number(session, vm_ref, number):
"""Get the VBD reference from the device number."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
try:
user_device = session.call_xenapi("VBD.get_userdevice",
vbd_ref)
if user_device == str(number):
return vbd_ref
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('VBD not found in instance %s') % vm_ref)
def _should_retry_unplug_vbd(err):
# Retry if unplug failed with DEVICE_DETACH_REJECTED
# For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
# Since XenServer 6.2, we also need to retry if we get
# INTERNAL_ERROR, as that error goes away when you retry.
return (err == 'DEVICE_DETACH_REJECTED'
or
err == 'INTERNAL_ERROR')
def unplug_vbd(session, vbd_ref, this_vm_ref):
max_attempts = CONF.xenserver.num_vbd_unplug_retries + 1
for num_attempt in xrange(1, max_attempts + 1):
try:
if num_attempt > 1:
greenthread.sleep(1)
session.VBD.unplug(vbd_ref, this_vm_ref)
return
except session.XenAPI.Failure as exc:
err = len(exc.details) > 0 and exc.details[0]
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_('VBD %s already detached'), vbd_ref)
return
elif _should_retry_unplug_vbd(err):
                LOG.info(_('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d'),
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
'max_attempts': max_attempts, 'err': err})
else:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to unplug VBD %s') % vbd_ref)
raise volume_utils.StorageError(
_('Reached maximum number of retries trying to unplug VBD %s')
% vbd_ref)
def destroy_vbd(session, vbd_ref):
"""Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
read_only=False, bootable=False, osvol=False,
empty=False, unpluggable=True):
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
    if vdi_ref is None:
vdi_ref = 'OpaqueRef:NULL'
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = read_only and 'RO' or 'RW'
vbd_rec['type'] = vbd_type
vbd_rec['unpluggable'] = unpluggable
vbd_rec['empty'] = empty
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... '),
{'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.'),
{'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
if osvol:
# set osvol=True in other-config to indicate this is an
# attached nova (or cinder) volume
session.call_xenapi('VBD.add_to_other_config',
vbd_ref, 'osvol', 'True')
return vbd_ref
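# Example (illustrative sketch; root_vdi_ref/vol_vdi_ref are placeholder refs):
#     create_vbd(session, vm_ref, root_vdi_ref, 0, bootable=True)
#     create_vbd(session, vm_ref, vol_vdi_ref, 1, osvol=True)
# The second call flags the VBD in other_config so detach logic can tell an
# attached volume apart from the instance's own disks.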
def attach_cd(session, vm_ref, vdi_ref, userdevice):
"""Create an empty VBD, then insert the CD."""
vbd_ref = create_vbd(session, vm_ref, None, userdevice,
vbd_type='cd', read_only=True,
bootable=True, empty=True,
unpluggable=False)
session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
return vbd_ref
def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure:
msg = _("Unable to destroy VDI %s") % vdi_ref
LOG.debug(msg, exc_info=True)
LOG.error(msg)
raise volume_utils.StorageError(msg)
def safe_destroy_vdis(session, vdi_refs):
"""Tries to destroy the requested VDIs, but ignores any errors."""
for vdi_ref in vdi_refs:
try:
destroy_vdi(session, vdi_ref)
except volume_utils.StorageError:
msg = _("Ignoring error while destroying VDI: %s") % vdi_ref
LOG.debug(msg)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
def get_vdi_uuid_for_volume(session, connection_data):
sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data)
sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
if not sr_ref:
sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
if sr_ref is None:
raise exception.NovaException(_('SR not present and could not be '
'introduced'))
vdi_uuid = None
if 'vdi_uuid' in connection_data:
_scan_sr(session, sr_ref)
vdi_uuid = connection_data['vdi_uuid']
else:
try:
vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
except volume_utils.StorageError as exc:
LOG.exception(exc)
volume_utils.forget_sr(session, sr_ref)
return vdi_uuid
def get_vdis_for_instance(context, session, instance, name_label, image,
image_type, block_device_info=None):
vdis = {}
if block_device_info:
LOG.debug(_("block device info: %s"), block_device_info)
root_device_name = block_device_info['root_device_name']
for bdm in block_device_info['block_device_mapping']:
if (block_device.strip_prefix(bdm['mount_device']) ==
block_device.strip_prefix(root_device_name)):
# If we're a root-device, record that fact so we don't download
# a root image via Glance
type_ = 'root'
else:
# Otherwise, use mount_device as `type_` so that we have easy
# access to it in _attach_disks to create the VBD
type_ = bdm['mount_device']
connection_data = bdm['connection_info']['data']
vdi_uuid = get_vdi_uuid_for_volume(session, connection_data)
if vdi_uuid:
vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True)
# If we didn't get a root VDI from volumes, then use the Glance image as
# the root device
if 'root' not in vdis:
create_image_vdis = _create_image(
context, session, instance, name_label, image, image_type)
vdis.update(create_image_vdis)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
return vdis
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
"""This creates a temporary VM so that we can snapshot a VDI.
    VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
work around this, we need to create a temporary VM and then map the VDI to
the VM using a temporary VBD.
"""
name_label = "dummy"
vm_ref = create_vm(session, instance, name_label, None, None)
try:
vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
read_only=True)
try:
yield vm_ref
finally:
try:
destroy_vbd(session, vbd_ref)
except volume_utils.StorageError:
# destroy_vbd() will log error
pass
finally:
destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference.
This function differs from the XenAPI `VDI.copy` call in that the copy is
atomic and isolated, meaning we don't see half-downloaded images. It
accomplishes this by copying the VDI's into a temporary directory and then
atomically renaming them into the SR when the copy is completed.
The correct long term solution is to fix `VDI.copy` so that it is atomic
and isolated.
"""
with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
label = "snapshot"
with snapshot_attached_here(
session, instance, vm_ref, label) as vdi_uuids:
imported_vhds = session.call_plugin_serialized(
'workarounds', 'safe_copy_vdis',
sr_path=get_sr_path(session, sr_ref=sr_ref),
vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
root_uuid = imported_vhds['root']['uuid']
# rescan to discover new VHDs
scan_default_sr(session)
vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s'),
{'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
return vdi_ref
def _get_vdi_other_config(disk_type, instance=None):
"""Return metadata to store in VDI's other_config attribute.
`nova_instance_uuid` is used to associate a VDI with a particular instance
so that, if it becomes orphaned from an unclean shutdown of a
compute-worker, we can safely detach it.
"""
other_config = {'nova_disk_type': disk_type}
# create_vdi may be called simply while creating a volume
# hence information about instance may or may not be present
if instance:
other_config['nova_instance_uuid'] = instance['uuid']
return other_config
def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
instance):
existing_other_config = session.call_xenapi('VDI.get_other_config',
vdi_ref)
session.call_xenapi('VDI.set_name_label', vdi_ref, name_label)
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
other_config = _get_vdi_other_config(vdi_type, instance=instance)
for key, value in other_config.iteritems():
if key not in existing_other_config:
session.call_xenapi(
"VDI.add_to_other_config", vdi_ref, key, value)
def _vm_get_vbd_refs(session, vm_ref):
return session.call_xenapi("VM.get_VBDs", vm_ref)
def _vbd_get_rec(session, vbd_ref):
return session.call_xenapi("VBD.get_record", vbd_ref)
def _vdi_get_rec(session, vdi_ref):
return session.call_xenapi("VDI.get_record", vdi_ref)
def _vdi_get_uuid(session, vdi_ref):
return session.call_xenapi("VDI.get_uuid", vdi_ref)
def _vdi_snapshot(session, vdi_ref):
return session.call_xenapi("VDI.snapshot", vdi_ref, {})
def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'):
"""Retrieves the primary VDI for a VM."""
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == userdevice:
vdi_ref = vbd_rec['VDI']
vdi_rec = _vdi_get_rec(session, vdi_ref)
return vdi_ref, vdi_rec
raise exception.NovaException(_("No primary VDI found for %s") % vm_ref)
def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0):
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
if int(vbd_rec['userdevice']) >= min_userdevice:
vdi_ref = vbd_rec['VDI']
yield _vdi_get_uuid(session, vdi_ref)
def _try_strip_base_mirror_from_vdi(session, vdi_ref):
try:
session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
"base_mirror")
except session.XenAPI.Failure:
LOG.debug(_("Error while removing sm_config"), exc_info=True)
def strip_base_mirror_from_vdis(session, vm_ref):
# NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
_try_strip_base_mirror_from_vdi(session, vdi_ref)
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
post_snapshot_callback=None):
# impl method allow easier patching for tests
return _snapshot_attached_here_impl(session, instance, vm_ref, label,
userdevice, post_snapshot_callback)
def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
LOG.debug(_("Starting snapshot for VM"), instance=instance)
# Memorize the original_parent_uuid so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
userdevice)
original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
sr_ref = vm_vdi_rec["SR"]
snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
if post_snapshot_callback is not None:
post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
# Ensure no VHDs will vanish while we migrate them
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
original_parent_uuid)
snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
chain = _walk_vdi_chain(session, snapshot_uuid)
vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
yield vdi_uuids
finally:
safe_destroy_vdis(session, [snapshot_ref])
        # TODO(johngarbutt) we need to check the snapshot has been coalesced
        # now that its associated VDI has been deleted.
def get_sr_path(session, sr_ref=None):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
if sr_ref is None:
sr_ref = safe_find_sr(session)
pbd_rec = session.call_xenapi("PBD.get_all_records_where",
'field "host"="%s" and '
'field "SR"="%s"' %
(session.host_ref, sr_ref))
# NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
# not always present - older versions of XS do not set it.
pbd_ref = pbd_rec.keys()[0]
device_config = pbd_rec[pbd_ref]['device_config']
if 'path' in device_config:
return device_config['path']
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
if sr_rec["type"] not in ["ext", "nfs"]:
raise exception.NovaException(
_("Only file-based SRs (ext/NFS) are supported by this feature."
" SR %(uuid)s is of type %(type)s") %
{"uuid": sr_uuid, "type": sr_rec["type"]})
return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
"""Destroy used or unused cached images.
A cached image that is being used by at least one VM is said to be 'used'.
In the case of an 'unused' image, the cached image will be the only
descendent of the base-copy. So when we delete the cached-image, the
refcount will drop to zero and XenServer will automatically destroy the
base-copy for us.
The default behavior of this function is to destroy only 'unused' cached
images. To destroy all cached images, use the `all_cached=True` kwarg.
"""
cached_images = _find_cached_images(session, sr_ref)
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'"))
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
for vdi_ref in cached_images.values():
vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
if all_cached:
destroy_cached_vdi(vdi_uuid, vdi_ref)
continue
# Unused-Only: Search for siblings
# Chain length greater than two implies a VM must be holding a ref to
# the base-copy (otherwise it would have coalesced), so consider this
# cached image used.
chain = list(_walk_vdi_chain(session, vdi_uuid))
if len(chain) > 2:
continue
elif len(chain) == 2:
# Siblings imply cached image is used
root_vdi_rec = chain[-1]
children = _child_vhds(session, sr_ref, root_vdi_rec['uuid'])
if len(children) > 1:
continue
destroy_cached_vdi(vdi_uuid, vdi_ref)
return destroyed
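# Example (illustrative sketch): list the cached VDIs that would be removed
# without touching the SR, then actually remove only the unused ones:
#     would_remove = destroy_cached_images(session, sr_ref, dry_run=True)
#     removed = destroy_cached_images(session, sr_ref)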
def _find_cached_images(session, sr_ref):
"""Return a dict(uuid=vdi_ref) representing all cached images."""
cached_images = {}
for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
image_id = vdi_rec['other_config']['image-id']
except KeyError:
continue
cached_images[image_id] = vdi_ref
return cached_images
def _find_cached_image(session, image_id, sr_ref):
"""Returns the vdi-ref of the cached image."""
name_label = _get_image_vdi_label(image_id)
recs = session.call_xenapi("VDI.get_all_records_where",
'field "name__label"="%s"'
% name_label)
number_found = len(recs)
if number_found > 0:
if number_found > 1:
LOG.warn(_("Multiple base images for image: %s") % image_id)
return recs.keys()[0]
def _get_resize_func_name(session):
brand = session.product_brand
version = session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if version and brand:
xcp = brand == 'XCP'
r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
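# Example (illustrative sketch of the compatibility fallback above):
#     XCP 1.1        -> 'VDI.resize_online'
#     XCP 1.6        -> 'VDI.resize'
#     XenServer 5.6  -> 'VDI.resize_online'
#     XenServer 6.2  -> 'VDI.resize'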
def _vdi_get_virtual_size(session, vdi_ref):
size = session.call_xenapi('VDI.get_virtual_size', vdi_ref)
return int(size)
def _vdi_resize(session, vdi_ref, new_size):
resize_func_name = _get_resize_func_name(session)
session.call_xenapi(resize_func_name, vdi_ref, str(new_size))
def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * units.Gi
msg = _("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size},
instance=instance)
if virtual_size < new_disk_size:
# For resize up. Simple VDI resize will do the trick
_vdi_resize(session, vdi_ref, new_disk_size)
elif virtual_size == new_disk_size:
LOG.debug(_("No need to change vdi virtual size."),
instance=instance)
else:
# NOTE(johngarbutt): we should never get here
# but if we don't raise an exception, a user might be able to use
# more storage than allowed by their chosen instance flavor
msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger "
"than flavor size of %(new_disk_size)d bytes.")
msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size}
LOG.debug(msg, instance=instance)
raise exception.ResizeError(reason=msg)
def resize_disk(session, instance, vdi_ref, flavor):
size_gb = flavor['root_gb']
if size_gb == 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
sr_ref = safe_find_sr(session)
clone_ref = _clone_vdi(session, vdi_ref)
try:
# Resize partition and filesystem down
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * units.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
'root', vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * units.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
finally:
destroy_vdi(session, clone_ref)
def _auto_configure_disk(session, vdi_ref, new_gb):
"""Partition and resize FS to match the size specified by
flavors.root_gb.
This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as auto_disk_config=True.
The criteria for allowing resize are:
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
"""
if new_gb == 0:
LOG.debug(_("Skipping auto_config_disk as destination size is 0GB"))
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
reason = _('Disk must have only one partition.')
raise exception.CannotResizeDisk(reason=reason)
num, start, old_sectors, fstype, name, flags = partitions[0]
if fstype not in ('ext3', 'ext4'):
reason = _('Disk contains a filesystem '
'we are unable to resize: %s')
raise exception.CannotResizeDisk(reason=(reason % fstype))
if num != 1:
reason = _('The only partition should be partition 1.')
raise exception.CannotResizeDisk(reason=reason)
new_sectors = new_gb * units.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)
def try_auto_configure_disk(session, vdi_ref, new_gb):
try:
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
msg = _('Attempted auto_configure_disk failed because: %s')
LOG.warn(msg % e)
def _make_partition(session, dev, partition_start, partition_end):
dev_path = utils.make_dev_path(dev)
# NOTE(bobball) If this runs in Dom0, parted will error trying
# to re-read the partition table and return a generic error
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True,
check_exit_code=not session.is_local_connection)
utils.execute('parted', '--script', dev_path, '--',
'mkpart', 'primary',
partition_start,
partition_end,
run_as_root=True,
check_exit_code=not session.is_local_connection)
partition_path = utils.make_dev_path(dev, partition=1)
if session.is_local_connection:
# Need to refresh the partitions
utils.trycmd('kpartx', '-a', dev_path,
run_as_root=True,
discard_warnings=True)
# Sometimes the partition gets created under /dev/mapper, depending
# on the setup in dom0.
mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
if os.path.exists(mapper_path):
return mapper_path
return partition_path
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type):
"""Steps to programmatically generate a disk:
1. Create VDI of desired size
2. Attach VDI to compute worker
3. Create partition
4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = units.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
try:
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create partition
partition_start = "0"
partition_end = "-0"
partition_path = _make_partition(session, dev,
partition_start, partition_end)
if fs_type == 'linux-swap':
utils.execute('mkswap', partition_path, run_as_root=True)
elif fs_type is not None:
utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
# 4. Create VBD between instance VM and VDI
if vm_ref:
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while generating disk number: %s") % userdevice
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
return vdi_ref
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
fs_type = "vfat" if is_windows else "linux-swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
def get_ephemeral_disk_sizes(total_size_gb):
if not total_size_gb:
return
max_size_gb = 2000
if total_size_gb % 1024 == 0:
max_size_gb = 1024
left_to_allocate = total_size_gb
while left_to_allocate > 0:
size_gb = min(max_size_gb, left_to_allocate)
yield size_gb
left_to_allocate -= size_gb
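# Example (illustrative sketch): totals are split into chunks of at most
# 2000GB, or 1024GB when the total is an exact multiple of 1024:
#     list(get_ephemeral_disk_sizes(3000))   # -> [2000, 1000]
#     list(get_ephemeral_disk_sizes(2048))   # -> [1024, 1024]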
def generate_single_ephemeral(session, instance, vm_ref, userdevice,
size_gb, instance_name_label=None):
if instance_name_label is None:
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
    # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
return _generate_disk(session, instance, vm_ref, str(userdevice),
name_label, 'ephemeral', size_gb * 1024,
CONF.default_ephemeral_format)
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
instance_name_label, total_size_gb):
# NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
sizes = get_ephemeral_disk_sizes(total_size_gb)
first_userdevice = int(first_userdevice)
vdi_refs = []
try:
for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
ref = generate_single_ephemeral(session, instance, vm_ref,
userdevice, size_gb,
instance_name_label)
vdi_refs.append(ref)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.debug(_("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s"), {
'userdevice': userdevice,
'size_gb': size_gb,
'exc': exc})
safe_destroy_vdis(session, vdi_refs)
def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
name_label, size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'user', size_gb * 1024, CONF.default_ephemeral_format)
def generate_configdrive(session, instance, vm_ref, userdevice,
network_info, admin_password=None, files=None):
sr_ref = safe_find_sr(session)
vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
try:
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md,
network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive')
cdb.make_drive(tmp_file)
dev_path = utils.make_dev_path(dev)
utils.execute('dd',
'if=%s' % tmp_file,
'of=%s' % dev_path,
'oflag=direct,sync',
run_as_root=True)
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
read_only=True)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while generating config drive")
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
def _create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
If the image is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
if CONF.xenserver.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
if filename == "":
return _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
else:
vdi_type = ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
def create_kernel_and_ramdisk(context, session, instance, name_label):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['kernel_id'],
ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['ramdisk_id'],
ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
return kernel_file, ramdisk_file
def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
if args:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
def _get_image_vdi_label(image_id):
return 'Glance Image %s' % image_id
def _create_cached_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_type', sr_ref)
if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag."), sr_type)
@utils.synchronized('xenapi-image-cache' + image_id)
def _create_cached_image_impl(context, session, instance, name_label,
image_id, image_type, sr_ref):
cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
if cache_vdi_ref is None:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
cache_vdi_ref = session.call_xenapi(
'VDI.get_by_uuid', vdis['root']['uuid'])
session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
_get_image_vdi_label(image_id))
session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
'root')
session.call_xenapi('VDI.add_to_other_config',
cache_vdi_ref, 'image-id', str(image_id))
if CONF.use_cow_images:
new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
elif sr_type == 'ext':
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
cache_vdi_ref)
else:
new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
sr_ref)
session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
return vdi_uuid
vdi_uuid = _create_cached_image_impl(context, session, instance,
name_label, image_id, image_type, sr_ref)
vdis = {}
vdi_type = ImageType.get_role(image_type)
vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
return vdis
def _create_image(context, session, instance, name_label, image_id,
image_type):
"""Creates VDI from the image stored in the local cache. If the image
is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
cache_images = CONF.xenserver.cache_images.lower()
# Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
cache = False
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
cache = False
elif cache_images == 'none':
cache = False
else:
LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
" True"), CONF.xenserver.cache_images)
cache = True
# Fetch (and cache) the image
if cache:
vdis = _create_cached_image(context, session, instance, name_label,
image_id, image_type)
else:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in vdis.iteritems():
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
_set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
instance)
return vdis
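# Example (illustrative sketch of the cache decision above, for non-ISO
# images):
#     cache_images = 'all'   -> always go through the SR image cache
#     cache_images = 'some'  -> only if sys_meta['image_cache_in_nova'] is true
#     cache_images = 'none'  -> always stream straight from glance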
def _fetch_image(context, session, instance, name_label, image_id, image_type):
"""Fetch image from glance based on image type.
Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
vdis = _fetch_vhd_image(context, session, instance, image_id)
else:
vdis = _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in vdis.iteritems():
vdi_uuid = vdi['uuid']
LOG.debug(_("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'"),
{'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
instance=instance)
return vdis
def _make_uuid_stack():
# NOTE(sirp): The XenAPI plugins run under Python 2.4
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
bittorrent = False
torrent_images = CONF.xenserver.torrent_images.lower()
if torrent_images == 'all':
bittorrent = True
elif torrent_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
except KeyError:
pass
elif torrent_images == 'none':
pass
else:
LOG.warning(_("Invalid value '%s' for torrent_images"),
torrent_images)
return bittorrent
def _default_download_handler():
# TODO(sirp): This should be configurable like upload_handler
return importutils.import_object(
'nova.virt.xenapi.image.glance.GlanceStore')
def _choose_download_handler(context, instance):
if _image_uses_bittorrent(context, instance):
return importutils.import_object(
'nova.virt.xenapi.image.bittorrent.BittorrentStore')
else:
return _default_download_handler()
def get_compression_level():
level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9):
LOG.warn(_("Invalid value '%d' for image_compression_level"),
level)
return None
return level
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
LOG.debug(_("Asking xapi to fetch vhd image %s"), image_id,
instance=instance)
handler = _choose_download_handler(context, instance)
try:
vdis = handler.download_image(context, session, instance, image_id)
except Exception as e:
default_handler = _default_download_handler()
# Using type() instead of isinstance() so instance of subclass doesn't
# test as equivalent
if type(handler) == type(default_handler):
raise
LOG.exception(_("Download handler '%(handler)s' raised an"
" exception, falling back to default handler"
" '%(default_handler)s'") %
{'handler': handler,
'default_handler': default_handler})
vdis = default_handler.download_image(
context, session, instance, image_id)
# Ensure we can see the import VHDs as VDIs
scan_default_sr(session)
vdi_uuid = vdis['root']['uuid']
try:
_check_vdi_size(context, session, instance, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while checking vdi size")
LOG.debug(msg, instance=instance, exc_info=True)
for vdi in vdis.values():
vdi_uuid = vdi['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
safe_destroy_vdis(session, [vdi_ref])
return vdis
def _get_vdi_chain_size(session, vdi_uuid):
"""Compute the total size of a VDI chain, starting with the specified
VDI UUID.
This will walk the VDI chain to the root, add the size of each VDI into
the total.
"""
size_bytes = 0
for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d'),
{'cur_vdi_uuid': cur_vdi_uuid,
'vdi_size_bytes': vdi_size_bytes})
size_bytes += vdi_size_bytes
return size_bytes
def _check_vdi_size(context, session, instance, vdi_uuid):
flavor = flavors.extract_flavor(instance)
allowed_size = (flavor['root_gb'] +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
if not flavor['root_gb']:
# root_gb=0 indicates that we're disabling size checks
return
size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size:
LOG.error(_("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"),
{'size': size, 'allowed_size': allowed_size},
instance=instance)
raise exception.FlavorDiskTooSmall()
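# Example (illustrative sketch): with a 20GB root disk flavor the VHD chain
# may occupy up to (20 + VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi = 30GiB
# before FlavorDiskTooSmall is raised; root_gb == 0 disables the check.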
def _fetch_disk_image(context, session, instance, name_label, image_id,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_vhd_image, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
    Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
image_type_str = ImageType.to_string(image_type)
LOG.debug(_("Fetching image %(image_id)s, type %(image_type_str)s"),
{'image_id': image_id, 'image_type_str': image_type_str},
instance=instance)
if image_type == ImageType.DISK_ISO:
sr_ref = _safe_find_iso_sr(session)
else:
sr_ref = safe_find_sr(session)
glance_image = image_utils.GlanceImage(context, image_id)
if glance_image.is_raw_tgz():
image = image_utils.RawTGZImage(glance_image)
else:
image = image_utils.RawImage(glance_image)
virtual_size = image.get_size()
vdi_size = virtual_size
LOG.debug(_("Size for image %(image_id)s: %(virtual_size)d"),
{'image_id': image_id, 'virtual_size': virtual_size},
instance=instance)
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
max_size = CONF.xenserver.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") %
{'vdi_size': vdi_size, 'max_size': max_size})
vdi_ref = create_vdi(session, sr_ref, instance, name_label,
image_type_str, vdi_size)
# From this point we have a VDI on Xen host;
# If anything goes wrong, we need to remember its uuid.
try:
filename = None
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_stream_disk(
session, image.stream_to, image_type, virtual_size, dev)
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
# We need to invoke a plugin for copying the
# content of the VDI into the proper path.
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"),
vdi_ref, instance=instance)
args = {}
args['vdi-ref'] = vdi_ref
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
if CONF.xenserver.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
# Remove the VDI as it is not needed anymore.
destroy_vdi(session, vdi_ref)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
instance=instance)
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=None, file=filename)}
else:
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_("Failed to fetch glance image"),
instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid,
file=filename)],)
raise
def determine_disk_image_type(image_meta):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image_type field to
determine the image_type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
if not image_meta or 'disk_format' not in image_meta:
return None
disk_format = image_meta['disk_format']
disk_format_map = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO,
}
try:
image_type = disk_format_map[disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=disk_format)
image_ref = image_meta.get('id')
params = {
'image_type_str': ImageType.to_string(image_type),
'image_ref': image_ref
}
LOG.debug(_("Detected %(image_type_str)s format for image %(image_ref)s"),
params)
return image_type
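# Example (illustrative sketch):
#     determine_disk_image_type({'disk_format': 'vhd'})   # -> ImageType.DISK_VHD
#     determine_disk_image_type({'disk_format': 'qcow2'})  # InvalidDiskFormat
#     determine_disk_image_type(None)                      # -> None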
def determine_vm_mode(instance, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
os_type = instance['os_type']
if os_type == "linux":
return vm_mode.XEN
if os_type == "windows":
return vm_mode.HVM
# disk_image_type specific default for backwards compatibility
    if disk_image_type in (ImageType.DISK_VHD, ImageType.DISK):
return vm_mode.XEN
# most images run OK as HVM
return vm_mode.HVM
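# Example (illustrative sketch): with no explicit vm_mode on the instance,
#     os_type 'linux'                  -> vm_mode.XEN
#     os_type 'windows'                -> vm_mode.HVM
#     otherwise DISK/DISK_VHD images   -> vm_mode.XEN, anything else HVM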
def set_vm_name_label(session, vm_ref, name_label):
session.call_xenapi("VM.set_name_label", vm_ref, name_label)
def list_vms(session):
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="false" and '
'field "is_a_template"="false" and '
'field "resident_on"="%s"' % session.host_ref)
for vm_ref in vms.keys():
yield vm_ref, vms[vm_ref]
def lookup_vm_vdis(session, vm_ref):
"""Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
vdi_refs = []
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
# Test valid VDI
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
LOG.debug(_('VDI %s is still available'), vdi_uuid)
vbd_other_config = session.call_xenapi("VBD.get_other_config",
vbd_ref)
if not vbd_other_config.get('osvol'):
# This is not an attached volume
vdi_refs.append(vdi_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
return vdi_refs
def lookup(session, name_label, check_rescue=False):
"""Look the instance up and return it if available.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
if check_rescue:
result = lookup(session, name_label + '-rescue', False)
if result:
return result
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
return None
elif n > 1:
raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
# As mounting the image VDI is expensive, we only want do it once,
# if at all, so determine whether it's required first, and then do
# everything
mount_required = False
key, net, metadata = _prepare_injectables(instance, network_info)
mount_required = key or net or metadata
if not mount_required:
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_mounted_processing(dev, key, net, metadata)
def lookup_kernel_ramdisk(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
else:
return (None, None)
def is_snapshot(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
else:
return False
def get_power_state(session, vm_ref):
xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
return XENAPI_POWER_STATE[xapi_state]
def compile_info(session, vm_ref):
"""Fill record with VM status information."""
power_state = get_power_state(session, vm_ref)
max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref)
mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref)
num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref)
return {'state': power_state,
'max_mem': long(max_mem) >> 10,
'mem': long(mem) >> 10,
'num_cpu': num_cpu,
'cpu_time': 0}
def compile_diagnostics(record):
"""Compile VM diagnostics data."""
try:
keys = []
diags = {}
vm_uuid = record["uuid"]
xml = _get_rrd(_get_rrd_server(), vm_uuid)
if xml:
rrd = xmlutils.safe_minidom_parse_string(xml)
for i, node in enumerate(rrd.firstChild.childNodes):
# Provide the last update of the information
if node.localName == 'lastupdate':
diags['last_update'] = node.firstChild.data
# Create a list of the diagnostic keys (in their order)
if node.localName == 'ds':
ref = node.childNodes
# Name and Value
if len(ref) > 6:
keys.append(ref[0].firstChild.data)
# Read the last row of the first RRA to get the latest info
if node.localName == 'rra':
rows = node.childNodes[4].childNodes
last_row = rows[rows.length - 1].childNodes
for j, value in enumerate(last_row):
diags[keys[j]] = value.firstChild.data
break
return diags
except expat.ExpatError as e:
LOG.exception(_('Unable to parse rrd of %s'), e)
return {"Unable to retrieve diagnostics": e}
def fetch_bandwidth(session):
bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
return bw
def _scan_sr(session, sr_ref=None, max_attempts=4):
if sr_ref:
# NOTE(johngarbutt) xenapi will collapse any duplicate requests
# for SR.scan if there is already a scan in progress.
# However, we don't want that, because the scan may have started
# before we modified the underlying VHDs on disk through a plugin.
# Using our own mutex will reduce cases where our periodic SR scan
# in host.update_status starts racing the sr.scan after a plugin call.
@utils.synchronized('sr-scan-' + sr_ref)
def do_scan(sr_ref):
LOG.debug(_("Scanning SR %s"), sr_ref)
attempt = 1
while True:
try:
return session.call_xenapi('SR.scan', sr_ref)
except session.XenAPI.Failure as exc:
with excutils.save_and_reraise_exception() as ctxt:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts:
ctxt.reraise = False
LOG.warn(_("Retry SR scan due to error: %s")
% exc)
greenthread.sleep(2 ** attempt)
attempt += 1
do_scan(sr_ref)
def scan_default_sr(session):
"""Looks for the system default SR and triggers a re-scan."""
sr_ref = safe_find_sr(session)
_scan_sr(session, sr_ref)
return sr_ref
def safe_find_sr(session):
"""Same as _find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = _find_sr(session)
if sr_ref is None:
raise exception.StorageRepositoryNotFound()
return sr_ref
def _find_sr(session):
"""Return the storage repository to hold VM images."""
host = session.host_ref
try:
tokens = CONF.xenserver.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
"formatting convention"),
CONF.xenserver.sr_matching_filter)
return None
if filter_criteria == 'other-config':
key, value = filter_pattern.split('=', 1)
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
if not (key in sr_rec['other_config'] and
sr_rec['other_config'][key] == value):
continue
for pbd_ref in sr_rec['PBDs']:
pbd_rec = session.get_rec('PBD', pbd_ref)
if pbd_rec and pbd_rec['host'] == host:
return sr_ref
elif filter_criteria == 'default-sr' and filter_pattern == 'true':
pool_ref = session.call_xenapi('pool.get_all')[0]
sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref)
if sr_ref:
return sr_ref
# No SR found!
LOG.error(_("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."))
return None
def _safe_find_iso_sr(session):
"""Same as _find_iso_sr except raises a NotFound exception if SR
cannot be determined
"""
sr_ref = _find_iso_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
def _find_iso_sr(session):
"""Return the storage repository to hold ISO images."""
host = session.host_ref
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %s"), sr_rec)
if not sr_rec['content_type'] == 'iso':
LOG.debug(_("ISO: not iso content"))
continue
if 'i18n-key' not in sr_rec['other_config']:
LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug(_("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'"))
continue
LOG.debug(_("ISO: SR MATCHing our criteria"))
for pbd_ref in sr_rec['PBDs']:
LOG.debug(_("ISO: ISO, looking to see if it is host local"))
pbd_rec = session.get_rec('PBD', pbd_ref)
if not pbd_rec:
LOG.debug(_("ISO: PBD %s disappeared"), pbd_ref)
continue
pbd_rec_host = pbd_rec['host']
LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s"),
{'pbd_rec': pbd_rec, 'host': host})
if pbd_rec_host == host:
LOG.debug(_("ISO: SR with local PBD"))
return sr_ref
return None
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return [xs_url.scheme, xs_url.netloc]
def _get_rrd(server, vm_uuid):
"""Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
CONF.xenserver.connection_username,
CONF.xenserver.connection_password,
server[1],
vm_uuid))
return xml.read()
except IOError:
LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server})
return None
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
vdi_rec = session.get_rec('VDI', vdi_ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_rec call
if vdi_rec:
yield vdi_ref, vdi_rec
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
"""Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
yield vdi_ref
except session.XenAPI.Failure:
continue
def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
if vdi_rec is None:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
if 'vhd-parent' not in vdi_rec['sm_config']:
return None
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug(_('VHD %(vdi_uuid)s has parent %(parent_uuid)s'),
{'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
return parent_uuid
def _walk_vdi_chain(session, vdi_uuid):
"""Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
yield vdi_rec
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
if not parent_uuid:
break
vdi_uuid = parent_uuid
def _child_vhds(session, sr_ref, vdi_uuid):
"""Return the immediate children of a given VHD.
This is not recursive, only the immediate children are returned.
"""
children = set()
for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
rec_uuid = rec['uuid']
if rec_uuid == vdi_uuid:
continue
parent_uuid = _get_vhd_parent_uuid(session, ref, rec)
if parent_uuid != vdi_uuid:
continue
children.add(rec_uuid)
return children
def _another_child_vhd(session, vdi_ref, sr_ref, original_parent_uuid):
# Search for any other vdi which parents to original parent and is not
# in the active vm/instance vdi chain.
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
vdi_uuid = vdi_rec['uuid']
parent_vdi_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
if ((rec['uuid'] != vdi_uuid) and
(rec['uuid'] != parent_vdi_uuid) and
(rec['sm_config'].get('vhd-parent') == original_parent_uuid)):
            # Found another vhd that also parents to the original parent.
return True
# Found no other vdi with the same parent.
return False
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
original_parent_uuid):
"""Spin until the parent VHD is coalesced into its parent VHD
Before coalesce:
* original_parent_vhd
* parent_vhd
snapshot
After coalesce:
* parent_vhd
snapshot
"""
# NOTE(sirp): If we don't have an original_parent_uuid, then the snapshot
# doesn't have a grandparent to coalesce into, so we can skip waiting
if not original_parent_uuid:
return
# Check if original parent has any other child. If so, coalesce will
# not take place.
if _another_child_vhd(session, vdi_ref, sr_ref, original_parent_uuid):
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
return parent_uuid, base_uuid
max_attempts = CONF.xenserver.vhd_coalesce_max_attempts
for i in xrange(max_attempts):
# NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
# matches the underlying VHDs.
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
if parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
" %(original_parent_uuid)s, waiting for coalesce..."),
{'parent_uuid': parent_uuid,
'original_parent_uuid': original_parent_uuid},
instance=instance)
else:
parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
return parent_uuid, base_uuid
greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%d)"
", giving up...") % max_attempts)
raise exception.NovaException(msg)
def _remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
fixed in future versions:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
should_remap = CONF.xenserver.remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
new_prefix = CONF.xenserver.remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
def _wait_for_device(dev):
"""Wait for device node to appear."""
for i in xrange(0, CONF.xenserver.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
time.sleep(1)
raise volume_utils.StorageError(
_('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
"""Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
except session.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an
# unclean restart
LOG.info(_('Disconnecting stale VDI %s from compute domU'),
vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = _get_this_vm_ref(session)
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
try:
LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
session.VBD.plug(vbd_ref, this_vm_ref)
try:
LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s'),
{'vbd_ref': vbd_ref, 'orig_dev': orig_dev})
dev = _remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s'),
{'vbd_ref': vbd_ref, 'dev': dev})
_wait_for_device(dev)
yield dev
finally:
utils.execute('sync', run_as_root=True)
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
try:
destroy_vbd(session, vbd_ref)
except volume_utils.StorageError:
# destroy_vbd() will log error
pass
LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
def _get_sys_hypervisor_uuid():
with file('/sys/hypervisor/uuid') as f:
return f.readline().strip()
def get_this_vm_uuid(session):
if session and session.is_local_connection:
# UUID is the control domain running on this host
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="true" and '
'field "resident_on"="%s"' %
session.host_ref)
return vms[vms.keys()[0]]['uuid']
try:
return _get_sys_hypervisor_uuid()
except IOError:
# Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
# cannot read from uuid after a reboot. Fall back to trying xenstore.
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True)
vm_key, _ = utils.execute('xenstore-read',
'/local/domain/%s/vm' % domid.strip(),
run_as_root=True)
return vm_key.strip()[4:]
def _get_this_vm_ref(session):
return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))
def _get_partitions(dev):
"""Return partition information (num, size, type) for a device."""
dev_path = utils.make_dev_path(dev)
out, _err = utils.execute('parted', '--script', '--machine',
dev_path, 'unit s', 'print',
run_as_root=True)
lines = [line for line in out.split('\n') if line]
partitions = []
LOG.debug(_("Partitions:"))
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
num = int(num)
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(_(" %(num)s: %(fstype)s %(size)d sectors"),
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
return partitions
def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
offset = 0
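    # Raw DISK images get a partition table written below, so leave room for
    # the MBR before streaming the image data onto the device.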
if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(session, virtual_size, dev)
dev_path = utils.make_dev_path(dev)
with utils.temporary_chown(dev_path):
with open(dev_path, 'wb') as f:
f.seek(offset)
image_service_func(f)
def _write_partition(session, virtual_size, dev):
dev_path = utils.make_dev_path(dev)
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...'),
{'primary_first': primary_first, 'primary_last': primary_last,
'dev_path': dev_path})
def execute(*cmd, **kwargs):
return utils.execute(*cmd, **kwargs)
_make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
LOG.debug(_('Writing partition table %s done.'), dev_path)
def _repair_filesystem(partition_path):
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
"""Resize partition and fileystem.
This assumes we are dealing with a single primary partition and using
ext3 or ext4.
"""
size = new_sectors - start
end = new_sectors - 1
dev_path = utils.make_dev_path(dev)
partition_path = utils.make_dev_path(dev, partition=1)
# Replay journal if FS wasn't cleanly unmounted
_repair_filesystem(partition_path)
# Remove ext3 journal (making it ext2)
utils.execute('tune2fs', '-O ^has_journal', partition_path,
run_as_root=True)
if new_sectors < old_sectors:
# Resizing down, resize filesystem before partition resize
try:
utils.execute('resize2fs', partition_path, '%ds' % size,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(str(exc))
reason = _("Shrinking the filesystem down with resize2fs "
"has failed, please check if you have "
"enough free space on your disk.")
raise exception.ResizeError(reason=reason)
utils.execute('parted', '--script', dev_path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', dev_path, 'mkpart',
'primary',
'%ds' % start,
'%ds' % end,
run_as_root=True)
if "boot" in flags.lower():
utils.execute('parted', '--script', dev_path,
'set', '1', 'boot', 'on',
run_as_root=True)
if new_sectors > old_sectors:
# Resizing up, resize filesystem after partition resize
utils.execute('resize2fs', partition_path, run_as_root=True)
# Add back journal
utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
def _log_progress_if_required(left, last_log_time, virtual_size):
if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
last_log_time = timeutils.utcnow()
complete_pct = float(virtual_size - left) / virtual_size * 100
LOG.debug(_("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy"),
{"complete_pct": complete_pct, "left": left})
return last_log_time
def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
"""Copy data, skipping long runs of zeros to create a sparse file."""
start_time = last_log_time = timeutils.utcnow()
EMPTY_BLOCK = '\0' * block_size
bytes_read = 0
skipped_bytes = 0
left = virtual_size
LOG.debug(_("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"),
{'src_path': src_path, 'dst_path': dst_path,
'virtual_size': virtual_size, 'block_size': block_size})
# NOTE(sirp): we need read/write access to the devices; since we don't have
# the luxury of shelling out to a sudo'd command, we temporarily take
# ownership of the devices.
with utils.temporary_chown(src_path):
with utils.temporary_chown(dst_path):
with open(src_path, "r") as src:
with open(dst_path, "w") as dst:
data = src.read(min(block_size, left))
while data:
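                        # Seeking over zero blocks instead of writing them is
                        # what leaves holes in the destination (a sparse file).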
if data == EMPTY_BLOCK:
dst.seek(block_size, os.SEEK_CUR)
left -= block_size
bytes_read += block_size
skipped_bytes += block_size
else:
dst.write(data)
data_len = len(data)
left -= data_len
bytes_read += data_len
if left <= 0:
break
data = src.read(min(block_size, left))
greenthread.sleep(0)
last_log_time = _log_progress_if_required(
left, last_log_time, virtual_size)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
compression_pct = float(skipped_bytes) / bytes_read * 100
LOG.debug(_("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size"),
{'duration': duration, 'compression_pct': compression_pct})
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
# Part of disk taken up by MBR
virtual_size -= MBR_SIZE_BYTES
with vdi_attached_here(session, src_ref, read_only=True) as src:
src_path = utils.make_dev_path(src, partition=partition)
with vdi_attached_here(session, dst_ref, read_only=False) as dst:
dst_path = utils.make_dev_path(dst, partition=partition)
_write_partition(session, virtual_size, dst)
if CONF.xenserver.sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
utils.execute('dd',
'if=%s' % src_path,
'of=%s' % dst_path,
'count=%d' % num_blocks,
'iflag=direct,sync',
'oflag=direct,sync',
run_as_root=True)
def _mount_filesystem(dev_path, dir):
"""mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
dev_path, dir, run_as_root=True)
except processutils.ProcessExecutionError as e:
err = str(e)
return err
def _mounted_processing(device, key, net, metadata):
"""Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
# Mount only Linux filesystems, to avoid disturbing NTFS images
err = _mount_filesystem(dev_path, tmpdir)
if not err:
try:
# This try block ensures that the umount occurs
if not agent.find_guest_agent(tmpdir):
vfs = vfsimpl.VFSLocalFS(imgfile=None,
imgfmt=None,
imgdir=tmpdir)
LOG.info(_('Manipulating interface files directly'))
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
utils.execute('umount', dev_path, run_as_root=True)
else:
LOG.info(_('Failed to mount filesystem (expected for '
'non-linux instances): %s') % err)
def _prepare_injectables(inst, network_info):
"""prepares the ssh key and the network configuration file to be
injected into the disk image
"""
#do the import here - Jinja2 will be loaded only if injection is performed
import jinja2
tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
metadata = inst['metadata']
key = str(inst['key_data'])
net = None
if network_info:
ifc_num = -1
interfaces_info = []
for vif in network_info:
ifc_num += 1
try:
if not vif['network'].get_meta('injected'):
                    # this network is not marked for injection; skip it
continue
except KeyError:
# vif network is None
continue
# NOTE(tr3buchet): using all subnets in case dns is stored in a
# subnet that isn't chosen as first v4 or v6
# subnet in the case where there is more than one
# dns = list of address of each dns entry from each vif subnet
dns = [ip['address'] for subnet in vif['network']['subnets']
for ip in subnet['dns']]
dns = ' '.join(dns).strip()
interface_info = {'name': 'eth%d' % ifc_num,
'address': '',
'netmask': '',
'gateway': '',
'broadcast': '',
'dns': dns or '',
'address_v6': '',
'netmask_v6': '',
'gateway_v6': '',
'use_ipv6': CONF.use_ipv6}
# NOTE(tr3buchet): the original code used the old network_info
# which only supported a single ipv4 subnet
# (and optionally, a single ipv6 subnet).
# I modified it to use the new network info model,
# which adds support for multiple v4 or v6
# subnets. I chose to ignore any additional
# subnets, just as the original code ignored
# additional IP information
# populate v4 info if v4 subnet and ip exist
try:
# grab the first v4 subnet (or it raises)
subnet = [s for s in vif['network']['subnets']
if s['version'] == 4][0]
# get the subnet's first ip (or it raises)
ip = subnet['ips'][0]
# populate interface_info
subnet_netaddr = subnet.as_netaddr()
interface_info['address'] = ip['address']
interface_info['netmask'] = subnet_netaddr.netmask
interface_info['gateway'] = subnet['gateway']['address']
interface_info['broadcast'] = subnet_netaddr.broadcast
except IndexError:
# there isn't a v4 subnet or there are no ips
pass
# populate v6 info if v6 subnet and ip exist
try:
# grab the first v6 subnet (or it raises)
subnet = [s for s in vif['network']['subnets']
if s['version'] == 6][0]
# get the subnet's first ip (or it raises)
ip = subnet['ips'][0]
# populate interface_info
interface_info['address_v6'] = ip['address']
interface_info['netmask_v6'] = subnet.as_netaddr().netmask
interface_info['gateway_v6'] = subnet['gateway']['address']
except IndexError:
# there isn't a v6 subnet or there are no ips
pass
interfaces_info.append(interface_info)
if interfaces_info:
net = template.render({'interfaces': interfaces_info,
'use_ipv6': CONF.use_ipv6})
return key, net, metadata
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
required configuration for anything that uses vdi_attached_here.
"""
this_vm_uuid = get_this_vm_uuid(session)
try:
session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
except session.XenAPI.Failure as exc:
if exc.details[0] != 'UUID_INVALID':
raise
raise Exception(_('This domU must be running on the host '
'specified by connection_url'))
def import_all_migrated_disks(session, instance):
root_vdi = _import_migrated_root_disk(session, instance)
eph_vdis = _import_migrate_ephemeral_disks(session, instance)
return {'root': root_vdi, 'ephemerals': eph_vdis}
def _import_migrated_root_disk(session, instance):
chain_label = instance['uuid']
vdi_label = instance['name']
return _import_migrated_vhds(session, instance, chain_label, "root",
vdi_label)
def _import_migrate_ephemeral_disks(session, instance):
ephemeral_vdis = {}
instance_uuid = instance['uuid']
ephemeral_gb = instance["ephemeral_gb"]
disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb)
for chain_number, _size in enumerate(disk_sizes, start=1):
chain_label = instance_uuid + "_ephemeral_%d" % chain_number
vdi_label = "%(name)s ephemeral (%(number)d)" % dict(
name=instance['name'], number=chain_number)
ephemeral_vdi = _import_migrated_vhds(session, instance,
chain_label, "ephemeral",
vdi_label)
userdevice = 3 + chain_number
ephemeral_vdis[str(userdevice)] = ephemeral_vdi
return ephemeral_vdis
def _import_migrated_vhds(session, instance, chain_label, disk_type,
vdi_label):
"""Move and possibly link VHDs via the XAPI plugin."""
# TODO(johngarbutt) tidy up plugin params
imported_vhds = session.call_plugin_serialized(
'migration', 'move_vhds_into_sr', instance_uuid=chain_label,
sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())
# Now we rescan the SR so we find the VHDs
scan_default_sr(session)
vdi_uuid = imported_vhds['root']['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
# Set name-label so we can find if we need to clean up a failed migration
_set_vdi_info(session, vdi_ref, disk_type, vdi_label,
disk_type, instance)
return {'uuid': vdi_uuid, 'ref': vdi_ref}
def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
ephemeral_number=0):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
chain_label = instance['uuid']
if ephemeral_number:
chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
try:
# TODO(johngarbutt) tidy up plugin params
session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
LOG.debug(msg, instance=instance, exc_info=True)
raise exception.MigrationError(reason=msg)
def vm_ref_or_raise(session, instance_name):
vm_ref = lookup(session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
return vm_ref
def handle_ipxe_iso(session, instance, cd_vdi, network_info):
"""iPXE ISOs are a mechanism to allow the customer to roll their own
image.
To use this feature, a service provider needs to configure the
appropriate Nova flags, roll an iPXE ISO, then distribute that image
to customers via Glance.
NOTE: `mkisofs` is not present by default in the Dom0, so the service
provider can either add that package manually to Dom0 or include the
`mkisofs` binary in the image itself.
"""
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url:
LOG.warn(_('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance)
return
network_name = CONF.xenserver.ipxe_network_name
if not network_name:
LOG.warn(_('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance)
return
network = None
for vif in network_info:
if vif['network']['label'] == network_name:
network = vif['network']
break
if not network:
LOG.warn(_("Unable to find network matching '%(network_name)s', user"
" will have to enter IP manually...") %
{'network_name': network_name}, instance=instance)
return
sr_path = get_sr_path(session)
# Unpack IPv4 network info
subnet = [sn for sn in network['subnets']
if sn['version'] == 4][0]
ip = subnet['ips'][0]
ip_address = ip['address']
netmask = network_model.get_netmask(ip, subnet)
gateway = subnet['gateway']['address']
dns = subnet['dns'][0]['address']
try:
session.call_plugin_serialized("ipxe", "inject", sr_path,
cd_vdi['uuid'], boot_menu_url, ip_address, netmask,
gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd)
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'CommandNotFound':
LOG.warn(_("ISO creation tool '%s' does not exist.") %
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else:
raise
def set_other_config_pci(session, vm_ref, params):
"""Set the pci key of other-config parameter to params."""
other_config = session.call_xenapi("VM.get_other_config", vm_ref)
other_config['pci'] = params
session.call_xenapi("VM.set_other_config", vm_ref, other_config)
| apache-2.0 | 2,891,476,081,493,106,700 | 36.829259 | 79 | 0.583615 | false |
taotie12010/bigfour | lms/djangoapps/discussion_api/tests/utils.py | 21 | 12908 | """
Discussion API test utilities
"""
import json
import re
import httpretty
def _get_thread_callback(thread_data):
"""
    Get a callback function that will return the provided thread data
    overridden by the values in the POST/PUT request.
"""
def callback(request, _uri, headers):
"""
        Simulate the thread creation or update endpoint by returning the given
        thread data overridden by the request data, with dummy values for any
        additional required fields.
"""
response_data = make_minimal_cs_thread(thread_data)
for key, val_list in request.parsed_body.items():
val = val_list[0]
if key in ["anonymous", "anonymous_to_peers", "closed", "pinned"]:
response_data[key] = val == "True"
else:
response_data[key] = val
return (200, headers, json.dumps(response_data))
return callback
def _get_comment_callback(comment_data, thread_id, parent_id):
"""
Get a callback function that will return a comment containing the given data
plus necessary dummy data, overridden by the content of the POST/PUT
request.
"""
def callback(request, _uri, headers):
"""
Simulate the comment creation or update endpoint as described above.
"""
response_data = make_minimal_cs_comment(comment_data)
# thread_id and parent_id are not included in request payload but
# are returned by the comments service
response_data["thread_id"] = thread_id
response_data["parent_id"] = parent_id
for key, val_list in request.parsed_body.items():
val = val_list[0]
if key in ["anonymous", "anonymous_to_peers", "endorsed"]:
response_data[key] = val == "True"
else:
response_data[key] = val
return (200, headers, json.dumps(response_data))
return callback
class CommentsServiceMockMixin(object):
"""Mixin with utility methods for mocking the comments service"""
def register_get_threads_response(self, threads, page, num_pages):
"""Register a mock response for GET on the CS thread list endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads",
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
}),
status=200
)
def register_get_threads_search_response(self, threads, rewrite):
"""Register a mock response for GET on the CS thread search endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/search/threads",
body=json.dumps({
"collection": threads,
"page": 1,
"num_pages": 1,
"corrected_text": rewrite,
}),
status=200
)
def register_post_thread_response(self, thread_data):
"""Register a mock response for POST on the CS commentable endpoint"""
httpretty.register_uri(
httpretty.POST,
re.compile(r"http://localhost:4567/api/v1/(\w+)/threads"),
body=_get_thread_callback(thread_data)
)
def register_put_thread_response(self, thread_data):
"""
Register a mock response for PUT on the CS endpoint for the given
thread_id.
"""
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/threads/{}".format(thread_data["id"]),
body=_get_thread_callback(thread_data)
)
def register_get_thread_error_response(self, thread_id, status_code):
"""Register a mock error response for GET on the CS thread endpoint."""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread_id),
body="",
status=status_code
)
def register_get_thread_response(self, thread):
"""
Register a mock response for GET on the CS thread instance endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread["id"]),
body=json.dumps(thread),
status=200
)
def register_post_comment_response(self, comment_data, thread_id, parent_id=None):
"""
Register a mock response for POST on the CS comments endpoint for the
given thread or parent; exactly one of thread_id and parent_id must be
specified.
"""
if parent_id:
url = "http://localhost:4567/api/v1/comments/{}".format(parent_id)
else:
url = "http://localhost:4567/api/v1/threads/{}/comments".format(thread_id)
httpretty.register_uri(
httpretty.POST,
url,
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_put_comment_response(self, comment_data):
"""
Register a mock response for PUT on the CS endpoint for the given
comment data (which must include the key "id").
"""
thread_id = comment_data["thread_id"]
parent_id = comment_data.get("parent_id")
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/comments/{}".format(comment_data["id"]),
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_get_comment_error_response(self, comment_id, status_code):
"""
Register a mock error response for GET on the CS comment instance
endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment_id),
body="",
status=status_code
)
def register_get_comment_response(self, response_overrides):
"""
Register a mock response for GET on the CS comment instance endpoint.
"""
comment = make_minimal_cs_comment(response_overrides)
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment["id"]),
body=json.dumps(comment),
status=200
)
def register_get_user_response(self, user, subscribed_thread_ids=None, upvoted_ids=None):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{id}".format(id=user.id),
body=json.dumps({
"id": str(user.id),
"subscribed_thread_ids": subscribed_thread_ids or [],
"upvoted_ids": upvoted_ids or [],
}),
status=200
)
def register_subscribed_threads_response(self, user, threads, page, num_pages):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{}/subscribed_threads".format(user.id),
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
}),
status=200
)
def register_subscription_response(self, user):
"""
Register a mock response for POST and DELETE on the CS user subscription
endpoint
"""
for method in [httpretty.POST, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/users/{id}/subscriptions".format(id=user.id),
body=json.dumps({}), # body is unused
status=200
)
def register_thread_votes_response(self, thread_id):
"""
Register a mock response for PUT and DELETE on the CS thread votes
endpoint
"""
for method in [httpretty.PUT, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/threads/{}/votes".format(thread_id),
body=json.dumps({}), # body is unused
status=200
)
def register_comment_votes_response(self, comment_id):
"""
Register a mock response for PUT and DELETE on the CS comment votes
endpoint
"""
for method in [httpretty.PUT, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/comments/{}/votes".format(comment_id),
body=json.dumps({}), # body is unused
status=200
)
def register_flag_response(self, content_type, content_id):
"""Register a mock response for PUT on the CS flag endpoints"""
for path in ["abuse_flag", "abuse_unflag"]:
httpretty.register_uri(
"PUT",
"http://localhost:4567/api/v1/{content_type}s/{content_id}/{path}".format(
content_type=content_type,
content_id=content_id,
path=path
),
body=json.dumps({}), # body is unused
status=200
)
def register_thread_flag_response(self, thread_id):
"""Register a mock response for PUT on the CS thread flag endpoints"""
self.register_flag_response("thread", thread_id)
def register_comment_flag_response(self, comment_id):
"""Register a mock response for PUT on the CS comment flag endpoints"""
self.register_flag_response("comment", comment_id)
def register_delete_thread_response(self, thread_id):
"""
Register a mock response for DELETE on the CS thread instance endpoint
"""
httpretty.register_uri(
httpretty.DELETE,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread_id),
body=json.dumps({}), # body is unused
status=200
)
def register_delete_comment_response(self, comment_id):
"""
Register a mock response for DELETE on the CS comment instance endpoint
"""
httpretty.register_uri(
httpretty.DELETE,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment_id),
body=json.dumps({}), # body is unused
status=200
)
def assert_query_params_equal(self, httpretty_request, expected_params):
"""
Assert that the given mock request had the expected query parameters
"""
actual_params = dict(httpretty_request.querystring)
actual_params.pop("request_id") # request_id is random
self.assertEqual(actual_params, expected_params)
def assert_last_query_params(self, expected_params):
"""
Assert that the last mock request had the expected query parameters
"""
self.assert_query_params_equal(httpretty.last_request(), expected_params)
def make_minimal_cs_thread(overrides=None):
"""
Create a dictionary containing all needed thread fields as returned by the
comments service with dummy data and optional overrides
"""
ret = {
"type": "thread",
"id": "dummy",
"course_id": "dummy/dummy/dummy",
"commentable_id": "dummy",
"group_id": None,
"user_id": "0",
"username": "dummy",
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "1970-01-01T00:00:00Z",
"updated_at": "1970-01-01T00:00:00Z",
"thread_type": "discussion",
"title": "dummy",
"body": "dummy",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 0},
"comments_count": 0,
"unread_comments_count": 0,
"children": [],
"resp_total": 0,
}
ret.update(overrides or {})
return ret
def make_minimal_cs_comment(overrides=None):
"""
Create a dictionary containing all needed comment fields as returned by the
comments service with dummy data and optional overrides
"""
ret = {
"type": "comment",
"id": "dummy",
"thread_id": "dummy",
"parent_id": None,
"user_id": "0",
"username": "dummy",
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "1970-01-01T00:00:00Z",
"updated_at": "1970-01-01T00:00:00Z",
"body": "dummy",
"abuse_flaggers": [],
"votes": {"up_count": 0},
"endorsed": False,
"children": [],
}
ret.update(overrides or {})
return ret
| agpl-3.0 | 2,143,241,298,417,913,000 | 34.756233 | 93 | 0.570189 | false |
crashtack/code-katas | src/distance.py | 1 | 1134 | def calculate_distance(point1, point2):
"""
Calculate the distance (in miles) between point1 and point2.
point1 and point2 must have the format [latitude, longitude].
The return value is a float.
Modified and converted to Python from: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
def convert_to_radians(degrees):
return degrees * math.pi / 180
radius_earth = 6.371E3 # km
phi1 = convert_to_radians(point1[0])
phi2 = convert_to_radians(point2[0])
delta_phi = convert_to_radians(point1[0] - point2[0])
delta_lam = convert_to_radians(point1[1] - point2[1])
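    # Haversine formula for the great-circle distance between the two points.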
a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return radius_earth * c / 1.60934 # convert km to miles
if __name__ == "__main__":
print(calculate_distance([59.35444, 17.93972], [19.39667, -102.03917]))
print(calculate_distance([19.39667, -102.03917], [59.35444, 17.93972]))
print(calculate_distance([1, 1], [20, 20]))
print(calculate_distance([20, 20], [1, 1]))
| mit | -6,536,165,982,946,111,000 | 38.103448 | 101 | 0.641975 | false |
stefan-caraiman/cloudbase-init | cloudbaseinit/utils/debiface.py | 3 | 4325 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as oslo_logging
import six
from cloudbaseinit.metadata.services import base as service_base
LOG = oslo_logging.getLogger(__name__)
NAME = "name"
MAC = "mac"
ADDRESS = "address"
ADDRESS6 = "address6"
NETMASK = "netmask"
NETMASK6 = "netmask6"
BROADCAST = "broadcast"
GATEWAY = "gateway"
GATEWAY6 = "gateway6"
DNSNS = "dnsnameservers"
# Fields of interest by regexps.
FIELDS = {
NAME: re.compile(r"iface\s+(?P<{}>\S+)"
r"\s+inet6?\s+static".format(NAME)),
MAC: re.compile(r"hwaddress\s+ether\s+"
r"(?P<{}>\S+)".format(MAC)),
ADDRESS: re.compile(r"address\s+"
r"(?P<{}>\S+)".format(ADDRESS)),
ADDRESS6: re.compile(r"post-up ip -6 addr add (?P<{}>[^/]+)/"
r"(\d+) dev".format(ADDRESS6)),
NETMASK: re.compile(r"netmask\s+"
r"(?P<{}>\S+)".format(NETMASK)),
NETMASK6: re.compile(r"post-up ip -6 addr add ([^/]+)/"
r"(?P<{}>\d+) dev".format(NETMASK6)),
BROADCAST: re.compile(r"broadcast\s+"
r"(?P<{}>\S+)".format(BROADCAST)),
GATEWAY: re.compile(r"gateway\s+"
r"(?P<{}>\S+)".format(GATEWAY)),
GATEWAY6: re.compile(r"post-up ip -6 route add default via "
r"(?P<{}>.+) dev".format(GATEWAY6)),
DNSNS: re.compile(r"dns-nameservers\s+(?P<{}>.+)".format(DNSNS))
}
IFACE_TEMPLATE = dict.fromkeys(FIELDS.keys())
# Map IPv6 availability by value index under `NetworkDetails`.
V6_PROXY = {
ADDRESS: ADDRESS6,
NETMASK: NETMASK6,
GATEWAY: GATEWAY6
}
DETAIL_PREPROCESS = {
MAC: lambda value: value.upper(),
DNSNS: lambda value: value.strip().split()
}
def _get_iface_blocks(data):
""""Yield interface blocks as pairs of v4 and v6 halves."""
lines, lines6 = [], []
crt_lines = lines
for line in data.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
if "iface" in line:
if "inet6" in line:
crt_lines = lines6
continue
if lines:
yield lines, lines6
lines[:] = []
lines6[:] = []
crt_lines = lines
crt_lines.append(line)
if lines:
yield lines, lines6
def _get_field(line):
for field, regex in FIELDS.items():
match = regex.match(line)
if match:
yield field, match.group(field)
def _add_nic(iface, nics):
if not iface or iface == IFACE_TEMPLATE:
return # no information gathered
LOG.debug("Found new interface: %s", iface)
# Each missing detail is marked as None.
nic = service_base.NetworkDetails(**iface)
nics.append(nic)
def parse(data):
"""Parse the received content and obtain network details."""
if not data or not isinstance(data, six.string_types):
LOG.error("Invalid Debian config to parse:\n%s", data)
return
LOG.info("Parsing Debian config...\n%s", data)
nics = [] # list of NetworkDetails objects
for lines_pair in _get_iface_blocks(data):
iface = IFACE_TEMPLATE.copy()
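        # Each pair holds the IPv4 block lines first and the IPv6 block lines
        # second; fields matched in the IPv6 half are remapped through V6_PROXY
        # (e.g. address -> address6).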
for lines, use_proxy in zip(lines_pair, (False, True)):
for line in lines:
for field, value in _get_field(line):
if use_proxy:
field = V6_PROXY.get(field)
if not field:
continue
func = DETAIL_PREPROCESS.get(field, lambda value: value)
iface[field] = func(value) if value != "None" else None
_add_nic(iface, nics)
return nics
| apache-2.0 | 5,532,225,192,987,332,000 | 32.269231 | 78 | 0.577803 | false |
Dhivyap/ansible | lib/ansible/modules/cloud/amazon/aws_codepipeline.py | 5 | 11145 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_codepipeline
short_description: Create or delete AWS CodePipelines
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html)
description:
- Create or delete a CodePipeline on AWS.
version_added: "2.9"
author:
- Stefan Horning (@stefanhorning) <[email protected]>
requirements: [ botocore, boto3 ]
options:
name:
description:
- Name of the pipeline
required: true
role_arn:
description:
- ARN of the IAM role to use when executing the pipeline
required: true
artifact_store:
description:
- Location information where artifacts are stored (on S3). Dictionary with fields type and location.
required: true
suboptions:
type:
description:
- Type of the artifacts storage (only 'S3' is currently supported).
location:
description:
- Bucket name for artifacts.
stages:
description:
- List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
required: true
suboptions:
name:
description:
- Name of the stage (step) in the codepipeline
actions:
description:
- List of action configurations for that stage.
version:
description:
- Version number of the pipeline. This number is automatically incremented when a pipeline is updated.
required: false
state:
description:
- Create or remove code pipeline
default: 'present'
choices: ['present', 'absent']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
- aws_codepipeline:
name: my_deploy_pipeline
role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
artifact_store:
type: S3
location: my_s3_codepipline_bucket
stages:
- name: Get_source
actions:
-
name: Git_pull
actionTypeId:
category: Source
owner: ThirdParty
provider: GitHub
version: '1'
outputArtifacts:
- { name: my-app-source }
configuration:
Owner: mediapeers
Repo: my_gh_repo
PollForSourceChanges: 'true'
Branch: master
# Generate token like this:
# https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
# GH Link: https://github.com/settings/tokens
OAuthToken: 'abc123def456'
runOrder: 1
- name: Build
actions:
-
name: CodeBuild
actionTypeId:
category: Build
owner: AWS
provider: CodeBuild
version: '1'
inputArtifacts:
- { name: my-app-source }
outputArtifacts:
- { name: my-app-build }
configuration:
# A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
ProjectName: codebuild-project-name
runOrder: 1
- name: ECS_deploy
actions:
-
name: ECS_deploy
actionTypeId:
category: Deploy
owner: AWS
provider: ECS
version: '1'
inputArtifacts:
              - { name: my-app-build }
configuration:
# an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
ClusterName: ecs-cluster-name
ServiceName: ecs-cluster-service-name
FileName: imagedefinitions.json
region: us-east-1
state: present
'''
RETURN = '''
pipeline:
description: Returns the dictionary describing the code pipeline configuration.
returned: success
type: complex
contains:
name:
description: Name of the CodePipeline
returned: always
type: string
sample: my_deploy_pipeline
role_arn:
description: ARN of the IAM role attached to the code pipeline
returned: always
type: string
sample: arn:aws:iam::123123123:role/codepipeline-service-role
artifact_store:
description: Information about where the build artifacts are stored
returned: always
type: complex
contains:
type:
description: The type of the artifacts store, such as S3
returned: always
type: string
sample: S3
location:
description: The location of the artifacts storage (s3 bucket name)
returned: always
type: string
sample: my_s3_codepipline_bucket
encryption_key:
description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
returned: when configured
type: string
stages:
description: List of stages configured for this pipeline
returned: always
type: list
version:
description: The version number of the pipeline. This number is auto incremented when pipeline params are changed.
returned: always
type: int
'''
import copy
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
if version:
pipeline_dict['version'] = version
try:
resp = client.create_pipeline(pipeline=pipeline_dict)
return resp
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable create pipeline {0}: {1}".format(name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
exception=traceback.format_exc())
def update_pipeline(client, pipeline_dict, module):
try:
resp = client.update_pipeline(pipeline=pipeline_dict)
return resp
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
exception=traceback.format_exc())
def delete_pipeline(client, name, module):
try:
resp = client.delete_pipeline(name=name)
return resp
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable delete pipeline {0}: {1}".format(name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
exception=traceback.format_exc())
def describe_pipeline(client, name, version, module):
pipeline = {}
try:
if version is not None:
pipeline = client.get_pipeline(name=name, version=version)
return pipeline
else:
pipeline = client.get_pipeline(name=name)
return pipeline
except is_boto3_error_code('PipelineNotFoundException'):
return pipeline
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def main():
argument_spec = dict(
name=dict(required=True, type='str'),
role_arn=dict(required=True, type='str'),
artifact_store=dict(required=True, type='dict'),
stages=dict(required=True, type='list'),
version=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present')
)
module = AnsibleAWSModule(argument_spec=argument_spec)
client_conn = module.client('codepipeline')
state = module.params.get('state')
changed = False
# Determine if the CodePipeline exists
found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
pipeline_result = {}
if state == 'present':
if 'pipeline' in found_code_pipeline:
pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
# Update dictionary with provided module params:
pipeline_dict['roleArn'] = module.params['role_arn']
pipeline_dict['artifactStore'] = module.params['artifact_store']
pipeline_dict['stages'] = module.params['stages']
if module.params['version'] is not None:
pipeline_dict['version'] = module.params['version']
pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
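            # compare_policies performs a deep comparison of the two pipeline
            # dicts and returns a truthy value when they differ, i.e. when the
            # update actually changed something.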
if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
changed = True
else:
pipeline_result = create_pipeline(
client=client_conn,
name=module.params['name'],
role_arn=module.params['role_arn'],
artifact_store=module.params['artifact_store'],
stages=module.params['stages'],
version=module.params['version'],
module=module)
changed = True
elif state == 'absent':
if found_code_pipeline:
pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
changed = True
module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
if __name__ == '__main__':
main()
| gpl-3.0 | -3,009,924,253,532,782,000 | 35.421569 | 140 | 0.613279 | false |
ainafp/nilearn | nilearn/decoding/tests/test_searchlight.py | 1 | 2253 | """
Test the searchlight module
"""
# Author: Alexandre Abraham
# License: simplified BSD
from nose.tools import assert_equal
import numpy as np
import nibabel
from .. import searchlight
def test_searchlight():
# Create a toy dataset to run searchlight on
    # Initialize with 5x5x5 scans of random values on 30 frames
rand = np.random.RandomState(0)
frames = 30
data = rand.rand(5, 5, 5, frames)
mask = np.ones((5, 5, 5), np.bool)
mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4))
# Create a condition array
cond = np.arange(frames, dtype=int) > frames / 2
# Create an activation pixel.
data[2, 2, 2, :] = 0
data[2, 2, 2][cond.astype(np.bool)] = 2
data_img = nibabel.Nifti1Image(data, np.eye(4))
# Define cross validation
from sklearn.cross_validation import check_cv
# avoid using KFold for compatibility with sklearn 0.10-0.13
cv = check_cv(4, cond)
n_jobs = 1
# Run Searchlight with different radii
# Small radius : only one pixel is selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 1)
assert_equal(sl.scores_[2, 2, 2], 1.)
# Medium radius : little ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
assert_equal(sl.scores_[1, 2, 2], 1.)
assert_equal(sl.scores_[2, 1, 2], 1.)
assert_equal(sl.scores_[2, 2, 1], 1.)
assert_equal(sl.scores_[3, 2, 2], 1.)
assert_equal(sl.scores_[2, 3, 2], 1.)
assert_equal(sl.scores_[2, 2, 3], 1.)
# Big radius : big ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=2,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 33)
assert_equal(sl.scores_[2, 2, 2], 1.)
| bsd-3-clause | -3,541,917,900,307,912,000 | 33.661538 | 79 | 0.610297 | false |
ashishb/python_based_web_frontend_test | samples/flask_sample_code_test.py | 1 | 1177 | """An example of using frontend_testing_helper with flask as a sample framework.
The code will work with any framework; just modify _GetEndpointsList
accordingly.
"""
import unittest
import flask_sample_code
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from frontend_testing_helper import FrontendTestBase
class FlaskSampleCodeTest(FrontendTestBase):
@classmethod
def setUpClass(cls):
FrontendTestBase.setUpClass()
def setUp(self):
super(FlaskSampleCodeTest, self).setUp()
pass
@classmethod
def _GetEndpointsList(cls):
"""This must be over-ridden with framework specific code for extracting list
of all url endpoints."""
endpoint_list = list()
for rule in flask_sample_code.app.url_map.iter_rules():
if rule.endpoint != 'static':
endpoint_list.append(rule.rule)
return endpoint_list
"""
Remove one or more of the tests listed below, to see failures.
"""
@FrontendTestBase.Tests('/path1/')
def test_path2(self):
pass
@FrontendTestBase.Tests('/path2/<ip>')
def test_path1(self):
pass
if __name__ == '__main__':
unittest.main()
| mit | -2,917,197,306,074,032,000 | 23.020408 | 80 | 0.703483 | false |
SVoxel/R7800 | git_home/samba.git/third_party/waf/wafadmin/Tools/gas.py | 32 | 1108 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008 (ita)
"as and gas"
import os, sys
import Task
from TaskGen import extension, taskgen, after, before
EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}'
Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o', shell=False)
@extension(EXT_ASM)
def asm_hook(self, node):
# create the compilation task: cpp or cc
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
task = self.create_task('asm', node, node.change_ext(obj_ext))
self.compiled_tasks.append(task)
self.meths.append('asm_incflags')
@after('apply_obj_vars_cc')
@after('apply_obj_vars_cxx')
@before('apply_link')
def asm_incflags(self):
self.env.append_value('_ASINCFLAGS', self.env.ASINCFLAGS)
var = ('cxx' in self.features) and 'CXX' or 'CC'
self.env.append_value('_ASINCFLAGS', self.env['_%sINCFLAGS' % var])
def detect(conf):
conf.find_program(['gas', 'as'], var='AS')
if not conf.env.AS: conf.env.AS = conf.env.CC
	#conf.env.ASFLAGS = ['-c'] <- may be necessary for .S files
| gpl-2.0 | 1,019,404,871,725,606,500 | 28.945946 | 71 | 0.659747 | false |
mozilla-metrics/fhr-toolbox | mrjob/plugin-collection.py | 2 | 5996 | """
Collect plugin stats.
"""
import healthreportutils
from datetime import date, datetime, timedelta
import os, shutil, csv
import sys, codecs
import traceback
import mrjob
from mrjob.job import MRJob
import tempfile
try:
import simplejson as json
except ImportError:
import json
# How many days must a user be gone to be considered "lost"?
LOSS_DAYS = 7 * 6 # 42 days/one release cycle
TOTAL_DAYS = 180
def intorstr(v):
try:
return int(v)
except ValueError:
return v
def compareversions(v1, v2):
v1 = map(intorstr, v1.split('.'))
v2 = map(intorstr, v2.split('.'))
return cmp(v1, v2)
main_channels = (
'nightly',
'aurora',
'beta',
'release'
)
def last_saturday(d):
"""Return the Saturday on or before the date."""
# .weekday in python starts on 0=Monday
return d - timedelta(days=(d.weekday() + 2) % 7)
def start_date(dstr):
"""
Start measuring a few days before the snapshot was taken to give clients
time to upload.
"""
snapshot = datetime.strptime(dstr, "%Y-%m-%d").date()
startdate = last_saturday(snapshot) - timedelta(days=7)
return startdate
def date_back(start, days):
"""iter backwards from start for N days"""
date = start
for n in xrange(0, days):
yield date - timedelta(days=n)
def active_day(day):
if day is None:
return False
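    # Any recorded measurement other than crash reports counts as activity.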
return any(k != "org.mozilla.crashes.crashes" for k in day)
def logexceptions(func):
def wrapper(job, k, v):
try:
for k1, v1 in func(job, k, v):
yield (k1, v1)
except:
exc = traceback.format_exc()
print >>sys.stderr, "Script exception: ", exc
yield ("exception", exc)
return wrapper
@logexceptions
@healthreportutils.FHRMapper()
def mapjob(job, key, payload):
channel = payload.channel.split("-")[0]
if channel not in main_channels:
return
days = payload.get('data', {}).get('days', {})
def get_day(d):
dstr = d.strftime("%Y-%m-%d")
return days.get(dstr, None)
sd = start_date(job.options.start_date)
week_end = sd # sd is always a Saturday
active_user = False
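    # A client counts as active if it shows any non-crash activity in the
    # LOSS_DAYS days before the start of the measurement window.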
for d in date_back(sd, LOSS_DAYS):
day = get_day(d)
if active_day(day):
active_user = True
break
if not active_user:
return
# require the v1 plugin data
plugins = payload.last.get("org.mozilla.addons.plugins", {})
os = payload.last.get("org.mozilla.appInfo.appinfo", {}).get("os", "?")
yield (("totals", channel, os), 1)
# Sometimes multiple versions of the same plugin are present. Don't double-
# count.
    pluginmap = {}  # plugin name -> plugin data; keep only the highest version
for pluginid, data in plugins.items():
if pluginid == "_v":
continue
name = data.get("name", "?")
if name in pluginmap:
if compareversions(data.get("version", "?"),
pluginmap[name].get("version", "?")) > 0:
pluginmap[name] = data
else:
pluginmap[name] = data
for data in pluginmap.values():
yield (("plugins", channel, os,
data.get("name", "?"),
data.get("version", "?"),
data.get("blocklisted", "?"),
data.get("disabled", "?"),
data.get("clicktoplay", "?")), 1)
def reduce(job, k, vlist):
if k == "exception":
print >> sys.stderr, "FOUND exception", vlist
for v in vlist:
yield (k, v)
else:
yield (k, sum(vlist))
class AggJob(MRJob):
HADOOP_INPUT_FORMAT="org.apache.hadoop.mapred.SequenceFileAsTextInputFormat"
INPUT_PROTOCOL = mrjob.protocol.RawProtocol
def run_job(self):
self.stdout = tempfile.TemporaryFile()
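        # mrjob writes the job's reducer output to self.stdout; capture it in a
        # temporary file so output() below can split it into per-table CSV files.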
if self.options.start_date is None:
raise Exception("--start-date is required")
# validate the start date here
start_date(self.options.start_date)
# Do the big work
super(AggJob, self).run_job()
# Produce the separated output files
outpath = self.options.output_path
if outpath is None:
outpath = os.path.expanduser("~/fhr-plugindata-" + self.options.start_date)
output(self.stdout, outpath)
def configure_options(self):
super(AggJob, self).configure_options()
self.add_passthrough_option('--output-path', help="Specify output path",
default=None)
self.add_passthrough_option('--start-date', help="Specify start date",
default=None)
def mapper(self, key, value):
return mapjob(self, key, value)
def reducer(self, key, vlist):
return reduce(self, key, vlist)
combiner = reducer
def getresults(fd):
fd.seek(0)
for line in fd:
k, v = line.split("\t")
yield json.loads(k), json.loads(v)
def unwrap(l, v):
"""
Unwrap a value into a list. Dicts are added in their repr form.
"""
if isinstance(v, (tuple, list)):
for e in v:
unwrap(l, e)
elif isinstance(v, dict):
l.append(repr(v))
elif isinstance(v, unicode):
l.append(v.encode("utf-8"))
else:
l.append(v)
def output(fd, path):
try:
shutil.rmtree(path)
except OSError:
pass
os.mkdir(path)
writers = {}
errs = codecs.getwriter("utf-8")(open(os.path.join(path, "exceptions.txt"), "w"))
for k, v in getresults(fd):
if k == "exception":
print >>errs, "==ERR=="
print >>errs, v
continue
l = []
unwrap(l, k)
unwrap(l, v)
fname = l.pop(0)
if fname in writers:
w = writers[fname]
else:
fd = open(os.path.join(path, fname + ".csv"), "w")
w = csv.writer(fd)
writers[fname] = w
w.writerow(l)
if __name__ == '__main__':
AggJob.run()
| apache-2.0 | 7,737,720,010,921,893,000 | 25.530973 | 87 | 0.564543 | false |
timvandermeij/drone-tomography | core/Import_Manager.py | 3 | 6502 | import importlib
import sys
import types
class Import_Manager(object):
"""
A manager for dynamically importing modules.
"""
def __init__(self):
"""
Initialize the import manager.
"""
self._package = __package__.split('.')[0]
self._unloaded_modules = {}
@property
def package(self):
"""
Retrieve the base package of the import manager.
"""
return self._package
def load(self, module, relative=True, relative_module=None):
"""
Import the given `module` and return the module object.
If `relative` is `True`, then the module is assumed to be relative to
the base package. If `relative_module` is given, then it is relative
to this submodule instead. Otherwise, if `relative` is `False`, then
the module is a global or core module.
"""
if relative_module is not None:
module = "{}.{}.{}".format(self._package, relative_module, module)
elif relative:
module = "{}.{}".format(self._package, module)
try:
return importlib.import_module(module)
except ImportError as e:
raise ImportError("Cannot import module '{}': {}".format(module, e.message))
def load_class(self, class_name, module=None, relative_module=None):
"""
Import the class with the given `class_name` from a certain module
relative to the base package.
If `module` is not given, then the module has the same name as the class
name, relative to the base package. If `relative_module` is given, then
the module is actually relative to this submodule, which in turn is
relative to the package.
If the module and class can be imported, then the class object is
returned. Otherwise, an `ImportError` is raised.
"""
if module is None:
module = class_name
import_module = self.load(module, relative_module=relative_module)
try:
return import_module.__dict__[class_name]
except KeyError:
raise ImportError("Cannot import class name '{}' from module '{}'".format(class_name, import_module.__name__))
def unload(self, module, relative=True, store=True):
"""
Unload the given `module` from Python.
This removes the module from the module cache, meaning that a future
import reimports the module.
If `relative` is `True`, then the module is assumed to be relative to
the base package. Otherwise, if `False` is given, then the module is
a global or core module.
Only use this when there are no other active modules that directly or
indirectly reference the given module. Otherwise, their references may
become corrupt. The module is stored in the import manager, unless
`store` is `False`, until the import manager itself is dereferenced.
However, this is no guarantee that the module will continue to function
while it is unloaded. The stored module can be reloaded with `reload`
using its `unloaded` argument.
Returns `True` if the module was unloaded, or `False` if it was not
loaded to begin with.
"""
if relative:
module = "{}.{}".format(self._package, module)
if module not in sys.modules:
return False
if store:
self._unloaded_modules[module] = sys.modules[module]
del sys.modules[module]
return True
def reload(self, module, relative=True):
"""
Reload a new version of the given `module` into Python.
        This method has two functions. The first and default option works
        similarly to the `reload` builtin (or `importlib.reload` in Python 3),
which replaces a previously imported module with an updated one.
The difference with the core function is that the global module
variables are discarded by this method, and the new module is completely
        fresh. The `module` given to this function can be a module name or
        a module object.
If `relative` is `True`, then the module name is assumed to be relative
to the base package. Otherwise, if `False` is given, then the module is
a global or core module. It is not recommended to reload a core module.
The `relative` argument is ignored if `module` is a module object.
Only use this `reload` method when there are no other active modules
that directly or indirectly reference the given module. Otherwise, their
references may become corrupt.
If the module was not previously loaded via `load` or a normal import,
then this method raises an `ImportError` to signal the import failure.
"""
if isinstance(module, types.ModuleType):
module = module.__name__
relative = False
if not self.unload(module, relative=relative, store=False):
raise ImportError("Module '{}' was not previously loaded".format(module))
return self.load(module, relative=relative)
def reload_unloaded(self, module, relative=True):
"""
Reregister the given `module` that was unloaded with `unload`.
        When a module is unloaded, it is left in a corrupt state where it may
        not have access to its own variables, and references to anything from
        the unloaded module may no longer work. One way to fix this is to
        register the module again, which is what this method does.
If the module was imported normally in between `unload` and `reload`,
then this version of the module is in fact dereferenced, and may also
become corrupt if it is still referenced elsewhere. The "old" version
of the module takes its place in the registry, and should become
usable again.
If the module was not previously unloaded via `unload`, then this method
raises an `ImportError` to signal the import failure. Otherwise, this
method returns the reloaded module object.
"""
if relative:
module = "{}.{}".format(self._package, module)
if module not in self._unloaded_modules:
raise ImportError("Module '{}' was not previously unloaded".format(module))
sys.modules[module] = self._unloaded_modules[module]
del self._unloaded_modules[module]
return sys.modules[module]
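# Example usage (assumption: an illustrative sketch; the module and class names below
# are hypothetical and depend on the surrounding package layout):
#   manager = Import_Manager()
#   settings = manager.load("settings")                       # <package>.settings
#   json_module = manager.load("json", relative=False)        # core/global module
#   Thread_Manager = manager.load_class("Thread_Manager", relative_module="core")
#   settings = manager.reload(settings)                       # fresh copy, old globals discarded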
| gpl-3.0 | 955,164,929,699,004,200 | 38.168675 | 122 | 0.642264 | false |
maxfoow/MalmoExperience | Malmo-0.16.0-Windows-64bit/Python_Examples/tutorial_4_solved.py | 3 | 6528 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
# Tutorial sample #4: Challenge - get to the centre of the sponge (with solution)
import MalmoPython
import os
import sys
import time
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
def Menger(xorg, yorg, zorg, size, blocktype, variant, holetype):
#draw solid chunk
genstring = GenCuboidWithVariant(xorg,yorg,zorg,xorg+size-1,yorg+size-1,zorg+size-1,blocktype,variant) + "\n"
#now remove holes
unit = size
while (unit >= 3):
w=unit/3
for i in xrange(0, size, unit):
for j in xrange(0, size, unit):
x=xorg+i
y=yorg+j
genstring += GenCuboid(x+w,y+w,zorg,(x+2*w)-1,(y+2*w)-1,zorg+size-1,holetype) + "\n"
y=yorg+i
z=zorg+j
genstring += GenCuboid(xorg,y+w,z+w,xorg+size-1, (y+2*w)-1,(z+2*w)-1,holetype) + "\n"
genstring += GenCuboid(x+w,yorg,z+w,(x+2*w)-1,yorg+size-1,(z+2*w)-1,holetype) + "\n"
unit/=3
return genstring
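# Example (assumption: illustrative values): Menger(0, 40, 0, 3, "stone", "smooth_granite", "air")
# emits one solid DrawCuboidWithVariant line followed by three DrawCuboid "air" lines
# that carve the central hole through the cube along each axis.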
def GenCuboid(x1, y1, z1, x2, y2, z2, blocktype):
return '<DrawCuboid x1="' + str(x1) + '" y1="' + str(y1) + '" z1="' + str(z1) + '" x2="' + str(x2) + '" y2="' + str(y2) + '" z2="' + str(z2) + '" type="' + blocktype + '"/>'
def GenCuboidWithVariant(x1, y1, z1, x2, y2, z2, blocktype, variant):
return '<DrawCuboid x1="' + str(x1) + '" y1="' + str(y1) + '" z1="' + str(z1) + '" x2="' + str(x2) + '" y2="' + str(y2) + '" z2="' + str(z2) + '" type="' + blocktype + '" variant="' + variant + '"/>'
missionXML='''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawSphere x="-27" y="70" z="0" radius="30" type="air"/>''' + Menger(-40, 40, -13, 27, "stone", "smooth_granite", "air") + '''
<DrawBlock x="-27" y="39" z="0" type="diamond_block"/>
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="30000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56.0" z="0.5" yaw="90"/>
<Inventory>
<InventoryItem slot="8" type="diamond_pickaxe"/>
</Inventory>
</AgentStart>
<AgentHandlers>
<ObservationFromFullStats/>
<ContinuousMovementCommands turnSpeedDegs="180"/>
<InventoryCommands/>
<AgentQuitFromReachingPosition>
<Marker x="-26.5" y="40.0" z="0.5" tolerance="0.5" description="Goal_found"/>
</AgentQuitFromReachingPosition>
</AgentHandlers>
</AgentSection>
</Mission>'''
# Create default Malmo objects:
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
my_mission = MalmoPython.MissionSpec(missionXML, True)
my_mission_record = MalmoPython.MissionRecordSpec()
# Attempt to start a mission:
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission:",e
exit(1)
else:
time.sleep(2)
# Loop until mission starts:
print "Waiting for the mission to start ",
world_state = agent_host.getWorldState()
while not world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print "Error:",error.text
print
print "Mission running ",
agent_host.sendCommand("hotbar.9 1")
agent_host.sendCommand("hotbar.9 0")
agent_host.sendCommand("pitch 0.2")
time.sleep(1)
agent_host.sendCommand("pitch 0")
agent_host.sendCommand("move 1")
agent_host.sendCommand("attack 1")
# Loop until mission ends:
while world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print "Error:",error.text
print
print "Mission ended"
# Mission has ended. | gpl-3.0 | -343,366,979,993,780,500 | 39.80625 | 203 | 0.580729 | false |
cedricp/ddt4all | crcmod/_crcfunpy.py | 1 | 2995 | #-----------------------------------------------------------------------------
# Low level CRC functions for use by crcmod. This version is implemented in
# Python for a couple of reasons. 1) Provide a reference implementation.
# 2) Provide a version that can be used on systems where a C compiler is not
# available for building extension modules.
#
# Copyright (c) 2009 Raymond L. Buvel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-----------------------------------------------------------------------------
def _crc8(data, crc, table):
crc = crc & 0xFF
for x in data:
crc = table[x ^ crc]
return crc
def _crc8r(data, crc, table):
crc = crc & 0xFF
for x in data:
crc = table[x ^ crc]
return crc
def _crc16(data, crc, table):
crc = crc & 0xFFFF
for x in data:
crc = table[x ^ ((crc>>8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
return crc
def _crc16r(data, crc, table):
crc = crc & 0xFFFF
for x in data:
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc24(data, crc, table):
crc = crc & 0xFFFFFF
for x in data:
crc = table[x ^ (crc>>16 & 0xFF)] ^ ((crc << 8) & 0xFFFF00)
return crc
def _crc24r(data, crc, table):
crc = crc & 0xFFFFFF
for x in data:
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc32(data, crc, table):
crc = crc & 0xFFFFFFFF
for x in data:
crc = table[x ^ ((crc>>24) & 0xFF)] ^ ((crc << 8) & 0xFFFFFF00)
return crc
def _crc32r(data, crc, table):
crc = crc & 0xFFFFFFFF
for x in data:
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc64(data, crc, table):
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in data:
crc = table[x ^ ((crc>>56) & 0xFF)] ^ ((crc << 8) & 0xFFFFFFFFFFFFFF00)
return crc
def _crc64r(data, crc, table):
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in data:
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
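# --- Example usage (assumption: an illustrative sketch; crcmod normally builds these
# tables elsewhere). Build a 256-entry CRC-8 table (polynomial 0x07) and feed it to
# _crc8; the result for b"123456789" should match the CRC-8/SMBus check value 0xF4. ---
def _example_crc8_table(poly=0x07):
    table = []
    for byte in range(256):
        crc = byte
        for _ in range(8):
            crc = ((crc << 1) ^ poly) & 0xFF if crc & 0x80 else (crc << 1) & 0xFF
        table.append(crc)
    return table
# _crc8(b"123456789", 0, _example_crc8_table()) -> 0xF4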
| gpl-3.0 | 1,332,846,799,121,102,800 | 33.425287 | 79 | 0.615025 | false |
albatrossandco/brubeck_cms | brubeck/core/emailing/recaptcha.py | 1 | 4463 | # CREDIT: http://smileychris.tactful.co.nz/ramblings/recaptcha/
from django.forms import *
from django.conf import settings
import httplib, urllib
class RecaptchaWidget(Widget):
def __init__(self, theme=None, tabindex=None):
'''
From http://recaptcha.net/apidocs/captcha/#look-n-feel:
theme: 'red' | 'white' | 'blackglass'
Defines which theme to use for reCAPTCHA.
tabindex: any integer
Sets a tabindex for the reCAPTCHA text box. If other elements in
the form use a tabindex, this should be set so that navigation is
easier for the user.
'''
options = {}
if theme:
options['theme'] = theme
if tabindex:
options['tabindex'] = tabindex
self.options = options
super(RecaptchaWidget, self).__init__()
def render(self, name, value, attrs=None):
args = dict(public_key=settings.RECAPTCHA_PUBLIC_KEY)
if self.options:
args['options'] = '''<script type="text/javascript">
var RecaptchaOptions = %r;
</script>
''' % self.options
return '''%(options)s<script type="text/javascript"
src="http://api.recaptcha.net/challenge?k=%(public_key)s">
</script>
<noscript>
<iframe src="http://api.recaptcha.net/noscript?k=%(public_key)s"
height="300" width="500" frameborder="0"></iframe><br>
<textarea name="recaptcha_challenge_field" rows="3" cols="40">
</textarea>
<input type="hidden" name="recaptcha_response_field"
value="manual_challenge">
</noscript>''' % args
def value_from_datadict(self, data, files, name):
challenge = data.get('recaptcha_challenge_field')
response = data.get('recaptcha_response_field')
return (challenge, response)
def id_for_label(self, id_):
return None
class RecaptchaField(Field):
widget = RecaptchaWidget
def __init__(self, remote_ip, *args, **kwargs):
self.remote_ip = remote_ip
super(RecaptchaField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(RecaptchaField, self).clean(value)
challenge, response = value
if not challenge:
            raise ValidationError(u'An error occurred with the CAPTCHA service. Please try again.')
if not response:
raise ValidationError(u'Please enter the CAPTCHA solution.')
value = validate_recaptcha(self.remote_ip, challenge, response)
if not value.get('result'):
raise ValidationError(u'An incorrect CAPTCHA solution was entered.')
return value
class RecaptchaFieldPlaceholder(Field):
'''
Placeholder field for use with RecaptchaBaseForm which gets replaced with
RecaptchaField (which is passed the remote_ip) when RecaptchaBaseForm is
initialised.
'''
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class RecaptchaBaseForm(BaseForm):
def __init__(self, remote_ip, *args, **kwargs):
for key, field in self.base_fields.items():
if isinstance(field, RecaptchaFieldPlaceholder):
self.base_fields[key] = RecaptchaField(remote_ip, *field.args, **field.kwargs)
super(RecaptchaBaseForm, self).__init__(*args, **kwargs)
class RecaptchaForm(RecaptchaBaseForm, Form):
pass
def validate_recaptcha(remote_ip, challenge, response):
# Request validation from recaptcha.net
if challenge:
params = urllib.urlencode(dict(privatekey=settings.RECAPTCHA_PRIVATE_KEY,
remoteip=remote_ip,
challenge=challenge,
response=response))
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = httplib.HTTPConnection("api-verify.recaptcha.net")
conn.request("POST", "/verify", params, headers)
response = conn.getresponse()
if response.status == 200:
data = response.read()
else:
data = ''
conn.close()
# Validate based on response data
result = data.startswith('true')
error_code = ''
if not result:
bits = data.split('\n', 2)
if len(bits) > 1:
error_code = bits[1]
# Return dictionary
return dict(result=result,
error_code=error_code)
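# --- Example usage (assumption: an illustrative sketch, not part of the original module) ---
class ExampleCommentForm(RecaptchaForm):
    # A hypothetical form combining an ordinary field with the reCAPTCHA placeholder.
    name = CharField(max_length=100)
    captcha = RecaptchaFieldPlaceholder(widget=RecaptchaWidget(theme='white'))
def example_view_usage(request):
    # The client's IP must be passed as the first argument so validate_recaptcha can verify it.
    form = ExampleCommentForm(request.META['REMOTE_ADDR'], request.POST or None)
    return form.is_valid()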
| bsd-3-clause | -1,978,053,276,252,926,500 | 34.141732 | 98 | 0.610576 | false |
leiferikb/bitpop | src/build/android/pylib/gtest/gtest_config.py | 9 | 1624 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'breakpad_unittests',
'cc_unittests',
'components_unittests',
'content_browsertests',
'content_unittests',
'events_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sandbox_linux_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
]
# Tests fail in component=shared_library build, which is required for ASan.
# http://crbug.com/344868
ASAN_EXCLUDED_TEST_SUITES = [
'breakpad_unittests',
'sandbox_linux_unittests'
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
| gpl-3.0 | -335,279,971,260,037,060 | 26.066667 | 75 | 0.687192 | false |
muennich/mupdf | scripts/cmapflatten.py | 1 | 3109 | # Parse a Uni* CMap file and flatten it.
#
# The Uni* CMap files only have 'cidchar' and 'cidrange' sections, never
# 'bfchar' or 'bfrange'.
import sys
def flattencmap(filename):
codespacerange = []
usecmap = ""
cmapname = ""
cmapversion = "1.0"
csi_registry = "(Adobe)"
csi_ordering = "(Unknown)"
csi_supplement = 1
wmode = 0
map = {}
def tocode(s):
if s[0] == '<' and s[-1] == '>':
return int(s[1:-1], 16)
return int(s, 10)
def map_cidchar(lo, v):
map[lo] = v
def map_cidrange(lo, hi, v):
while lo <= hi:
map[lo] = v
lo = lo + 1
v = v + 1
current = None
for line in open(filename, "r").readlines():
if line[0] == '%':
continue
line = line.strip().split()
if len(line) == 0:
continue
if line[0] == '/CMapVersion': cmapversion = line[1]
elif line[0] == '/CMapName': cmapname = line[1][1:]
elif line[0] == '/WMode': wmode = int(line[1])
elif line[0] == '/Registry': csi_registry = line[1]
elif line[0] == '/Ordering': csi_ordering = line[1]
elif line[0] == '/Supplement': csi_supplement = line[1]
elif len(line) > 1 and line[1] == 'usecmap': usecmap = line[0][1:]
elif len(line) > 1 and line[1] == 'begincodespacerange': current = 'codespacerange'
elif len(line) > 1 and line[1] == 'begincidrange': current = 'cidrange'
elif len(line) > 1 and line[1] == 'begincidchar': current = 'cidchar'
elif line[0].startswith("end"):
current = None
elif current == 'codespacerange' and len(line) == 2:
n, a, b = (len(line[0])-2)/2, tocode(line[0]), tocode(line[1])
codespacerange.append((n, a, b))
elif current == 'cidrange' and len(line) == 3:
a, b, c = tocode(line[0]), tocode(line[1]), tocode(line[2])
map_cidrange(a, b, c)
elif current == 'cidchar' and len(line) == 2:
a, b = tocode(line[0]), tocode(line[1])
map_cidchar(a, b)
# Print flattened CMap file
print "%!PS-Adobe-3.0 Resource-CMap"
print "%%DocumentNeededResources: procset (CIDInit)"
print "%%IncludeResource: procset (CIDInit)"
print "%%%%BeginResource: CMap (%s)" % cmapname
print "%%%%Version: %s" % cmapversion
print "%%EndComments"
print "/CIDInit /ProcSet findresource begin"
print "12 dict begin"
print "begincmap"
if usecmap: print "/%s usecmap" % usecmap
print "/CIDSystemInfo 3 dict dup begin"
print " /Registry %s def" % csi_registry
print " /Ordering %s def" % csi_ordering
print " /Supplement %s def" % csi_supplement
print "end def"
print "/CMapName /%s def" % cmapname
print "/CMapVersion %s def" % cmapversion
print "/CMapType 1 def"
print "/WMode %d def" % wmode
if len(codespacerange):
print "%d begincodespacerange" % len(codespacerange)
for r in codespacerange:
fmt = "<%%0%dx> <%%0%dx>" % (r[0]*2, r[0]*2)
print fmt % (r[1], r[2])
print "endcodespacerange"
keys = map.keys()
keys.sort()
print "%d begincidchar" % len(keys)
for code in keys:
v = map[code]
print "<%04x> %d" % (code, v)
print "endcidchar"
print "endcmap"
print "CMapName currentdict /CMap defineresource pop"
print "end"
print "end"
print "%%EndResource"
print "%%EOF"
for arg in sys.argv[1:]:
flattencmap(arg)
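# Example invocation (assumption: the CMap resource name below is illustrative):
#   python cmapflatten.py UniJIS-UCS2-H > UniJIS-UCS2-H.flattened
# The flattened output keeps the codespacerange section but replaces all cidrange
# entries with an equivalent begincidchar section.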
| agpl-3.0 | 1,733,319,341,587,167,200 | 27.787037 | 85 | 0.628498 | false |
AlexanderFabisch/scikit-learn | sklearn/preprocessing/imputation.py | 29 | 14119 | # Authors: Nicolas Tresegnie <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Tie breaking: copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
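# For example (assumption: illustrative values), _most_frequent(np.array([2, 2, 3]), 0, 5)
# returns 0 because the five implicit zeros outnumber the two 2s, whereas
# _most_frequent(np.array([2, 2, 3]), 0, 1) returns 2.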
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <imputation>`.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
# In old versions of numpy, calling a median on an array
# containing nans returns nan. This is different is
# recent versions of numpy, which we want to mimic
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element.
            # See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
force_all_finite=False, copy=self.copy)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
force_all_finite=False, copy=self.copy)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = astype(valid_statistics[indexes], X.dtype,
copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
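# --- Example usage (assumption: an illustrative sketch, not part of scikit-learn) ---
if __name__ == "__main__":
    X_demo = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
    imp = Imputer(missing_values="NaN", strategy="mean", axis=0)
    # The NaN in the first column is replaced by that column's mean, 4.0.
    print(imp.fit(X_demo).transform(X_demo))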
| bsd-3-clause | 8,555,956,253,983,739,000 | 36.650667 | 79 | 0.531907 | false |
OSEHRA-Sandbox/VistA | Scripts/SplitZWR.py | 1 | 3389 | #!/usr/bin/env python
# Split a .zwr files into pieces of maximum size:
#
# python SplitZWR.py --size <MiB> *.zwr
#
# or
#
# ls *.zwr | python SplitZWR.py --size <MiB> --stdin
#
#---------------------------------------------------------------------------
# Copyright 2011 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import argparse
import os
import sys
class SplitZWR:
def __init__(self, filepath, maxSize):
self.maxSize = maxSize
self.dir = os.path.dirname(filepath)
nameSplit = os.path.basename(filepath).split('+',1)
if len(nameSplit) > 1:
self.num, self.name = nameSplit
else:
self.num=0
self.name=nameSplit[0]
self.input = open(filepath, 'r')
self.headers = []
while len(self.headers) < 2:
self.headers.append(self.input.readline())
self.hdrSize = sum([len(l) for l in self.headers])
self.outSize = self.maxSize
self.outFile = None
self.index = 0
def new_file(self):
self.index += 1
outName = '%s-%d+%s' % (self.num, self.index, self.name)
outPath = os.path.join(self.dir, outName)
self.outFile = open(outPath, 'w')
self.outFile.writelines(self.headers)
self.outSize = self.hdrSize
sys.stdout.write(' %s\n' % outPath)
def do_line(self, line):
if self.outSize + len(line) > self.maxSize:
self.new_file()
self.outSize += len(line)
self.outFile.write(line)
def run(self):
for line in self.input:
self.do_line(line)
def splitZWR(f, maxSize):
sys.stdout.write('Splitting "%s":\n' % f)
SplitZWR(f, maxSize).run()
os.remove(f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--size', dest='size', action='store',
type = int, required=True,
metavar='<MiB>', help='max output file size in MiB')
parser.add_argument('--stdin', dest='stdin',
action='store_const', const=True, default=False,
help='read files to split from standard input lines')
parser.add_argument('files', action='append', nargs='*', metavar='<files>',
help='files to split')
config = parser.parse_args()
maxSize = int(config.size) << 20
files = config.files[0]
if config.stdin:
files.extend([a.rstrip() for a in sys.stdin])
for f in files:
if "DD.zwr" in f:
continue
if f[-4:].lower() != '.zwr':
sys.stderr.write('Skipping non-.zwr file: %s\n' % f)
continue
if os.stat(f).st_size > maxSize:
splitZWR(f, maxSize)
if __name__ == '__main__':
main()
| apache-2.0 | -6,671,641,364,911,628,000 | 33.232323 | 79 | 0.567129 | false |
RealTimeWeb/wikisite | MoinMoin/script/old/migration/12_to_13_mig02.py | 1 | 6493 | #!/usr/bin/env python
"""
migration from moin 1.3 < patch-78 to moin 1.3 >= patch-78
* switch quoting mechanism from (xx)(xx) to (xxxx)
* charset isn't changed, it was utf-8 before and will be utf-8 after
Steps for a successful migration:
1. stop your wiki and make a backup
2. make a copy of the wiki's "data" directory to your working dir
3. run this script from your working dir
4. if there was no error, you will find:
data.pre-mig2 (the script renames your data directory copy to that name)
data (result, converted)
5. verify conversion results (number of pages, size of logs, attachments,
number of backup copies) - everything should be reasonable before
you proceed.
6. copy additional files from data.pre-mig2 to data (maybe intermaps, logs,
etc.). Be aware that the file contents AND file names of wiki content
may have changed, so DO NOT copy the cache/ directory, but let
the wiki recreate it.
7. replace the data directory your wiki uses with the data directory
you created by previous steps. DO NOT simply copy the converted stuff
into the original or you will duplicate pages and create chaos!
8. test it. if something has gone wrong, you still have your backup.
9. if you use dictionaries for spellchecking, you have to convert them
to config.charset, too. Remove your dict.cache before re-starting
your wiki.
@copyright: 2004 Thomas Waldmann
@license: GPL, see COPYING for details
"""
from_encoding = 'utf-8'
to_encoding = 'utf-8'
import os.path, sys, shutil, urllib
sys.path.insert(0, '../../../..')
from MoinMoin import wikiutil
from MoinMoin.script.migration.migutil import opj, listdir, copy_file, copy_dir
# this is a copy of the wikiutil.unquoteWikiname of moin--main--1.3--patch-77
def unquoteWikinameOld(filename, charsets=[from_encoding, ]):
"""
Return decoded original filename when given an encoded filename.
@param filename: encoded filename
@rtype: string
@return: decoded, original filename
"""
if isinstance(filename, type(u'')): # from some places we get called with unicode
filename = filename.encode(from_encoding)
fn = ''
i = 0
while i < len(filename):
c = filename[i]
if c == '(':
c1 = filename[i+1]
c2 = filename[i+2]
close = filename[i+3]
if close != ')':
raise Exception('filename encoding invalid')
i += 4
fn = fn + chr(16 * int(c1, 16) + int(c2, 16))
else:
fn = fn + c
i += 1
return wikiutil.decodeUserInput(fn, charsets)
def convert_string(str, enc_from, enc_to):
return str.decode(enc_from).encode(enc_to)
def qf_convert_string(str, enc_from, enc_to):
""" Convert filename from pre patch 78 quoting to new quoting
    The old quoting function from patch 77 can convert names ONLY from
    the old way to the new, so if you have a partially converted
    directory, as is the situation as of moin--main--1.3--patch-86,
it does not work.
The new unquoting function is backward compatible, and can unquote
both post and pre patch 78 file names.
"""
str = wikiutil.unquoteWikiname(str, [enc_from])
str = wikiutil.quoteWikinameFS(str, enc_to)
return str
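# For example (assumption: illustrative bytes), a file name quoted as "(c3)(a4)" under
# the old (xx)(xx) scheme is re-quoted as "(c3a4)" by the new (xxxx) scheme.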
def convert_file(fname_from, fname_to, enc_from, enc_to):
print "%s -> %s" % (fname_from, fname_to)
file_from = open(fname_from)
file_to = open(fname_to, "w")
for line in file_from:
file_to.write(convert_string(line, enc_from, enc_to))
file_to.close()
file_from.close()
st = os.stat(fname_from)
os.utime(fname_to, (st.st_atime, st.st_mtime))
def convert_textdir(dir_from, dir_to, enc_from, enc_to, is_backupdir=0):
os.mkdir(dir_to)
for fname_from in listdir(dir_from):
if is_backupdir:
fname, timestamp = fname_from.split('.')
else:
fname = fname_from
fname = qf_convert_string(fname, enc_from, enc_to)
if is_backupdir:
fname_to = '.'.join([fname, timestamp])
else:
fname_to = fname
convert_file(opj(dir_from, fname_from), opj(dir_to, fname_to),
enc_from, enc_to)
def convert_pagedir(dir_from, dir_to, enc_from, enc_to):
os.mkdir(dir_to)
for dname_from in listdir(dir_from):
dname_to = qf_convert_string(dname_from, enc_from, enc_to)
print "%s -> %s" % (dname_from, dname_to)
shutil.copytree(opj(dir_from, dname_from), opj(dir_to, dname_to), 1)
try:
convert_editlog(opj(dir_from, dname_from, 'last-edited'),
opj(dir_to, dname_to, 'last-edited'),
enc_from, enc_to)
except IOError:
            pass # we ignore it if it doesn't exist
def convert_userdir(dir_from, dir_to, enc_from, enc_to):
os.mkdir(dir_to)
for fname in listdir(dir_from):
convert_file(opj(dir_from, fname), opj(dir_to, fname),
enc_from, enc_to)
def convert_editlog(log_from, log_to, enc_from, enc_to):
file_from = open(log_from)
file_to = open(log_to, "w")
for line in file_from:
fields = line.split('\t')
fields[0] = qf_convert_string(fields[0], enc_from, enc_to)
fields[5] = convert_string(fields[5], enc_from, enc_to)
line = '\t'.join(fields)
file_to.write(line)
origdir = 'data.pre-mig2'
# Backup original dir and create new empty dir
try:
os.rename('data', origdir)
os.mkdir('data')
except OSError:
print "You need to be in the directory where your copy of the 'data' directory is located."
sys.exit(1)
convert_textdir(opj(origdir, 'text'), opj('data', 'text'), from_encoding, to_encoding)
convert_textdir(opj(origdir, 'backup'), opj('data', 'backup'), from_encoding, to_encoding, 1)
convert_pagedir(opj(origdir, 'pages'), opj('data', 'pages'), from_encoding, to_encoding)
convert_userdir(opj(origdir, 'user'), opj('data', 'user'), from_encoding, to_encoding)
convert_editlog(opj(origdir, 'editlog'), opj('data', 'editlog'), from_encoding, to_encoding)
copy_file(opj(origdir, 'event.log'), opj('data', 'event.log'))
copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
| apache-2.0 | -4,561,852,729,684,848,000 | 36.316092 | 95 | 0.623287 | false |
jjmiranda/edx-platform | cms/envs/devstack_optimized.py | 23 | 1642 | """
Settings to run Studio in devstack using optimized static assets.
This configuration changes Studio to use the optimized static assets generated for testing,
rather than picking up the files directly from the source tree.
The following Paver command can be used to run Studio in optimized mode:
paver devstack studio --optimized
You can also generate the assets explicitly and then run Studio:
paver update_assets cms --settings=test_static_optimized
paver devstack studio --settings=devstack_optimized --fast
Note that changes to JavaScript assets will not be picked up automatically
as they are for non-optimized devstack. Instead, update_assets must be
invoked each time that changes have been made.
"""
########################## Devstack settings ###################################
from .devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
TEST_ROOT = REPO_ROOT / "test_root"
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Set REQUIRE_DEBUG to false so that it behaves like production
REQUIRE_DEBUG = False
# Fetch static files out of the pipeline's static root
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Serve static files at /static directly from the staticfiles directory under test root.
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = [
(TEST_ROOT / "staticfiles" / "cms").abspath(),
]
| agpl-3.0 | 6,952,518,904,967,960,000 | 34.695652 | 91 | 0.721681 | false |
ahamilton55/ansible | lib/ansible/modules/system/ping.py | 29 | 2109 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
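# The module echoes an optional data argument back in the 'ping' field
# (the special value 'crash' raises an exception, which is useful for testing).
ansible webservers -m ping -a 'data=hello'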
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,821,988,215,486,247,000 | 30.014706 | 96 | 0.675676 | false |
CanonicalLtd/subiquity | subiquitycore/models/network.py | 1 | 16975 | # Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import enum
import ipaddress
import logging
import yaml
from socket import AF_INET, AF_INET6
import attr
from typing import List, Optional
from subiquitycore.gettext38 import pgettext
from subiquitycore import netplan
NETDEV_IGNORED_IFACE_TYPES = [
'lo', 'bridge', 'tun', 'tap', 'dummy', 'sit', 'can', '???'
]
NETDEV_ALLOWED_VIRTUAL_IFACE_TYPES = ['vlan', 'bond']
log = logging.getLogger('subiquitycore.models.network')
def addr_version(ip):
return ipaddress.ip_interface(ip).version
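# For example, addr_version('192.168.1.5/24') == 4 and addr_version('2001:db8::1/64') == 6.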
class NetDevAction(enum.Enum):
# Information about a network interface
INFO = pgettext("NetDevAction", "Info")
EDIT_WLAN = pgettext("NetDevAction", "Edit Wifi")
EDIT_IPV4 = pgettext("NetDevAction", "Edit IPv4")
EDIT_IPV6 = pgettext("NetDevAction", "Edit IPv6")
EDIT_BOND = pgettext("NetDevAction", "Edit bond")
ADD_VLAN = pgettext("NetDevAction", "Add a VLAN tag")
DELETE = pgettext("NetDevAction", "Delete")
def str(self):
return pgettext(type(self).__name__, self.value)
class DHCPState(enum.Enum):
PENDING = enum.auto()
TIMED_OUT = enum.auto()
RECONFIGURE = enum.auto()
CONFIGURED = enum.auto()
@attr.s(auto_attribs=True)
class DHCPStatus:
enabled: bool
state: Optional[DHCPState]
addresses: List[str]
@attr.s(auto_attribs=True)
class StaticConfig:
addresses: List[str] = attr.Factory(list)
gateway: Optional[str] = None
nameservers: List[str] = attr.Factory(list)
searchdomains: List[str] = attr.Factory(list)
@attr.s(auto_attribs=True)
class VLANConfig:
id: int
link: str
@attr.s(auto_attribs=True)
class WLANConfig:
ssid: str
psk: str
@attr.s(auto_attribs=True)
class WLANStatus:
config: WLANConfig
scan_state: Optional[str]
visible_ssids: List[str]
@attr.s(auto_attribs=True)
class BondConfig:
interfaces: List[str]
mode: str
xmit_hash_policy: Optional[str] = None
lacp_rate: Optional[str] = None
def to_config(self):
mode = self.mode
params = {
'mode': self.mode,
}
if mode in BondParameters.supports_xmit_hash_policy:
params['transmit-hash-policy'] = self.xmit_hash_policy
if mode in BondParameters.supports_lacp_rate:
params['lacp-rate'] = self.lacp_rate
return {
'interfaces': self.interfaces,
'parameters': params,
}
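# Example (assumption: illustrative values):
#   BondConfig(['ens3', 'ens4'], '802.3ad', xmit_hash_policy='layer3+4',
#              lacp_rate='fast').to_config()
# returns {'interfaces': ['ens3', 'ens4'],
#          'parameters': {'mode': '802.3ad', 'transmit-hash-policy': 'layer3+4',
#                         'lacp-rate': 'fast'}}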
@attr.s(auto_attribs=True)
class NetDevInfo:
"""All the information about a NetworkDev that the view code needs."""
name: str
type: str
is_connected: bool
bond_master: Optional[str]
is_used: bool
disabled_reason: Optional[str]
hwaddr: Optional[str]
vendor: Optional[str]
model: Optional[str]
is_virtual: bool
has_config: bool
vlan: Optional[VLANConfig]
bond: Optional[BondConfig]
wlan: Optional[WLANConfig]
dhcp4: DHCPStatus
dhcp6: DHCPStatus
static4: StaticConfig
static6: StaticConfig
enabled_actions: List[NetDevAction]
class BondParameters:
# Just a place to hang various data about how bonds can be
# configured.
modes = [
'balance-rr',
'active-backup',
'balance-xor',
'broadcast',
'802.3ad',
'balance-tlb',
'balance-alb',
]
supports_xmit_hash_policy = {
'balance-xor',
'802.3ad',
'balance-tlb',
}
xmit_hash_policies = [
'layer2',
'layer2+3',
'layer3+4',
'encap2+3',
'encap3+4',
]
supports_lacp_rate = {
'802.3ad',
}
lacp_rates = [
'slow',
'fast',
]
class NetworkDev(object):
def __init__(self, model, name, typ):
self._model = model
self._name = name
self.type = typ
self.config = {}
self.info = None
self.disabled_reason = None
self.dhcp_events = {}
self._dhcp_state = {
4: None,
6: None,
}
def netdev_info(self) -> NetDevInfo:
if self.type == 'eth':
is_connected = bool(self.info.is_connected)
else:
is_connected = True
bond_master = None
for dev2 in self._model.get_all_netdevs():
if dev2.type != "bond":
continue
if self.name in dev2.config.get('interfaces', []):
bond_master = dev2.name
break
if self.type == 'bond' and self.config is not None:
params = self.config['parameters']
bond = BondConfig(
interfaces=self.config['interfaces'],
mode=params['mode'],
xmit_hash_policy=params.get('xmit-hash-policy'),
lacp_rate=params.get('lacp-rate'))
else:
bond = None
if self.type == 'vlan' and self.config is not None:
vlan = VLANConfig(id=self.config['id'], link=self.config['link'])
else:
vlan = None
if self.type == 'wlan':
ssid, psk = self.configured_ssid
wlan = WLANStatus(
config=WLANConfig(ssid=ssid, psk=psk),
scan_state=self.info.wlan['scan_state'],
visible_ssids=self.info.wlan['visible_ssids'])
else:
wlan = None
dhcp_addresses = self.dhcp_addresses()
configured_addresseses = {4: [], 6: []}
if self.config is not None:
for addr in self.config.get('addresses', []):
configured_addresseses[addr_version(addr)].append(addr)
ns = self.config.get('nameservers', {})
else:
ns = {}
dhcp_statuses = {}
static_configs = {}
for v in 4, 6:
dhcp_statuses[v] = DHCPStatus(
enabled=self.dhcp_enabled(v),
state=self._dhcp_state[v],
addresses=dhcp_addresses[v])
if self.config is not None:
gateway = self.config.get('gateway' + str(v))
else:
gateway = None
static_configs[v] = StaticConfig(
addresses=configured_addresseses[v],
gateway=gateway,
nameservers=ns.get('nameservers', []),
searchdomains=ns.get('search', []))
return NetDevInfo(
name=self.name,
type=self.type,
is_connected=is_connected,
vlan=vlan,
bond_master=bond_master,
bond=bond,
wlan=wlan,
dhcp4=dhcp_statuses[4],
dhcp6=dhcp_statuses[6],
static4=static_configs[4],
static6=static_configs[6],
is_used=self.is_used,
disabled_reason=self.disabled_reason,
enabled_actions=[
action for action in NetDevAction
if self.supports_action(action)
],
hwaddr=getattr(self.info, 'hwaddr', None),
vendor=getattr(self.info, 'vendor', None),
model=getattr(self.info, 'model', None),
is_virtual=self.is_virtual,
has_config=self.config is not None)
def dhcp_addresses(self):
r = {4: [], 6: []}
if self.info is not None:
for a in self.info.addresses.values():
if a.family == AF_INET:
v = 4
elif a.family == AF_INET6:
v = 6
else:
continue
if a.source == 'dhcp':
r[v].append(str(a.address))
return r
def dhcp_enabled(self, version):
if self.config is None:
return False
else:
return self.config.get('dhcp{v}'.format(v=version), False)
def dhcp_state(self, version):
if not self.config.get('dhcp{v}'.format(v=version), False):
return None
return self._dhcp_state[version]
def set_dhcp_state(self, version, state):
self._dhcp_state[version] = state
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
# If a virtual device that already exists is renamed, we need
# to create a dummy NetworkDev so that the existing virtual
# device is actually deleted when the config is applied.
if new_name != self.name and self.is_virtual:
if new_name in self._model.devices_by_name:
raise RuntimeError(
"renaming {old_name} over {new_name}".format(
old_name=self.name, new_name=new_name))
self._model.devices_by_name[new_name] = self
if self.info is not None:
dead_device = NetworkDev(self._model, self.name, self.type)
self._model.devices_by_name[self.name] = dead_device
dead_device.config = None
dead_device.info = self.info
self.info = None
self._name = new_name
def supports_action(self, action):
return getattr(self, "_supports_" + action.name)
@property
def configured_ssid(self):
for ssid, settings in self.config.get('access-points', {}).items():
psk = settings.get('password')
return ssid, psk
return None, None
def set_ssid_psk(self, ssid, psk):
aps = self.config.setdefault('access-points', {})
aps.clear()
if ssid is not None:
aps[ssid] = {}
if psk is not None:
aps[ssid]['password'] = psk
@property
def ifindex(self):
if self.info is not None:
return self.info.ifindex
else:
return None
@property
def is_virtual(self):
return self.type in NETDEV_ALLOWED_VIRTUAL_IFACE_TYPES
@property
def is_bond_slave(self):
for dev in self._model.get_all_netdevs():
if dev.type == "bond":
if self.name in dev.config.get('interfaces', []):
return True
return False
@property
def is_used(self):
for dev in self._model.get_all_netdevs():
if dev.type == "bond":
if self.name in dev.config.get('interfaces', []):
return True
if dev.type == "vlan":
if self.name == dev.config.get('link'):
return True
return False
@property
def actual_global_ip_addresses(self):
return [addr.ip for _, addr in sorted(self.info.addresses.items())
if addr.scope == "global"]
_supports_INFO = True
_supports_EDIT_WLAN = property(lambda self: self.type == "wlan")
_supports_EDIT_IPV4 = True
_supports_EDIT_IPV6 = True
_supports_EDIT_BOND = property(lambda self: self.type == "bond")
_supports_ADD_VLAN = property(
lambda self: self.type != "vlan" and not self.is_bond_slave)
_supports_DELETE = property(
lambda self: self.is_virtual and not self.is_used)
def remove_ip_networks_for_version(self, version):
self.config.pop('dhcp{v}'.format(v=version), None)
self.config.pop('gateway{v}'.format(v=version), None)
addrs = []
for ip in self.config.get('addresses', []):
if addr_version(ip) != version:
addrs.append(ip)
if addrs:
self.config['addresses'] = addrs
else:
self.config.pop('addresses', None)
class NetworkModel(object):
""" """
def __init__(self, project, support_wlan=True):
self.support_wlan = support_wlan
self.devices_by_name = {} # Maps interface names to NetworkDev
self.has_network = False
self.project = project
def parse_netplan_configs(self, netplan_root):
self.config = netplan.Config()
self.config.load_from_root(netplan_root)
def new_link(self, ifindex, link):
log.debug("new_link %s %s %s", ifindex, link.name, link.type)
if link.type in NETDEV_IGNORED_IFACE_TYPES:
return
if not self.support_wlan and link.type == "wlan":
return
if link.is_virtual and (
link.type not in NETDEV_ALLOWED_VIRTUAL_IFACE_TYPES):
return
dev = self.devices_by_name.get(link.name)
if dev is not None:
# XXX What to do if types don't match??
if dev.info is not None:
# This shouldn't happen! No sense getting too upset
# about if it does though.
pass
else:
dev.info = link
else:
config = self.config.config_for_device(link)
if link.is_virtual and not config:
# If we see a virtual device without there already
# being a config for it, we just ignore it.
return
dev = NetworkDev(self, link.name, link.type)
dev.info = link
dev.config = config
log.debug("new_link %s %s with config %s",
ifindex, link.name,
netplan.sanitize_interface_config(dev.config))
self.devices_by_name[link.name] = dev
return dev
def update_link(self, ifindex):
for name, dev in self.devices_by_name.items():
if dev.ifindex == ifindex:
return dev
def del_link(self, ifindex):
for name, dev in self.devices_by_name.items():
if dev.ifindex == ifindex:
dev.info = None
if dev.is_virtual:
# We delete all virtual devices before running netplan
# apply. If a device has been deleted in the UI, we set
# dev.config to None. Now it's actually gone, forget we
# ever knew it existed.
if dev.config is None:
del self.devices_by_name[name]
else:
# If a physical interface disappears on us, it's gone.
del self.devices_by_name[name]
return dev
def new_vlan(self, device_name, tag):
name = "{name}.{tag}".format(name=device_name, tag=tag)
dev = self.devices_by_name[name] = NetworkDev(self, name, 'vlan')
dev.config = {
'link': device_name,
'id': tag,
}
return dev
def new_bond(self, name, bond_config):
dev = self.devices_by_name[name] = NetworkDev(self, name, 'bond')
dev.config = bond_config.to_config()
return dev
def get_all_netdevs(self, include_deleted=False):
devs = [v for k, v in sorted(self.devices_by_name.items())]
if not include_deleted:
devs = [v for v in devs if v.config is not None]
return devs
def get_netdev_by_name(self, name):
return self.devices_by_name[name]
def stringify_config(self, config):
return '\n'.join([
"# This is the network config written by '{}'".format(
self.project),
yaml.dump(config, default_flow_style=False),
])
def render_config(self):
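        # For instance (assumption: an illustrative device), a model holding a single
        # configured Ethernet device 'ens3' with config {'dhcp4': True} renders as
        # {'network': {'version': 2, 'ethernets': {'ens3': {'dhcp4': True}}}}.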
config = {
'network': {
'version': 2,
},
}
type_to_key = {
'eth': 'ethernets',
'bond': 'bonds',
'wlan': 'wifis',
'vlan': 'vlans',
}
for dev in self.get_all_netdevs():
key = type_to_key[dev.type]
configs = config['network'].setdefault(key, {})
if dev.config or dev.is_used:
configs[dev.name] = dev.config
return config
def render(self):
return {
'write_files': {
'etc_netplan_installer': {
'path': 'etc/netplan/00-installer-config.yaml',
'content': self.stringify_config(self.render_config()),
},
'nonet': {
'path': ('etc/cloud/cloud.cfg.d/'
'subiquity-disable-cloudinit-networking.cfg'),
'content': 'network: {config: disabled}\n',
},
},
}
| agpl-3.0 | -9,217,052,195,006,354,000 | 30.552045 | 77 | 0.549691 | false |
YzPaul3/h2o-3 | h2o-py/h2o/estimators/deeplearning.py | 1 | 27113 | from .estimator_base import H2OEstimator
class H2ODeepLearningEstimator(H2OEstimator):
"""Build a supervised Deep Neural Network model
Builds a feed-forward multilayer artificial neural network on an H2OFrame
Parameters
----------
model_id : str, optional
The unique id assigned to the resulting model. If none is given, an id will
automatically be generated.
overwrite_with_best_model : bool
If True, overwrite the final model with the best model found during training.
Defaults to True.
checkpoint : H2ODeepLearningModel, optional
Model checkpoint (either key or H2ODeepLearningModel) to resume training with.
use_all_factor_levels : bool
Use all factor levels of categorical variance. Otherwise the first factor level
is omitted (without loss of accuracy). Useful for variable importances and
auto-enabled for autoencoder..
standardize : bool
If enabled, automatically standardize the data. If disabled, the user must
provide properly scaled input data.
activation : str
A string indicating the activation function to use.
Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout",
"Maxout", or "MaxoutWithDropout"
hidden : list
Hidden layer sizes (e.g. [100,100])
epochs : float
How many times the dataset should be iterated (streamed), can be fractional
train_samples_per_iteration : int
Number of training samples (globally) per MapReduce iteration.
Special values are: 0 one epoch; -1 all available data
(e.g., replicated training data); or -2 auto-tuning (default)
seed : int
Seed for random numbers (affects sampling) - Note: only reproducible when
running single threaded
adaptive_rate : bool
    Adaptive learning rate (ADADELTA)
rho : float
Adaptive learning rate time decay factor (similarity to prior updates)
epsilon : float
Adaptive learning rate parameter, similar to learn rate annealing during initial
training phase. Typical values are between 1.0e-10 and 1.0e-4
rate : float
Learning rate (higher => less stable, lower => slower convergence)
rate_annealing : float
    Learning rate annealing: rate / (1 + rate_annealing * samples)
rate_decay : float
    Learning rate decay factor between layers (N-th layer: rate * alpha^(N-1))
momentum_start : float
Initial momentum at the beginning of training (try 0.5)
momentum_ramp : float
Number of training samples for which momentum increases
momentum_stable : float
    Final momentum after the ramp is over (try 0.99)
nesterov_accelerated_gradient : bool
Logical. Use Nesterov accelerated gradient (recommended)
input_dropout_ratio : float
A fraction of the features for each training row to be omitted from training in
order to improve generalization (dimension sampling).
hidden_dropout_ratios : float
    Hidden layer dropout ratios (can improve generalization); specify one value per
    hidden layer, defaults to 0.5
l1 : float
L1 regularization (can add stability and improve generalization,
causes many weights to become 0)
l2 : float
L2 regularization (can add stability and improve generalization,
causes many weights to be small)
max_w2 : float
Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
initial_weight_distribution : str
Can be "Uniform", "UniformAdaptive", or "Normal"
initial_weight_scale : str
Uniform: -value ... value, Normal: stddev
loss : str
Loss function: "Automatic", "CrossEntropy" (for classification only),
"Quadratic", "Absolute" (experimental) or "Huber" (experimental)
distribution : str
A character string. The distribution function of the response.
Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma",
"tweedie", "laplace", "huber", "quantile" or "gaussian"
quantile_alpha : float
Quantile (only for Quantile regression, must be between 0 and 1)
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
score_interval : int
Shortest time interval (in secs) between model scoring
score_training_samples : int
Number of training set samples for scoring (0 for all)
score_validation_samples : int
Number of validation set samples for scoring (0 for all)
score_duty_cycle : float
Maximum duty cycle fraction for scoring (lower: more training, higher: more
scoring)
classification_stop : float
Stopping criterion for classification error fraction on training data
(-1 to disable)
regression_stop : float
Stopping criterion for regression error (MSE) on training data (-1 to disable)
stopping_rounds : int
Early stopping based on convergence of stopping_metric.
Stop if simple moving average of length k of the stopping_metric does not
improve (by stopping_tolerance) for k=stopping_rounds scoring events.
Can only trigger after at least 2k scoring events. Use 0 to disable.
stopping_metric : str
Metric to use for convergence checking, only for _stopping_rounds > 0
Can be one of "AUTO", "deviance", "logloss", "MSE", "AUC", "r2",
"misclassification".
stopping_tolerance : float
Relative tolerance for metric-based stopping criterion (stop if relative
improvement is not at least this much)
quiet_mode : bool
Enable quiet mode for less output to standard output
max_confusion_matrix_size : int
Max. size (number of classes) for confusion matrices to be shown
max_hit_ratio_k : float
Max number (top K) of predictions to use for hit ratio computation
(for multi-class only, 0 to disable)
balance_classes : bool
Balance training data class counts via over/under-sampling (for imbalanced data)
class_sampling_factors : list
Desired over/under-sampling ratios per class (in lexicographic order).
If not specified, sampling factors will be automatically computed to obtain
class balance during training. Requires balance_classes.
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts
(can be less than 1.0)
score_validation_sampling :
Method used to sample validation dataset for scoring
diagnostics : bool
Enable diagnostics for hidden layers
variable_importances : bool
    Compute variable importances for input features (Gedeon method); can be slow
    for large networks
fast_mode : bool
Enable fast mode (minor approximations in back-propagation)
ignore_const_cols : bool
Ignore constant columns (no information can be gained anyway)
force_load_balance : bool
Force extra load balancing to increase training speed for small datasets
(to keep all cores busy)
replicate_training_data : bool
Replicate the entire training dataset onto every node for faster training
single_node_mode : bool
Run on a single node for fine-tuning of model parameters
shuffle_training_data : bool
    Enable shuffling of training data (recommended if training data is replicated
    and train_samples_per_iteration is close to numRows*numNodes)
sparse : bool
Sparse data handling (Experimental)
col_major : bool
Use a column major weight matrix for input layer. Can speed up forward
propagation, but might slow down back propagation (Experimental)
average_activation : float
Average activation for sparse auto-encoder (Experimental)
sparsity_beta : bool
Sparsity regularization (Experimental)
max_categorical_features : int
    Max. number of categorical features, enforced via hashing (Experimental)
reproducible : bool
Force reproducibility on small data (will be slow - only uses 1 thread)
missing_values_handling : str
Handling of missing values. Either "Skip" or "MeanImputation".
export_weights_and_biases : bool
    Whether to export Neural Network weights and biases to H2O Frames
nfolds : int, optional
Number of folds for cross-validation. If nfolds >= 2, then validation must
remain empty.
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified
Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
keep_cross_validation_fold_assignment : bool
Whether to keep the cross-validation fold assignment.
Examples
--------
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
>>> ml.init()
>>> rows=[[1,2,3,4,0],[2,1,2,4,1],[2,1,4,2,1],[0,1,2,34,1],[2,3,4,1,0]]*50
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2ODeepLearningEstimator()
>>> model.train(x=range(4), y=4, training_frame=fr)
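  A second, purely illustrative sketch; the parameter values below are
  arbitrary demonstration choices using only options documented above
  (hidden, epochs, stopping_rounds, stopping_metric, stopping_tolerance):
  >>> model2 = H2ODeepLearningEstimator(hidden=[20, 20], epochs=10,
  ...     stopping_rounds=3, stopping_metric="logloss", stopping_tolerance=0.01)
  >>> model2.train(x=range(4), y=4, training_frame=fr)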
"""
def __init__(self, model_id=None, overwrite_with_best_model=None, checkpoint=None,
pretrained_autoencoder=None, use_all_factor_levels=None,
standardize=None, activation=None, hidden=None, epochs=None,
train_samples_per_iteration=None, seed=None, adaptive_rate=None,
rho=None, epsilon=None, rate=None, rate_annealing=None, rate_decay=None,
momentum_start=None, momentum_ramp=None, momentum_stable=None,
nesterov_accelerated_gradient=None, input_dropout_ratio=None,
hidden_dropout_ratios=None, l1=None, l2=None, max_w2=None,
initial_weight_distribution=None, initial_weight_scale=None, loss=None,
distribution=None, quantile_alpha=None, tweedie_power=None,
score_interval=None, score_training_samples=None,
score_validation_samples=None, score_duty_cycle=None,
classification_stop=None, regression_stop=None, quiet_mode=None,
max_confusion_matrix_size=None, max_hit_ratio_k=None, balance_classes=None,
class_sampling_factors=None, max_after_balance_size=None,
score_validation_sampling=None, diagnostics=None,
variable_importances=None, fast_mode=None, ignore_const_cols=None,
force_load_balance=None, replicate_training_data=None,
single_node_mode=None, shuffle_training_data=None, sparse=None,
col_major=None, average_activation=None, sparsity_beta=None,
max_categorical_features=None, missing_values_handling=None,
reproducible=None, export_weights_and_biases=None, nfolds=None,
fold_assignment=None, keep_cross_validation_predictions=None,
keep_cross_validation_fold_assignment=None,
stopping_rounds=None, stopping_metric=None, stopping_tolerance=None,
initial_weights=None, initial_biases=None):
super(H2ODeepLearningEstimator, self).__init__()
self._parms = locals()
self._parms = {k:v for k,v in self._parms.items() if k!="self"}
self._parms["autoencoder"] = isinstance(self, H2OAutoEncoderEstimator)
@property
def overwrite_with_best_model(self):
return self._parms["overwrite_with_best_model"]
@overwrite_with_best_model.setter
def overwrite_with_best_model(self, value):
self._parms["overwrite_with_best_model"] = value
@property
def checkpoint(self):
return self._parms["checkpoint"]
@checkpoint.setter
def checkpoint(self, value):
self._parms["checkpoint"] = value
@property
def pretrained_autoencoder(self):
return self._parms["pretrained_autoencoder"]
@pretrained_autoencoder.setter
def pretrained_autoencoder(self, value):
self._parms["pretrained_autoencoder"] = value
@property
def use_all_factor_levels(self):
return self._parms["use_all_factor_levels"]
@use_all_factor_levels.setter
def use_all_factor_levels(self, value):
self._parms["use_all_factor_levels"] = value
@property
def standardize(self):
return self._parms["standardize"]
@standardize.setter
def standardize(self, value):
self._parms["standardize"] = value
@property
def activation(self):
return self._parms["activation"]
@activation.setter
def activation(self, value):
self._parms["activation"] = value
@property
def hidden(self):
return self._parms["hidden"]
@hidden.setter
def hidden(self, value):
self._parms["hidden"] = value
@property
def epochs(self):
return self._parms["epochs"]
@epochs.setter
def epochs(self, value):
self._parms["epochs"] = value
@property
def train_samples_per_iteration(self):
return self._parms["train_samples_per_iteration"]
@train_samples_per_iteration.setter
def train_samples_per_iteration(self, value):
self._parms["train_samples_per_iteration"] = value
@property
def seed(self):
return self._parms["seed"]
@seed.setter
def seed(self, value):
self._parms["seed"] = value
@property
def adaptive_rate(self):
return self._parms["adaptive_rate"]
@adaptive_rate.setter
def adaptive_rate(self, value):
self._parms["adaptive_rate"] = value
@property
def rho(self):
return self._parms["rho"]
@rho.setter
def rho(self, value):
self._parms["rho"] = value
@property
def epsilon(self):
return self._parms["epsilon"]
@epsilon.setter
def epsilon(self, value):
self._parms["epsilon"] = value
@property
def rate(self):
return self._parms["rate"]
@rate.setter
def rate(self, value):
self._parms["rate"] = value
@property
def rate_annealing(self):
return self._parms["rate_annealing"]
@rate_annealing.setter
def rate_annealing(self, value):
self._parms["rate_annealing"] = value
@property
def rate_decay(self):
return self._parms["rate_decay"]
@rate_decay.setter
def rate_decay(self, value):
self._parms["rate_decay"] = value
@property
def momentum_start(self):
return self._parms["momentum_start"]
@momentum_start.setter
def momentum_start(self, value):
self._parms["momentum_start"] = value
@property
def momentum_ramp(self):
return self._parms["momentum_ramp"]
@momentum_ramp.setter
def momentum_ramp(self, value):
self._parms["momentum_ramp"] = value
@property
def momentum_stable(self):
return self._parms["momentum_stable"]
@momentum_stable.setter
def momentum_stable(self, value):
self._parms["momentum_stable"] = value
@property
def nesterov_accelerated_gradient(self):
return self._parms["nesterov_accelerated_gradient"]
@nesterov_accelerated_gradient.setter
def nesterov_accelerated_gradient(self, value):
self._parms["nesterov_accelerated_gradient"] = value
@property
def input_dropout_ratio(self):
return self._parms["input_dropout_ratio"]
@input_dropout_ratio.setter
def input_dropout_ratio(self, value):
self._parms["input_dropout_ratio"] = value
@property
def hidden_dropout_ratios(self):
return self._parms["hidden_dropout_ratios"]
@hidden_dropout_ratios.setter
def hidden_dropout_ratios(self, value):
self._parms["hidden_dropout_ratios"] = value
@property
def l1(self):
return self._parms["l1"]
@l1.setter
def l1(self, value):
self._parms["l1"] = value
@property
def l2(self):
return self._parms["l2"]
@l2.setter
def l2(self, value):
self._parms["l2"] = value
@property
def max_w2(self):
return self._parms["max_w2"]
@max_w2.setter
def max_w2(self, value):
self._parms["max_w2"] = value
@property
def initial_weight_distribution(self):
return self._parms["initial_weight_distribution"]
@initial_weight_distribution.setter
def initial_weight_distribution(self, value):
self._parms["initial_weight_distribution"] = value
@property
def initial_weight_scale(self):
return self._parms["initial_weight_scale"]
@initial_weight_scale.setter
def initial_weight_scale(self, value):
self._parms["initial_weight_scale"] = value
@property
def loss(self):
return self._parms["loss"]
@loss.setter
def loss(self, value):
self._parms["loss"] = value
@property
def distribution(self):
return self._parms["distribution"]
@distribution.setter
def distribution(self, value):
self._parms["distribution"] = value
@property
def quantile_alpha(self):
return self._parms["quantile_alpha"]
@quantile_alpha.setter
def quantile_alpha(self, value):
self._parms["quantile_alpha"] = value
@property
def tweedie_power(self):
return self._parms["tweedie_power"]
@tweedie_power.setter
def tweedie_power(self, value):
self._parms["tweedie_power"] = value
@property
def score_interval(self):
return self._parms["score_interval"]
@score_interval.setter
def score_interval(self, value):
self._parms["score_interval"] = value
@property
def score_training_samples(self):
return self._parms["score_training_samples"]
@score_training_samples.setter
def score_training_samples(self, value):
self._parms["score_training_samples"] = value
@property
def score_validation_samples(self):
return self._parms["score_validation_samples"]
@score_validation_samples.setter
def score_validation_samples(self, value):
self._parms["score_validation_samples"] = value
@property
def score_duty_cycle(self):
return self._parms["score_duty_cycle"]
@score_duty_cycle.setter
def score_duty_cycle(self, value):
self._parms["score_duty_cycle"] = value
@property
def classification_stop(self):
return self._parms["classification_stop"]
@classification_stop.setter
def classification_stop(self, value):
self._parms["classification_stop"] = value
@property
def regression_stop(self):
return self._parms["regression_stop"]
@regression_stop.setter
def regression_stop(self, value):
self._parms["regression_stop"] = value
@property
def stopping_rounds(self):
return self._parms["stopping_rounds"]
@stopping_rounds.setter
def stopping_rounds(self, value):
self._parms["stopping_rounds"] = value
@property
def stopping_metric(self):
return self._parms["stopping_metric"]
@stopping_metric.setter
def stopping_metric(self, value):
self._parms["stopping_metric"] = value
@property
def stopping_tolerance(self):
return self._parms["stopping_tolerance"]
@stopping_tolerance.setter
def stopping_tolerance(self, value):
self._parms["stopping_tolerance"] = value
@property
def quiet_mode(self):
return self._parms["quiet_mode"]
@quiet_mode.setter
def quiet_mode(self, value):
self._parms["quiet_mode"] = value
@property
def max_confusion_matrix_size(self):
return self._parms["max_confusion_matrix_size"]
@max_confusion_matrix_size.setter
def max_confusion_matrix_size(self, value):
self._parms["max_confusion_matrix_size"] = value
@property
def max_hit_ratio_k(self):
return self._parms["max_hit_ratio_k"]
@max_hit_ratio_k.setter
def max_hit_ratio_k(self, value):
self._parms["max_hit_ratio_k"] = value
@property
def balance_classes(self):
return self._parms["balance_classes"]
@balance_classes.setter
def balance_classes(self, value):
self._parms["balance_classes"] = value
@property
def class_sampling_factors(self):
return self._parms["class_sampling_factors"]
@class_sampling_factors.setter
def class_sampling_factors(self, value):
self._parms["class_sampling_factors"] = value
@property
def max_after_balance_size(self):
return self._parms["max_after_balance_size"]
@max_after_balance_size.setter
def max_after_balance_size(self, value):
self._parms["max_after_balance_size"] = value
@property
def score_validation_sampling(self):
return self._parms["score_validation_sampling"]
@score_validation_sampling.setter
def score_validation_sampling(self, value):
self._parms["score_validation_sampling"] = value
@property
def diagnostics(self):
return self._parms["diagnostics"]
@diagnostics.setter
def diagnostics(self, value):
self._parms["diagnostics"] = value
@property
def variable_importances(self):
return self._parms["variable_importances"]
@variable_importances.setter
def variable_importances(self, value):
self._parms["variable_importances"] = value
@property
def fast_mode(self):
return self._parms["fast_mode"]
@fast_mode.setter
def fast_mode(self, value):
self._parms["fast_mode"] = value
@property
def ignore_const_cols(self):
return self._parms["ignore_const_cols"]
@ignore_const_cols.setter
def ignore_const_cols(self, value):
self._parms["ignore_const_cols"] = value
@property
def force_load_balance(self):
return self._parms["force_load_balance"]
@force_load_balance.setter
def force_load_balance(self, value):
self._parms["force_load_balance"] = value
@property
def replicate_training_data(self):
return self._parms["replicate_training_data"]
@replicate_training_data.setter
def replicate_training_data(self, value):
self._parms["replicate_training_data"] = value
@property
def single_node_mode(self):
return self._parms["single_node_mode"]
@single_node_mode.setter
def single_node_mode(self, value):
self._parms["single_node_mode"] = value
@property
def shuffle_training_data(self):
return self._parms["shuffle_training_data"]
@shuffle_training_data.setter
def shuffle_training_data(self, value):
self._parms["shuffle_training_data"] = value
@property
def sparse(self):
return self._parms["sparse"]
@sparse.setter
def sparse(self, value):
self._parms["sparse"] = value
@property
def col_major(self):
return self._parms["col_major"]
@col_major.setter
def col_major(self, value):
self._parms["col_major"] = value
@property
def average_activation(self):
return self._parms["average_activation"]
@average_activation.setter
def average_activation(self, value):
self._parms["average_activation"] = value
@property
def sparsity_beta(self):
return self._parms["sparsity_beta"]
@sparsity_beta.setter
def sparsity_beta(self, value):
self._parms["sparsity_beta"] = value
@property
def max_categorical_features(self):
return self._parms["max_categorical_features"]
@max_categorical_features.setter
def max_categorical_features(self, value):
self._parms["max_categorical_features"] = value
@property
def missing_values_handling(self):
return self._parms["missing_values_handling"]
@missing_values_handling.setter
def missing_values_handling(self, value):
self._parms["missing_values_handling"] = value
@property
def reproducible(self):
return self._parms["reproducible"]
@reproducible.setter
def reproducible(self, value):
self._parms["reproducible"] = value
@property
def export_weights_and_biases(self):
return self._parms["export_weights_and_biases"]
@export_weights_and_biases.setter
def export_weights_and_biases(self, value):
self._parms["export_weights_and_biases"] = value
@property
def nfolds(self):
return self._parms["nfolds"]
@nfolds.setter
def nfolds(self, value):
self._parms["nfolds"] = value
@property
def fold_assignment(self):
return self._parms["fold_assignment"]
@fold_assignment.setter
def fold_assignment(self, value):
self._parms["fold_assignment"] = value
@property
def keep_cross_validation_predictions(self):
return self._parms["keep_cross_validation_predictions"]
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, value):
self._parms["keep_cross_validation_predictions"] = value
@property
def keep_cross_validation_fold_assignment(self):
return self._parms["keep_cross_validation_fold_assignment"]
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, value):
self._parms["keep_cross_validation_fold_assignment"] = value
@property
def initial_weights(self):
return self._parms["initial_weights"]
@initial_weights.setter
def initial_weights(self, value):
self._parms["initial_weights"] = value
@property
def initial_biases(self):
return self._parms["initial_biases"]
@initial_biases.setter
def initial_biases(self, value):
self._parms["initial_biases"] = value
class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
"""
Examples
--------
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
>>> ml.init()
>>> rows=[[1,2,3,4,0]*50,[2,1,2,4,1]*50,[2,1,4,2,1]*50,[0,1,2,34,1]*50,[2,3,4,1,0]*50]
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2OAutoEncoderEstimator()
>>> model.train(x=range(4), training_frame=fr)
"""
pass | apache-2.0 | -5,524,750,572,744,844,000 | 30.749415 | 94 | 0.6408 | false |
yiheng/BigDL | spark/dl/src/test/resources/tf/models/temporal_convolution.py | 9 | 1651 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from sys import argv
from util import run_model
def main():
"""
You can also run these commands manually to generate the pb file
1. git clone https://github.com/tensorflow/models.git
2. export PYTHONPATH=Path_to_your_model_folder
3. python temporal_convolution.py
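    Note: this script itself also expects command line arguments -- a dump
    path, a comma-separated list of output tensor names and a backward flag.
    The values below are illustrative assumptions only, e.g.
    python temporal_convolution.py /tmp/tconv output:0 True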
"""
tf.set_random_seed(1024)
input_width = 32
input_channel = 3
inputs = tf.Variable(tf.random_uniform((1, input_width, input_channel)), name='input')
inputs = tf.identity(inputs, "input_node")
filter_width = 4
output_channels = 6
filters = tf.Variable(tf.random_uniform((filter_width, input_channel, output_channels)))
conv_out = tf.nn.conv1d(inputs, filters, stride=1, padding="VALID")
bias = tf.Variable(tf.zeros([output_channels]))
output = tf.nn.tanh(tf.nn.bias_add(conv_out, bias), name="output")
net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
run_model(net_outputs, argv[1], backward=(argv[3] == 'True'))
if __name__ == "__main__":
main() | apache-2.0 | -6,083,195,665,722,011,000 | 35.711111 | 97 | 0.697759 | false |
mspark93/VTK | Rendering/Core/Testing/Python/TestOutOfRangeDiscretizableColorTransferFunction.py | 9 | 1421 | #!/usr/bin/env python
import sys
import vtk
from vtk.test import Testing
useBelowRangeColor = 0
if sys.argv.count("--useBelowRangeColor") > 0:
useBelowRangeColor = 1
useAboveRangeColor = 0
if sys.argv.count("--useAboveRangeColor") > 0:
useAboveRangeColor = 1
cmap = vtk.vtkDiscretizableColorTransferFunction()
cmap.AddRGBPoint(-.4, 0.8, 0.8, 0.8)
cmap.AddRGBPoint(0.4, 1, 0, 0)
cmap.SetUseBelowRangeColor(useBelowRangeColor)
cmap.SetBelowRangeColor(0.0, 1.0, 0.0)
cmap.SetUseAboveRangeColor(useAboveRangeColor)
cmap.SetAboveRangeColor(1.0, 1.0, 0.0)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(32)
sphere.SetThetaResolution(32)
sphere.Update()
pd = sphere.GetOutput().GetPointData()
for i in range(pd.GetNumberOfArrays()):
print(pd.GetArray(i).GetName())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
mapper.SetScalarModeToUsePointFieldData()
mapper.SelectColorArray("Normals")
mapper.ColorByArrayComponent("Normals", 0)
mapper.SetLookupTable(cmap)
mapper.UseLookupTableScalarRangeOn()
mapper.InterpolateScalarsBeforeMappingOn()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.ResetCamera()
renderer.ResetCameraClippingRange()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
| bsd-3-clause | -3,426,403,097,778,588,000 | 25.314815 | 50 | 0.790992 | false |
pyrocko/pyrocko | src/scenario/targets/gnss_campaign.py | 1 | 3991 | # http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
from __future__ import absolute_import, division, print_function
import logging
import os.path as op
import numpy as num
from pyrocko import gf, util
from pyrocko.guts import Float
from .base import TargetGenerator, NoiseGenerator
from ..station import RandomStationGenerator, StationGenerator
DEFAULT_STORE_ID = 'ak135_static'
logger = logging.getLogger('pyrocko.scenario.targets.gnss_campaign')
guts_prefix = 'pf.scenario'
class GPSNoiseGenerator(NoiseGenerator):
measurement_duarion_days = Float.T(
default=2.,
help='Measurement duration in days')
def add_noise(self, campaign):
# https://www.nat-hazards-earth-syst-sci.net/15/875/2015/nhess-15-875-2015.pdf
waterlevel = 1. - (.99 + .0015 * self.measurement_duarion_days) # noqa
logger.warning('GNSSNoiseGenerator is a work-in-progress!')
for ista, sta in enumerate(campaign.stations):
pass
# rstate = self.get_rstate(ista)
# sta.north.sigma = 8e-3
# sta.east.sigma = 8e-3
# sta.north.shift += rstate.normal(0., sta.north.sigma)
# sta.east.shift += rstate.normal(0., sta.east.sigma)
class GNSSCampaignGenerator(TargetGenerator):
station_generator = StationGenerator.T(
default=RandomStationGenerator(
network_name='GN',
channels=None),
help='The StationGenerator for creating the stations.')
noise_generator = NoiseGenerator.T(
default=GPSNoiseGenerator.D(),
optional=True,
help='Add Synthetic noise to the GNSS displacements.')
store_id = gf.StringID.T(
default=DEFAULT_STORE_ID,
help='The GF store to use for forward-calculations.')
def get_stations(self):
return self.station_generator.get_stations()
def get_targets(self):
stations = self.get_stations()
lats = num.array([s.lat for s in stations])
lons = num.array([s.lon for s in stations])
target = gf.GNSSCampaignTarget(
lats=lats,
lons=lons,
store_id=self.store_id)
return [target]
def get_gnss_campaigns(self, engine, sources, tmin=None, tmax=None):
try:
resp = engine.process(
sources,
self.get_targets(),
nthreads=0)
except gf.meta.OutOfBounds:
logger.warning('Could not calculate GNSS displacements'
                           ' - the GF store\'s extent is too small!')
return []
campaigns = [r.campaign for r in resp.static_results()]
stacked_campaign = campaigns[0]
stacked_campaign.name = 'Scenario Campaign'
for camp in campaigns[1:]:
for ista, sta in enumerate(camp.stations):
stacked_campaign.stations[ista].north.shift += sta.north.shift
stacked_campaign.stations[ista].east.shift += sta.east.shift
stacked_campaign.stations[ista].up.shift += sta.up.shift
for ista, sta in enumerate(stacked_campaign.stations):
sta.code = 'SY%02d' % (ista + 1)
if self.noise_generator is not None:
self.noise_generator.add_noise(stacked_campaign)
return [stacked_campaign]
def ensure_data(self, engine, sources, path, tmin=None, tmax=None):
path_gnss = op.join(path, 'gnss')
util.ensuredir(path_gnss)
fn = op.join(path_gnss,
'campaign-%s.yml' % self.station_generator.network_name)
if op.exists(fn):
return
campaigns = self.get_gnss_campaigns(engine, sources, tmin, tmax)
with open(fn, 'w') as f:
for camp in campaigns:
camp.dump(stream=f)
def add_map_artists(self, engine, sources, automap):
automap.add_gnss_campaign(self.get_gnss_campaigns(engine, sources)[0])
| gpl-3.0 | 6,038,908,862,209,002,000 | 31.983471 | 86 | 0.614132 | false |
wxs/keras | tests/auto/test_loss_weighting.py | 31 | 5537 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1336) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
import unittest
nb_classes = 10
batch_size = 128
nb_epoch = 5
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
test_ids = np.where(y_test == np.array(weighted_class))[0]
class_weight = dict([(i, standard_weight) for i in range(nb_classes)])
class_weight[weighted_class] = high_weight
sample_weight = np.ones((y_train.shape[0])) * standard_weight
sample_weight[y_train == weighted_class] = high_weight
def create_sequential_model():
model = Sequential()
model.add(Dense(784, 50))
model.add(Activation('relu'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
return model
def create_graph_model():
model = Graph()
model.add_input(name='input')
model.add_node(Dense(784, 50, activation='relu'), name='d1', input='input')
model.add_node(Dense(50, 10, activation='softmax'), name='d2', input='d1')
model.add_output(name='output', input='d2')
return model
def _test_weights_sequential(model, class_weight=None, sample_weight=None):
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0,
class_weight=class_weight, sample_weight=sample_weight)
model.train_on_batch(X_train[:32], Y_train[:32],
class_weight=class_weight, sample_weight=sample_weight[:32] if sample_weight is not None else None)
model.test_on_batch(X_train[:32], Y_train[:32],
sample_weight=sample_weight[:32] if sample_weight is not None else None)
score = model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
return score
def _test_weights_graph(model, class_weight=None, sample_weight=None):
model.fit({'input': X_train, 'output': Y_train}, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0,
class_weight={'output': class_weight}, sample_weight={'output': sample_weight})
model.train_on_batch({'input': X_train[:32], 'output': Y_train[:32]},
class_weight={'output': class_weight}, sample_weight={'output': sample_weight[:32] if sample_weight is not None else None})
model.test_on_batch({'input': X_train[:32], 'output': Y_train[:32]},
sample_weight={'output': sample_weight[:32] if sample_weight is not None else None})
score = model.evaluate({'input': X_test[test_ids, :], 'output': Y_test[test_ids, :]}, verbose=0)
return score
class TestLossWeighting(unittest.TestCase):
def test_sequential(self):
for loss in ['mae', 'mse', 'categorical_crossentropy']:
print('loss:', loss)
print('sequential')
# no weights: reference point
model = create_sequential_model()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
standard_score = _test_weights_sequential(model)
# test class_weight
model = create_sequential_model()
model.compile(loss=loss, optimizer='rmsprop')
score = _test_weights_sequential(model, class_weight=class_weight)
print('score:', score, ' vs.', standard_score)
self.assertTrue(score < standard_score)
# test sample_weight
model = create_sequential_model()
model.compile(loss=loss, optimizer='rmsprop')
score = _test_weights_sequential(model, sample_weight=sample_weight)
print('score:', score, ' vs.', standard_score)
self.assertTrue(score < standard_score)
def test_graph(self):
for loss in ['mae', 'mse', 'categorical_crossentropy']:
print('loss:', loss)
print('graph')
# no weights: reference point
model = create_graph_model()
model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop')
standard_score = _test_weights_graph(model)
# test class_weight
model = create_graph_model()
model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop')
score = _test_weights_graph(model, class_weight=class_weight)
print('score:', score, ' vs.', standard_score)
self.assertTrue(score < standard_score)
# test sample_weight
model = create_graph_model()
model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop')
score = _test_weights_graph(model, sample_weight=sample_weight)
print('score:', score, ' vs.', standard_score)
self.assertTrue(score < standard_score)
if __name__ == '__main__':
print('Test class_weight and sample_weight')
unittest.main()
| mit | 4,567,960,397,381,501,000 | 41.922481 | 148 | 0.641864 | false |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/models/__init__.py | 1 | 9184 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AliasPathType
from ._models_py3 import AliasType
from ._models_py3 import BasicDependency
from ._models_py3 import ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import DebugSetting
from ._models_py3 import Dependency
from ._models_py3 import Deployment
from ._models_py3 import DeploymentExportResult
from ._models_py3 import DeploymentExtended
from ._models_py3 import DeploymentExtendedFilter
from ._models_py3 import DeploymentListResult
from ._models_py3 import DeploymentOperation
from ._models_py3 import DeploymentOperationProperties
from ._models_py3 import DeploymentOperationsListResult
from ._models_py3 import DeploymentProperties
from ._models_py3 import DeploymentPropertiesExtended
from ._models_py3 import DeploymentValidateResult
from ._models_py3 import DeploymentWhatIf
from ._models_py3 import DeploymentWhatIfProperties
from ._models_py3 import DeploymentWhatIfSettings
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import ExportTemplateRequest
from ._models_py3 import GenericResource
from ._models_py3 import GenericResourceExpanded
from ._models_py3 import GenericResourceFilter
from ._models_py3 import HttpMessage
from ._models_py3 import Identity
from ._models_py3 import OnErrorDeployment
from ._models_py3 import OnErrorDeploymentExtended
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ParametersLink
from ._models_py3 import Plan
from ._models_py3 import Provider
from ._models_py3 import ProviderListResult
from ._models_py3 import ProviderResourceType
from ._models_py3 import Resource
from ._models_py3 import ResourceGroup
from ._models_py3 import ResourceGroupExportResult
from ._models_py3 import ResourceGroupFilter
from ._models_py3 import ResourceGroupListResult
from ._models_py3 import ResourceGroupPatchable
from ._models_py3 import ResourceGroupProperties
from ._models_py3 import ResourceListResult
from ._models_py3 import ResourceProviderOperationDisplayProperties
from ._models_py3 import ResourcesMoveInfo
from ._models_py3 import ScopedDeployment
from ._models_py3 import Sku
from ._models_py3 import SubResource
from ._models_py3 import TagCount
from ._models_py3 import TagDetails
from ._models_py3 import TagValue
from ._models_py3 import TagsListResult
from ._models_py3 import TargetResource
from ._models_py3 import TemplateHashResult
from ._models_py3 import TemplateLink
from ._models_py3 import WhatIfChange
from ._models_py3 import WhatIfOperationResult
from ._models_py3 import WhatIfPropertyChange
from ._models_py3 import ZoneMapping
except (SyntaxError, ImportError):
from ._models import AliasPathType # type: ignore
from ._models import AliasType # type: ignore
from ._models import BasicDependency # type: ignore
from ._models import ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties # type: ignore
from ._models import DebugSetting # type: ignore
from ._models import Dependency # type: ignore
from ._models import Deployment # type: ignore
from ._models import DeploymentExportResult # type: ignore
from ._models import DeploymentExtended # type: ignore
from ._models import DeploymentExtendedFilter # type: ignore
from ._models import DeploymentListResult # type: ignore
from ._models import DeploymentOperation # type: ignore
from ._models import DeploymentOperationProperties # type: ignore
from ._models import DeploymentOperationsListResult # type: ignore
from ._models import DeploymentProperties # type: ignore
from ._models import DeploymentPropertiesExtended # type: ignore
from ._models import DeploymentValidateResult # type: ignore
from ._models import DeploymentWhatIf # type: ignore
from ._models import DeploymentWhatIfProperties # type: ignore
from ._models import DeploymentWhatIfSettings # type: ignore
from ._models import ErrorAdditionalInfo # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ExportTemplateRequest # type: ignore
from ._models import GenericResource # type: ignore
from ._models import GenericResourceExpanded # type: ignore
from ._models import GenericResourceFilter # type: ignore
from ._models import HttpMessage # type: ignore
from ._models import Identity # type: ignore
from ._models import OnErrorDeployment # type: ignore
from ._models import OnErrorDeploymentExtended # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import ParametersLink # type: ignore
from ._models import Plan # type: ignore
from ._models import Provider # type: ignore
from ._models import ProviderListResult # type: ignore
from ._models import ProviderResourceType # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceGroup # type: ignore
from ._models import ResourceGroupExportResult # type: ignore
from ._models import ResourceGroupFilter # type: ignore
from ._models import ResourceGroupListResult # type: ignore
from ._models import ResourceGroupPatchable # type: ignore
from ._models import ResourceGroupProperties # type: ignore
from ._models import ResourceListResult # type: ignore
from ._models import ResourceProviderOperationDisplayProperties # type: ignore
from ._models import ResourcesMoveInfo # type: ignore
from ._models import ScopedDeployment # type: ignore
from ._models import Sku # type: ignore
from ._models import SubResource # type: ignore
from ._models import TagCount # type: ignore
from ._models import TagDetails # type: ignore
from ._models import TagValue # type: ignore
from ._models import TagsListResult # type: ignore
from ._models import TargetResource # type: ignore
from ._models import TemplateHashResult # type: ignore
from ._models import TemplateLink # type: ignore
from ._models import WhatIfChange # type: ignore
from ._models import WhatIfOperationResult # type: ignore
from ._models import WhatIfPropertyChange # type: ignore
from ._models import ZoneMapping # type: ignore
from ._resource_management_client_enums import (
ChangeType,
DeploymentMode,
OnErrorDeploymentType,
PropertyChangeType,
ResourceIdentityType,
WhatIfResultFormat,
)
__all__ = [
'AliasPathType',
'AliasType',
'BasicDependency',
'ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties',
'DebugSetting',
'Dependency',
'Deployment',
'DeploymentExportResult',
'DeploymentExtended',
'DeploymentExtendedFilter',
'DeploymentListResult',
'DeploymentOperation',
'DeploymentOperationProperties',
'DeploymentOperationsListResult',
'DeploymentProperties',
'DeploymentPropertiesExtended',
'DeploymentValidateResult',
'DeploymentWhatIf',
'DeploymentWhatIfProperties',
'DeploymentWhatIfSettings',
'ErrorAdditionalInfo',
'ErrorResponse',
'ExportTemplateRequest',
'GenericResource',
'GenericResourceExpanded',
'GenericResourceFilter',
'HttpMessage',
'Identity',
'OnErrorDeployment',
'OnErrorDeploymentExtended',
'Operation',
'OperationDisplay',
'OperationListResult',
'ParametersLink',
'Plan',
'Provider',
'ProviderListResult',
'ProviderResourceType',
'Resource',
'ResourceGroup',
'ResourceGroupExportResult',
'ResourceGroupFilter',
'ResourceGroupListResult',
'ResourceGroupPatchable',
'ResourceGroupProperties',
'ResourceListResult',
'ResourceProviderOperationDisplayProperties',
'ResourcesMoveInfo',
'ScopedDeployment',
'Sku',
'SubResource',
'TagCount',
'TagDetails',
'TagValue',
'TagsListResult',
'TargetResource',
'TemplateHashResult',
'TemplateLink',
'WhatIfChange',
'WhatIfOperationResult',
'WhatIfPropertyChange',
'ZoneMapping',
'ChangeType',
'DeploymentMode',
'OnErrorDeploymentType',
'PropertyChangeType',
'ResourceIdentityType',
'WhatIfResultFormat',
]
| mit | 8,691,949,672,866,523,000 | 41.915888 | 124 | 0.726699 | false |
spotify/luigi | luigi/retcodes.py | 8 | 4658 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module containing the logic for exit codes for the luigi binary. It's useful
when you need to know programmatically whether luigi actually finished the
given task, and if not, why.
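For example, a luigi.cfg section along these lines maps each outcome to a
distinct, non-zero exit code (the exact numbers are illustrative, not
mandated by luigi):
    [retcode]
    already_running=10
    missing_data=20
    not_run=25
    task_failed=30
    scheduling_error=35
    unhandled_exception=40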
"""
import luigi
import sys
import logging
from luigi import IntParameter
from luigi.setup_logging import InterfaceLogging
class retcode(luigi.Config):
"""
See the :ref:`return codes configuration section <retcode-config>`.
"""
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
unhandled_exception = IntParameter(default=4,
description='For internal luigi errors.',
)
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
missing_data = IntParameter(default=0,
description="For when there are incomplete ExternalTask dependencies.",
)
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
task_failed = IntParameter(default=0,
description='''For when a task's run() method fails.''',
)
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
already_running = IntParameter(default=0,
description='For both local --lock and luigid "lock"',
)
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
scheduling_error = IntParameter(default=0,
description='''For when a task's complete() or requires() fails,
or task-limit reached'''
)
# default value inconsistent with doc/configuration.rst for backwards compatibility reasons
not_run = IntParameter(default=0,
description="For when a task is not granted run permission by the scheduler."
)
def run_with_retcodes(argv):
"""
Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code.
Note: Usually you use the luigi binary directly and don't call this function yourself.
:param argv: Should (conceptually) be ``sys.argv[1:]``
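    A minimal programmatic sketch (``MyTask`` is a hypothetical task name):
        import luigi.retcodes
        luigi.retcodes.run_with_retcodes(['MyTask', '--local-scheduler'])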
"""
logger = logging.getLogger('luigi-interface')
with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
retcodes = retcode()
worker = None
try:
worker = luigi.interface._run(argv).worker
except luigi.interface.PidLockAlreadyTakenExit:
sys.exit(retcodes.already_running)
except Exception:
# Some errors occur before logging is set up, we set it up now
env_params = luigi.interface.core()
InterfaceLogging.setup(env_params)
logger.exception("Uncaught exception in luigi")
sys.exit(retcodes.unhandled_exception)
with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
task_sets = luigi.execution_summary._summary_dict(worker)
root_task = luigi.execution_summary._root_task(worker)
non_empty_categories = {k: v for k, v in task_sets.items() if v}.keys()
def has(status):
assert status in luigi.execution_summary._ORDERED_STATUSES
return status in non_empty_categories
codes_and_conds = (
(retcodes.missing_data, has('still_pending_ext')),
(retcodes.task_failed, has('failed')),
(retcodes.already_running, has('run_by_other_worker')),
(retcodes.scheduling_error, has('scheduling_error')),
(retcodes.not_run, has('not_run')),
)
expected_ret_code = max(code * (1 if cond else 0) for code, cond in codes_and_conds)
if expected_ret_code == 0 and \
root_task not in task_sets["completed"] and \
root_task not in task_sets["already_done"]:
sys.exit(retcodes.not_run)
else:
sys.exit(expected_ret_code)
| apache-2.0 | 23,745,922,212,479,748 | 42.12963 | 104 | 0.646844 | false |
KennethNielsen/SoCo | setup.py | 1 | 2580 | #!/usr/bin/env python
import io
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
# Code from here: https://pytest.org/latest/goodpractises.html
def finalize_options(self):
TestCommand.finalize_options(self)
# we don't run integration tests which need an actual Sonos device
self.test_args = ['-m', 'not integration']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
src = io.open('soco/__init__.py', encoding='utf-8').read()
metadata = dict(re.findall("__([a-z]+)__ = \"([^\"]+)\"", src))
docstrings = re.findall('"""(.*?)"""', src, re.MULTILINE | re.DOTALL)
NAME = 'soco'
PACKAGES = (
'soco',
'soco.plugins',
'soco.music_services',
)
TEST_REQUIREMENTS = list(open('requirements-dev.txt'))
AUTHOR_EMAIL = metadata['author']
VERSION = metadata['version']
WEBSITE = metadata['website']
LICENSE = metadata['license']
DESCRIPTION = docstrings[0]
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Home Automation',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Software Development :: Libraries :: Python Modules',
]
with io.open('README.rst', encoding='utf-8') as file:
LONG_DESCRIPTION = file.read()
# Extract name and e-mail ("Firstname Lastname <[email protected]>")
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()
REQUIREMENTS = list(open('requirements.txt'))
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
url=WEBSITE,
packages=PACKAGES,
install_requires=REQUIREMENTS,
tests_require=TEST_REQUIREMENTS,
long_description=LONG_DESCRIPTION,
cmdclass={'test': PyTest},
classifiers=CLASSIFIERS,
)
| mit | 3,196,205,863,717,753,000 | 28.318182 | 74 | 0.65155 | false |
mariusbaumann/pyload | module/plugins/internal/MultiHoster.py | 1 | 2415 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies
class MultiHoster(SimpleHoster):
__name__ = "MultiHoster"
__type__ = "hoster"
__version__ = "0.28"
__pattern__ = r'^unmatchable$'
__description__ = """Multi hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
LOGIN_ACCOUNT = True
def setup(self):
self.chunkLimit = 1
self.multiDL = self.premium
def prepare(self):
self.info = {}
self.link = "" #@TODO: Move to hoster class in 0.4.10
self.directDL = False #@TODO: Move to hoster class in 0.4.10
if self.LOGIN_ACCOUNT and not self.account:
self.fail(_("Required account not found"))
self.req.setOption("timeout", 120)
if isinstance(self.COOKIES, list):
set_cookies(self.req.cj, self.COOKIES)
if self.DIRECT_LINK is None:
self.directDL = self.__pattern__ != r'^unmatchable$'
else:
self.directDL = self.DIRECT_LINK
self.pyfile.url = replace_patterns(self.pyfile.url,
self.FILE_URL_REPLACEMENTS if hasattr(self, "FILE_URL_REPLACEMENTS") else self.URL_REPLACEMENTS) #@TODO: Remove FILE_URL_REPLACEMENTS check in 0.4.10
def process(self, pyfile):
self.prepare()
if self.directDL:
self.logDebug("Looking for direct download link...")
self.handleDirect(pyfile)
if self.link:
self.pyfile.url = self.link
self.checkNameSize()
elif not self.lastDownload:
self.preload()
self.checkInfo()
if self.premium and (not self.CHECK_TRAFFIC or self.checkTrafficLeft()):
self.logDebug("Handled as premium download")
self.handlePremium()
else:
self.logDebug("Handled as free download")
self.handleFree()
self.downloadLink(self.link)
self.checkFile()
def handlePremium(self, pyfile=None):
return self.handleFree(pyfile)
def handleFree(self, pyfile=None):
if self.premium:
raise NotImplementedError
else:
self.fail(_("Required premium account not found"))
| gpl-3.0 | -5,986,857,389,860,580,000 | 27.75 | 193 | 0.575983 | false |
denisff/python-for-android | python3-alpha/python3-src/Lib/idlelib/Debugger.py | 55 | 16368 | import os
import bdb
import types
from tkinter import *
from idlelib.WindowList import ListedToplevel
from idlelib.ScrolledList import ScrolledList
from idlelib import macosxSupport
class Idb(bdb.Bdb):
def __init__(self, gui):
self.gui = gui
bdb.Bdb.__init__(self)
def user_line(self, frame):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame)
def user_exception(self, frame, info):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame, info)
def in_rpc_code(self, frame):
if frame.f_code.co_filename.count('rpc.py'):
return True
else:
prev_frame = frame.f_back
if prev_frame.f_code.co_filename.count('Debugger.py'):
# (that test will catch both Debugger.py and RemoteDebugger.py)
return False
return self.in_rpc_code(prev_frame)
def __frame2message(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
return message
class Debugger:
vstack = vsource = vlocals = vglobals = None
def __init__(self, pyshell, idb=None):
if idb is None:
idb = Idb(self)
self.pyshell = pyshell
self.idb = idb
self.frame = None
self.make_gui()
self.interacting = 0
def run(self, *args):
try:
self.interacting = 1
return self.idb.run(*args)
finally:
self.interacting = 0
def close(self, event=None):
if self.interacting:
self.top.bell()
return
if self.stackviewer:
self.stackviewer.close(); self.stackviewer = None
# Clean up pyshell if user clicked debugger control close widget.
# (Causes a harmless extra cycle through close_debugger() if user
# toggled debugger from pyshell Debug menu)
self.pyshell.close_debugger()
# Now close the debugger control window....
self.top.destroy()
def make_gui(self):
pyshell = self.pyshell
self.flist = pyshell.flist
self.root = root = pyshell.root
self.top = top = ListedToplevel(root)
self.top.wm_title("Debug Control")
self.top.wm_iconname("Debug")
top.wm_protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<Escape>", self.close)
#
self.bframe = bframe = Frame(top)
self.bframe.pack(anchor="w")
self.buttons = bl = []
#
self.bcont = b = Button(bframe, text="Go", command=self.cont)
bl.append(b)
self.bstep = b = Button(bframe, text="Step", command=self.step)
bl.append(b)
self.bnext = b = Button(bframe, text="Over", command=self.next)
bl.append(b)
self.bret = b = Button(bframe, text="Out", command=self.ret)
bl.append(b)
self.bret = b = Button(bframe, text="Quit", command=self.quit)
bl.append(b)
#
for b in bl:
b.configure(state="disabled")
b.pack(side="left")
#
self.cframe = cframe = Frame(bframe)
self.cframe.pack(side="left")
#
if not self.vstack:
self.__class__.vstack = BooleanVar(top)
self.vstack.set(1)
self.bstack = Checkbutton(cframe,
text="Stack", command=self.show_stack, variable=self.vstack)
self.bstack.grid(row=0, column=0)
if not self.vsource:
self.__class__.vsource = BooleanVar(top)
self.bsource = Checkbutton(cframe,
text="Source", command=self.show_source, variable=self.vsource)
self.bsource.grid(row=0, column=1)
if not self.vlocals:
self.__class__.vlocals = BooleanVar(top)
self.vlocals.set(1)
self.blocals = Checkbutton(cframe,
text="Locals", command=self.show_locals, variable=self.vlocals)
self.blocals.grid(row=1, column=0)
if not self.vglobals:
self.__class__.vglobals = BooleanVar(top)
self.bglobals = Checkbutton(cframe,
text="Globals", command=self.show_globals, variable=self.vglobals)
self.bglobals.grid(row=1, column=1)
#
self.status = Label(top, anchor="w")
self.status.pack(anchor="w")
self.error = Label(top, anchor="w")
self.error.pack(anchor="w", fill="x")
self.errorbg = self.error.cget("background")
#
self.fstack = Frame(top, height=1)
self.fstack.pack(expand=1, fill="both")
self.flocals = Frame(top)
self.flocals.pack(expand=1, fill="both")
self.fglobals = Frame(top, height=1)
self.fglobals.pack(expand=1, fill="both")
#
if self.vstack.get():
self.show_stack()
if self.vlocals.get():
self.show_locals()
if self.vglobals.get():
self.show_globals()
def interaction(self, message, frame, info=None):
self.frame = frame
self.status.configure(text=message)
#
if info:
type, value, tb = info
try:
m1 = type.__name__
except AttributeError:
m1 = "%s" % str(type)
if value is not None:
try:
m1 = "%s: %s" % (m1, str(value))
except:
pass
bg = "yellow"
else:
m1 = ""
tb = None
bg = self.errorbg
self.error.configure(text=m1, background=bg)
#
sv = self.stackviewer
if sv:
stack, i = self.idb.get_stack(self.frame, tb)
sv.load_stack(stack, i)
#
self.show_variables(1)
#
if self.vsource.get():
self.sync_source_line()
#
for b in self.buttons:
b.configure(state="normal")
#
self.top.wakeup()
self.root.mainloop()
#
for b in self.buttons:
b.configure(state="disabled")
self.status.configure(text="")
self.error.configure(text="", background=self.errorbg)
self.frame = None
def sync_source_line(self):
frame = self.frame
if not frame:
return
filename, lineno = self.__frame2fileline(frame)
if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
self.flist.gotofileline(filename, lineno)
def __frame2fileline(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
return filename, lineno
def cont(self):
self.idb.set_continue()
self.root.quit()
def step(self):
self.idb.set_step()
self.root.quit()
def next(self):
self.idb.set_next(self.frame)
self.root.quit()
def ret(self):
self.idb.set_return(self.frame)
self.root.quit()
def quit(self):
self.idb.set_quit()
self.root.quit()
stackviewer = None
def show_stack(self):
if not self.stackviewer and self.vstack.get():
self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
if self.frame:
stack, i = self.idb.get_stack(self.frame, None)
sv.load_stack(stack, i)
else:
sv = self.stackviewer
if sv and not self.vstack.get():
self.stackviewer = None
sv.close()
self.fstack['height'] = 1
def show_source(self):
if self.vsource.get():
self.sync_source_line()
def show_frame(self, stackitem):
frame, lineno = stackitem
self.frame = frame
self.show_variables()
localsviewer = None
globalsviewer = None
def show_locals(self):
lv = self.localsviewer
if self.vlocals.get():
if not lv:
self.localsviewer = NamespaceViewer(self.flocals, "Locals")
else:
if lv:
self.localsviewer = None
lv.close()
self.flocals['height'] = 1
self.show_variables()
def show_globals(self):
gv = self.globalsviewer
if self.vglobals.get():
if not gv:
self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
else:
if gv:
self.globalsviewer = None
gv.close()
self.fglobals['height'] = 1
self.show_variables()
def show_variables(self, force=0):
lv = self.localsviewer
gv = self.globalsviewer
frame = self.frame
if not frame:
ldict = gdict = None
else:
ldict = frame.f_locals
gdict = frame.f_globals
if lv and gv and ldict is gdict:
ldict = None
if lv:
lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
if gv:
gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)
def set_breakpoint_here(self, filename, lineno):
self.idb.set_break(filename, lineno)
def clear_breakpoint_here(self, filename, lineno):
self.idb.clear_break(filename, lineno)
def clear_file_breaks(self, filename):
self.idb.clear_all_file_breaks(filename)
def load_breakpoints(self):
"Load PyShellEditorWindow breakpoints into subprocess debugger"
for editwin in self.pyshell.flist.inversedict:
filename = editwin.io.filename
try:
for lineno in editwin.breakpoints:
self.set_breakpoint_here(filename, lineno)
except AttributeError:
continue
class StackViewer(ScrolledList):
def __init__(self, master, flist, gui):
if macosxSupport.runningAsOSXApp():
            # At least with the stock AquaTk version on OSX 10.4 you'll
            # get a shaking GUI that eventually kills IDLE if the width
# argument is specified.
ScrolledList.__init__(self, master)
else:
ScrolledList.__init__(self, master, width=80)
self.flist = flist
self.gui = gui
self.stack = []
def load_stack(self, stack, index=None):
self.stack = stack
self.clear()
for i in range(len(stack)):
frame, lineno = stack[i]
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
import linecache
sourceline = linecache.getline(filename, lineno)
sourceline = sourceline.strip()
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(), line %d: %s" % (modname, funcname,
lineno, sourceline)
if i == index:
item = "> " + item
self.append(item)
if index is not None:
self.select(index)
def popup_event(self, event):
"override base method"
if self.stack:
return ScrolledList.popup_event(self, event)
def fill_menu(self):
"override base method"
menu = self.menu
menu.add_command(label="Go to source line",
command=self.goto_source_line)
menu.add_command(label="Show stack frame",
command=self.show_stack_frame)
def on_select(self, index):
"override base method"
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def on_double(self, index):
"override base method"
self.show_source(index)
def goto_source_line(self):
index = self.listbox.index("active")
self.show_source(index)
def show_stack_frame(self):
index = self.listbox.index("active")
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def show_source(self, index):
if not (0 <= index < len(self.stack)):
return
frame, lineno = self.stack[index]
code = frame.f_code
filename = code.co_filename
if os.path.isfile(filename):
edit = self.flist.open(filename)
if edit:
edit.gotoline(lineno)
class NamespaceViewer:
def __init__(self, master, title, dict=None):
width = 0
height = 40
if dict:
height = 20*len(dict) # XXX 20 == observed height of Entry widget
self.master = master
self.title = title
import reprlib
self.repr = reprlib.Repr()
self.repr.maxstring = 60
self.repr.maxother = 60
self.frame = frame = Frame(master)
self.frame.pack(expand=1, fill="both")
self.label = Label(frame, text=title, borderwidth=2, relief="groove")
self.label.pack(fill="x")
self.vbar = vbar = Scrollbar(frame, name="vbar")
vbar.pack(side="right", fill="y")
self.canvas = canvas = Canvas(frame,
height=min(300, max(40, height)),
scrollregion=(0, 0, width, height))
canvas.pack(side="left", fill="both", expand=1)
vbar["command"] = canvas.yview
canvas["yscrollcommand"] = vbar.set
self.subframe = subframe = Frame(canvas)
self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
self.load_dict(dict)
dict = -1
def load_dict(self, dict, force=0, rpc_client=None):
if dict is self.dict and not force:
return
subframe = self.subframe
frame = self.frame
for c in list(subframe.children.values()):
c.destroy()
self.dict = None
if not dict:
l = Label(subframe, text="None")
l.grid(row=0, column=0)
else:
#names = sorted(dict)
###
# Because of (temporary) limitations on the dict_keys type (not yet
            # public or pickleable), have the subprocess send a list of
# keys, not a dict_keys object. sorted() will take a dict_keys
# (no subprocess) or a list.
#
# There is also an obscure bug in sorted(dict) where the
# interpreter gets into a loop requesting non-existing dict[0],
# dict[1], dict[2], etc from the RemoteDebugger.DictProxy.
###
keys_list = dict.keys()
names = sorted(keys_list)
###
row = 0
for name in names:
value = dict[name]
svalue = self.repr.repr(value) # repr(value)
# Strip extra quotes caused by calling repr on the (already)
# repr'd value sent across the RPC interface:
if rpc_client:
svalue = svalue[1:-1]
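                    # Sketch of this step with an assumed value: the RPC layer
                    # delivers the already repr'd string "'spam'"; reprlib wraps
                    # it once more, and the [1:-1] slice drops the extra outer
                    # quotes so the entry shows 'spam'.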
l = Label(subframe, text=name)
l.grid(row=row, column=0, sticky="nw")
l = Entry(subframe, width=0, borderwidth=0)
l.insert(0, svalue)
l.grid(row=row, column=1, sticky="nw")
row = row+1
self.dict = dict
# XXX Could we use a <Configure> callback for the following?
subframe.update_idletasks() # Alas!
width = subframe.winfo_reqwidth()
height = subframe.winfo_reqheight()
canvas = self.canvas
self.canvas["scrollregion"] = (0, 0, width, height)
if height > 300:
canvas["height"] = 300
frame.pack(expand=1)
else:
canvas["height"] = height
frame.pack(expand=0)
def close(self):
self.frame.destroy()
| apache-2.0 | 5,973,071,751,481,614,000 | 32.268293 | 79 | 0.539895 | false |
neumerance/cloudloon2 | openstack_dashboard/openstack/common/rpc/matchmaker_ring.py | 19 | 3692 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import itertools
import json
from oslo.config import cfg
from openstack_dashboard.openstack.common.gettextutils import _
from openstack_dashboard.openstack.common import log as logging
from openstack_dashboard.openstack.common.rpc import matchmaker as mm
matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('ringfile',
deprecated_name='matchmaker_ringfile',
deprecated_group='DEFAULT',
default='/etc/oslo/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)
class RingExchange(mm.Exchange):
"""
Match Maker where hosts are loaded from a static file containing
a hashmap (JSON formatted).
__init__ takes optional ring dictionary argument, otherwise
    loads the ring file from CONF.matchmaker_ring.ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
fh = open(CONF.matchmaker_ring.ringfile, 'r')
self.ring = json.load(fh)
fh.close()
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
if key in self.ring0:
return True
return False
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
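# Illustrative use (ring contents are assumed, not part of this module): with
# ring = {'scheduler': ['host1', 'host2']},
#   RoundRobinRingExchange(ring).run('scheduler') alternates between
#   [('scheduler.host1', 'host1')] and [('scheduler.host2', 'host2')], while
#   FanoutRingExchange(ring).run('fanout~scheduler') returns
#   [('fanout~scheduler.host1', 'host1'), ('fanout~scheduler.host2', 'host2')].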
class MatchMakerRing(mm.MatchMakerBase):
"""
Match Maker where hosts are loaded from a static hashmap.
"""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(mm.DirectBinding(), mm.DirectExchange())
self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
| apache-2.0 | 1,185,011,274,033,428,700 | 31.385965 | 78 | 0.626219 | false |
migue/voltdb | lib/python/voltcli/voltadmin.d/save.py | 1 | 3812 | # This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import os
import urllib
@VOLT.Command(
bundles = VOLT.AdminBundle(),
description = 'Save a VoltDB database snapshot.',
options = (
VOLT.BooleanOption('-b', '--blocking', 'blocking',
'block transactions and wait until the snapshot completes',
default = False),
VOLT.EnumOption('-f', '--format', 'format',
'snapshot format', 'native', 'csv',
default = 'native'),
VOLT.StringListOption(None, '--tables', 'tables',
'tables to include in the snapshot',
default = None),
VOLT.StringListOption(None, '--skiptables', 'skip_tables',
'tables to skip in the snapshot',
default = None)
),
arguments=(
VOLT.PathArgument('directory', 'the snapshot server directory', absolute=True, optional=True),
VOLT.StringArgument('nonce', 'the unique snapshot identifier (nonce)', optional=True)
)
)
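# Illustrative invocation (exact CLI syntax and names are assumed here):
#   voltadmin save --blocking /tmp/snapshots nightly
# would request a blocking snapshot called "nightly" under /tmp/snapshots,
# while a plain "voltadmin save" omits the path and nonce options.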
def save(runner):
uri = None
dir_specified = False
if runner.opts.directory is not None:
uri = 'file://%s' % urllib.quote(runner.opts.directory)
dir_specified = True
nonce = None
if runner.opts.nonce is not None:
nonce = runner.opts.nonce.replace('"', '\\"')
elif dir_specified:
runner.abort('When a DIRECTORY is given a NONCE must be specified as well.')
else:
runner.opts.format = 'native'
runner.opts.tables = None
runner.opts.skip_tables = None
if runner.opts.blocking:
blocking = 'true'
else:
blocking = 'false'
if uri is not None:
if nonce is not None:
raw_json_opts = ['uripath:"%s"' % (uri),
'nonce:"%s"' % (nonce),
'block:%s' % (blocking),
'format:"%s"' % (runner.opts.format)]
else:
raw_json_opts = ['uripath:"%s"' % (uri),
'block:%s' % (blocking),
'format:"%s"' % (runner.opts.format)]
else:
if nonce is not None:
            raw_json_opts = ['nonce:"%s"' % (nonce),
                             'block:%s' % (blocking),
                             'format:"%s"' % (runner.opts.format)]
else:
raw_json_opts = ['block:%s' % (blocking),
'format:"%s"' % (runner.opts.format)]
if runner.opts.tables:
raw_json_opts.append('tables:%s' % (runner.opts.tables))
if runner.opts.skip_tables:
raw_json_opts.append('skiptables:%s' % (runner.opts.skip_tables))
runner.verbose_info('@SnapshotSave "%s"' % raw_json_opts)
columns = [VOLT.FastSerializer.VOLTTYPE_STRING]
response = runner.call_proc('@SnapshotSave', columns,
['{%s}' % (','.join(raw_json_opts))])
print response.table(0).format_table(caption = 'Snapshot Save Results')
| agpl-3.0 | 7,709,832,698,870,177,000 | 41.355556 | 106 | 0.554302 | false |
weese/seqan | misc/seqan_instrumentation/py2exe/dist/classes/requests/packages/urllib3/util.py | 41 | 3789 | # urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
from .packages import six
from .exceptions import LocationParseError
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
return headers
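# Usage example for the basic_auth option (credentials are made up):
# make_headers(basic_auth='user:pass') returns
# {'authorization': 'Basic dXNlcjpwYXNz'}.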
def get_host(url):
"""
Given a url, return its scheme, host and port (None if it's not there).
For example: ::
>>> get_host('http://google.com/mail/')
('http', 'google.com', None)
>>> get_host('google.com:80')
('http', 'google.com', 80)
"""
# This code is actually similar to urlparse.urlsplit, but much
# simplified for our needs.
port = None
scheme = 'http'
if '://' in url:
scheme, url = url.split('://', 1)
if '/' in url:
url, _path = url.split('/', 1)
if '@' in url:
_auth, url = url.split('@', 1)
if ':' in url:
url, port = url.split(':', 1)
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
return scheme, url, port
def is_connection_dropped(conn):
"""
Returns True if the connection is dropped and should be closed.
:param conn:
``HTTPConnection`` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: #Platform-specific: AppEngine
return False
if not poll: # Platform-specific
if not select: #Platform-specific: AppEngine
return False
return select([sock], [], [], 0.0)[0]
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
| bsd-3-clause | 3,920,647,605,950,137,300 | 26.860294 | 79 | 0.604117 | false |
lemonade512/BluebonnetsPointsApp | bluebonnetspointsapp/lib/pbr/builddoc.py | 18 | 9083 | # Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import log
import fnmatch
import os
import pkg_resources
import sys
import warnings
try:
import cStringIO
except ImportError:
import io as cStringIO
try:
from sphinx import apidoc
from sphinx import application
from sphinx import config
from sphinx import setup_command
except Exception as e:
# NOTE(dhellmann): During the installation of docutils, setuptools
# tries to import pbr code to find the egg_info.writer hooks. That
# imports this module, which imports sphinx, which imports
# docutils, which is being installed. Because docutils uses 2to3
# to convert its code during installation under python 3, the
# import fails, but it fails with an error other than ImportError
# (today it's a NameError on StandardError, an exception base
# class). Convert the exception type here so it can be caught in
# packaging.py where we try to determine if we can import and use
# sphinx by importing this module. See bug #1403510 for details.
raise ImportError(str(e))
from pbr import git
from pbr import options
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
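# Rough illustration (paths assumed): for a tree containing pbr/git.py,
# _find_modules(modules, 'pbr', ['git.py']) records modules['pbr.git'] = True;
# __init__.py files are skipped.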
class LocalBuildDoc(setup_command.BuildDoc):
command_name = 'build_sphinx'
builders = ['html', 'man']
def _get_source_dir(self):
option_dict = self.distribution.get_option_dict('build_sphinx')
if 'source_dir' in option_dict:
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
else:
source_dir = 'doc/source/api'
if not os.path.exists(source_dir):
os.makedirs(source_dir)
return source_dir
def generate_autoindex(self, excluded_modules=None):
log.info("[pbr] Autodocumenting from %s"
% os.path.abspath(os.curdir))
modules = {}
source_dir = self._get_source_dir()
for pkg in self.distribution.packages:
if '.' not in pkg:
for dirpath, dirnames, files in os.walk(pkg):
_find_modules(modules, dirpath, files)
def include(module):
return not any(fnmatch.fnmatch(module, pat)
for pat in excluded_modules)
module_list = sorted(mod for mod in modules.keys() if include(mod))
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
log.info("[pbr] Generating %s"
% output_filename)
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def _sphinx_tree(self):
source_dir = self._get_source_dir()
cmd = ['apidoc', '.', '-H', 'Modules', '-o', source_dir]
apidoc.main(cmd + self.autodoc_tree_excludes)
def _sphinx_run(self):
if not self.verbose:
status_stream = cStringIO.StringIO()
else:
status_stream = sys.stdout
confoverrides = {}
if self.version:
confoverrides['version'] = self.version
if self.release:
confoverrides['release'] = self.release
if self.today:
confoverrides['today'] = self.today
sphinx_config = config.Config(self.config_dir, 'conf.py', {}, [])
sphinx_ver = pkg_resources.get_distribution("sphinx").version
if pkg_resources.parse_version(sphinx_ver) > \
pkg_resources.parse_version('1.2.3'):
sphinx_config.init_values(warnings.warn)
else:
sphinx_config.init_values()
if self.builder == 'man' and len(sphinx_config.man_pages) == 0:
return
app = application.Sphinx(
self.source_dir, self.config_dir,
self.builder_target_dir, self.doctree_dir,
self.builder, confoverrides, status_stream,
freshenv=self.fresh_env, warningiserror=False)
try:
app.build(force_all=self.all_files)
except Exception as err:
from docutils import utils
if isinstance(err, utils.SystemMessage):
                sys.stderr.write('reST markup error:\n')
sys.stderr.write(err.args[0].encode('ascii',
'backslashreplace'))
sys.stderr.write('\n')
else:
raise
if self.link_index:
src = app.config.master_doc + app.builder.out_suffix
dst = app.builder.get_outfilename('index')
os.symlink(src, dst)
def run(self):
option_dict = self.distribution.get_option_dict('pbr')
if git._git_is_installed():
git.write_git_changelog(option_dict=option_dict)
git.generate_authors(option_dict=option_dict)
tree_index = options.get_boolean_option(option_dict,
'autodoc_tree_index_modules',
'AUTODOC_TREE_INDEX_MODULES')
auto_index = options.get_boolean_option(option_dict,
'autodoc_index_modules',
'AUTODOC_INDEX_MODULES')
if not os.getenv('SPHINX_DEBUG'):
# NOTE(afazekas): These options can be used together,
# but they do a very similar thing in a different way
if tree_index:
self._sphinx_tree()
if auto_index:
self.generate_autoindex(
set(option_dict.get(
"autodoc_exclude_modules",
[None, ""])[1].split()))
for builder in self.builders:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
if options.get_boolean_option(option_dict,
'warnerrors', 'WARNERRORS'):
self._sphinx_run()
else:
setup_command.BuildDoc.run(self)
def initialize_options(self):
# Not a new style class, super keyword does not work.
setup_command.BuildDoc.initialize_options(self)
# NOTE(dstanek): exclude setup.py from the autodoc tree index
# builds because all projects will have an issue with it
self.autodoc_tree_excludes = ['setup.py']
def finalize_options(self):
# Not a new style class, super keyword does not work.
setup_command.BuildDoc.finalize_options(self)
# Handle builder option from command line - override cfg
option_dict = self.distribution.get_option_dict('build_sphinx')
if 'command line' in option_dict.get('builder', [[]])[0]:
self.builders = option_dict['builder'][1]
# Allow builders to be configurable - as a comma separated list.
if not isinstance(self.builders, list) and self.builders:
self.builders = self.builders.split(',')
# NOTE(dstanek): check for autodoc tree exclusion overrides
# in the setup.cfg
opt = 'autodoc_tree_excludes'
option_dict = self.distribution.get_option_dict('pbr')
if opt in option_dict:
self.autodoc_tree_excludes = option_dict[opt][1]
self.ensure_string_list(opt)
class LocalBuildLatex(LocalBuildDoc):
builders = ['latex']
command_name = 'build_sphinx_latex'
| gpl-3.0 | -130,640,983,658,371,090 | 38.491304 | 78 | 0.58582 | false |
marcusmueller/gnuradio | grc/core/generator/cpp_top_block.py | 3 | 14845 | import codecs
import yaml
import operator
import os
import tempfile
import textwrap
import re
from mako.template import Template
from .. import Messages, blocks
from ..Constants import TOP_BLOCK_FILE_MODE
from .FlowGraphProxy import FlowGraphProxy
from ..utils import expr_utils
from .top_block import TopBlockGenerator
DATA_DIR = os.path.dirname(__file__)
HEADER_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/flow_graph.hpp.mako')
SOURCE_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/flow_graph.cpp.mako')
CMAKE_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/CMakeLists.txt.mako')
header_template = Template(filename=HEADER_TEMPLATE)
source_template = Template(filename=SOURCE_TEMPLATE)
cmake_template = Template(filename=CMAKE_TEMPLATE)
class CppTopBlockGenerator(TopBlockGenerator):
def __init__(self, flow_graph, file_path):
"""
Initialize the C++ top block generator object.
Args:
flow_graph: the flow graph object
file_path: the path where we want to create
a new directory with C++ files
"""
self._flow_graph = FlowGraphProxy(flow_graph)
self._generate_options = self._flow_graph.get_option('generate_options')
self._mode = TOP_BLOCK_FILE_MODE
# Handle the case where the directory is read-only
# In this case, use the system's temp directory
if not os.access(file_path, os.W_OK):
file_path = tempfile.gettempdir()
# When generating C++ code, we create a new directory
# (file_path) and generate the files inside that directory
filename = self._flow_graph.get_option('id')
self.file_path = os.path.join(file_path, filename)
self._dirname = file_path
def write(self):
"""create directory, generate output and write it to files"""
self._warnings()
fg = self._flow_graph
platform = fg.parent
self.title = fg.get_option('title') or fg.get_option('id').replace('_', ' ').title()
variables = fg.get_cpp_variables()
parameters = fg.get_parameters()
monitors = fg.get_monitors()
self._variable_types()
self._parameter_types()
self.namespace = {
'flow_graph': fg,
'variables': variables,
'parameters': parameters,
'monitors': monitors,
'generate_options': self._generate_options,
'version': platform.config.version
}
if not os.path.exists(self.file_path):
os.makedirs(self.file_path)
for filename, data in self._build_cpp_header_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if not self._generate_options.startswith('hb'):
if not os.path.exists(os.path.join(self.file_path, 'build')):
os.makedirs(os.path.join(self.file_path, 'build'))
for filename, data in self._build_cpp_source_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if fg.get_option('gen_cmake') == 'On':
for filename, data in self._build_cmake_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
def _build_cpp_source_code_from_template(self):
"""
Convert the flow graph to a C++ source file.
Returns:
            a list of (file path, C++ source code) tuples
"""
file_path = self.file_path + '/' + self._flow_graph.get_option('id') + '.cpp'
output = []
flow_graph_code = source_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _build_cpp_header_code_from_template(self):
"""
Convert the flow graph to a C++ header file.
Returns:
            a list of (file path, C++ header code) tuples
"""
file_path = self.file_path + '/' + self._flow_graph.get_option('id') + '.hpp'
output = []
flow_graph_code = header_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _build_cmake_code_from_template(self):
"""
Convert the flow graph to a CMakeLists.txt file.
Returns:
            a list of (file path, CMake code) tuples
"""
filename = 'CMakeLists.txt'
file_path = os.path.join(self.file_path, filename)
cmake_tuples = []
cmake_opt = self._flow_graph.get_option("cmake_opt")
cmake_opt = " " + cmake_opt # To make sure we get rid of the "-D"s when splitting
for opt_string in cmake_opt.split(" -D"):
opt_string = opt_string.strip()
if opt_string:
cmake_tuples.append(tuple(opt_string.split("=")))
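        # For illustration (flag values assumed): a cmake_opt of
        # "-DCMAKE_BUILD_TYPE=Debug -DFOO=bar" ends up here as
        # [('CMAKE_BUILD_TYPE', 'Debug'), ('FOO', 'bar')].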
output = []
flow_graph_code = cmake_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
links=self._links(),
cmake_tuples=cmake_tuples,
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _links(self):
fg = self._flow_graph
links = fg.links()
seen = set()
output = []
for link_list in links:
if link_list:
for link in link_list:
seen.add(link)
return list(seen)
def _includes(self):
fg = self._flow_graph
includes = fg.includes()
seen = set()
output = []
def is_duplicate(l):
if l.startswith('#include') and l in seen:
return True
            seen.add(l)
return False
for block_ in includes:
for include_ in block_:
if not include_:
continue
line = include_.rstrip()
if not is_duplicate(line):
output.append(line)
return output
def _blocks(self):
fg = self._flow_graph
parameters = fg.get_parameters()
# List of blocks not including variables and imports and parameters and disabled
def _get_block_sort_text(block):
code = block.cpp_templates.render('make').replace(block.name, ' ')
try:
code += block.params['gui_hint'].get_value() # Newer gui markup w/ qtgui
except:
pass
return code
blocks = [
b for b in fg.blocks
if b.enabled and not (b.get_bypassed() or b.is_import or b in parameters or b.key == 'options')
]
blocks = expr_utils.sort_objects(blocks, operator.attrgetter('name'), _get_block_sort_text)
blocks_make = []
for block in blocks:
translations = block.cpp_templates.render('translations')
make = block.cpp_templates.render('make')
declarations = block.cpp_templates.render('declarations')
if translations:
translations = yaml.safe_load(translations)
else:
translations = {}
translations.update(
{r"gr\.sizeof_([\w_]+)": r"sizeof(\1)"}
)
for key in translations:
make = re.sub(key.replace("\\\\", "\\"), translations[key],make)
declarations = declarations.replace(key, translations[key])
if make:
blocks_make.append((block, make, declarations))
elif 'qt' in block.key:
# The QT Widget blocks are technically variables,
# but they contain some code we don't want to miss
blocks_make.append(('', make, declarations))
return blocks_make
def _variable_types(self):
fg = self._flow_graph
variables = fg.get_cpp_variables()
for var in variables:
var.decide_type()
def _parameter_types(self):
fg = self._flow_graph
parameters = fg.get_parameters()
for param in parameters:
            type_translation = {'eng_float': 'double', 'intx': 'int', 'std': 'std::string'}
param.vtype = type_translation[param.params['type'].value]
def _callbacks(self):
fg = self._flow_graph
variables = fg.get_cpp_variables()
parameters = fg.get_parameters()
# List of variable names
var_ids = [var.name for var in parameters + variables]
replace_dict = dict((var_id, 'this->' + var_id) for var_id in var_ids)
callbacks_all = []
for block in fg.iter_enabled_blocks():
callbacks_all.extend(expr_utils.expr_replace(cb, replace_dict) for cb in block.get_cpp_callbacks())
# Map var id to callbacks
def uses_var_id(callback):
used = expr_utils.get_variable_dependencies(callback, [var_id])
return used and ('this->' + var_id in callback) # callback might contain var_id itself
callbacks = {}
for var_id in var_ids:
callbacks[var_id] = [callback for callback in callbacks_all if uses_var_id(callback)]
return callbacks
def _connections(self):
fg = self._flow_graph
templates = {key: Template(text)
for key, text in fg.parent_platform.cpp_connection_templates.items()}
def make_port_sig(port):
if port.parent.key in ('pad_source', 'pad_sink'):
block = 'self()'
key = fg.get_pad_port_global_key(port)
else:
block = 'this->' + port.parent_block.name
key = port.key
if not key.isdigit():
key = re.findall(r'\d+', key)[0]
return '{block}, {key}'.format(block=block, key=key)
connections = fg.get_enabled_connections()
# Get the virtual blocks and resolve their connections
connection_factory = fg.parent_platform.Connection
virtual = [c for c in connections if isinstance(c.source_block, blocks.VirtualSource)]
for connection in virtual:
sink = connection.sink_port
for source in connection.source_port.resolve_virtual_source():
resolved = connection_factory(fg.orignal_flowgraph, source, sink)
connections.append(resolved)
# Remove the virtual connection
connections.remove(connection)
# Bypassing blocks: Need to find all the enabled connections for the block using
# the *connections* object rather than get_connections(). Create new connections
# that bypass the selected block and remove the existing ones. This allows adjacent
# bypassed blocks to see the newly created connections to downstream blocks,
# allowing them to correctly construct bypass connections.
bypassed_blocks = fg.get_bypassed_blocks()
for block in bypassed_blocks:
# Get the upstream connection (off of the sink ports)
# Use *connections* not get_connections()
source_connection = [c for c in connections if c.sink_port == block.sinks[0]]
# The source connection should never have more than one element.
assert (len(source_connection) == 1)
# Get the source of the connection.
source_port = source_connection[0].source_port
# Loop through all the downstream connections
for sink in (c for c in connections if c.source_port == block.sources[0]):
if not sink.enabled:
# Ignore disabled connections
continue
connection = connection_factory(fg.orignal_flowgraph, source_port, sink.sink_port)
connections.append(connection)
# Remove this sink connection
connections.remove(sink)
# Remove the source connection
connections.remove(source_connection[0])
# List of connections where each endpoint is enabled (sorted by domains, block names)
def by_domain_and_blocks(c):
return c.type, c.source_block.name, c.sink_block.name
rendered = []
for con in sorted(connections, key=by_domain_and_blocks):
template = templates[con.type]
if con.source_port.dtype != 'bus':
code = template.render(make_port_sig=make_port_sig, source=con.source_port, sink=con.sink_port)
if not self._generate_options.startswith('hb'):
code = 'this->tb->' + code
rendered.append(code)
else:
# Bus ports need to iterate over the underlying connections and then render
# the code for each subconnection
porta = con.source_port
portb = con.sink_port
fg = self._flow_graph
if porta.dtype == 'bus' and portb.dtype == 'bus':
# which bus port is this relative to the bus structure
if len(porta.bus_structure) == len(portb.bus_structure):
for port_num in porta.bus_structure:
hidden_porta = porta.parent.sources[port_num]
hidden_portb = portb.parent.sinks[port_num]
connection = fg.parent_platform.Connection(
parent=self, source=hidden_porta, sink=hidden_portb)
code = template.render(make_port_sig=make_port_sig, source=hidden_porta, sink=hidden_portb)
if not self._generate_options.startswith('hb'):
code = 'this->tb->' + code
rendered.append(code)
return rendered
| gpl-3.0 | 4,455,088,950,805,332,000 | 36.677665 | 119 | 0.567666 | false |
arante/pyloc | py2/cool-things/gvr_towers.py | 2 | 5500 | # hanoi.py
# Animated Towers of Hanoi using Tk with optional bitmap file in
# background.
#
# Usage: tkhanoi [n [bitmapfile]]
#
# n is the number of pieces to animate; default is 4, maximum 15.
#
# The bitmap file can be any X11 bitmap file (look in
# /usr/include/X11/bitmaps for samples); it is displayed as the
# background of the animation. Default is no bitmap.
# This uses Steen Lumholt's Tk interface
from Tkinter import *
# Basic Towers-of-Hanoi algorithm: move n pieces from a to b, using c
# as temporary. For each move, call report()
def hanoi(n, a, b, c, report):
if n <= 0: return
hanoi(n-1, a, c, b, report)
report(n, a, b)
hanoi(n-1, c, b, a, report)
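# Worked example (peg numbers as used by the GUI below): hanoi(2, 0, 1, 2, report)
# reports the moves (1, 0, 2), (2, 0, 1), (1, 2, 1) -- smallest piece to the
# spare peg, largest piece to the target, smallest piece back on top.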
# The graphical interface
class Tkhanoi:
# Create our objects
def __init__(self, n, bitmap = None):
self.n = n
self.tk = tk = Tk()
self.canvas = c = Canvas(tk)
c.pack()
width, height = tk.getint(c['width']), tk.getint(c['height'])
# Add background bitmap
if bitmap:
self.bitmap = c.create_bitmap(width/2, height/2,
{'bitmap': bitmap,
'foreground': 'blue'})
# Generate pegs
pegwidth = 10
pegheight = height/2
pegdist = width/3
x1, y1 = (pegdist-pegwidth)/2, height*1/3
x2, y2 = x1+pegwidth, y1+pegheight
self.pegs = []
p = c.create_rectangle(x1, y1, x2, y2, {'fill': 'black'})
self.pegs.append(p)
x1, x2 = x1+pegdist, x2+pegdist
p = c.create_rectangle(x1, y1, x2, y2, {'fill': 'black'})
self.pegs.append(p)
x1, x2 = x1+pegdist, x2+pegdist
p = c.create_rectangle(x1, y1, x2, y2, {'fill': 'black'})
self.pegs.append(p)
self.tk.update()
# Generate pieces
pieceheight = pegheight/16
maxpiecewidth = pegdist*2/3
minpiecewidth = 2*pegwidth
self.pegstate = [[], [], []]
self.pieces = {}
x1, y1 = (pegdist-maxpiecewidth)/2, y2-pieceheight-2
x2, y2 = x1+maxpiecewidth, y1+pieceheight
dx = (maxpiecewidth-minpiecewidth) / (2*max(1, n-1))
for i in range(n, 0, -1):
p = c.create_rectangle(x1, y1, x2, y2,
{'fill': 'red'})
self.pieces[i] = p
self.pegstate[0].append(i)
x1, x2 = x1 + dx, x2-dx
y1, y2 = y1 - pieceheight-2, y2-pieceheight-2
self.tk.update()
self.tk.after(25)
# Run -- never returns
def run(self):
hanoi(self.n, 0, 1, 2, self.report)
self.tk.mainloop() # Hang around...
# Reporting callback for the actual hanoi function
def report(self, i, a, b):
if self.pegstate[a][-1] != i: raise RuntimeError # Assertion
del self.pegstate[a][-1]
p = self.pieces[i]
c = self.canvas
# Lift the piece above peg a
ax1, ay1, ax2, ay2 = c.bbox(self.pegs[a])
while 1:
x1, y1, x2, y2 = c.bbox(p)
if y2 < ay1: break
c.move(p, 0, -1)
self.tk.update()
# Move it towards peg b
bx1, by1, bx2, by2 = c.bbox(self.pegs[b])
newcenter = (bx1+bx2)/2
while 1:
x1, y1, x2, y2 = c.bbox(p)
center = (x1+x2)/2
if center == newcenter: break
if center > newcenter: c.move(p, -1, 0)
else: c.move(p, 1, 0)
self.tk.update()
# Move it down on top of the previous piece
pieceheight = y2-y1-2
newbottom = by2 - pieceheight*len(self.pegstate[b]) - 2
while 1:
x1, y1, x2, y2 = c.bbox(p)
if y2 >= newbottom: break
c.move(p, 0, 1)
self.tk.update()
# Update peg state
self.pegstate[b].append(i)
# Main program
def main():
import sys, string
# First argument is number of pegs, default 4
if sys.argv[1:]:
n = string.atoi(sys.argv[1])
else:
n = 4
# Second argument is bitmap file, default none
if sys.argv[2:]:
bitmap = sys.argv[2]
# Reverse meaning of leading '@' compared to Tk
if bitmap[0] == '@': bitmap = bitmap[1:]
else: bitmap = '@' + bitmap
else:
bitmap = None
# Create the graphical objects...
h = Tkhanoi(n, bitmap)
# ...and run!
h.run()
# Call main when run as script
if __name__ == '__main__':
main()
| gpl-3.0 | -3,750,815,699,398,943,000 | 35.414966 | 77 | 0.431455 | false |
sindhus/hasjob | alembic/versions/c55612fb52a_organization.py | 4 | 3065 | """Organization
Revision ID: c55612fb52a
Revises: 1fce9db567a5
Create Date: 2015-01-27 02:02:42.510116
"""
# revision identifiers, used by Alembic.
revision = 'c55612fb52a'
down_revision = '1fce9db567a5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('domain', sa.Unicode(length=253), nullable=True),
sa.Column('logo_image', sa.Unicode(length=250), nullable=True),
sa.Column('cover_image', sa.Unicode(length=250), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.Column('userid', sa.Unicode(length=22), nullable=False),
sa.Column('name', sa.Unicode(length=250), nullable=False),
sa.Column('title', sa.Unicode(length=250), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('userid')
)
op.create_index(op.f('ix_organization_domain'), 'organization', ['domain'], unique=False)
op.create_table('org_location',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('latitude', sa.Numeric(), nullable=True),
sa.Column('longitude', sa.Numeric(), nullable=True),
sa.Column('org_id', sa.Integer(), nullable=False),
sa.Column('title', sa.Unicode(length=80), nullable=True),
sa.Column('address1', sa.Unicode(length=80), nullable=True),
sa.Column('address2', sa.Unicode(length=80), nullable=True),
sa.Column('city', sa.Unicode(length=80), nullable=True),
sa.Column('state', sa.Unicode(length=80), nullable=True),
sa.Column('postcode', sa.Unicode(length=16), nullable=True),
sa.Column('country', sa.Unicode(length=80), nullable=True),
sa.Column('geonameid', sa.Integer(), nullable=True),
sa.Column('url_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['org_id'], ['organization.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('org_id', 'url_id')
)
op.create_index(op.f('ix_org_location_geonameid'), 'org_location', ['geonameid'], unique=False)
op.add_column('jobpost', sa.Column('org_id', sa.Integer(), nullable=True))
op.create_foreign_key('jobpost_org_id_fkey', 'jobpost', 'organization', ['org_id'], ['id'], ondelete='SET NULL')
def downgrade():
op.drop_constraint('jobpost_org_id_fkey', 'jobpost', type_='foreignkey')
op.drop_column('jobpost', 'org_id')
op.drop_index(op.f('ix_org_location_geonameid'), table_name='org_location')
op.drop_table('org_location')
op.drop_index(op.f('ix_organization_domain'), table_name='organization')
op.drop_table('organization')
| agpl-3.0 | 6,588,068,108,834,663,000 | 45.439394 | 116 | 0.647961 | false |
mjun/lightdm-gtk-greeter-settings | lightdm_gtk_greeter_settings/IconChooserDialog.py | 1 | 15171 | #!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# LightDM GTK Greeter Settings
# Copyright (C) 2014 Andrew P. <[email protected]>, Matko Jun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
from gi.repository import Gtk, GObject
from lightdm_gtk_greeter_settings.helpers import get_data_path, NC_, C_
__all__ = ['IconChooserDialog']
class IconChooserDialog(Gtk.Dialog):
__gtype_name__ = 'IconChooserDialog'
ContextsModelRow = namedtuple('ContextsModelRow',
('name', 'standard', 'title'))
IconsModelRow = namedtuple('IconsModelRow',
('name', 'standard', 'context'))
IconsFilterArgs = namedtuple('IconsFilterArgs', ('standard', 'context'))
BUILDER_WIDGETS = ('name', 'preview', 'standard_toggle', 'spinner',
'contexts_view', 'contexts_selection', 'contexts_model',
'contexts_filter', 'icons_view', 'icons_selection',
'icons_model', 'icons_sorted', 'icons_filter')
def __new__(cls):
builder = Gtk.Builder()
builder.add_from_file(get_data_path('%s.ui' % cls.__name__))
window = builder.get_object('icon_chooser_dialog')
window._builder = builder
window.__dict__.update(('_' + w, builder.get_object(w))
for w in cls.BUILDER_WIDGETS)
builder.connect_signals(window)
window._init_window()
return window
def _init_window(self):
# Map ContextsModelRow fields to self._CONTEXT_{FIELD} = {field-index}
for i, field in enumerate(IconChooserDialog.ContextsModelRow._fields):
setattr(self, '_CONTEXT_' + field.upper(), i)
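        # e.g. with the ('name', 'standard', 'title') fields above this sets
        # self._CONTEXT_NAME = 0, self._CONTEXT_STANDARD = 1 and
        # self._CONTEXT_TITLE = 2.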
# Map IconsModelRow fields to self._ICON_{FIELD} = {field-index}
for i, field in enumerate(IconChooserDialog.IconsModelRow._fields):
setattr(self, '_ICON_' + field.upper(), i)
self._icons_loaded = False
self._icon_to_select = None
self._icons_filter_args = None
self._contexts_view.set_row_separator_func(
self._contexts_row_separator_callback, None)
self._contexts_filter.set_visible_func(
self._contexts_filter_visible_callback)
self._icons_filter.set_visible_func(
self._icons_filter_visible_callback)
self._icons_sorted.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self._reload()
def _read_icons(self):
theme = Gtk.IconTheme.get_default()
standard_contexts = set(name for name, title in STANDARD_CONTEXTS)
self._contexts_model.clear()
for name, title in STANDARD_CONTEXTS:
translated_title = title and C_('icon-dialog', title)
            self._contexts_model.append(self.ContextsModelRow(
                name=name, standard=True, title=translated_title))
for name in theme.list_contexts():
if name not in standard_contexts:
                self._contexts_model.append(self.ContextsModelRow(
                    name=name, standard=False, title=name))
self._icons_model.clear()
for context in theme.list_contexts():
for icon in theme.list_icons(context):
row = self.IconsModelRow(name=icon,
standard=icon in STANDARD_ICON_NAMES,
context=context)
self._icons_model.append(row)
self._icons_loaded = True
if self._icon_to_select:
self.select_icon(self._icon_to_select)
self._icon_to_select = None
return False
def _reload(self):
GObject.idle_add(self._read_icons)
def _update_contexts_filter(self):
selected_iter = self._contexts_selection.get_selected()[1]
selected_path = self._contexts_filter.get_path(
selected_iter) if selected_iter else None
self._contexts_filter.refilter()
if selected_path and \
self._contexts_selection.path_is_selected(selected_path):
self._update_icons_filter()
def _update_icons_filter(self):
model, rowiter = self._contexts_selection.get_selected()
if rowiter:
self._icons_filter_args = self.IconsFilterArgs(
self._standard_toggle.props.active,
model[rowiter][self._CONTEXT_NAME])
else:
self._icons_filter_args = None
self._icons_view.props.model = None
self._icons_filter.refilter()
self._icons_view.props.model = self._icons_sorted
def _contexts_filter_visible_callback(self, model, rowiter, data):
if not self._standard_toggle.props.active:
return True
return model[rowiter][self._CONTEXT_STANDARD]
def _contexts_row_separator_callback(self, model, rowiter, data):
return not model[rowiter][self._CONTEXT_NAME] and \
not model[rowiter][self._CONTEXT_TITLE]
def _icons_filter_visible_callback(self, model, rowiter, data):
if not self._icons_filter_args:
return False
if self._icons_filter_args.standard and \
not model[rowiter][self._ICON_STANDARD]:
return False
if not self._icons_filter_args.context:
return True
return model[rowiter][self._ICON_CONTEXT] == \
self._icons_filter_args.context
def run(self):
return super(IconChooserDialog, self).run()
def get_iconname(self):
return self._name.props.text
def select_icon(self, name):
if not self._icons_loaded:
self._icon_to_select = name
return
if not self._icons_filter_args or \
self._icons_filter_args.context is not None:
if name not in STANDARD_ICON_NAMES:
self._standard_toggle.props.active = False
self._contexts_selection.select_path(0)
for row in self._icons_sorted:
if row[self._ICON_NAME] == name:
self._icons_view.set_cursor(row.path)
self._icons_selection.select_path(row.path)
break
else:
self._name.props.text = name
def on_icons_selection_changed(self, selection):
model, rowiter = self._icons_selection.get_selected()
if rowiter:
name = model[rowiter][self._ICON_NAME]
self._name.props.text = name
def on_contexts_selection_changed(self, selection):
self._icons_selection.unselect_all()
self._update_icons_filter()
def on_standard_toggled(self, toggle):
self._update_contexts_filter()
def on_name_changed(self, entry):
name = entry.props.text
if not Gtk.IconTheme.get_default().has_icon(name):
name = ''
self._preview.props.icon_name = name
#http://standards.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html
STANDARD_CONTEXTS =\
(
(None, NC_('icon-dialog', 'All contexts')),
(None, ''), # separator
('Actions', NC_('icon-dialog', 'Actions')),
('Applications', NC_('icon-dialog', 'Applications')),
('Categories', NC_('icon-dialog', 'Categories')),
('Devices', NC_('icon-dialog', 'Devices')),
('Emblems', NC_('icon-dialog', 'Emblems')),
('Emotes', NC_('icon-dialog', 'Emoticons')),
('International', NC_('icon-dialog', 'International')),
('MimeTypes', NC_('icon-dialog', 'MIME Types')),
('Places', NC_('icon-dialog', 'Places')),
('Status', NC_('icon-dialog', 'Status'))
)
STANDARD_ICON_NAMES = \
{
# Actions
'address-book-new', 'application-exit', 'appointment-new',
'call-start', 'call-stop', 'contact-new', 'document-new',
'document-open', 'document-open-recent', 'document-page-setup',
'document-print', 'document-print-preview', 'document-properties',
'document-revert', 'document-save', 'document-save-as',
'document-send', 'edit-clear', 'edit-copy', 'edit-cut', 'edit-delete',
'edit-find', 'edit-find-replace', 'edit-paste', 'edit-redo',
'edit-select-all', 'edit-undo', 'folder-new', 'format-indent-less',
'format-indent-more', 'format-justify-center', 'format-justify-fill',
'format-justify-left', 'format-justify-right',
'format-text-direction-ltr', 'format-text-direction-rtl',
'format-text-bold', 'format-text-italic', 'format-text-underline',
'format-text-strikethrough', 'go-bottom', 'go-down', 'go-first',
'go-home', 'go-jump', 'go-last', 'go-next', 'go-previous', 'go-top',
'go-up', 'help-about', 'help-contents', 'help-faq', 'insert-image',
'insert-link', 'insert-object', 'insert-text', 'list-add',
'list-remove', 'mail-forward', 'mail-mark-important', 'mail-mark-junk',
'mail-mark-notjunk', 'mail-mark-read', 'mail-mark-unread',
'mail-message-new', 'mail-reply-all', 'mail-reply-sender', 'mail-send',
'mail-send-receive', 'media-eject', 'media-playback-pause',
'media-playback-start', 'media-playback-stop', 'media-record',
'media-seek-backward', 'media-seek-forward', 'media-skip-backward',
'media-skip-forward', 'object-flip-horizontal', 'object-flip-vertical',
'object-rotate-left', 'object-rotate-right', 'process-stop',
'system-lock-screen', 'system-log-out', 'system-run', 'system-search',
'system-reboot', 'system-shutdown', 'tools-check-spelling',
'view-fullscreen', 'view-refresh', 'view-restore',
'view-sort-ascending', 'view-sort-descending', 'window-close',
'window-new', 'zoom-fit-best', 'zoom-in', 'zoom-original', 'zoom-out',
# StandardApplicationIcons
'accessories-calculator', 'accessories-character-map',
'accessories-dictionary', 'accessories-text-editor', 'help-browser',
'multimedia-volume-control', 'preferences-desktop-accessibility',
'preferences-desktop-font', 'preferences-desktop-keyboard',
'preferences-desktop-locale', 'preferences-desktop-multimedia',
'preferences-desktop-screensaver', 'preferences-desktop-theme',
'preferences-desktop-wallpaper', 'system-file-manager',
'system-software-install', 'system-software-update',
'utilities-system-monitor', 'utilities-terminal',
# StandardCategoryIcons
'applications-accessories', 'applications-development',
'applications-engineering', 'applications-games',
'applications-graphics', 'applications-internet',
'applications-multimedia', 'applications-office', 'applications-other',
'applications-science', 'applications-system',
'applications-utilities', 'preferences-desktop',
'preferences-desktop-peripherals', 'preferences-desktop-personal',
'preferences-other', 'preferences-system',
'preferences-system-network', 'system-help',
# StandardDeviceIcons
'audio-card', 'audio-input-microphone', 'battery', 'camera-photo',
'camera-video', 'camera-web', 'computer', 'drive-harddisk',
'drive-optical', 'drive-removable-media', 'input-gaming',
'input-keyboard', 'input-mouse', 'input-tablet', 'media-flash',
'media-floppy', 'media-optical', 'media-tape', 'modem',
'multimedia-player', 'network-wired', 'network-wireless', 'pda',
'phone', 'printer', 'scanner', 'video-display',
# StandardEmblemIcons
'emblem-default', 'emblem-documents', 'emblem-downloads',
'emblem-favorite', 'emblem-important', 'emblem-mail', 'emblem-photos',
'emblem-readonly', 'emblem-shared', 'emblem-symbolic-link',
'emblem-synchronized', 'emblem-system', 'emblem-unreadable',
# StandardEmotionIcons
'face-angel', 'face-angry', 'face-cool', 'face-crying',
'face-devilish', 'face-embarrassed', 'face-kiss', 'face-laugh',
'face-monkey', 'face-plain', 'face-raspberry', 'face-sad', 'face-sick',
'face-smile', 'face-smile-big', 'face-smirk', 'face-surprise',
'face-tired', 'face-uncertain', 'face-wink', 'face-worried',
# StandardInternationalIcons
'flag-aa',
# StandardMIMETypeIcons
'application-x-executable', 'audio-x-generic', 'font-x-generic',
'image-x-generic', 'package-x-generic', 'text-html', 'text-x-generic',
'text-x-generic-template', 'text-x-script', 'video-x-generic',
'x-office-address-book', 'x-office-calendar', 'x-office-document',
'x-office-presentation', 'x-office-spreadsheet',
# StandardPlaceIcons
'folder', 'folder-remote', 'network-server', 'network-workgroup',
'start-here', 'user-bookmarks', 'user-desktop', 'user-home',
'user-trash',
# StandardStatusIcons
'appointment-missed', 'appointment-soon', 'audio-volume-high',
'audio-volume-low', 'audio-volume-medium', 'audio-volume-muted',
'battery-caution', 'battery-low', 'dialog-error', 'dialog-information',
'dialog-password', 'dialog-question', 'dialog-warning',
'folder-drag-accept', 'folder-open', 'folder-visiting',
'image-loading', 'image-missing', 'mail-attachment', 'mail-unread',
'mail-read', 'mail-replied', 'mail-signed', 'mail-signed-verified',
'media-playlist-repeat', 'media-playlist-shuffle', 'network-error',
'network-idle', 'network-offline', 'network-receive',
'network-transmit', 'network-transmit-receive', 'printer-error',
'printer-printing', 'security-high', 'security-medium', 'security-low',
'software-update-available', 'software-update-urgent', 'sync-error',
'sync-synchronizing', 'task-due', 'task-past-due', 'user-available',
'user-away', 'user-idle', 'user-offline', 'user-trash-full',
'weather-clear', 'weather-clear-night', 'weather-few-clouds',
'weather-few-clouds-night', 'weather-fog', 'weather-overcast',
'weather-severe-alert', 'weather-showers', 'weather-showers-scattered',
'weather-snow', 'weather-storm'
}
| gpl-3.0 | 5,935,098,429,966,021,000 | 46.707547 | 89 | 0.601674 | false |
correl/Transmission-XBMC | resources/lib/basictypes/debug.py | 2 | 1615 | """Logging facilities for basictypes
If the logging package (from Python 2.3) is available,
we use it for our logging needs, otherwise we use a
simplistic locally-defined class for logging.
"""
import traceback, cStringIO
def getException(error):
"""Get formatted exception"""
exception = str(error)
file = cStringIO.StringIO()
try:
traceback.print_exc( limit=10, file = file )
exception = file.getvalue()
finally:
file.close()
return exception
try:
import logging
Log = logging.getLogger
logging.basicConfig()
WARN = logging.WARN
ERROR = logging.ERROR
INFO = logging.INFO
DEBUG = logging.DEBUG
logging.Logger.getException = staticmethod( getException )
logging.Logger.err = logging.Logger.error
except ImportError:
# does not have the logging package installed
import sys
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
class Log( object ):
"""Stand-in logging facility"""
level = WARN
def __init__( self, name ):
self.name = name
def debug(self, message, *arguments):
if self.level <= DEBUG:
sys.stderr.write( 'DEBUG:%s:%s\n'%(self.name, message%arguments))
def warn( self, message, *arguments ):
if self.level <= WARN:
sys.stderr.write( 'WARN:%s:%s\n'%(self.name, message%arguments))
def error( self, message, *arguments ):
if self.level <= ERROR:
sys.stderr.write( 'ERR :%s:%s\n'%(self.name, message%arguments))
def info( self, message, *arguments ):
if self.level <= INFO:
sys.stderr.write( 'INFO:%s:%s\n'%(self.name, message%arguments))
def setLevel( self, level ):
self.level = level
getException = staticmethod( getException )
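# Minimal usage sketch (logger name and values are arbitrary); the same calls
# work with either the stdlib logging backend or the fallback Log class:
#     log = Log('basictypes.example')
#     log.setLevel(DEBUG)
#     log.warn('unexpected value: %s', 42)
#     log.error(getException(err))   # where 'err' comes from an except block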
| mit | -3,817,802,577,678,520,300 | 27.333333 | 69 | 0.695975 | false |
brianmckinneyrocks/django-social-auth | social_auth/context_processors.py | 2 | 2963 | from social_auth.backends import BACKENDS
from social_auth.utils import group_backend_by_type
from social_auth.models import User
# Note: social_auth_backends and social_auth_by_type_backends don't play nice
# together
def social_auth_backends(request):
"""Load Social Auth current user data to context.
Will add a output from backends_data to context under social_auth key.
"""
return {'social_auth': backends_data(request.user)}
def social_auth_by_type_backends(request):
"""Load Social Auth current user data to context.
Will add a output from backends_data to context under social_auth key where
each entry will be grouped by backend type (openid, oauth, oauth2).
"""
data = backends_data(request.user)
data['backends'] = group_backend_by_type(data['backends'])
data['not_associated'] = group_backend_by_type(data['not_associated'])
data['associated'] = group_backend_by_type(data['associated'],
key=lambda assoc: assoc.provider)
return {'social_auth': data}
def social_auth_by_name_backends(request):
"""Load Social Auth current user data to context.
Will add a social_auth object whose attribute names are the names of each
provider, e.g. social_auth.facebook would be the facebook association or
None, depending on the logged in user's current associations. Providers
with a hyphen have the hyphen replaced with an underscore, e.g.
google-oauth2 becomes google_oauth2 when referenced in templates.
"""
keys = BACKENDS.keys()
accounts = dict(zip(keys, [None] * len(keys)))
if isinstance(request.user, User) and request.user.is_authenticated():
for associated in request.user.social_auth.all():
accounts[associated.provider.replace('-', '_')] = associated
return {'social_auth': accounts}
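# Illustrative wiring (Django settings shown as an assumption; pick whichever
# processor suits the template data you need):
#     TEMPLATE_CONTEXT_PROCESSORS += (
#         'social_auth.context_processors.social_auth_by_name_backends',
#     )
# A template can then test an association directly, e.g.
#     {% if social_auth.google_oauth2 %}...{% endif %}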
def backends_data(user):
"""Return backends data for given user.
Will return a dict with values:
associated: UserSocialAuth model instances for currently
associated accounts
not_associated: Not associated (yet) backend names.
backends: All backend names.
If user is not authenticated, then first list is empty, and there's no
difference between the second and third lists.
"""
available = BACKENDS.keys()
values = {'associated': [],
'not_associated': available,
'backends': available}
# user comes from request.user usually, on /admin/ it will be an instance
# of auth.User and this code will fail if a custom User model was defined
if isinstance(user, User) and user.is_authenticated():
associated = user.social_auth.all()
not_associated = list(set(available) -
set(assoc.provider for assoc in associated))
values['associated'] = associated
values['not_associated'] = not_associated
return values
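# Wiring sketch (illustrative only, not part of this module's API): one of the
# processors above is normally enabled from the Django settings module.  The
# neighbouring entry and the variable name are examples, not requirements.
EXAMPLE_TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'social_auth.context_processors.social_auth_by_type_backends',
)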
| bsd-3-clause | 4,949,616,726,671,713,000 | 40.152778 | 80 | 0.673979 | false |
dededong/goblin-core | llvm/3.4.2/llvm-3.4.2.src/test/CodeGen/SystemZ/Large/branch-range-06.py | 10 | 3534 | # Test 64-bit COMPARE IMMEDIATE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CGIJL instead...
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 56, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 101, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 102, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 103, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print 'define void @f1(i8 *%base, i8 *%stop) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%d = load volatile i8 *%%stop' % i
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%d = load volatile i8 *%%stop' % i
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| bsd-3-clause | -6,939,034,083,382,276,000 | 31.422018 | 77 | 0.575552 | false |
daherk2/authomatic | tests/functional_tests/expected_values/tumblr.py | 12 | 2365 | import re
import fixtures
import constants
from authomatic.providers import oauth1
conf = fixtures.get_configuration('tumblr')
CONFIG = {
'logout_url': 'https://www.tumblr.com/logout',
'login_xpath': '//*[@id="signup_email"]',
'password_xpath': '//*[@id="signup_password"]',
'consent_xpaths': [
'//*[@id="api_v1_oauth_authorize"]'
'/div[2]/div/div[1]/div/div/div[2]/form/button[2]',
],
'consent_wait_seconds': 3,
'class_': oauth1.Tumblr,
'user': {
'birth_date': None,
'city': None,
'country': None,
'email': None,
'gender': None,
'id': conf.user_id,
'first_name': None,
'last_name': None,
'link': None,
'locale': None,
'location': None,
'name': conf.user_name,
'nickname': None,
'phone': None,
'picture': None,
'postal_code': None,
'timezone': None,
'username': conf.user_username,
},
'content_should_contain': [
conf.user_id,
conf.user_name,
conf.user_username,
# User info JSON keys
'admin', 'ask', 'ask_anon', 'ask_page_title', 'blogs',
'can_send_fan_mail', 'default_post_format', 'description', 'drafts',
'facebook', 'facebook_opengraph_enabled', 'followed', 'followers',
'following', 'is_nsfw', 'likes', 'messages', 'meta', 'msg', 'name',
'posts', 'primary', 'queue', 'response', 'share_likes', 'status',
'title', 'tweet', 'twitter_enabled', 'twitter_send', 'type', 'updated',
'url', 'user'
],
# Case insensitive
'content_should_not_contain':
conf.no_birth_date +
conf.no_gender +
conf.no_nickname +
conf.no_phone +
conf.no_postal_code +
conf.no_timezone,
    # True means that any truthy value is expected
'credentials': {
'_expiration_time': None,
'_expire_in': True,
'consumer_key': True,
'consumer_secret': True,
'provider_id': None,
'provider_name': 'tumblr',
'provider_type': 'authomatic.providers.oauth1.OAuth1',
'provider_type_id': '1-6',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
'refresh_token': None,
'token': True,
'token_secret': True,
'token_type': None,
},
} | mit | -5,752,121,557,341,079,000 | 29.333333 | 79 | 0.542072 | false |
foxscotch/advent-of-code | 2015/07/p1.py | 1 | 3009 | # god, this is so overkill
class Board:
def __init__(self, file):
self.instructions = []
self.wires = WireManager()
for line in file:
tokens = [int(i) if i.isdigit() else i.strip() for i in line.split(' ')]
self.instructions.append(Instruction(self, tokens))
for instr in self.instructions:
for op in instr.operands:
if type(op) is str:
self.wires.add_wire(op)
self.wires.add_wire(instr.dest)
def run(self):
skipped = False
for instr in self.instructions:
if instr.is_runnable():
instr.run()
else:
skipped = True
if skipped:
self.run()
class WireManager:
def __init__(self):
self._wires = {}
def add_wire(self, wire):
wire = Wire(wire)
self._wires[wire.name] = wire
wire.manager = self
def get_wire(self, wire):
return self._wires[wire].value
def set_wire(self, wire, value):
self._wires[wire].value = value
def check_runnable(self, *wires):
for wire in wires:
if type(wire) is not int and self.get_wire(wire) is None:
return False
return True
class Wire:
def __init__(self, name):
self.name = name
self.value = None
self.manager = None
def __str__(self):
return '{}={}'.format(self.name, self.value)
class Instruction:
def __init__(self, board, tokens):
if len(tokens) == 4:
self.operator = 'NOT'
self.operands = [tokens[1]]
elif len(tokens) == 5:
self.operator = tokens[1]
self.operands = [tokens[0], tokens[2]]
else:
self.operator = 'ASSIGN'
self.operands = [tokens[0]]
self.dest = tokens[-1]
self.board = board
def is_runnable(self):
return self.board.wires.check_runnable(*self.operands)
def run(self):
operands = []
for op in self.operands:
if type(op) != int:
operands.append(self.board.wires.get_wire(op))
else:
operands.append(op)
if self.operator == 'NOT':
            self.board.wires.set_wire(self.dest, ~operands[0] & 0xFFFF)  # wires carry 16-bit signals
elif self.operator == 'LSHIFT':
self.board.wires.set_wire(self.dest, operands[0] << operands[1])
elif self.operator == 'RSHIFT':
self.board.wires.set_wire(self.dest, operands[0] >> operands[1])
elif self.operator == 'AND':
self.board.wires.set_wire(self.dest, operands[0] & operands[1])
elif self.operator == 'OR':
self.board.wires.set_wire(self.dest, operands[0] | operands[1])
else:
# ASSIGN
self.board.wires.set_wire(self.dest, operands[0])
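# Worked example: the eight-instruction sample circuit from the 2015 day 7
# puzzle statement, used here as a quick sanity check of the classes above.
sample_circuit = [
    '123 -> x', '456 -> y', 'x AND y -> d', 'x OR y -> e',
    'x LSHIFT 2 -> f', 'y RSHIFT 2 -> g', 'NOT x -> h', 'NOT y -> i',
]
sample_board = Board(sample_circuit)
sample_board.run()
# Puzzle statement values: d=72, e=507, f=492, g=114, h=65412, i=65079
assert sample_board.wires.get_wire('d') == 72
assert sample_board.wires.get_wire('e') == 507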
with open('input.txt', 'r') as file:
board = Board(file)
board.run()
print(board.wires.get_wire('a'))
| mit | -3,724,423,761,786,651,000 | 26.605505 | 84 | 0.532403 | false |
anisku11/sublimeku | Packages/pygments/all/pygments/formatters/img.py | 25 | 18002 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt, xrange
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
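# A minimal usage sketch (not part of the original module): render a short
# snippet to a PNG through the public pygments API when this file is run
# directly.  Requires PIL and an installed monospace font.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.lexers import PythonLexer
    png_bytes = highlight('print("hello world")\n', PythonLexer(),
                          ImageFormatter(line_numbers=True))
    with open('example.png', 'wb') as handle:
        handle.write(png_bytes)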
| mit | -1,553,359,144,998,655,000 | 31.146429 | 84 | 0.546828 | false |
stickypants/asset_manager | pygal/graph/dot.py | 1 | 4294 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Dot chart displaying values as a grid of dots; the bigger the value,
the bigger the dot
"""
from __future__ import division
from math import log10
from pygal._compat import to_str
from pygal.graph.graph import Graph
from pygal.util import alter, cached_property, decorate, safe_enumerate
from pygal.view import ReverseView, View
class Dot(Graph):
"""Dot graph class"""
def dot(self, serie, r_max):
"""Draw a dot line"""
serie_node = self.svg.serie(serie)
view_values = list(map(self.view, serie.points))
for i, value in safe_enumerate(serie.values):
x, y = view_values[i]
if self.logarithmic:
log10min = log10(self._min) - 1
log10max = log10(self._max or 1)
if value != 0:
size = r_max * (
(log10(abs(value)) - log10min) /
(log10max - log10min)
)
else:
size = 0
else:
size = r_max * (abs(value) / (self._max or 1))
metadata = serie.metadata.get(i)
dots = decorate(
self.svg,
self.svg.node(serie_node['plot'], class_="dots"),
metadata)
alter(self.svg.node(
dots, 'circle',
cx=x, cy=y, r=size,
class_='dot reactive tooltip-trigger' + (
' negative' if value < 0 else '')), metadata)
val = self._format(serie, i)
self._tooltip_data(
dots, val, x, y, 'centered',
self._get_x_label(i))
self._static_value(serie_node, val, x, y, metadata)
def _compute(self):
"""Compute y min and max and y scale and set labels"""
x_len = self._len
y_len = self._order
self._box.xmax = x_len
self._box.ymax = y_len
self._x_pos = [n / 2 for n in range(1, 2 * x_len, 2)]
self._y_pos = [n / 2 for n in reversed(range(1, 2 * y_len, 2))]
for j, serie in enumerate(self.series):
serie.points = [
(self._x_pos[i], self._y_pos[j])
for i in range(x_len)]
def _compute_y_labels(self):
self._y_labels = list(zip(
self.y_labels and map(to_str, self.y_labels) or [
serie.title['title']
if isinstance(serie.title, dict)
else serie.title or '' for serie in self.series],
self._y_pos))
def _set_view(self):
"""Assign a view to current graph"""
view_class = ReverseView if self.inverse_y_axis else View
self.view = view_class(
self.width - self.margin_box.x,
self.height - self.margin_box.y,
self._box)
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
return [abs(val) for val in super(Dot, self)._values if val != 0]
@cached_property
def _max(self):
"""Getter for the maximum series value"""
return (self.range[1] if (self.range and self.range[1] is not None)
else (max(map(abs, self._values)) if self._values else None))
def _plot(self):
"""Plot all dots for series"""
r_max = min(
self.view.x(1) - self.view.x(0),
(self.view.y(0) or 0) - self.view.y(1)) / (
2 * 1.05)
for serie in self.series:
self.dot(serie, r_max)
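# A minimal usage sketch (labels and values below are invented): the Dot chart
# is normally reached through the top-level pygal package rather than by
# importing this module directly.
if __name__ == '__main__':
    import pygal
    chart = pygal.Dot(x_label_rotation=30)
    chart.title = 'Example dot chart'
    chart.x_labels = ['a', 'b', 'c', 'd']
    chart.add('first series', [1, 2, 3, 4])
    chart.add('second series', [4, 3, 2, 1])
    chart.render_to_file('dot_example.svg')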
| gpl-3.0 | 8,052,539,895,964,645,000 | 32.80315 | 79 | 0.552294 | false |
blueburningcoder/nupic | src/nupic/regions/CLAClassifierRegion.py | 3 | 16974 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file implements the CLA Classifier region. See the comments in the class
definition of CLAClassifierRegion for a description.
"""
import warnings
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.bindings.algorithms import FastCLAClassifier
from nupic.algorithms.cla_classifier_factory import CLAClassifierFactory
from nupic.support.configuration import Configuration
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.CLAClassifierRegion_capnp import CLAClassifierRegionProto
class CLAClassifierRegion(PyRegion):
"""
CLAClassifierRegion implements a CLA specific classifier that accepts a binary
input from the level below (the "activationPattern") and information from the
sensor and encoders (the "classification") describing the input to the system
at that time step.
When learning, for every bit in activation pattern, it records a history of
the classification each time that bit was active. The history is bounded by a
maximum allowed age so that old entries are thrown away.
For inference, it takes an ensemble approach. For every active bit in the
activationPattern, it looks up the most likely classification(s) from the
history stored for that bit and then votes across these to get the resulting
classification(s).
The caller can choose to tell the region that the classifications for
iteration N+K should be aligned with the activationPattern for iteration N.
This results in the classifier producing predictions for K steps in advance.
Any number of different K's can be specified, allowing the classifier to learn
and infer multi-step predictions for a number of steps in advance.
"""
@classmethod
def getSpec(cls):
ns = dict(
description=CLAClassifierRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
categoryIn=dict(
description='Vector of categories of the input sample',
dataType='Real32',
count=0,
required=True,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
bottomUpIn=dict(
description='Belief values over children\'s groups',
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
predictedActiveCells=dict(
description="The cells that are active and predicted",
dataType='Real32',
count=0,
required=True,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
sequenceIdIn=dict(
description="Sequence ID",
dataType='UInt64',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
categoriesOut=dict(
description='Classification results',
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False,
requireSplitterMap=False),
actualValues=dict(
description='Classification results',
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False,
requireSplitterMap=False),
probabilities=dict(
description='Classification results',
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False,
requireSplitterMap=False),
),
parameters=dict(
learningMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in learning mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=1,
accessMode='ReadWrite'),
inferenceMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in inference mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
maxCategoryCount=dict(
description='The maximal number of categories the '
'classifier will distinguish between.',
dataType='UInt32',
required=True,
count=1,
constraints='',
# arbitrarily large value for backward compatibility
defaultValue=1000,
accessMode='Create'),
steps=dict(
description='Comma separated list of the desired steps of '
'prediction that the classifier should learn',
dataType="Byte",
count=0,
constraints='',
defaultValue='0',
accessMode='Create'),
alpha=dict(
description='The alpha used to compute running averages of the '
'bucket duty cycles for each activation pattern bit. A '
'lower alpha results in longer term memory',
dataType="Real32",
count=1,
constraints='',
defaultValue=0.001,
accessMode='Create'),
implementation=dict(
description='The classifier implementation to use.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints='enum: py, cpp'),
verbosity=dict(
description='An integer that controls the verbosity level, '
'0 means no verbose output, increasing integers '
'provide more verbosity.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='ReadWrite'),
),
commands=dict()
)
return ns
def __init__(self,
steps='1',
alpha=0.001,
verbosity=0,
implementation=None,
maxCategoryCount=None
):
# Set default implementation
if implementation is None:
implementation = Configuration.get('nupic.opf.claClassifier.implementation')
# Convert the steps designation to a list
self.classifierImp = implementation
self.steps = steps
self.stepsList = eval("[%s]" % (steps))
self.alpha = alpha
self.verbosity = verbosity
# Initialize internal structures
self._claClassifier = CLAClassifierFactory.create(
steps=self.stepsList,
alpha=self.alpha,
verbosity=self.verbosity,
implementation=implementation,
)
self.learningMode = True
self.inferenceMode = False
self.maxCategoryCount = maxCategoryCount
self.recordNum = 0
self._initEphemerals()
# Flag to know if the compute() function is ever called. This is to
    # prevent backward compatibility issues with the customCompute() method
# being called at the same time as the compute() method. Only compute()
# should be called via network.run(). This flag will be removed once we
# get to cleaning up the clamodel.py file.
self._computeFlag = False
def _initEphemerals(self):
pass
def initialize(self, dims, splitterMaps):
pass
def clear(self):
self._claClassifier.clear()
def getAlgorithmInstance(self):
"""Returns instance of the underlying CLAClassifier algorithm object."""
return self._claClassifier
def getParameter(self, name, index=-1):
"""
Get the value of the parameter.
@param name -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index)
def setParameter(self, name, index, value):
"""
Set the value of the parameter.
@param name -- the name of the parameter to update, as defined
by the Node Spec.
@param value -- the value to which the parameter is to be set.
"""
if name == "learningMode":
self.learningMode = bool(int(value))
elif name == "inferenceMode":
self.inferenceMode = bool(int(value))
else:
return PyRegion.setParameter(self, name, index, value)
@staticmethod
def getProtoType():
"""Return the pycapnp proto type that the class uses for serialization."""
return CLAClassifierRegionProto
def writeToProto(self, proto):
"""Write state to proto object.
proto: CLAClassifierRegionProto capnproto object
"""
proto.classifierImp = self.classifierImp
proto.steps = self.steps
proto.alpha = self.alpha
proto.verbosity = self.verbosity
proto.maxCategoryCount = self.maxCategoryCount
self._claClassifier.write(proto.claClassifier)
@classmethod
def readFromProto(cls, proto):
"""Read state from proto object.
proto: CLAClassifierRegionProto capnproto object
"""
instance = cls()
instance.classifierImp = proto.classifierImp
instance.steps = proto.steps
instance.alpha = proto.alpha
instance.verbosity = proto.verbosity
instance.maxCategoryCount = proto.maxCategoryCount
instance._claClassifier = CLAClassifierFactory.read(proto)
return instance
def reset(self):
pass
def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
@param inputs -- inputs of the classifier region
@param outputs -- outputs of the classifier region
"""
# This flag helps to prevent double-computation, in case the deprecated
    # customCompute() method is being called in addition to compute(), which
    # is called when network.run() is called.
self._computeFlag = True
# An input can potentially belong to multiple categories.
# If a category value is < 0, it means that the input does not belong to
# that category.
categories = [category for category in inputs["categoryIn"]
if category >= 0]
activeCells = inputs["bottomUpIn"]
patternNZ = activeCells.nonzero()[0]
# ==========================================================================
# Allow to train on multiple input categories.
# Do inference first, and then train on all input categories.
# --------------------------------------------------------------------------
# 1. Call classifier. Don't train. Just inference. Train after.
# Dummy classification input, because this param is required. Learning is
# off, so the classifier is not learning this input. Inference only here.
classificationIn = {"actValue": 0, "bucketIdx": 0}
clResults = self._claClassifier.compute(recordNum=self.recordNum,
patternNZ=patternNZ,
classification=classificationIn,
learn=False,
infer=self.inferenceMode)
for category in categories:
classificationIn = {"bucketIdx": int(category), "actValue": int(category)}
# ------------------------------------------------------------------------
# 2. Train classifier, no inference
self._claClassifier.compute(recordNum=self.recordNum,
patternNZ=patternNZ,
classification=classificationIn,
learn=self.learningMode,
infer=False)
actualValues = clResults["actualValues"]
outputs['actualValues'][:len(actualValues)] = actualValues
for step in self.stepsList:
stepIndex = self.stepsList.index(step)
categoryOut = actualValues[clResults[step].argmax()]
outputs['categoriesOut'][stepIndex] = categoryOut
# Flatten the rest of the output. For example:
# Original dict {1 : [0.1, 0.3, 0.2, 0.7]
# 4 : [0.2, 0.4, 0.3, 0.5]}
# becomes: [0.1, 0.3, 0.2, 0.7, 0.2, 0.4, 0.3, 0.5]
stepProbabilities = clResults[step]
for categoryIndex in xrange(self.maxCategoryCount):
flatIndex = categoryIndex + stepIndex * self.maxCategoryCount
if categoryIndex < len(stepProbabilities):
outputs['probabilities'][flatIndex] = stepProbabilities[categoryIndex]
else:
outputs['probabilities'][flatIndex] = 0.0
self.recordNum += 1
def customCompute(self, recordNum, patternNZ, classification):
"""
Just return the inference value from one input sample. The actual
learning happens in compute() -- if, and only if learning is enabled --
which is called when you run the network.
WARNING: The method customCompute() is here to maintain backward
compatibility. This method is deprecated, and will be removed.
Use network.run() instead, which will call the compute() method.
Parameters:
--------------------------------------------------------------------
recordNum: Record number of the input sample.
patternNZ: List of the active indices from the output below
classification: Dict of the classification information:
bucketIdx: index of the encoder bucket
actValue: actual value going into the encoder
retval: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
# If the compute flag has not been initialized (for example if we
# restored a model from an old checkpoint) initialize it to False.
if not hasattr(self, "_computeFlag"):
self._computeFlag = False
if self._computeFlag:
# Will raise an exception if the deprecated method customCompute() is
# being used at the same time as the compute function.
warnings.simplefilter('error', DeprecationWarning)
warnings.warn("The customCompute() method should not be "
"called at the same time as the compute() "
"method. The compute() method is called "
"whenever network.run() is called.",
DeprecationWarning)
return self._claClassifier.compute(recordNum,
patternNZ,
classification,
self.learningMode,
self.inferenceMode)
def getOutputValues(self, outputName):
"""
Return the dictionary of output values. Note that these are normal Python
lists, rather than numpy arrays. This is to support lists with mixed scalars
and strings, as in the case of records with categorical variables
"""
return self._outputValues[outputName]
def getOutputElementCount(self, outputName):
"""Returns the width of dataOut."""
# Check if classifier has a 'maxCategoryCount' attribute
if not hasattr(self, "maxCategoryCount"):
# Large default value for backward compatibility
self.maxCategoryCount = 1000
if outputName == "categoriesOut":
return len(self.stepsList)
elif outputName == "probabilities":
return len(self.stepsList) * self.maxCategoryCount
elif outputName == "actualValues":
return self.maxCategoryCount
else:
raise ValueError("Unknown output {}.".format(outputName))
if __name__ == "__main__":
from nupic.engine import Network
n = Network()
classifier = n.addRegion(
'classifier',
'py.CLAClassifierRegion',
'{ steps: "1,2", maxAge: 1000}'
)
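  # A direct-use sketch of the same class, bypassing the Network API.  The
  # pattern, bucket index and actual value below are invented for
  # illustration; 'py' selects the pure Python classifier implementation.
  region = CLAClassifierRegion(steps='1', alpha=0.001, verbosity=0,
                               implementation='py', maxCategoryCount=10)
  region.setParameter('learningMode', 0, 1)
  region.setParameter('inferenceMode', 0, 1)
  result = region.customCompute(recordNum=0, patternNZ=[1, 5, 9],
                                classification={'bucketIdx': 4,
                                                'actValue': 34.7})
  print result['actualValues']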
| agpl-3.0 | -7,845,002,573,289,658,000 | 33.016032 | 82 | 0.619006 | false |
tachang/python-saml | tests/src/OneLogin/saml2_tests/authn_request_test.py | 3 | 10526 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, OneLogin, Inc.
# All rights reserved.
from base64 import b64decode
import json
from os.path import dirname, join, exists
import unittest
from urlparse import urlparse, parse_qs
from zlib import decompress
from onelogin.saml2.authn_request import OneLogin_Saml2_Authn_Request
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.utils import OneLogin_Saml2_Utils
class OneLogin_Saml2_Authn_Request_Test(unittest.TestCase):
def loadSettingsJSON(self):
filename = join(dirname(__file__), '..', '..', '..', 'settings', 'settings1.json')
if exists(filename):
stream = open(filename, 'r')
settings = json.load(stream)
stream.close()
return settings
else:
raise Exception('Settings json file does not exist')
def setUp(self):
self.__settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
def testCreateRequest(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request
"""
saml_settings = self.loadSettingsJSON()
settings = OneLogin_Saml2_Settings(saml_settings)
settings._OneLogin_Saml2_Settings__organization = {
u'en-US': {
u'url': u'http://sp.example.com',
u'name': u'sp_test'
}
}
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertNotIn('ProviderName="SP test"', inflated)
saml_settings['organization'] = {}
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertNotIn('ProviderName="SP test"', inflated)
def testCreateRequestAuthContext(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request with defined AuthContext
"""
saml_settings = self.loadSettingsJSON()
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertIn(OneLogin_Saml2_Constants.AC_PASSWORD, inflated)
self.assertNotIn(OneLogin_Saml2_Constants.AC_X509, inflated)
saml_settings['security']['requestedAuthnContext'] = True
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertIn(OneLogin_Saml2_Constants.AC_PASSWORD_PROTECTED, inflated)
self.assertNotIn(OneLogin_Saml2_Constants.AC_X509, inflated)
del saml_settings['security']['requestedAuthnContext']
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertIn(OneLogin_Saml2_Constants.AC_PASSWORD_PROTECTED, inflated)
self.assertNotIn(OneLogin_Saml2_Constants.AC_X509, inflated)
saml_settings['security']['requestedAuthnContext'] = False
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertNotIn(OneLogin_Saml2_Constants.AC_PASSWORD_PROTECTED, inflated)
self.assertNotIn(OneLogin_Saml2_Constants.AC_X509, inflated)
saml_settings['security']['requestedAuthnContext'] = (OneLogin_Saml2_Constants.AC_PASSWORD_PROTECTED, OneLogin_Saml2_Constants.AC_X509)
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertIn(OneLogin_Saml2_Constants.AC_PASSWORD_PROTECTED, inflated)
self.assertIn(OneLogin_Saml2_Constants.AC_X509, inflated)
def testCreateRequestForceAuthN(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request with ForceAuthn="true"
"""
saml_settings = self.loadSettingsJSON()
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertNotIn('ForceAuthn="true"', inflated)
authn_request_2 = OneLogin_Saml2_Authn_Request(settings, False, False)
authn_request_encoded_2 = authn_request_2.get_request()
decoded_2 = b64decode(authn_request_encoded_2)
inflated_2 = decompress(decoded_2, -15)
self.assertRegexpMatches(inflated_2, '^<samlp:AuthnRequest')
self.assertNotIn('ForceAuthn="true"', inflated_2)
authn_request_3 = OneLogin_Saml2_Authn_Request(settings, True, False)
authn_request_encoded_3 = authn_request_3.get_request()
decoded_3 = b64decode(authn_request_encoded_3)
inflated_3 = decompress(decoded_3, -15)
self.assertRegexpMatches(inflated_3, '^<samlp:AuthnRequest')
self.assertIn('ForceAuthn="true"', inflated_3)
def testCreateRequestIsPassive(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request with IsPassive="true"
"""
saml_settings = self.loadSettingsJSON()
settings = OneLogin_Saml2_Settings(saml_settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
authn_request_encoded = authn_request.get_request()
decoded = b64decode(authn_request_encoded)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertNotIn('IsPassive="true"', inflated)
authn_request_2 = OneLogin_Saml2_Authn_Request(settings, False, False)
authn_request_encoded_2 = authn_request_2.get_request()
decoded_2 = b64decode(authn_request_encoded_2)
inflated_2 = decompress(decoded_2, -15)
self.assertRegexpMatches(inflated_2, '^<samlp:AuthnRequest')
self.assertNotIn('IsPassive="true"', inflated_2)
authn_request_3 = OneLogin_Saml2_Authn_Request(settings, False, True)
authn_request_encoded_3 = authn_request_3.get_request()
decoded_3 = b64decode(authn_request_encoded_3)
inflated_3 = decompress(decoded_3, -15)
self.assertRegexpMatches(inflated_3, '^<samlp:AuthnRequest')
self.assertIn('IsPassive="true"', inflated_3)
def testCreateDeflatedSAMLRequestURLParameter(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request
"""
authn_request = OneLogin_Saml2_Authn_Request(self.__settings)
parameters = {
'SAMLRequest': authn_request.get_request()
}
auth_url = OneLogin_Saml2_Utils.redirect('http://idp.example.com/SSOService.php', parameters, True)
self.assertRegexpMatches(auth_url, '^http://idp\.example\.com\/SSOService\.php\?SAMLRequest=')
exploded = urlparse(auth_url)
exploded = parse_qs(exploded[4])
payload = exploded['SAMLRequest'][0]
decoded = b64decode(payload)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
def testCreateEncSAMLRequest(self):
"""
Tests the OneLogin_Saml2_Authn_Request Constructor.
The creation of a deflated SAML Request
"""
settings = self.loadSettingsJSON()
settings['organization'] = {
'es': {
'name': 'sp_prueba',
'displayname': 'SP prueba',
'url': 'http://sp.example.com'
}
}
settings['security']['wantNameIdEncrypted'] = True
settings = OneLogin_Saml2_Settings(settings)
authn_request = OneLogin_Saml2_Authn_Request(settings)
parameters = {
'SAMLRequest': authn_request.get_request()
}
auth_url = OneLogin_Saml2_Utils.redirect('http://idp.example.com/SSOService.php', parameters, True)
self.assertRegexpMatches(auth_url, '^http://idp\.example\.com\/SSOService\.php\?SAMLRequest=')
exploded = urlparse(auth_url)
exploded = parse_qs(exploded[4])
payload = exploded['SAMLRequest'][0]
decoded = b64decode(payload)
inflated = decompress(decoded, -15)
self.assertRegexpMatches(inflated, '^<samlp:AuthnRequest')
self.assertRegexpMatches(inflated, 'AssertionConsumerServiceURL="http://stuff.com/endpoints/endpoints/acs.php">')
self.assertRegexpMatches(inflated, '<saml:Issuer>http://stuff.com/endpoints/metadata.php</saml:Issuer>')
self.assertRegexpMatches(inflated, 'Format="urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted"')
self.assertRegexpMatches(inflated, 'ProviderName="SP prueba"')
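# Convenience sketch so the module can also be run directly; the project's
# usual entry point is its own test runner.
if __name__ == '__main__':
    unittest.main()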
| bsd-3-clause | -3,479,289,380,598,456,000 | 45.575221 | 143 | 0.670055 | false |
marmyshev/transitions | openlp/plugins/songs/lib/xml.py | 1 | 34187 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`xml` module provides the XML functionality.
The basic XML for storing the lyrics in the song database looks like this::
<?xml version="1.0" encoding="UTF-8"?>
<song version="1.0">
<lyrics>
<verse type="c" label="1" lang="en">
<![CDATA[Chorus optional split 1[---]Chorus optional split 2]]>
</verse>
</lyrics>
</song>
The XML of an `OpenLyrics <http://openlyrics.info/>`_ song looks like this::
<song xmlns="http://openlyrics.info/namespace/2009/song"
version="0.7"
createdIn="OpenLP 1.9.0"
modifiedIn="ChangingSong 0.0.1"
modifiedDate="2010-01-28T13:15:30+01:00">
<properties>
<titles>
<title>Amazing Grace</title>
</titles>
</properties>
<lyrics>
<verse name="v1">
<lines>
<line>Amazing grace how sweet the sound</line>
</lines>
</verse>
</lyrics>
</song>
"""
import cgi
import logging
import re
from lxml import etree, objectify
from openlp.core.lib import FormattingTags, translate
from openlp.plugins.songs.lib import VerseType, clean_song
from openlp.plugins.songs.lib.db import Author, Book, Song, Topic
from openlp.core.utils import get_application_version
log = logging.getLogger(__name__)
NAMESPACE = u'http://openlyrics.info/namespace/2009/song'
NSMAP = '{' + NAMESPACE + '}' + '%s'
class SongXML(object):
"""
This class builds and parses the XML used to describe songs.
"""
log.info(u'SongXML Loaded')
def __init__(self):
"""
Set up the default variables.
"""
self.song_xml = objectify.fromstring(u'<song version="1.0" />')
self.lyrics = etree.SubElement(self.song_xml, u'lyrics')
def add_verse_to_lyrics(self, type, number, content, lang=None):
"""
Add a verse to the ``<lyrics>`` tag.
``type``
A string denoting the type of verse. Possible values are *v*,
*c*, *b*, *p*, *i*, *e* and *o*.
            Any other type is **not** allowed; this also includes translated
types.
``number``
An integer denoting the number of the item, for example: verse 1.
``content``
The actual text of the verse to be stored.
``lang``
The verse's language code (ISO-639). This is not required, but
should be added if available.
"""
verse = etree.Element(u'verse', type=unicode(type),
label=unicode(number))
if lang:
verse.set(u'lang', lang)
verse.text = etree.CDATA(content)
self.lyrics.append(verse)
def extract_xml(self):
"""
Extract our newly created XML song.
"""
return etree.tostring(self.song_xml, encoding=u'UTF-8',
xml_declaration=True)
def get_verses(self, xml):
"""
Iterates through the verses in the XML and returns a list of verses
and their attributes.
``xml``
The XML of the song to be parsed.
The returned list has the following format::
[[{'type': 'v', 'label': '1'},
u"optional slide split 1[---]optional slide split 2"],
[{'lang': 'en', 'type': 'c', 'label': '1'}, u"English chorus"]]
"""
self.song_xml = None
verse_list = []
if not xml.startswith(u'<?xml') and not xml.startswith(u'<song'):
# This is an old style song, without XML. Let's handle it correctly
# by iterating through the verses, and then recreating the internal
# xml object as well.
self.song_xml = objectify.fromstring(u'<song version="1.0" />')
self.lyrics = etree.SubElement(self.song_xml, u'lyrics')
verses = xml.split(u'\n\n')
for count, verse in enumerate(verses):
verse_list.append([{u'type': u'v', u'label': unicode(count)}, unicode(verse)])
self.add_verse_to_lyrics(u'v', unicode(count), verse)
return verse_list
elif xml.startswith(u'<?xml'):
xml = xml[38:]
try:
self.song_xml = objectify.fromstring(xml)
except etree.XMLSyntaxError:
log.exception(u'Invalid xml %s', xml)
xml_iter = self.song_xml.getiterator()
for element in xml_iter:
if element.tag == u'verse':
if element.text is None:
element.text = u''
verse_list.append([element.attrib, unicode(element.text)])
return verse_list
def dump_xml(self):
"""
Debugging aid to dump XML so that we can see what we have.
"""
return etree.dump(self.song_xml)
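def _song_xml_usage_sketch():
    """Illustrative round trip (not part of the public OpenLP API): build the
    internal lyrics XML for a single verse and parse it back again."""
    sxml = SongXML()
    sxml.add_verse_to_lyrics(u'v', u'1', u'Amazing grace how sweet the sound')
    xml_string = sxml.extract_xml()
    return SongXML().get_verses(xml_string)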
class OpenLyrics(object):
"""
This class represents the converter for OpenLyrics XML (version 0.8)
to/from a song.
As OpenLyrics has a rich set of different features, we cannot support them
all. The following features are supported by the :class:`OpenLyrics` class:
``<authors>``
OpenLP does not support the attribute *type* and *lang*.
``<chord>``
This property is not supported.
``<comments>``
The ``<comments>`` property is fully supported. But comments in lyrics
are not supported.
``<copyright>``
This property is fully supported.
``<customVersion>``
This property is not supported.
``<key>``
This property is not supported.
``<format>``
The custom formatting tags are fully supported.
``<keywords>``
This property is not supported.
``<lines>``
The attribute *part* is not supported. The *break* attribute is
supported.
``<publisher>``
This property is not supported.
``<songbooks>``
        As OpenLP only supports one songbook, we cannot consider more than
one songbook.
``<tempo>``
This property is not supported.
``<themes>``
Topics, as they are called in OpenLP, are fully supported, whereby only
the topic text (e. g. Grace) is considered, but neither the *id* nor
*lang*.
``<transposition>``
This property is not supported.
``<variant>``
This property is not supported.
``<verse name="v1a" lang="he" translit="en">``
        The attribute *translit* is not supported. Note that the attribute *lang* is
        considered, but there is no further functionality implemented yet. The
following verse "types" are supported by OpenLP:
* v
* c
* b
* p
* i
* e
* o
The verse "types" stand for *Verse*, *Chorus*, *Bridge*, *Pre-Chorus*,
*Intro*, *Ending* and *Other*. Any numeric value is allowed after the
verse type. The complete verse name in OpenLP always consists of the
        verse type and the verse number. If no number is present, *1* is
assumed.
OpenLP will merge verses which are split up by appending a letter to the
verse name, such as *v1a*.
``<verseOrder>``
OpenLP supports this property.
"""
IMPLEMENTED_VERSION = u'0.8'
START_TAGS_REGEX = re.compile(r'\{(\w+)\}')
END_TAGS_REGEX = re.compile(r'\{\/(\w+)\}')
VERSE_TAG_SPLITTER = re.compile(u'([a-zA-Z]+)([0-9]*)([a-zA-Z]?)')
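    # For illustration, VERSE_TAG_SPLITTER splits an OpenLyrics verse name into
    # its type, number and optional split suffix, roughly:
    # >>> OpenLyrics.VERSE_TAG_SPLITTER.search(u'v1a').groups()
    # ('v', '1', 'a')
    # >>> OpenLyrics.VERSE_TAG_SPLITTER.search(u'c').groups()
    # ('c', '', '')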
def __init__(self, manager):
self.manager = manager
def song_to_xml(self, song):
"""
Convert the song to OpenLyrics Format.
"""
sxml = SongXML()
song_xml = objectify.fromstring(u'<song/>')
# Append the necessary meta data to the song.
song_xml.set(u'xmlns', NAMESPACE)
song_xml.set(u'version', OpenLyrics.IMPLEMENTED_VERSION)
application_name = u'OpenLP ' + get_application_version()[u'version']
song_xml.set(u'createdIn', application_name)
song_xml.set(u'modifiedIn', application_name)
# "Convert" 2012-08-27 11:49:15 to 2012-08-27T11:49:15.
song_xml.set(u'modifiedDate', unicode(song.last_modified).replace(u' ', u'T'))
properties = etree.SubElement(song_xml, u'properties')
titles = etree.SubElement(properties, u'titles')
self._add_text_to_element(u'title', titles, song.title)
if song.alternate_title:
self._add_text_to_element(u'title', titles, song.alternate_title)
if song.comments:
comments = etree.SubElement(properties, u'comments')
self._add_text_to_element(u'comment', comments, song.comments)
if song.copyright:
self._add_text_to_element(u'copyright', properties, song.copyright)
if song.verse_order:
self._add_text_to_element(
u'verseOrder', properties, song.verse_order.lower())
if song.ccli_number:
self._add_text_to_element(u'ccliNo', properties, song.ccli_number)
if song.authors:
authors = etree.SubElement(properties, u'authors')
for author in song.authors:
self._add_text_to_element(u'author', authors, author.display_name)
book = self.manager.get_object_filtered(Book, Book.id == song.song_book_id)
if book is not None:
book = book.name
songbooks = etree.SubElement(properties, u'songbooks')
element = self._add_text_to_element(u'songbook', songbooks, None, book)
if song.song_number:
element.set(u'entry', song.song_number)
if song.topics:
themes = etree.SubElement(properties, u'themes')
for topic in song.topics:
self._add_text_to_element(u'theme', themes, topic.name)
# Process the formatting tags.
# Have we any tags in song lyrics?
tags_element = None
match = re.search(u'\{/?\w+\}', song.lyrics, re.UNICODE)
if match:
            # Named 'format_' - 'format' is a built-in function in Python.
format_ = etree.SubElement(song_xml, u'format')
tags_element = etree.SubElement(format_, u'tags')
tags_element.set(u'application', u'OpenLP')
# Process the song's lyrics.
lyrics = etree.SubElement(song_xml, u'lyrics')
verse_list = sxml.get_verses(song.lyrics)
# Add a suffix letter to each verse
verse_tags = []
for verse in verse_list:
verse_tag = verse[0][u'type'][0].lower()
verse_number = verse[0][u'label']
verse_def = verse_tag + verse_number
verse_tags.append(verse_def)
# Create the letter from the number of duplicates
verse[0][u'suffix'] = chr(96 + verse_tags.count(verse_def))
# If the verse tag is a duplicate use the suffix letter
for verse in verse_list:
verse_tag = verse[0][u'type'][0].lower()
verse_number = verse[0][u'label']
verse_def = verse_tag + verse_number
if verse_tags.count(verse_def) > 1:
verse_def += verse[0][u'suffix']
verse_element = self._add_text_to_element(u'verse', lyrics, None, verse_def)
if u'lang' in verse[0]:
verse_element.set(u'lang', verse[0][u'lang'])
# Create a list with all "optional" verses.
optional_verses = cgi.escape(verse[1])
optional_verses = optional_verses.split(u'\n[---]\n')
start_tags = u''
end_tags = u''
for index, optional_verse in enumerate(optional_verses):
# Fix up missing end and start tags such as {r} or {/r}.
optional_verse = start_tags + optional_verse
start_tags, end_tags = self._get_missing_tags(optional_verse)
optional_verse += end_tags
# Add formatting tags to text
lines_element = self._add_text_with_tags_to_lines(verse_element, optional_verse, tags_element)
# Do not add the break attribute to the last lines element.
if index < len(optional_verses) - 1:
lines_element.set(u'break', u'optional')
return self._extract_xml(song_xml)
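    # For illustration, the document produced above has roughly this shape
    # (attribute values shortened):
    #   <song xmlns="..." version="0.8" createdIn="OpenLP ..." ...>
    #     <properties><titles><title>...</title></titles> ...</properties>
    #     <format><tags application="OpenLP"> ...</tags></format>   (only when tags are used)
    #     <lyrics><verse name="v1"><lines> ...</lines></verse> ...</lyrics>
    #   </song>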
def _get_missing_tags(self, text):
"""
Tests the given text for not closed formatting tags and returns a tuple
consisting of two unicode strings::
(u'{st}{r}', u'{/r}{/st}')
The first unicode string are the start tags (for the next slide). The
second unicode string are the end tags.
``text``
The text to test. The text must **not** contain html tags, only
OpenLP formatting tags are allowed::
{st}{r}Text text text
"""
tags = []
for tag in FormattingTags.get_html_tags():
if tag[u'start tag'] == u'{br}':
continue
if text.count(tag[u'start tag']) != text.count(tag[u'end tag']):
tags.append((text.find(tag[u'start tag']), tag[u'start tag'], tag[u'end tag']))
# Sort the lists, so that the tags which were opened first on the first
# slide (the text we are checking) will be opened first on the next
# slide as well.
tags.sort(key=lambda tag: tag[0])
end_tags = []
start_tags = []
for tag in tags:
start_tags.append(tag[1])
end_tags.append(tag[2])
end_tags.reverse()
return u''.join(start_tags), u''.join(end_tags)
def xml_to_song(self, xml, parse_and_temporary_save=False):
"""
Create and save a song from OpenLyrics format xml to the database. Since
we also export XML from external sources (e. g. OpenLyrics import), we
cannot ensure, that it completely conforms to the OpenLyrics standard.
``xml``
The XML to parse (unicode).
``parse_and_temporary_save``
Switch to skip processing the whole song and storing the songs in
the database with a temporary flag. Defaults to ``False``.
"""
        # No xml, get out of here.
if not xml:
return None
if xml[:5] == u'<?xml':
xml = xml[38:]
song_xml = objectify.fromstring(xml)
if hasattr(song_xml, u'properties'):
properties = song_xml.properties
else:
return None
# Formatting tags are new in OpenLyrics 0.8
if float(song_xml.get(u'version')) > 0.7:
self._process_formatting_tags(song_xml, parse_and_temporary_save)
song = Song()
# Values will be set when cleaning the song.
song.search_lyrics = u''
song.verse_order = u''
song.search_title = u''
song.temporary = parse_and_temporary_save
self._process_copyright(properties, song)
self._process_cclinumber(properties, song)
self._process_titles(properties, song)
# The verse order is processed with the lyrics!
self._process_lyrics(properties, song_xml, song)
self._process_comments(properties, song)
self._process_authors(properties, song)
self._process_songbooks(properties, song)
self._process_topics(properties, song)
clean_song(self.manager, song)
self.manager.save_object(song)
return song
def _add_text_to_element(self, tag, parent, text=None, label=None):
if label:
element = etree.Element(tag, name=unicode(label))
else:
element = etree.Element(tag)
if text:
element.text = unicode(text)
parent.append(element)
return element
def _add_tag_to_formatting(self, tag_name, tags_element):
"""
Add new formatting tag to the element ``<format>`` if the tag is not
present yet.
"""
available_tags = FormattingTags.get_html_tags()
start_tag = '{%s}' % tag_name
for tag in available_tags:
if tag[u'start tag'] == start_tag:
# Create new formatting tag in openlyrics xml.
element = self._add_text_to_element(u'tag', tags_element)
element.set(u'name', tag_name)
element_open = self._add_text_to_element(u'open', element)
element_open.text = etree.CDATA(tag[u'start html'])
                # Check if the formatting tag contains an end tag. Some formatting
                # tags, e.g. {br}, have only a start tag. If no end tag is present,
                # no <close> element has to be added to the OpenLyrics xml.
if tag['end tag']:
element_close = self._add_text_to_element(u'close', element)
element_close.text = etree.CDATA(tag[u'end html'])
def _add_text_with_tags_to_lines(self, verse_element, text, tags_element):
"""
Convert text with formatting tags from OpenLP format to OpenLyrics
format and append it to element ``<lines>``.
"""
start_tags = OpenLyrics.START_TAGS_REGEX.findall(text)
end_tags = OpenLyrics.END_TAGS_REGEX.findall(text)
# Replace start tags with xml syntax.
for tag in start_tags:
# Tags already converted to xml structure.
xml_tags = tags_element.xpath(u'tag/attribute::name')
            # Some formatting tags have only a starting part, e.g. <br>.
# Handle this case.
if tag in end_tags:
text = text.replace(u'{%s}' % tag, u'<tag name="%s">' % tag)
else:
text = text.replace(u'{%s}' % tag, u'<tag name="%s"/>' % tag)
# Add tag to <format> element if tag not present.
if tag not in xml_tags:
self._add_tag_to_formatting(tag, tags_element)
# Replace end tags.
for tag in end_tags:
text = text.replace(u'{/%s}' % tag, u'</tag>')
# Replace \n with <br/>.
text = text.replace(u'\n', u'<br/>')
element = etree.XML(u'<lines>%s</lines>' % text)
verse_element.append(element)
return element
def _extract_xml(self, xml):
"""
Extract our newly created XML song.
"""
return etree.tostring(xml, encoding=u'UTF-8',
xml_declaration=True)
def _text(self, element):
"""
This returns the text of an element as unicode string.
``element``
The element.
"""
if element.text is not None:
return unicode(element.text)
return u''
def _process_authors(self, properties, song):
"""
Adds the authors specified in the XML to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
authors = []
if hasattr(properties, u'authors'):
for author in properties.authors.author:
display_name = self._text(author)
if display_name:
authors.append(display_name)
for display_name in authors:
author = self.manager.get_object_filtered(Author,
Author.display_name == display_name)
if author is None:
# We need to create a new author, as the author does not exist.
author = Author.populate(display_name=display_name,
last_name=display_name.split(u' ')[-1],
first_name=u' '.join(display_name.split(u' ')[:-1]))
song.authors.append(author)
def _process_cclinumber(self, properties, song):
"""
Adds the CCLI number to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
if hasattr(properties, u'ccliNo'):
song.ccli_number = self._text(properties.ccliNo)
def _process_comments(self, properties, song):
"""
Joins the comments specified in the XML and add it to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
if hasattr(properties, u'comments'):
comments_list = []
for comment in properties.comments.comment:
comment_text = self._text(comment)
if comment_text:
comments_list.append(comment_text)
song.comments = u'\n'.join(comments_list)
def _process_copyright(self, properties, song):
"""
Adds the copyright to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
if hasattr(properties, u'copyright'):
song.copyright = self._text(properties.copyright)
def _process_formatting_tags(self, song_xml, temporary):
"""
Process the formatting tags from the song and either add missing tags
temporary or permanently to the formatting tag list.
"""
if not hasattr(song_xml, u'format'):
return
found_tags = []
for tag in song_xml.format.tags.getchildren():
name = tag.get(u'name')
if name is None:
continue
start_tag = u'{%s}' % name[:5]
# Some tags have only start tag e.g. {br}
end_tag = u'{/' + name[:5] + u'}' if hasattr(tag, 'close') else u''
openlp_tag = {
u'desc': name,
u'start tag': start_tag,
u'end tag': end_tag,
u'start html': tag.open.text,
# Some tags have only start html e.g. {br}
u'end html': tag.close.text if hasattr(tag, 'close') else u'',
u'protected': False,
}
# Add 'temporary' key in case the formatting tag should not be
# saved otherwise it is supposed that formatting tag is permanent.
if temporary:
openlp_tag[u'temporary'] = temporary
found_tags.append(openlp_tag)
existing_tag_ids = [tag[u'start tag'] for tag in FormattingTags.get_html_tags()]
new_tags = [tag for tag in found_tags if tag[u'start tag'] not in existing_tag_ids]
FormattingTags.add_html_tags(new_tags)
FormattingTags.save_html_tags()
def _process_lines_mixed_content(self, element, newlines=True):
"""
Converts the xml text with mixed content to OpenLP representation.
Chords are skipped and formatting tags are converted.
``element``
The property object (lxml.etree.Element).
``newlines``
The switch to enable/disable processing of line breaks <br/>.
The <br/> is used since OpenLyrics 0.8.
"""
text = u''
use_endtag = True
# Skip <comment> elements - not yet supported.
if element.tag == NSMAP % u'comment':
if element.tail:
                # Append tail text of the comment element.
text += element.tail
return text
# Skip <chord> element - not yet supported.
elif element.tag == NSMAP % u'chord':
if element.tail:
# Append tail text at chord element.
text += element.tail
return text
# Convert line breaks <br/> to \n.
elif newlines and element.tag == NSMAP % u'br':
text += u'\n'
if element.tail:
text += element.tail
return text
# Start formatting tag.
if element.tag == NSMAP % u'tag':
text += u'{%s}' % element.get(u'name')
            # Some formatting tags may have only a start tag.
# Handle this case if element has no children and contains no text.
if not element and not element.text:
use_endtag = False
# Append text from element.
if element.text:
text += element.text
# Process nested formatting tags.
for child in element:
# Use recursion since nested formatting tags are allowed.
text += self._process_lines_mixed_content(child, newlines)
# Append text from tail and add formatting end tag.
if element.tag == NSMAP % 'tag' and use_endtag:
text += u'{/%s}' % element.get(u'name')
# Append text from tail.
if element.tail:
text += element.tail
return text
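    # For illustration, a lines fragment such as
    #   <lines>Amazing <tag name="r">grace</tag><br/>how sweet</lines>
    # comes back from the method above roughly as
    #   u'Amazing {r}grace{/r}\nhow sweet'
    # (chords and comments are dropped, <br/> becomes a newline).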
def _process_verse_lines(self, lines, version):
"""
Converts lyrics lines to OpenLP representation.
``lines``
The lines object (lxml.objectify.ObjectifiedElement).
"""
text = u''
# Convert lxml.objectify to lxml.etree representation.
lines = etree.tostring(lines)
element = etree.XML(lines)
# OpenLyrics 0.8 uses <br/> for new lines.
# Append text from "lines" element to verse text.
if version > '0.7':
text = self._process_lines_mixed_content(element)
        # OpenLyrics version <= 0.7 contains <line> elements to represent lines.
# First child element is tested.
else:
# Loop over the "line" elements removing comments and chords.
for line in element:
# Skip comment lines.
if line.tag == NSMAP % u'comment':
continue
if text:
text += u'\n'
text += self._process_lines_mixed_content(line, newlines=False)
return text
def _process_lyrics(self, properties, song_xml, song_obj):
"""
Processes the verses and search_lyrics for the song.
``properties``
The properties object (lxml.objectify.ObjectifiedElement).
``song_xml``
The objectified song (lxml.objectify.ObjectifiedElement).
``song_obj``
The song object.
"""
sxml = SongXML()
verses = {}
verse_def_list = []
try:
lyrics = song_xml.lyrics
except AttributeError:
raise OpenLyricsError(OpenLyricsError.LyricsError, '<lyrics> tag is missing.',
translate('OpenLP.OpenLyricsImportError', '<lyrics> tag is missing.'))
try:
verse_list = lyrics.verse
except AttributeError:
raise OpenLyricsError(OpenLyricsError.VerseError, '<verse> tag is missing.',
translate('OpenLP.OpenLyricsImportError', '<verse> tag is missing.'))
# Loop over the "verse" elements.
for verse in verse_list:
text = u''
# Loop over the "lines" elements.
for lines in verse.lines:
if text:
text += u'\n'
# Append text from "lines" element to verse text.
text += self._process_verse_lines(lines,
version=song_xml.get(u'version'))
# Add an optional split to the verse text.
if lines.get(u'break') is not None:
text += u'\n[---]'
verse_def = verse.get(u'name', u' ').lower()
verse_tag, verse_number, verse_part = OpenLyrics.VERSE_TAG_SPLITTER.search(verse_def).groups()
if verse_tag not in VerseType.Tags:
verse_tag = VerseType.Tags[VerseType.Other]
# OpenLyrics allows e. g. "c", but we need "c1". However, this does
# not correct the verse order.
if not verse_number:
verse_number = u'1'
lang = verse.get(u'lang')
translit = verse.get(u'translit')
# In OpenLP 1.9.6 we used v1a, v1b ... to represent visual slide
# breaks. In OpenLyrics 0.7 an attribute has been added.
if song_xml.get(u'modifiedIn') in (u'1.9.6', u'OpenLP 1.9.6') and \
song_xml.get(u'version') == u'0.7' and (verse_tag, verse_number, lang, translit) in verses:
verses[(verse_tag, verse_number, lang, translit, None)] += u'\n[---]\n' + text
# Merge v1a, v1b, .... to v1.
elif (verse_tag, verse_number, lang, translit, verse_part) in verses:
verses[(verse_tag, verse_number, lang, translit, verse_part)] += u'\n' + text
else:
verses[(verse_tag, verse_number, lang, translit, verse_part)] = text
verse_def_list.append((verse_tag, verse_number, lang, translit, verse_part))
# We have to use a list to keep the order, as dicts are not sorted.
for verse in verse_def_list:
sxml.add_verse_to_lyrics(verse[0], verse[1], verses[verse], verse[2])
song_obj.lyrics = unicode(sxml.extract_xml(), u'utf-8')
# Process verse order
if hasattr(properties, u'verseOrder'):
song_obj.verse_order = self._text(properties.verseOrder)
def _process_songbooks(self, properties, song):
"""
Adds the song book and song number specified in the XML to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
song.song_book_id = None
song.song_number = u''
if hasattr(properties, u'songbooks'):
for songbook in properties.songbooks.songbook:
book_name = songbook.get(u'name', u'')
if book_name:
book = self.manager.get_object_filtered(Book, Book.name == book_name)
if book is None:
# We need to create a book, because it does not exist.
book = Book.populate(name=book_name, publisher=u'')
self.manager.save_object(book)
song.song_book_id = book.id
song.song_number = songbook.get(u'entry', u'')
# We only support one song book, so take the first one.
break
def _process_titles(self, properties, song):
"""
Processes the titles specified in the song's XML.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
for title in properties.titles.title:
if not song.title:
song.title = self._text(title)
song.alternate_title = u''
else:
song.alternate_title = self._text(title)
def _process_topics(self, properties, song):
"""
Adds the topics to the song.
``properties``
The property object (lxml.objectify.ObjectifiedElement).
``song``
The song object.
"""
if hasattr(properties, u'themes'):
for topic_text in properties.themes.theme:
topic_text = self._text(topic_text)
if topic_text:
topic = self.manager.get_object_filtered(Topic, Topic.name == topic_text)
if topic is None:
# We need to create a topic, because it does not exist.
topic = Topic.populate(name=topic_text)
self.manager.save_object(topic)
song.topics.append(topic)
def _dump_xml(self, xml):
"""
Debugging aid to dump XML so that we can see what we have.
"""
return etree.tostring(xml, encoding=u'UTF-8', xml_declaration=True, pretty_print=True)
class OpenLyricsError(Exception):
# XML tree is missing the lyrics tag
LyricsError = 1
# XML tree has no verse tags
VerseError = 2
def __init__(self, type, log_message, display_message):
Exception.__init__(self)
self.type = type
self.log_message = log_message
self.display_message = display_message
| gpl-2.0 | -371,223,998,917,800,600 | 39.076202 | 111 | 0.556121 | false |
nubark/odoo | addons/payment_paypal/models/paypal.py | 12 | 19220 | # -*- coding: utf-8 -*-
import base64
import json
import logging
import urlparse
import werkzeug.urls
import urllib2
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AcquirerPaypal(osv.Model):
_inherit = 'payment.acquirer'
def _get_paypal_urls(self, cr, uid, environment, context=None):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerPaypal, self)._get_providers(cr, uid, context=context)
providers.append(['paypal', 'Paypal'])
return providers
_columns = {
'paypal_email_account': fields.char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user'),
'paypal_seller_account': fields.char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.'),
'paypal_use_ipn': fields.boolean('Use IPN', help='Paypal Instant Payment Notification', groups='base.group_user'),
# Server 2 server
'paypal_api_enabled': fields.boolean('Use Rest API'),
'paypal_api_username': fields.char('Rest API Username', groups='base.group_user'),
'paypal_api_password': fields.char('Rest API Password', groups='base.group_user'),
'paypal_api_access_token': fields.char('Access Token', groups='base.group_user'),
'paypal_api_access_token_validity': fields.datetime('Access Token Validity', groups='base.group_user'),
}
_defaults = {
'paypal_use_ipn': True,
'fees_active': False,
'fees_dom_fixed': 0.35,
'fees_dom_var': 3.4,
'fees_int_fixed': 0.35,
'fees_int_var': 3.9,
'paypal_api_enabled': False,
}
def _migrate_paypal_account(self, cr, uid, context=None):
""" COMPLETE ME """
cr.execute('SELECT id, paypal_account FROM res_company')
res = cr.fetchall()
for (company_id, company_paypal_account) in res:
if company_paypal_account:
company_paypal_ids = self.search(cr, uid, [('company_id', '=', company_id), ('provider', '=', 'paypal')], limit=1, context=context)
if company_paypal_ids:
self.write(cr, uid, company_paypal_ids, {'paypal_email_account': company_paypal_account}, context=context)
else:
paypal_view = self.pool['ir.model.data'].get_object(cr, uid, 'payment_paypal', 'paypal_acquirer_button')
self.create(cr, uid, {
'name': 'Paypal',
'provider': 'paypal',
'paypal_email_account': company_paypal_account,
'view_template_id': paypal_view.id,
}, context=context)
return True
def paypal_compute_fees(self, cr, uid, id, amount, currency_id, country_id, context=None):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
acquirer = self.browse(cr, uid, id, context=context)
if not acquirer.fees_active:
return 0.0
country = self.pool['res.country'].browse(cr, uid, country_id, context=context)
if country and acquirer.company_id.country_id.id == country.id:
percentage = acquirer.fees_dom_var
fixed = acquirer.fees_dom_fixed
else:
percentage = acquirer.fees_int_var
fixed = acquirer.fees_int_fixed
        fees = (percentage / 100.0 * amount + fixed) / (1 - percentage / 100.0)
return fees
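    # For illustration, with the default domestic fees (3.4% + 0.35 fixed) a
    # 100.0 payment gives fees = (3.4 / 100.0 * 100.0 + 0.35) / (1 - 3.4 / 100.0),
    # roughly 3.88: the fee is grossed up so the seller still nets the original
    # amount once Paypal has taken its percentage and fixed fee.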
def paypal_form_generate_values(self, cr, uid, id, values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
paypal_tx_values = dict(values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': acquirer.paypal_email_account,
'item_name': '%s: %s' % (acquirer.company_id.name, values['reference']),
'item_number': values['reference'],
'amount': values['amount'],
'currency_code': values['currency'] and values['currency'].name or '',
'address1': values.get('partner_address'),
'city': values.get('partner_city'),
'country': values.get('partner_country') and values.get('partner_country').code or '',
'state': values.get('partner_state') and (values.get('partner_state').code or values.get('partner_state').name) or '',
'email': values.get('partner_email'),
'zip_code': values.get('partner_zip'),
'first_name': values.get('partner_first_name'),
'last_name': values.get('partner_last_name'),
'paypal_return': '%s' % urlparse.urljoin(base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(base_url, PaypalController._cancel_url),
'handling': '%.2f' % paypal_tx_values.pop('fees', 0.0) if acquirer.fees_active else False,
'custom': json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')}) if paypal_tx_values.get('return_url') else False,
})
return paypal_tx_values
def paypal_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_paypal_urls(cr, uid, acquirer.environment, context=context)['paypal_form_url']
def _paypal_s2s_get_access_token(self, cr, uid, ids, context=None):
"""
        Note: see http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
        for an explanation of why we build the Authorization header ourselves
        instead of using urllib2's password manager.
"""
res = dict.fromkeys(ids, False)
parameters = werkzeug.url_encode({'grant_type': 'client_credentials'})
for acquirer in self.browse(cr, uid, ids, context=context):
tx_url = self._get_paypal_urls(cr, uid, acquirer.environment)['paypal_rest_url']
request = urllib2.Request(tx_url, parameters)
# add other headers (https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/)
request.add_header('Accept', 'application/json')
request.add_header('Accept-Language', 'en_US')
# add authorization header
base64string = base64.encodestring('%s:%s' % (
acquirer.paypal_api_username,
acquirer.paypal_api_password)
).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
request = urllib2.urlopen(request)
result = request.read()
res[acquirer.id] = json.loads(result).get('access_token')
request.close()
return res
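    # For illustration (made-up values), the request built above carries a
    # standard HTTP Basic header, i.e.
    #   Authorization: Basic base64('<paypal_api_username>:<paypal_api_password>')
    # and the 'access_token' value of the JSON reply is stored per acquirer id
    # in the returned dict.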
class TxPaypal(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'paypal_txn_type': fields.char('Transaction type'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _paypal_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = _('Paypal: received data with missing reference (%s) or txn_id (%s)') % (reference, txn_id)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple order found'
_logger.info(error_msg)
raise ValidationError(error_msg)
return self.browse(cr, uid, tx_ids[0], context=context)
def _paypal_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
_logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
if data.get('test_ipn'):
            _logger.warning(
                'Received a notification from Paypal using sandbox')
        # TODO: txn_id: should be False at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('txn_id') != tx.acquirer_reference:
invalid_parameters.append(('txn_id', data.get('txn_id'), tx.acquirer_reference))
        # check what was bought
if float_compare(float(data.get('mc_gross', '0.0')), (tx.amount + tx.fees), 2) != 0:
invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % tx.amount)) # mc_gross is amount + fees
if data.get('mc_currency') != tx.currency_id.name:
invalid_parameters.append(('mc_currency', data.get('mc_currency'), tx.currency_id.name))
if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), tx.fees, 2) != 0:
invalid_parameters.append(('handling_amount', data.get('handling_amount'), tx.fees))
# check buyer
if tx.payment_method_id and data.get('payer_id') != tx.payment_method_id.acquirer_ref:
invalid_parameters.append(('payer_id', data.get('payer_id'), tx.payment_method_id.acquirer_ref))
# check seller
if data.get('receiver_id') and tx.acquirer_id.paypal_seller_account and data['receiver_id'] != tx.acquirer_id.paypal_seller_account:
invalid_parameters.append(('receiver_id', data.get('receiver_id'), tx.acquirer_id.paypal_seller_account))
if not data.get('receiver_id') or not tx.acquirer_id.paypal_seller_account:
# Check receiver_email only if receiver_id was not checked.
            # In Paypal it is possible to configure a receiver_email that differs from the business email (the login email).
            # In Odoo there is only one field for the Paypal email: the business email, so a different receiver_email
            # cannot be set. Therefore, if you want such a configuration in Paypal, you have to fill in
            # the Merchant ID in the Paypal payment acquirer in Odoo, so the check is performed on that field instead of the receiver_email.
# At least one of the two checks must be done, to avoid fraudsters.
if data.get('receiver_email') != tx.acquirer_id.paypal_email_account:
invalid_parameters.append(('receiver_email', data.get('receiver_email'), tx.acquirer_id.paypal_email_account))
return invalid_parameters
def _paypal_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('payment_status')
res = {
'acquirer_reference': data.get('txn_id'),
'paypal_txn_type': data.get('payment_type'),
}
if status in ['Completed', 'Processed']:
_logger.info('Validated Paypal payment for tx %s: set as done' % (tx.reference))
res.update(state='done', date_validate=data.get('payment_date', fields.datetime.now()))
return tx.write(res)
elif status in ['Pending', 'Expired']:
_logger.info('Received notification for Paypal payment %s: set as pending' % (tx.reference))
res.update(state='pending', state_message=data.get('pending_reason', ''))
return tx.write(res)
else:
error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
res.update(state='error', state_message=error)
return tx.write(res)
# --------------------------------------------------
# SERVER2SERVER RELATED METHODS
# --------------------------------------------------
def _paypal_try_url(self, request, tries=3, context=None):
""" Try to contact Paypal. Due to some issues, internal service errors
seem to be quite frequent. Several tries are done before considering
the communication as failed.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
done, res = False, None
while (not done and tries):
try:
res = urllib2.urlopen(request)
done = True
except urllib2.HTTPError as e:
res = e.read()
e.close()
if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':
_logger.warning('Failed contacting Paypal, retrying (%s remaining)' % tries)
tries = tries - 1
if not res:
pass
# raise openerp.exceptions.
result = res.read()
res.close()
return result
def _paypal_s2s_send(self, cr, uid, values, cc_values, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx_id = self.create(cr, uid, values, context=context)
tx = self.browse(cr, uid, tx_id, context=context)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
data = {
'intent': 'sale',
'transactions': [{
'amount': {
'total': '%.2f' % tx.amount,
'currency': tx.currency_id.name,
},
'description': tx.reference,
}]
}
if cc_values:
data['payer'] = {
'payment_method': 'credit_card',
'funding_instruments': [{
'credit_card': {
'number': cc_values['number'],
'type': cc_values['brand'],
'expire_month': cc_values['expiry_mm'],
'expire_year': cc_values['expiry_yy'],
'cvv2': cc_values['cvc'],
'first_name': tx.partner_name,
'last_name': tx.partner_name,
'billing_address': {
'line1': tx.partner_address,
'city': tx.partner_city,
'country_code': tx.partner_country_id.code,
'postal_code': tx.partner_zip,
}
}
}]
}
else:
# TODO: complete redirect URLs
data['redirect_urls'] = {
# 'return_url': 'http://example.com/your_redirect_url/',
# 'cancel_url': 'http://example.com/your_cancel_url/',
},
data['payer'] = {
'payment_method': 'paypal',
}
data = json.dumps(data)
request = urllib2.Request('https://api.sandbox.paypal.com/v1/payments/payment', data, headers)
result = self._paypal_try_url(request, tries=3, context=context)
return (tx_id, result)
def _paypal_s2s_get_invalid_parameters(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
invalid_parameters = []
return invalid_parameters
def _paypal_s2s_validate(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
values = json.loads(data)
status = values.get('state')
if status in ['approved']:
_logger.info('Validated Paypal s2s payment for tx %s: set as done' % (tx.reference))
tx.write({
'state': 'done',
                'date_validate': values.get('update_time', fields.datetime.now()),
'paypal_txn_id': values['id'],
})
return True
elif status in ['pending', 'expired']:
_logger.info('Received notification for Paypal s2s payment %s: set as pending' % (tx.reference))
tx.write({
'state': 'pending',
# 'state_message': data.get('pending_reason', ''),
'paypal_txn_id': values['id'],
})
return True
else:
error = 'Received unrecognized status for Paypal s2s payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
tx.write({
'state': 'error',
# 'state_message': error,
'paypal_txn_id': values['id'],
})
return False
def _paypal_s2s_get_tx_status(self, cr, uid, tx, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
# TDETODO: check tx.paypal_txn_id is set
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
url = 'https://api.sandbox.paypal.com/v1/payments/payment/%s' % (tx.paypal_txn_id)
request = urllib2.Request(url, headers=headers)
data = self._paypal_try_url(request, tries=3, context=context)
return self.s2s_feedback(cr, uid, tx.id, data, context=context)
| gpl-3.0 | -5,452,991,303,755,321,000 | 45.650485 | 147 | 0.565088 | false |
pakit/recipes_bot | tests/conftest.py | 3 | 1284 | """
Used for pytest plugins & session scoped fixtures.
"""
from __future__ import absolute_import
import sys
import mock
import pytest
import tests.common as tc
@pytest.fixture(scope='session', autouse=True)
def setup_test_bed(request):
"""
Fixture sets up the testing environment for pakit as a whole.
Session scope, executes before all tests.
"""
request.addfinalizer(tc.env_teardown)
tc.env_setup()
@pytest.yield_fixture()
def mock_print():
"""
A fixture that mocks python's print function during test.
"""
if sys.version_info < (3, 0):
print_mod = '__builtin__.print'
else:
print_mod = 'builtins.print'
with mock.patch(print_mod) as mock_obj:
yield mock_obj
@pytest.yield_fixture()
def mock_input():
"""
    A fixture that mocks python's input function during test.
"""
if sys.version_info < (3, 0):
input_mod = '__builtin__.raw_input'
else:
input_mod = 'builtins.input'
with mock.patch(input_mod) as mock_obj:
yield mock_obj
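# For illustration (hypothetical test), a test requests the fixture by name and
# asserts on the patched builtin:
#
# def test_prints_greeting(mock_print):
#     some_module.greet()  # hypothetical code under test
#     mock_print.assert_called_once_with('hello')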
@pytest.yield_fixture(scope='function', autouse=True)
def around_all_tests():
"""
Executes before and after EVERY test.
Can be helpful for tracking bugs impacting test bed.
"""
# before
yield
# after
| bsd-3-clause | 199,374,408,169,850,370 | 21.526316 | 65 | 0.640966 | false |
Donkyhotay/MoonPy | zope/app/publication/tests/test_zopepublication.py | 1 | 20095 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Zope Publication Tests
$Id$
"""
import unittest
import sys
from cStringIO import StringIO
from persistent import Persistent
from ZODB.DB import DB
from ZODB.DemoStorage import DemoStorage
import transaction
import zope.component
from zope.interface.verify import verifyClass
from zope.interface import implements, classImplements, implementedBy
from zope.i18n.interfaces import IUserPreferredCharsets
from zope.component.interfaces import ComponentLookupError
from zope.publisher.base import TestPublication, TestRequest
from zope.publisher.http import IHTTPRequest, HTTPCharsets
from zope.publisher.interfaces import IRequest, IPublishTraverse
from zope.security import simplepolicies
from zope.security.management import setSecurityPolicy, queryInteraction
from zope.security.management import endInteraction
from zope.traversing.interfaces import IPhysicallyLocatable
from zope.location.interfaces import ILocation
from zope.app.testing.placelesssetup import PlacelessSetup
from zope.app.testing import setup, ztapi
from zope.app.error.interfaces import IErrorReportingUtility
from zope.app.security.principalregistry import principalRegistry
from zope.app.security.interfaces import IUnauthenticatedPrincipal, IPrincipal
from zope.app.publication.zopepublication import ZopePublication
from zope.app.folder import Folder, rootFolder
from zope.location import Location
from zope.app.security.interfaces import IAuthenticationUtility
class Principal(object):
implements(IPrincipal)
def __init__(self, id):
self.id = id
self.title = ''
self.description = ''
class UnauthenticatedPrincipal(Principal):
implements(IUnauthenticatedPrincipal)
class AuthUtility1(object):
def authenticate(self, request):
return None
def unauthenticatedPrincipal(self):
return UnauthenticatedPrincipal('test.anonymous')
def unauthorized(self, id, request):
pass
def getPrincipal(self, id):
return UnauthenticatedPrincipal(id)
class AuthUtility2(AuthUtility1):
def authenticate(self, request):
return Principal('test.bob')
def getPrincipal(self, id):
return Principal(id)
class ErrorReportingUtility(object):
implements(IErrorReportingUtility)
def __init__(self):
self.exceptions = []
def raising(self, info, request=None):
self.exceptions.append([info, request])
class LocatableObject(Location):
def foo(self):
pass
class TestRequest(TestRequest):
URL='http://test.url'
class BasePublicationTests(PlacelessSetup, unittest.TestCase):
def setUp(self):
super(BasePublicationTests, self).setUp()
from zope.security.management import endInteraction
endInteraction()
ztapi.provideAdapter(IHTTPRequest, IUserPreferredCharsets,
HTTPCharsets)
self.policy = setSecurityPolicy(
simplepolicies.PermissiveSecurityPolicy
)
self.storage = DemoStorage('test_storage')
self.db = db = DB(self.storage)
connection = db.open()
root = connection.root()
app = getattr(root, ZopePublication.root_name, None)
if app is None:
from zope.app.folder import rootFolder
app = rootFolder()
root[ZopePublication.root_name] = app
transaction.commit()
connection.close()
self.app = app
from zope.traversing.namespace import view, resource, etc
ztapi.provideNamespaceHandler('view', view)
ztapi.provideNamespaceHandler('resource', resource)
ztapi.provideNamespaceHandler('etc', etc)
self.request = TestRequest('/f1/f2')
self.user = Principal('test.principal')
self.request.setPrincipal(self.user)
from zope.interface import Interface
self.presentation_type = Interface
self.request._presentation_type = self.presentation_type
self.object = object()
self.publication = ZopePublication(self.db)
def tearDown(self):
super(BasePublicationTests, self).tearDown()
def testInterfacesVerify(self):
for interface in implementedBy(ZopePublication):
verifyClass(interface, TestPublication)
class ZopePublicationErrorHandling(BasePublicationTests):
def testRetryAllowed(self):
from ZODB.POSException import ConflictError
from zope.publisher.interfaces import Retry
try:
raise ConflictError
except:
self.assertRaises(Retry, self.publication.handleException,
self.object, self.request, sys.exc_info(), retry_allowed=True)
try:
raise Retry(sys.exc_info())
except:
self.assertRaises(Retry, self.publication.handleException,
self.object, self.request, sys.exc_info(), retry_allowed=True)
def testRetryNotAllowed(self):
from ZODB.POSException import ConflictError
from zope.publisher.interfaces import Retry
try:
raise ConflictError
except:
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
value = ''.join(self.request.response._result).split()
self.assertEqual(' '.join(value[:6]),
'Traceback (most recent call last): File')
self.assertEqual(' '.join(value[-8:]),
'in testRetryNotAllowed raise ConflictError'
' ConflictError: database conflict error')
try:
raise Retry(sys.exc_info())
except:
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
value = ''.join(self.request.response._result).split()
self.assertEqual(' '.join(value[:6]),
'Traceback (most recent call last): File')
self.assertEqual(' '.join(value[-8:]),
'in testRetryNotAllowed raise Retry(sys.exc_info())'
' Retry: database conflict error')
try:
raise Retry
except:
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
value = ''.join(self.request.response._result).split()
self.assertEqual(' '.join(value[:6]),
'Traceback (most recent call last): File')
self.assertEqual(' '.join(value[-6:]),
'in testRetryNotAllowed raise Retry'
' Retry: None')
def testViewOnException(self):
from zope.interface import Interface
class E1(Exception):
pass
ztapi.setDefaultViewName(E1, 'name',
layer=None,
type=self.presentation_type)
view_text = 'You had a conflict error'
ztapi.provideView(E1, self.presentation_type, Interface,
'name', lambda obj, request: lambda: view_text)
try:
raise E1
except:
pass
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
self.assertEqual(self.request.response._result, view_text)
def testHandlingSystemErrors(self):
        # Generally, when there is a view for an exception, we assume
# it is a user error, not a system error and we don't log it.
from zope.testing import loggingsupport
handler = loggingsupport.InstalledHandler('SiteError')
self.testViewOnException()
self.assertEqual(
str(handler),
'SiteError ERROR\n'
' Error while reporting an error to the Error Reporting utility')
        # Here we got a single log record, because we hadn't
# installed an error reporting utility. That's OK.
handler.uninstall()
handler = loggingsupport.InstalledHandler('SiteError')
# Now, we'll register an exception view that indicates that we
# have a system error.
from zope.interface import Interface, implements
class E2(Exception):
pass
ztapi.setDefaultViewName(E2, 'name',
layer=self.presentation_type,
type=self.presentation_type)
view_text = 'You had a conflict error'
from zope.app.exception.interfaces import ISystemErrorView
class MyView:
implements(ISystemErrorView)
def __init__(self, context, request):
pass
def isSystemError(self):
return True
def __call__(self):
return view_text
ztapi.provideView(E2, self.presentation_type, Interface,
'name', MyView)
try:
raise E2
except:
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
# Now, since the view was a system error view, we should have
# a log entry for the E2 error (as well as the missing
# error reporting utility).
self.assertEqual(
str(handler),
'SiteError ERROR\n'
' Error while reporting an error to the Error Reporting utility\n'
'SiteError ERROR\n'
' http://test.url'
)
handler.uninstall()
def testNoViewOnClassicClassException(self):
from zope.interface import Interface
from types import ClassType
class ClassicError:
__metaclass__ = ClassType
class IClassicError(Interface):
pass
classImplements(ClassicError, IClassicError)
ztapi.setDefaultViewName(IClassicError, 'name', self.presentation_type)
view_text = 'You made a classic error ;-)'
ztapi.provideView(IClassicError, self.presentation_type, Interface,
'name', lambda obj,request: lambda: view_text)
try:
raise ClassicError
except:
pass
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
# check we don't get the view we registered
self.failIf(''.join(self.request.response._result) == view_text)
# check we do actually get something
self.failIf(''.join(self.request.response._result) == '')
def testExceptionSideEffects(self):
from zope.publisher.interfaces import IExceptionSideEffects
class SideEffects(object):
implements(IExceptionSideEffects)
def __init__(self, exception):
self.exception = exception
def __call__(self, obj, request, exc_info):
self.obj = obj
self.request = request
self.exception_type = exc_info[0]
self.exception_from_info = exc_info[1]
class SideEffectsFactory:
def __call__(self, exception):
self.adapter = SideEffects(exception)
return self.adapter
factory = SideEffectsFactory()
from ZODB.POSException import ConflictError
from zope.interface import Interface
class IConflictError(Interface):
pass
classImplements(ConflictError, IConflictError)
ztapi.provideAdapter(IConflictError, IExceptionSideEffects, factory)
exception = ConflictError()
try:
raise exception
except:
pass
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
adapter = factory.adapter
self.assertEqual(exception, adapter.exception)
self.assertEqual(exception, adapter.exception_from_info)
self.assertEqual(ConflictError, adapter.exception_type)
self.assertEqual(self.object, adapter.obj)
self.assertEqual(self.request, adapter.request)
def testExceptionResetsResponse(self):
from zope.publisher.browser import TestRequest
request = TestRequest()
request.response.setHeader('Content-Type', 'application/pdf')
request.response.setCookie('spam', 'eggs')
from ZODB.POSException import ConflictError
try:
raise ConflictError
except:
pass
self.publication.handleException(
self.object, request, sys.exc_info(), retry_allowed=False)
self.assertEqual(request.response.getHeader('Content-Type'),
'text/html;charset=utf-8')
self.assertEqual(request.response._cookies, {})
def testAbortOrCommitTransaction(self):
txn = transaction.get()
try:
raise Exception
except:
pass
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
# assert that we get a new transaction
self.assert_(txn is not transaction.get())
def testAbortTransactionWithErrorReportingUtility(self):
# provide our fake error reporting utility
zope.component.provideUtility(ErrorReportingUtility())
class FooError(Exception):
pass
last_txn_info = self.db.undoInfo()[0]
try:
raise FooError
except FooError:
pass
self.publication.handleException(
self.object, self.request, sys.exc_info(), retry_allowed=False)
# assert that the last transaction is NOT our transaction
new_txn_info = self.db.undoInfo()[0]
self.assertEqual(last_txn_info, new_txn_info)
# instead, we expect a message in our logging utility
error_log = zope.component.getUtility(IErrorReportingUtility)
self.assertEqual(len(error_log.exceptions), 1)
error_info, request = error_log.exceptions[0]
self.assertEqual(error_info[0], FooError)
self.assert_(isinstance(error_info[1], FooError))
self.assert_(request is self.request)
class ZopePublicationTests(BasePublicationTests):
def testPlacefulAuth(self):
setup.setUpTraversal()
setup.setUpSiteManagerLookup()
principalRegistry.defineDefaultPrincipal('anonymous', '')
root = self.db.open().root()
app = root[ZopePublication.root_name]
app['f1'] = rootFolder()
f1 = app['f1']
f1['f2'] = Folder()
sm1 = setup.createSiteManager(f1)
setup.addUtility(sm1, '', IAuthenticationUtility, AuthUtility1())
f2 = f1['f2']
sm2 = setup.createSiteManager(f2)
setup.addUtility(sm2, '', IAuthenticationUtility, AuthUtility2())
transaction.commit()
from zope.app.container.interfaces import ISimpleReadContainer
from zope.app.container.traversal import ContainerTraverser
ztapi.provideView(ISimpleReadContainer, IRequest, IPublishTraverse,
'', ContainerTraverser)
from zope.app.folder.interfaces import IFolder
from zope.security.checker import defineChecker, InterfaceChecker
defineChecker(Folder, InterfaceChecker(IFolder))
self.publication.beforeTraversal(self.request)
self.assertEqual(list(queryInteraction().participations),
[self.request])
self.assertEqual(self.request.principal.id, 'anonymous')
root = self.publication.getApplication(self.request)
self.publication.callTraversalHooks(self.request, root)
self.assertEqual(self.request.principal.id, 'anonymous')
ob = self.publication.traverseName(self.request, root, 'f1')
self.publication.callTraversalHooks(self.request, ob)
self.assertEqual(self.request.principal.id, 'test.anonymous')
ob = self.publication.traverseName(self.request, ob, 'f2')
self.publication.afterTraversal(self.request, ob)
self.assertEqual(self.request.principal.id, 'test.bob')
self.assertEqual(list(queryInteraction().participations),
[self.request])
self.publication.endRequest(self.request, ob)
self.assertEqual(queryInteraction(), None)
def testTransactionCommitAfterCall(self):
root = self.db.open().root()
txn = transaction.get()
# we just need a change in the database to make the
# transaction notable in the undo log
root['foo'] = object()
last_txn_info = self.db.undoInfo()[0]
self.publication.afterCall(self.request, self.object)
self.assert_(txn is not transaction.get())
new_txn_info = self.db.undoInfo()[0]
self.failIfEqual(last_txn_info, new_txn_info)
def testTransactionAnnotation(self):
from zope.interface import directlyProvides
from zope.location.traversing import LocationPhysicallyLocatable
from zope.location.interfaces import ILocation
from zope.traversing.interfaces import IPhysicallyLocatable
from zope.traversing.interfaces import IContainmentRoot
ztapi.provideAdapter(ILocation, IPhysicallyLocatable,
LocationPhysicallyLocatable)
root = self.db.open().root()
root['foo'] = foo = LocatableObject()
root['bar'] = bar = LocatableObject()
bar.__name__ = 'bar'
foo.__name__ = 'foo'
bar.__parent__ = foo
foo.__parent__ = root
directlyProvides(root, IContainmentRoot)
from zope.publisher.interfaces import IRequest
expected_path = "/foo/bar"
expected_user = "/ " + self.user.id
expected_request = IRequest.__module__ + '.' + IRequest.getName()
self.publication.afterCall(self.request, bar)
txn_info = self.db.undoInfo()[0]
self.assertEqual(txn_info['location'], expected_path)
self.assertEqual(txn_info['user_name'], expected_user)
self.assertEqual(txn_info['request_type'], expected_request)
# also, assert that we still get the right location when
# passing an instance method as object.
self.publication.afterCall(self.request, bar.foo)
self.assertEqual(txn_info['location'], expected_path)
def testSiteEvents(self):
from zope.app.publication.interfaces import IBeforeTraverseEvent
from zope.app.publication.interfaces import IEndRequestEvent
set = []
clear = []
ztapi.subscribe([IBeforeTraverseEvent], None, set.append)
ztapi.subscribe([IEndRequestEvent], None, clear.append)
ob = object()
# This should fire the BeforeTraverseEvent
self.publication.callTraversalHooks(self.request, ob)
self.assertEqual(len(set), 1)
self.assertEqual(len(clear), 0)
self.assertEqual(set[0].object, ob)
ob2 = object()
# This should fire the EndRequestEvent
self.publication.endRequest(self.request, ob2)
self.assertEqual(len(set), 1)
self.assertEqual(len(clear), 1)
self.assertEqual(clear[0].object, ob2)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ZopePublicationTests),
unittest.makeSuite(ZopePublicationErrorHandling),
))
if __name__ == '__main__':
unittest.TextTestRunner().run(test_suite())
| gpl-3.0 | -3,034,679,002,042,648,600 | 36.490672 | 79 | 0.639015 | false |