def cpe_superset(cls, source, target):
"""
Compares two WFNs and returns True if the set-theoretic relation
between the names is (non-proper) SUPERSET.
:param CPE2_3_WFN source: first WFN CPE Name
:param CPE2_3_WFN target: second WFN CPE Name
:returns: True if the set relation between source and target
is SUPERSET, otherwise False.
:rtype: boolean
"""
# If any pairwise comparison returned something other than SUPERSET
# or EQUAL, then SUPERSET is False.
for att, result in CPESet2_3.compare_wfns(source, target):
isSuperset = result == CPESet2_3.LOGICAL_VALUE_SUPERSET
isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL
if (not isSuperset) and (not isEqual):
return False
return True
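A minimal usage sketch (hedged): the WFN strings follow the parser shown later in this section, the import paths mirror the relative imports used in the doctests below, and attributes left unset are assumed to compare as ANY/EQUAL.
from cpe.cpe2_3_wfn import CPE2_3_WFN
from cpe.cpeset2_3 import CPESet2_3
source = CPE2_3_WFN('wfn:[part="a", vendor=ANY, product="windows"]')
target = CPE2_3_WFN('wfn:[part="a", vendor="microsoft", product="windows"]')
CPESet2_3.cpe_superset(source, target)   # True: ANY vendor covers "microsoft"
CPESet2_3.cpe_superset(target, source)   # False: the relation here is SUBSET, not SUPERSET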
def append(self, cpe):
"""
Adds a CPE element to the set if it is not already present.
Only WFN CPE Names are valid, so this function converts the input CPE
object of version 2.3 to WFN style.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
"""
if cpe.VERSION != CPE2_3.VERSION:
errmsg = "CPE Name version {0} not valid, version 2.3 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe._str == k._str:
return None
if isinstance(cpe, CPE2_3_WFN):
self.K.append(cpe)
else:
# Convert the CPE Name to WFN
wfn = CPE2_3_WFN(cpe.as_wfn())
self.K.append(wfn)
def name_match(self, wfn):
"""
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE2_3_WFN wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
"""
for N in self.K:
if CPESet2_3.cpe_superset(wfn, N):
return True
return False
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# Check prefix and initial bracket of WFN
if self._str[0:5] != CPE2_3_WFN.CPE_PREFIX:
errmsg = "Bad-formed CPE Name: WFN prefix not found"
raise ValueError(errmsg)
# Check final bracket
if self._str[-1:] != "]":
errmsg = "Bad-formed CPE Name: final bracket of WFN not found"
raise ValueError(errmsg)
content = self._str[5:-1]
if content != "":
# Dictionary with pairs attribute-value
components = dict()
# Split WFN in components
list_component = content.split(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Adds the defined components
for e in list_component:
# Whitespace not valid in component names and values
if e.find(" ") != -1:
msg = "Bad-formed CPE Name: WFN with too many whitespaces"
raise ValueError(msg)
# Split pair attribute-value
pair = e.split(CPEComponent2_3_WFN.SEPARATOR_PAIR)
att_name = pair[0]
att_value = pair[1]
# Check valid attribute name
if att_name not in CPEComponent.CPE_COMP_KEYS_EXTENDED:
msg = "Bad-formed CPE Name: invalid attribute name '{0}'".format(
att_name)
raise ValueError(msg)
if att_name in components:
# Duplicate attribute
msg = "Bad-formed CPE Name: attribute '{0}' repeated".format(
att_name)
raise ValueError(msg)
if not (att_value.startswith('"') and
att_value.endswith('"')):
# Logical value
strUpper = att_value.upper()
if strUpper == CPEComponent2_3_WFN.VALUE_ANY:
comp = CPEComponentAnyValue()
elif strUpper == CPEComponent2_3_WFN.VALUE_NA:
comp = CPEComponentNotApplicable()
else:
msg = "Invalid logical value '{0}'".format(att_value)
raise ValueError(msg)
elif att_value.startswith('"') and att_value.endswith('"'):
# String value
comp = CPEComponent2_3_WFN(att_value, att_name)
else:
# Bad value
msg = "Bad-formed CPE Name: invalid value '{0}'".format(
att_value)
raise ValueError(msg)
components[att_name] = comp
# Adds the undefined components
for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck not in components:
components[ck] = CPEComponentUndefined()
# #######################
# Storage of CPE Name #
# #######################
part_comp = components[CPEComponent.ATT_PART]
if isinstance(part_comp, CPEComponentLogical):
elements = []
elements.append(components)
self[CPE.KEY_UNDEFINED] = elements
else:
# Create internal structure of CPE Name in parts:
# one of them is filled with identified components,
# the rest are empty
part_value = part_comp.get_value()
# Del double quotes of value
system = part_value[1:-1]
if system in CPEComponent.SYSTEM_VALUES:
self._create_cpe_parts(system, components)
else:
self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
components)
# Fills the empty parts of internal structure of CPE Name
for pk in CPE.CPE_PART_KEYS:
if pk not in self.keys():
self[pk] = []
def language_match(self, cpeset, cpel_dom=None):
"""
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
"""
# Root element tag
TAG_ROOT = '#document'
# A container for child platform definitions
TAG_PLATSPEC = 'cpe:platform-specification'
# Information about a platform definition
TAG_PLATFORM = 'cpe:platform'
TAG_LOGITEST = 'cpe:logical-test'
TAG_CPE = 'cpe:fact-ref'
# Tag attributes
ATT_NAME = 'name'
ATT_OP = 'operator'
ATT_NEGATE = 'negate'
# Attribute values
ATT_OP_AND = 'AND'
ATT_OP_OR = 'OR'
ATT_NEGATE_TRUE = 'TRUE'
if cpel_dom is None:
cpel_dom = self.document
# Identify the root element
if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_PLATSPEC:
return self.language_match(cpeset, node)
if node.nodeName == TAG_PLATFORM:
return self.language_match(cpeset, node)
# Identify a platform element
elif cpel_dom.nodeName == TAG_PLATFORM:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_LOGITEST:
return self.language_match(cpeset, node)
# Identify a CPE element
elif cpel_dom.nodeName == TAG_CPE:
cpename = cpel_dom.getAttribute(ATT_NAME)
c = CPE2_2(cpename)
# Try to match a CPE name with CPE set
return cpeset.name_match(c)
# Identify a logical operator element
elif cpel_dom.nodeName == TAG_LOGITEST:
count = 0
len = 0
answer = False
for node in cpel_dom.childNodes:
if node.nodeName.find("#") == 0:
continue
len = len + 1
if self.language_match(cpeset, node):
count = count + 1
operator = cpel_dom.getAttribute(ATT_OP).upper()
if operator == ATT_OP_AND:
if count == len:
answer = True
elif operator == ATT_OP_OR:
if count > 0:
answer = True
operator_not = cpel_dom.getAttribute(ATT_NEGATE)
if operator_not:
if operator_not.upper() == ATT_NEGATE_TRUE:
answer = not answer
return answer
else:
return False
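For reference, a minimal document of the shape this method walks; the tag and attribute names come from the constants above, while the XML content itself is illustrative.
PLATFORM_SPEC_EXAMPLE = """<?xml version="1.0" encoding="UTF-8"?>
<cpe:platform-specification xmlns:cpe="http://cpe.mitre.org/language/2.0">
  <cpe:platform id="example_platform">
    <cpe:logical-test operator="OR" negate="FALSE">
      <cpe:fact-ref name="cpe:/o:microsoft:windows_2000"/>
      <cpe:fact-ref name="cpe:/o:microsoft:windows_xp"/>
    </cpe:logical-test>
  </cpe:platform>
</cpe:platform-specification>"""
# With a CPELanguage object built from such a document, language_match returns
# True as soon as one fact-ref matches the CPESet (operator OR), only when all
# of them match (operator AND), and the result is inverted when negate="TRUE".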
def append(self, cpe):
"""
Adds a CPE Name to the set if it is not already present.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset2_2 import CPESet2_2
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/h:hp'
>>> c1 = CPE2_2(uri1)
>>> s = CPESet2_2()
>>> s.append(c1)
"""
if cpe.VERSION != CPE.VERSION_2_2:
errmsg = "CPE Name version {0} not valid, version 2.2 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe.cpe_str == k.cpe_str:
return None
self.K.append(cpe)
def set_value(self, comp_str, comp_att):
"""
Set the value of component.
:param string comp_str: value of component
:param string comp_att: attribute associated with comp_str
:returns: None
:exception: ValueError - incorrect value of component
"""
# Del double quotes of value
str = comp_str[1:-1]
self._standard_value = str
# Parse the value
super(CPEComponent2_3_WFN, self).set_value(str, comp_att)
def append(self, cpe):
"""
Adds a CPE Name to the set if it is not already present.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset1_1 import CPESet1_1
>>> from .cpe1_1 import CPE1_1
>>> uri1 = 'cpe://microsoft:windows:xp!vista'
>>> c1 = CPE1_1(uri1)
>>> s = CPESet1_1()
>>> s.append(c1)
"""
if cpe.VERSION != CPE.VERSION_1_1:
msg = "CPE Name version {0} not valid, version 1.1 expected".format(
cpe.VERSION)
raise ValueError(msg)
for k in self.K:
if cpe.cpe_str == k.cpe_str:
return None
self.K.append(cpe)
def name_match(self, cpe):
"""
Accepts a set of known instances of CPE Names and a candidate CPE Name,
and returns 'True' if the candidate can be shown to be
an instance based on the content of the known instances.
Otherwise, it returns 'False'.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE cpe: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
TEST: matching with identical CPE in set
>>> from .cpe1_1 import CPE1_1
>>> from .cpeset1_1 import CPESet1_1
>>> uri1 = 'cpe://microsoft:windows:xp!vista'
>>> uri2 = 'cpe:/cisco::3825;cisco:2:44/cisco:ios:12.3:enterprise'
>>> c1 = CPE1_1(uri1)
>>> c2 = CPE1_1(uri2)
>>> s = CPESet1_1()
>>> s.append(c1)
>>> s.append(c2)
>>> s.name_match(c2)
True
"""
# An empty set does not match any CPE
if len(self) == 0:
return False
# If the input CPE Name string is already in the set of CPE Name strings,
# stop searching: there is a match
for k in self.K:
if (k.cpe_str == cpe.cpe_str):
return True
# There is no CPE Name string in the set equal to the
# input CPE Name string
match = False
for p in CPE.CPE_PART_KEYS:
elems_cpe = cpe.get(p)
for ec in elems_cpe:
# Search for a matching element for this part of the input CPE
# Each element ec of input cpe[p] is compared with
# each element ek of k[p] in set K
for k in self.K:
elems_k = k.get(p)
for ek in elems_k:
# Matching
# Each component in element ec is compared with
# each component in element ek
for ck in CPEComponent.CPE_COMP_KEYS:
comp_cpe = ec.get(ck)
comp_k = ek.get(ck)
match = comp_k in comp_cpe
if not match:
# Search for the component in another element ek[p]
break
# Component analyzed
if match:
# Element matched
break
if match:
break
# Next element in part in "cpe"
if not match:
# This cpe part does not match any part in the set
return False
# Next part in input CPE Name
# All parts in input CPE Name matched
return True
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# CPE Name must not have whitespaces
if (self._str.find(" ") != -1):
msg = "Bad-formed CPE Name: it must not have whitespaces"
raise ValueError(msg)
# Partitioning of CPE Name
parts_match = CPE2_3_FS._parts_rxc.match(self._str)
# Validation of CPE Name parts
if (parts_match is None):
msg = "Bad-formed CPE Name: validation of parts failed"
raise ValueError(msg)
components = dict()
parts_match_dict = parts_match.groupdict()
for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck in parts_match_dict:
value = parts_match.group(ck)
if (value == CPEComponent2_3_FS.VALUE_ANY):
comp = CPEComponentAnyValue()
elif (value == CPEComponent2_3_FS.VALUE_NA):
comp = CPEComponentNotApplicable()
else:
try:
comp = CPEComponent2_3_FS(value, ck)
except ValueError:
errmsg = "Bad-formed CPE Name: not correct value: {0}".format(
value)
raise ValueError(errmsg)
else:
errmsg = "Component {0} should be specified".format(ck)
raise ValueError(errmsg)
components[ck] = comp
# #######################
# Storage of CPE Name #
# #######################
part_comp = components[CPEComponent.ATT_PART]
if isinstance(part_comp, CPEComponentLogical):
elements = []
elements.append(components)
self[CPE.KEY_UNDEFINED] = elements
else:
# Create internal structure of CPE Name in parts:
# one of them is filled with identified components,
# the rest are empty
system = parts_match.group(CPEComponent.ATT_PART)
if system in CPEComponent.SYSTEM_VALUES:
self._create_cpe_parts(system, components)
else:
self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
components)
# Fills the empty parts of internal structure of CPE Name
for pk in CPE.CPE_PART_KEYS:
if pk not in self.keys():
# Empty part
self[pk] = []
def get_attribute_values(self, att_name):
"""
Returns the values of attribute "att_name" of CPE Name.
By default there is only one element in each part.
:param string att_name: Attribute name to get
:returns: List of attribute values
:rtype: list
:exception: ValueError - invalid attribute name
"""
lc = []
if not CPEComponent.is_valid_attribute(att_name):
errmsg = "Invalid attribute name: {0}".format(att_name)
raise ValueError(errmsg)
for pk in CPE.CPE_PART_KEYS:
elements = self.get(pk)
for elem in elements:
comp = elem.get(att_name)
if isinstance(comp, CPEComponentAnyValue):
value = CPEComponent2_3_FS.VALUE_ANY
elif isinstance(comp, CPEComponentNotApplicable):
value = CPEComponent2_3_FS.VALUE_NA
else:
value = comp.get_value()
lc.append(value)
return lc
def _parse(self):
"""
Checks if CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# CPE Name must not have whitespaces
if (self._str.find(" ") != -1):
msg = "Bad-formed CPE Name: it must not have whitespaces"
raise ValueError(msg)
# Partitioning of CPE Name
parts_match = CPE2_2._parts_rxc.match(self._str)
# Validation of CPE Name parts
if (parts_match is None):
msg = "Bad-formed CPE Name: validation of parts failed"
raise ValueError(msg)
components = dict()
parts_match_dict = parts_match.groupdict()
for ck in CPEComponent.CPE_COMP_KEYS:
if ck in parts_match_dict:
value = parts_match.group(ck)
if (value == CPEComponent2_2.VALUE_UNDEFINED):
comp = CPEComponentUndefined()
elif (value == CPEComponent2_2.VALUE_EMPTY):
comp = CPEComponentEmpty()
else:
try:
comp = CPEComponent2_2(value, ck)
except ValueError:
errmsg = "Bad-formed CPE Name: not correct value: {0}".format(
value)
raise ValueError(errmsg)
else:
# Component does not exist in this version of CPE
comp = CPEComponentUndefined()
components[ck] = comp
# Adds the components of version 2.3 of CPE not defined in version 2.2
for ck2 in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck2 not in components.keys():
components[ck2] = CPEComponentUndefined()
# #######################
# Storage of CPE Name #
# #######################
# If part component is undefined, store it in the part without name
if components[CPEComponent.ATT_PART] == CPEComponentUndefined():
system = CPEComponent.VALUE_PART_UNDEFINED
else:
system = parts_match.group(CPEComponent.ATT_PART)
self._create_cpe_parts(system, components)
# Adds the undefined parts
for sys in CPEComponent.SYSTEM_VALUES:
if sys != system:
pk = CPE._system_and_parts[sys]
self[pk] = []
def as_wfn(self):
"""
Returns the CPE Name as WFN string of version 2.3.
Only shows the first seven components.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
"""
wfn = []
wfn.append(CPE2_3_WFN.CPE_PREFIX)
for ck in CPEComponent.CPE_COMP_KEYS:
lc = self._get_attribute_components(ck)
comp = lc[0]
if (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty)):
# Do not set the attribute
continue
else:
v = []
v.append(ck)
v.append("=")
# Get the value of WFN of component
v.append('"')
v.append(comp.as_wfn())
v.append('"')
# Append v to the WFN and add a separator
wfn.append("".join(v))
wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Del the last separator
wfn = wfn[:-1]
# Return the WFN string
wfn.append(CPE2_3_WFN.CPE_SUFFIX)
return "".join(wfn) | Returns the CPE Name as WFN string of version 2.3.
Only shows the first seven components.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version | entailment |
def _fact_ref_eval(cls, cpeset, wfn):
"""
Returns True if wfn is a non-proper superset of (a true superset of
or equal to) any of the names in cpeset, otherwise False.
:param CPESet cpeset: list of CPE bound Names.
:param CPE2_3_WFN wfn: WFN CPE Name.
:returns: True if wfn is a non-proper superset of any of the names in cpeset, otherwise False
:rtype: boolean
"""
for n in cpeset:
# Need to convert each n from bound form to WFN
if (CPESet2_3.cpe_superset(wfn, n)):
return True
return False
def _check_fact_ref_eval(cls, cpel_dom):
"""
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
"""
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
# Perform an OVAL check.
# First attribute is the URI of an OVAL definitions file.
# Second attribute is an OVAL definition ID.
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
# Perform an OCIL check.
# First attribute is the URI of an OCIL questionnaire file.
# Second attribute is OCIL questionnaire ID.
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
# Can add additional check systems here, with each returning a
# True, False, or Error value
return False
def _unbind(cls, boundname):
"""
Unbinds a bound form to a WFN.
:param string boundname: CPE name
:returns: WFN object associated with boundname.
:rtype: CPE2_3_WFN
"""
try:
fs = CPE2_3_FS(boundname)
except:
# CPE Name is not a formatted string
try:
uri = CPE2_3_URI(boundname)
except:
# CPE Name is neither a formatted string nor a URI, so assume WFN
return CPE2_3_WFN(boundname)
else:
return CPE2_3_WFN(uri.as_wfn())
else:
return CPE2_3_WFN(fs.as_wfn())
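A small sketch of the fallback order (formatted string first, then URI, then WFN); the bound names below are the canonical examples from the CPE 2.3 specification, and the behaviour described in the comments follows the branches above.
fs_name = "cpe:2.3:a:microsoft:internet_explorer:8.0.6001:beta:*:*:*:*:*:*"
wfn1 = CPELanguage2_3._unbind(fs_name)   # parsed as CPE2_3_FS, returned as CPE2_3_WFN
uri_name = "cpe:/a:microsoft:internet_explorer:8.0.6001:beta"
wfn2 = CPELanguage2_3._unbind(uri_name)  # FS parsing fails, so it is parsed as CPE2_3_URI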
def language_match(self, cpeset, cpel_dom=None):
"""
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
"""
# Root element tag
TAG_ROOT = '#document'
# A container for child platform definitions
TAG_PLATSPEC = 'cpe:platform-specification'
# Information about a platform definition
TAG_PLATFORM = 'cpe:platform'
TAG_LOGITEST = 'cpe:logical-test'
TAG_CPE = 'cpe:fact-ref'
TAG_CHECK_CPE = 'check-fact-ref'
# Tag attributes
ATT_NAME = 'name'
ATT_OP = 'operator'
ATT_NEGATE = 'negate'
# Attribute values
ATT_OP_AND = 'AND'
ATT_OP_OR = 'OR'
ATT_NEGATE_TRUE = 'TRUE'
# Constant associated with an error in language matching
ERROR = 2
if cpel_dom is None:
cpel_dom = self.document
# Identify the root element
if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_PLATSPEC:
return self.language_match(cpeset, node)
if node.nodeName == TAG_PLATFORM:
return self.language_match(cpeset, node)
# Identify a platform element
elif cpel_dom.nodeName == TAG_PLATFORM:
# Parse through E's elements and ignore all but logical-test
for node in cpel_dom.childNodes:
if node.nodeName == TAG_LOGITEST:
# Call the function again, but with logical-test
# as the root element
return self.language_match(cpeset, node)
# Identify a CPE element
elif cpel_dom.nodeName == TAG_CPE:
# fact-ref's name attribute is a bound name,
# so we unbind it to a WFN before passing it
cpename = cpel_dom.getAttribute(ATT_NAME)
wfn = CPELanguage2_3._unbind(cpename)
return CPELanguage2_3._fact_ref_eval(cpeset, wfn)
# Identify a check of CPE names (OVAL, OCIL...)
elif cpel_dom.nodeName == TAG_CHECK_CPE:
return CPELanguage2_3._check_fact_ref_eval(cpel_dom)
# Identify a logical operator element
elif cpel_dom.nodeName == TAG_LOGITEST:
count = 0
len = 0
answer = False
for node in cpel_dom.childNodes:
if node.nodeName.find("#") == 0:
continue
len = len + 1
result = self.language_match(cpeset, node)
if result == ERROR:
answer = ERROR
elif result:
count = count + 1
operator = cpel_dom.getAttribute(ATT_OP).upper()
if operator == ATT_OP_AND:
if count == len:
answer = True
elif operator == ATT_OP_OR:
if count > 0:
answer = True
operator_not = cpel_dom.getAttribute(ATT_NEGATE)
if operator_not:
if ((operator_not.upper() == ATT_NEGATE_TRUE) and
(answer != ERROR)):
answer = not answer
return answer
else:
return False
def name_match(self, cpe):
"""
Accepts a set of known instances of CPE Names and a candidate CPE Name,
and returns 'True' if the candidate can be shown to be
an instance based on the content of the known instances.
Otherwise, it returns 'False'.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE cpe: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
"""
# An empty set does not match any CPE
if len(self) == 0:
return False
# If the input CPE Name string is already in the set of CPE Name strings,
# stop searching: there is a match
for k in self.K:
if (k.cpe_str == cpe.cpe_str):
return True
# If "cpe" is an empty CPE Name any system matches
if len(cpe) == 0:
return True
# There is no CPE Name string in the set equal to the
# input CPE Name string
match = False
for p in CPE.CPE_PART_KEYS:
elems_cpe = cpe.get(p)
for ec in elems_cpe:
# Search for a matching element for this part of the input CPE
# Each element ec of input cpe[p] is compared with
# each element ek of k[p] in set K
for k in self.K:
if (len(k) >= len(cpe)):
elems_k = k.get(p)
for ek in elems_k:
# Matching
# Each component in element ec is compared with
# each component in element ek
for c in range(0, len(cpe)):
key = CPEComponent.ordered_comp_parts[c]
comp_cpe = ec.get(key)
comp_k = ek.get(key)
match = comp_k in comp_cpe
if not match:
# Search for the component in another element ek[p]
break
# Component analyzed
if match:
# Element matched
break
if match:
break
# Next element in part in "cpe"
if not match:
# This cpe part does not match any part in the set
return False
# Next part in input CPE Name
# All parts in input CPE Name matched
return True
def _decode(self):
"""
Convert the encoded value of component to standard value (WFN value).
"""
result = []
idx = 0
s = self._encoded_value
while (idx < len(s)):
# Get the idx'th character of s
c = s[idx]
if (c in CPEComponent2_2.NON_STANDARD_VALUES):
# Escape character
result.append("\\")
result.append(c)
else:
# Do nothing
result.append(c)
idx += 1
self._standard_value = "".join(result)
def is_component(w, ids):
Check if the set of ids forms a single connected component
Parameters
----------
w : spatial weights object
ids : list
identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component
"""
components = 0
marks = dict([(node, 0) for node in ids])
q = []
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True
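A small usage sketch on a rook-contiguity lattice (assumes libpysal is installed):
import libpysal as lps
w = lps.weights.lat2W(3, 3)     # 3x3 rook lattice, ids 0..8
is_component(w, [0, 1, 2])      # True: 0-1-2 form one connected row
is_component(w, [0, 2])         # False: 0 and 2 are not neighbors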
def check_contiguity(w, neighbors, leaver):
"""Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
"""
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids)
def confirm(question, assume_yes=True):
"""
Ask user a yes/no question and return their response as a boolean.
``question`` should be a simple, grammatically complete question such as
"Do you wish to continue?", and will have a string similar to ``" [Y/n] "``
appended automatically. This function will *not* append a question mark for
you.
By default, when the user presses Enter without typing anything, "yes" is
assumed. This can be changed by specifying ``assume_yes=False``.
.. note::
If the user does not supply input that is (case-insensitively) equal
to "y", "yes", "n" or "no", they will be re-prompted until they do.
:param str question: The question part of the prompt.
:param bool assume_yes:
Whether to assume the affirmative answer by default. Default value:
``True``.
:returns: A `bool`.
"""
# Set up suffix
if assume_yes:
suffix = "Y/n"
else:
suffix = "y/N"
# Loop till we get something we like
# TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit?
while True:
# TODO: ensure that this is Ctrl-C friendly, ISTR issues with
# raw_input/input on some Python versions blocking KeyboardInterrupt.
response = input("{0} [{1}] ".format(question, suffix))
response = response.lower().strip() # Normalize
# Default
if not response:
return assume_yes
# Yes
if response in ["y", "yes"]:
return True
# No
if response in ["n", "no"]:
return False
# Didn't get empty, yes or no, so complain and loop
err = "I didn't understand you. Please specify '(y)es' or '(n)o'."
print(err, file=sys.stderr)
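Typical use in an interactive script; pressing Enter accepts the default shown in the suffix:
if confirm("Overwrite the existing configuration?", assume_yes=False):
    print("Overwriting...")
else:
    print("Leaving the existing configuration in place.")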
def jsonapi(f):
""" Declare the view as a JSON API method
This converts view return value into a :cls:JsonResponse.
The following return types are supported:
- tuple: a tuple of (response, status, headers)
- any other object is converted to JSON
"""
@wraps(f)
def wrapper(*args, **kwargs):
rv = f(*args, **kwargs)
return make_json_response(rv)
return wrapper
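A hedged usage sketch: it assumes a Flask app object and that make_json_response (not shown here) turns the returned value into a JsonResponse.
from flask import Flask
app = Flask(__name__)

@app.route('/ping')
@jsonapi
def ping():
    # The tuple is interpreted as (response, status, headers)
    return {'ok': True}, 200, {'X-Example': '1'}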
def _unpack(c, tmp, package, version, git_url=None):
"""
Download + unpack given package into temp dir ``tmp``.
Return ``(real_version, source)`` where ``real_version`` is the "actual"
version downloaded (e.g. if a Git master was indicated, it will be the SHA
of master HEAD) and ``source`` is the source directory (relative to
unpacked source) to import into ``<project>/vendor``.
"""
real_version = version[:]
source = None
if git_url:
pass
# git clone into tempdir
# git checkout <version>
# set target to checkout
# if version does not look SHA-ish:
# in the checkout, obtain SHA from that branch
# set real_version to that value
else:
cwd = os.getcwd()
print("Moving into temp dir %s" % tmp)
os.chdir(tmp)
try:
# Nab from index. Skip wheels; we want to unpack an sdist.
flags = "--download=. --build=build --no-use-wheel"
cmd = "pip install %s %s==%s" % (flags, package, version)
c.run(cmd)
# Identify basename
# TODO: glob is bad here because pip install --download gets all
# dependencies too! ugh. Figure out best approach for that.
globs = []
globexpr = ""
for extension, opener in (
("zip", "unzip"),
("tgz", "tar xzvf"),
("tar.gz", "tar xzvf"),
):
globexpr = "*.{0}".format(extension)
globs = glob(globexpr)
if globs:
break
archive = os.path.basename(globs[0])
source, _, _ = archive.rpartition(".{0}".format(extension))
c.run("{0} {1}".format(opener, globexpr))
finally:
os.chdir(cwd)
return real_version, source
def vendorize(
c,
distribution,
version,
vendor_dir,
package=None,
git_url=None,
license=None,
):
"""
Vendorize Python package ``distribution`` at version/SHA ``version``.
Specify the vendor folder (e.g. ``<mypackage>/vendor``) as ``vendor_dir``.
For Crate/PyPI releases, ``package`` should be the name of the software
entry on those sites, and ``version`` should be a specific version number.
E.g. ``vendorize('lexicon', '0.1.2')``.
For Git releases, ``package`` should be the name of the package folder
within the checkout that needs to be vendorized and ``version`` should be a
Git identifier (branch, tag, SHA etc.) ``git_url`` must also be given,
something suitable for ``git clone <git_url>``.
For SVN releases: xxx.
For packages where the distribution name is not the same as the package
directory name, give ``package='name'``.
By default, no explicit license seeking is done -- we assume the license
info is in file headers or otherwise within the Python package vendorized.
This is not always true; specify ``license=/path/to/license/file`` to
trigger copying of a license into the vendored folder from the
checkout/download (relative to its root.)
"""
with tmpdir() as tmp:
package = package or distribution
target = os.path.join(vendor_dir, package)
# Unpack source
real_version, source = _unpack(c, tmp, distribution, version, git_url)
abs_source = os.path.join(tmp, source)
source_package = os.path.join(abs_source, package)
# Ensure source package exists
if not os.path.exists(source_package):
rel_package = os.path.join(source, package)
raise ValueError("Source package %s doesn't exist!" % rel_package)
# Nuke target if exists
if os.path.exists(target):
print("Removing pre-existing vendorized folder %s" % target)
rmtree(target)
# Perform the copy
print("Copying %s => %s" % (source_package, target))
copytree(source_package, target)
# Explicit license if needed
if license:
copy(os.path.join(abs_source, license), target)
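An illustrative invocation from another Invoke task; the distribution name, version, and paths here are hypothetical.
from invoke import Context
c = Context()
vendorize(c, distribution="lexicon", version="0.1.2",
          vendor_dir="mypackage/vendor", license="LICENSE")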
def make_sudouser(c):
"""
Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work.
"""
user = c.travis.sudo.user
password = c.travis.sudo.password
# --create-home because we need a place to put conf files, keys etc
# --groups travis because we must be in the Travis group to access the
# (created by Travis for us) virtualenv and other contents within
# /home/travis.
c.sudo("useradd {0} --create-home --groups travis".format(user))
# Password 'mypass' also arbitrary
c.run("echo {0}:{1} | sudo chpasswd".format(user, password))
# Set up new (glob-sourced) sudoers conf file for our user; easier than
# attempting to mutate or overwrite main sudoers conf.
conf = "/etc/sudoers.d/passworded"
cmd = "echo '{0} ALL=(ALL:ALL) PASSWD:ALL' > {1}".format(user, conf)
c.sudo('sh -c "{0}"'.format(cmd))
# Grant travis group write access to /home/travis as some integration tests
# may try writing conf files there. (TODO: shouldn't running the tests via
# 'sudo -H' mean that's no longer necessary?)
c.sudo("chmod g+w /home/travis") | Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work. | entailment |
def make_sshable(c):
"""
Set up passwordless SSH keypair & authorized_hosts access to localhost.
"""
user = c.travis.sudo.user
home = "~{0}".format(user)
# Run sudo() as the new sudo user; means less chown'ing, etc.
c.config.sudo.user = user
ssh_dir = "{0}/.ssh".format(home)
# TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
for cmd in ("mkdir {0}", "chmod 0700 {0}"):
c.sudo(cmd.format(ssh_dir, user))
c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir)) | Set up passwordless SSH keypair & authorized_hosts access to localhost. | entailment |
def sudo_run(c, command):
"""
Run some command under Travis-oriented sudo subshell/virtualenv.
:param str command:
Command string to run, e.g. ``inv coverage``, ``inv integration``, etc.
(Does not necessarily need to be an Invoke task, but...)
"""
# NOTE: explicit shell wrapper because sourcing the venv works best here;
# test tasks currently use their own subshell to call e.g. 'pytest --blah',
# so the tactic of '$VIRTUAL_ENV/bin/inv coverage' doesn't help - only that
# intermediate process knows about the venv!
cmd = "source $VIRTUAL_ENV/bin/activate && {}".format(command)
c.sudo('bash -c "{0}"'.format(cmd), user=c.travis.sudo.user)
def blacken(c):
"""
Install and execute ``black`` under appropriate circumstances, with diffs.
Installs and runs ``black`` under Python 3.6 (the first version it
supports). Since this sort of CI based task only needs to run once per
commit (formatting is not going to change between interpreters) this seems
like a worthwhile tradeoff.
This task uses black's ``--check`` and ``--diff`` flags, so not only will
the build fail if it does not conform, but contributors can see exactly
what they need to change. This is intended as a hedge against the fact that
not all contributors will be using Python 3.6+.
"""
if not PYTHON.startswith("3.6"):
msg = "Not blackening, since Python {} != Python 3.6".format(PYTHON)
print(msg, file=sys.stderr)
return
# Install, allowing config override of hardcoded default version
config = c.config.get("travis", {}).get("black", {})
version = config.get("version", "18.5b0")
c.run("pip install black=={}".format(version))
# Execute our blacken task, with diff + check, which will both error
# and emit diffs.
checks.blacken(c, check=True, diff=True)
def _calc(self, x, y):
"""
List based implementation of binary tree algorithm for concordance
measure after :cite:`Christensen2005`.
"""
x = np.array(x)
y = np.array(y)
n = len(y)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
vals = y[perm]
ExtraY = 0
ExtraX = 0
ACount = 0
BCount = 0
CCount = 0
DCount = 0
ECount = 0
DCount = 0
Concordant = 0
Discordant = 0
# ids for left child
li = [None] * (n - 1)
# ids for right child
ri = [None] * (n - 1)
# number of left descendants for a node
ld = np.zeros(n)
# number of values equal to value i
nequal = np.zeros(n)
for i in range(1, n):
NumBefore = 0
NumEqual = 1
root = 0
x0 = x[perm[i - 1]]
y0 = y[perm[i - 1]]
x1 = x[perm[i]]
y1 = y[perm[i]]
if x0 != x1:
DCount = 0
ECount = 1
else:
if y0 == y1:
ECount += 1
else:
DCount += ECount
ECount = 1
root = 0
inserting = True
while inserting:
current = y[perm[i]]
if current > y[perm[root]]:
# right branch
NumBefore += 1 + ld[root] + nequal[root]
if ri[root] is None:
# insert as right child to root
ri[root] = i
inserting = False
else:
root = ri[root]
elif current < y[perm[root]]:
# increment number of left descendants
ld[root] += 1
if li[root] is None:
# insert as left child to root
li[root] = i
inserting = False
else:
root = li[root]
elif current == y[perm[root]]:
NumBefore += ld[root]
NumEqual += nequal[root] + 1
nequal[root] += 1
inserting = False
ACount = NumBefore - DCount
BCount = NumEqual - ECount
CCount = i - (ACount + BCount + DCount + ECount - 1)
ExtraY += DCount
ExtraX += BCount
Concordant += ACount
Discordant += CCount
cd = Concordant + Discordant
num = Concordant - Discordant
tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY))
v = (4. * n + 10) / (9. * n * (n - 1))
z = tau / np.sqrt(v)
pval = erfc(np.abs(z) / 1.4142136) # follow scipy
return tau, pval, Concordant, Discordant, ExtraX, ExtraY
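The routine above counts concordant and discordant pairs with a binary tree and normalizes as Kendall's tau-b; a quick cross-check against scipy (assumed available), which also reports tau-b, on a small tie-free sample:
import numpy as np
from scipy.stats import kendalltau
x = np.array([1, 2, 3, 4, 5])
y = np.array([2, 1, 4, 3, 5])
tau_ref, pval_ref = kendalltau(x, y)   # tau_ref == 0.6 (8 concordant, 2 discordant pairs)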
def decorator(self, func):
""" Wrapper function to decorate a function """
if inspect.isfunction(func):
func._methodview = self
elif inspect.ismethod(func):
func.__func__._methodview = self
else:
raise AssertionError('Can only decorate function and methods, {} given'.format(func))
return func
def matches(self, verb, params):
""" Test if the method matches the provided set of arguments
:param verb: HTTP verb. Uppercase
:type verb: str
:param params: Existing route parameters
:type params: set
:returns: Whether this view matches
:rtype: bool
"""
return (self.ifset is None or self.ifset <= params) and \
(self.ifnset is None or self.ifnset.isdisjoint(params)) and \
(self.methods is None or verb in self.methods)
def _match_view(self, method, route_params):
""" Detect a view matching the query
:param method: HTTP method
:param route_params: Route parameters dict
:return: Method
:rtype: Callable|None
"""
method = method.upper()
route_params = frozenset(k for k, v in route_params.items() if v is not None)
for view_name, info in self.methods_map[method].items():
if info.matches(method, route_params):
return getattr(self, view_name)
else:
return None
def route_as_view(cls, app, name, rules, *class_args, **class_kwargs):
""" Register the view with an URL route
:param app: Flask application
:type app: flask.Flask|flask.Blueprint
:param name: Unique view name
:type name: str
:param rules: List of route rules to use
:type rules: Iterable[str|werkzeug.routing.Rule]
:param class_args: Args to pass to object constructor
:param class_kwargs: KwArgs to pass to object constructor
:return: View callable
:rtype: Callable
"""
view = super(MethodView, cls).as_view(name, *class_args, **class_kwargs)
for rule in rules:
app.add_url_rule(rule, view_func=view)
return view
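A hedged registration sketch; ``UserView`` is a hypothetical subclass of this MethodView built with the decorators above, and ``app`` is an existing Flask application.
# One view callable registered under several URL rules on the same app.
view = UserView.route_as_view(app, 'user', ('/user/', '/user/<int:user_id>'))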
def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row)
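A quick verification sketch: the returned distribution is a left eigenvector of P, so it should be invariant under the transition matrix and sum to one.
import numpy as np
P = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
pi = steady_state(P)
assert np.allclose(pi @ P, pi) and np.isclose(pi.sum(), 1.0)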
def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M)
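A sanity check tying the two functions together: for an ergodic chain the diagonal of M (the recurrence times) equals the reciprocal of the steady-state probabilities.
import numpy as np
P = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
assert np.allclose(np.diag(fmpt(P)), 1.0 / steady_state(P))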
def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M))
def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
"""
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
branch, release_type = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType(
"You don't seem to be on a release-related branch; "
"why are you trying to cut a release?"
)
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"""
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
"""
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(
c.packaging.changelog_file, load_extensions=True
)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
line_release, issues = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c)
state = Lexicon(
{
"branch": branch,
"release_type": release_type,
"changelog": changelog,
"latest_line_release": Version(line_release)
if line_release
else None,
"latest_overall_release": overall_release, # already a Version
"unreleased_issues": issues,
"current_version": Version(current_version),
"tags": tags,
}
)
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
latest_version, next_version = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING
#
# Return
#
return actions, state | Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``. | entailment |
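A minimal consumption sketch follows (a hypothetical helper, not part of the module); it only reuses names already defined above (``_converge``, ``Changelog``, ``VersionFile``, ``Tag``) and shows the attribute access that the returned Lexicons allow:
def _sketch_show_actions(c):
    # Illustration only: read the converged actions/state via attribute access.
    actions, state = _converge(c)
    if actions.changelog is Changelog.NEEDS_RELEASE:
        print("Changelog needs an entry for {0}".format(state.expected_version))
    if actions.version is VersionFile.NEEDS_BUMP:
        print("Version file should read {0}".format(state.expected_version))
    if actions.tag is Tag.NEEDS_CUTTING:
        print("Git tag {0} still needs cutting".format(state.expected_version))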
def status(c):
"""
Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons).
"""
# TODO: wants some holistic "you don't actually HAVE any changes to
# release" final status - i.e. all steps were at no-op status.
actions, state = _converge(c)
table = []
# NOTE: explicit 'sensible' sort (in rough order of how things are usually
# modified, and/or which depend on one another, e.g. tags are near the end)
for component in "changelog version tag".split():
table.append((component.capitalize(), actions[component].value))
print(tabulate(table))
return actions, state | Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons). | entailment |
def prepare(c):
"""
Edit changelog & version, git commit, and git tag, to set up for release.
"""
# Print dry-run/status/actions-to-take data & grab programmatic result
# TODO: maybe expand the enum-based stuff to have values that split up
# textual description, command string, etc. See the TODO up by their
# definition too, re: just making them non-enum classes period.
# TODO: otherwise, we at least want derived eg changelog/version/etc paths
# transmitted from status() into here...
actions, state = status(c)
# TODO: unless nothing-to-do in which case just say that & exit 0
if not confirm("Take the above actions?"):
sys.exit("Aborting.")
# TODO: factor out what it means to edit a file:
# - $EDITOR or explicit expansion of it in case no shell involved
# - pty=True and hide=False, because otherwise things can be bad
# - what else?
# Changelog! (pty for sane editing; e.g. vim won't behave without a pty)
if actions.changelog is Changelog.NEEDS_RELEASE:
# TODO: identify top of list and inject a ready-made line? Requires vim
# assumption...GREAT opportunity for class/method based tasks!
cmd = "$EDITOR {0.packaging.changelog_file}".format(c)
c.run(cmd, pty=True, hide=False)
# TODO: add a step for checking reqs.txt / setup.py vs virtualenv contents
# Version file!
if actions.version == VersionFile.NEEDS_BUMP:
# TODO: suggest the bump and/or overwrite the entire file? Assumes a
# specific file format. Could be bad for users which expose __version__
# but have other contents as well.
version_file = os.path.join(
_find_package(c),
c.packaging.get("version_module", "_version") + ".py",
)
cmd = "$EDITOR {0}".format(version_file)
c.run(cmd, pty=True, hide=False)
if actions.tag == Tag.NEEDS_CUTTING:
# Commit, if necessary, so the tag includes everything.
# NOTE: this strips out untracked files. effort.
cmd = 'git status --porcelain | egrep -v "^\\?"'
if c.run(cmd, hide=True, warn=True).ok:
c.run(
'git commit -am "Cut {0}"'.format(state.expected_version),
hide=False,
)
# Tag!
c.run("git tag {0}".format(state.expected_version), hide=False) | Edit changelog & version, git commit, and git tag, to set up for release. | entailment |
def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_ | Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch). | entailment |
def _versions_from_changelog(changelog):
"""
Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects.
"""
versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
return sorted(versions) | Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects. | entailment |
def _release_and_issues(changelog, branch, release_type):
"""
Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``.
"""
# Bugfix lines just use the branch to find issues
bucket = branch
# Features need a bit more logic
if release_type is Release.FEATURE:
bucket = _latest_feature_bucket(changelog)
# Issues is simply what's in the bucket
issues = changelog[bucket]
# Latest release is undefined for feature lines
release = None
# And requires scanning changelog, for bugfix lines
if release_type is Release.BUGFIX:
versions = [text_type(x) for x in _versions_from_changelog(changelog)]
release = [x for x in versions if x.startswith(bucket)][-1]
return release, issues | Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``. | entailment |
def _get_tags(c):
"""
Return sorted list of release-style tags as semver objects.
"""
tags_ = []
for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
try:
tags_.append(Version(tagstr))
# Ignore anything non-semver; most of the time they'll be non-release
# tags, and even if they are, we can't reason about anything
# non-semver anyways.
# TODO: perhaps log these to DEBUG
except ValueError:
pass
# Version objects sort semantically
return sorted(tags_) | Return sorted list of release-style tags as semver objects. | entailment |
def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
"""
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
sys.exit("Unable to find a local Python package!")
if len(packages) > 1:
sys.exit("Found multiple Python packages: {0!r}".format(packages))
return packages[0] | Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.) | entailment |
def build(c, sdist=True, wheel=False, directory=None, python=None, clean=True):
"""
Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters save ``directory`` honor config settings of the same name,
under the ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
True}})`` to force building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Two subdirectories will be created within this directory: one for
builds, and one for the dist archives.
When ``None`` or another false-y value, the current working directory
is used (and thus, local ``dist/`` and ``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to just ``python``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param bool clean:
Whether to clean out the local ``build/`` folder before building.
"""
# Config hooks
config = c.config.get("packaging", {})
# TODO: update defaults to be None, then flip the below so non-None runtime
# beats config.
sdist = config.get("sdist", sdist)
wheel = config.get("wheel", wheel)
python = config.get("python", python or "python") # buffalo buffalo
# Sanity
if not sdist and not wheel:
sys.exit(
"You said no sdists and no wheels..."
"what DO you want to build exactly?"
)
# Directory path/arg logic
if not directory:
directory = "" # os.path.join() doesn't like None
dist_dir = os.path.join(directory, "dist")
dist_arg = "-d {0}".format(dist_dir)
build_dir = os.path.join(directory, "build")
build_arg = "-b {0}".format(build_dir)
# Clean
if clean:
if os.path.exists(build_dir):
rmtree(build_dir)
# NOTE: not cleaning dist_dir, since this may be called >1 time within
# publish() trying to build up multiple wheels/etc.
# TODO: separate clean-build/clean-dist args? Meh
# Build
parts = [python, "setup.py"]
if sdist:
parts.extend(("sdist", dist_arg))
if wheel:
# Manually execute build in case we are using a custom build dir.
# Doesn't seem to be a way to tell bdist_wheel to do this directly.
parts.extend(("build", build_arg))
parts.extend(("bdist_wheel", dist_arg))
c.run(" ".join(parts)) | Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters save ``directory`` honor config settings of the same name,
under the ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
True}})`` to force building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Two subdirectories will be created within this directory: one for
builds, and one for the dist archives.
When ``None`` or another false-y value, the current working directory
is used (and thus, local ``dist/`` and ``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to just ``python``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param bool clean:
Whether to clean out the local ``build/`` folder before building. | entailment |
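A hypothetical invocation sketch (the directory path and interpreter name are made up for illustration, and it assumes no overriding ``packaging`` config values):
# build(c, sdist=False, wheel=True, directory="/tmp/mybuild", python="python3")
# would assemble and run roughly:
# python3 setup.py build -b /tmp/mybuild/build bdist_wheel -d /tmp/mybuild/dist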
def publish(
c,
sdist=True,
wheel=False,
index=None,
sign=False,
dry_run=False,
directory=None,
dual_wheels=False,
alt_python=None,
check_desc=False,
):
"""
Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``.
"""
# Don't hide by default, this step likes to be verbose most of the time.
c.config.run.hide = False
# Config hooks
config = c.config.get("packaging", {})
index = config.get("index", index)
sign = config.get("sign", sign)
dual_wheels = config.get("dual_wheels", dual_wheels)
check_desc = config.get("check_desc", check_desc)
# Initial sanity check, if needed. Will die usefully.
if check_desc:
c.run("python setup.py check -r -s")
# Build, into controlled temp dir (avoids attempting to re-upload old
# files)
with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
# Build default archives
build(c, sdist=sdist, wheel=wheel, directory=tmp)
# Build opposing interpreter archive, if necessary
if dual_wheels:
if not alt_python:
alt_python = "python2"
if sys.version_info[0] == 2:
alt_python = "python3"
build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
# Do the thing!
upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run) | Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``. | entailment |
def upload(c, directory, index=None, sign=False, dry_run=False):
"""
Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
"""
# Obtain list of archive filenames, then ensure any wheels come first
# so their improved metadata is what PyPI sees initially (otherwise, it
# only honors the sdist's lesser data).
archives = list(
itertools.chain.from_iterable(
glob(os.path.join(directory, "dist", "*.{0}".format(extension)))
for extension in ("whl", "tar.gz")
)
)
# Sign each archive in turn
# TODO: twine has a --sign option; but the below is still nice insofar
# as it lets us dry-run, generate for web upload when pypi's API is
# being cranky, etc. Figure out which is better.
if sign:
prompt = "Please enter GPG passphrase for signing: "
input_ = StringIO(getpass.getpass(prompt) + "\n")
gpg_bin = find_gpg(c)
if not gpg_bin:
sys.exit(
"You need to have one of `gpg`, `gpg1` or `gpg2` "
"installed to GPG-sign!"
)
for archive in archives:
cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(
gpg_bin
) # noqa
c.run(cmd.format(archive), in_stream=input_)
input_.seek(0) # So it can be replayed by subsequent iterations
# Upload
parts = ["twine", "upload"]
if index:
parts.append("--repository {0}".format(index))
paths = archives[:]
if sign:
paths.append(os.path.join(directory, "dist", "*.asc"))
parts.extend(paths)
cmd = " ".join(parts)
if dry_run:
print("Would publish via: {0}".format(cmd))
print("Files that would be published:")
c.run("ls -l {0}".format(" ".join(paths)))
else:
c.run(cmd) | Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts. | entailment |
def tmpdir(skip_cleanup=False, explicit=None):
"""
Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.)
"""
tmp = explicit if explicit is not None else mkdtemp()
try:
yield tmp
finally:
if not skip_cleanup:
rmtree(tmp) | Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.) | entailment |
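A minimal usage sketch (hypothetical, assuming ``tmpdir`` is decorated with ``contextlib.contextmanager``, as its yield-based body implies, and that an Invoke context ``c`` is in hand):
def _sketch_build_in_tmpdir(c):
    # Illustration only: build into a throwaway directory, cleaned up afterwards.
    with tmpdir() as tmp:
        build(c, sdist=True, wheel=False, directory=tmp)
    # Dry-run style: keep a fixed directory around for inspection.
    with tmpdir(skip_cleanup=True, explicit="dist-staging") as tmp:
        build(c, sdist=True, wheel=False, directory=tmp)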
def permute(self, permutations=99, alternative='two.sided'):
"""
Generate random spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two.sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality) for a given bin in the circular histogram,
# so we only need one of them.
# We report two-sided p-values for each bin as the default,
# since a priori there could be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative)) | Generate random spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two.sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval. | entailment |
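As a worked illustration of the two-sided rule implemented above: with ``permutations=99`` and, say, 4 permuted counts at least as large as the observed count for a bin, P = (4 + 1) / (99 + 1) = 0.05; since P < 0.5, the reported p-value for that bin is 2 * 0.05 = 0.10.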
def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax | Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | entailment |
def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim) | Plot vectors of positional transition of LISA values starting
from the same origin. | entailment |
def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax | Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | entailment |
def _clean(c):
"""
Nuke docs build target directory so next build is clean.
"""
if isdir(c.sphinx.target):
rmtree(c.sphinx.target) | Nuke docs build target directory so next build is clean. | entailment |
def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) | Open build target's index.html in a browser (using 'open'). | entailment |
def build(
c,
clean=False,
browse=False,
nitpick=False,
opts=None,
source=None,
target=None,
):
"""
Build the project's Sphinx docs.
"""
if clean:
_clean(c)
if opts is None:
opts = ""
if nitpick:
opts += " -n -W -T"
cmd = "sphinx-build{0} {1} {2}".format(
(" " + opts) if opts else "",
source or c.sphinx.source,
target or c.sphinx.target,
)
c.run(cmd, pty=True)
if browse:
_browse(c) | Build the project's Sphinx docs. | entailment |
def tree(c):
"""
Display documentation contents with the 'tree' program.
"""
ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates"
c.run('tree -Ca -I "{0}" {1}'.format(ignore, c.sphinx.source)) | Display documentation contents with the 'tree' program. | entailment |
def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# TODO: This is super lolzy but we haven't actually tackled nontrivial
# in-Python task calling yet, so we do this to get a copy of 'our' context,
# which has been updated with the per-collection config data of the
# docs/www subcollections.
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
# Must build both normally first to ensure good intersphinx inventory files
# exist =/ circular dependencies ahoy! Do it quietly to avoid polluting
# output; only super-serious errors will bubble up.
# TODO: wants a 'temporarily tweak context settings' contextmanager
# TODO: also add a spinner, because this is confusing every time I run it
# when the docs aren't already prebuilt
docs_c["run"].hide = True
www_c["run"].hide = True
docs["build"](docs_c)
www["build"](www_c)
docs_c["run"].hide = False
www_c["run"].hide = False
# Run the actual builds, with nitpick=True (nitpicks + tracebacks)
docs["build"](docs_c, nitpick=True)
www["build"](www_c, nitpick=True) | Build both doc sites w/ maxed nitpicking. | entailment |
def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) | Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates. | entailment |
def shuffle_matrix(X, ids):
"""
Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permuted.
ids : array
range (k, ).
Returns
-------
X : array
(k, k) with rows and columns randomly shuffled.
Examples
--------
>>> import numpy as np
>>> from giddy.util import shuffle_matrix
>>> X=np.arange(16)
>>> X.shape=(4,4)
>>> np.random.seed(10)
>>> shuffle_matrix(X,list(range(4)))
array([[10, 8, 11, 9],
[ 2, 0, 3, 1],
[14, 12, 15, 13],
[ 6, 4, 7, 5]])
"""
np.random.shuffle(ids)
return X[ids, :][:, ids] | Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permuted.
ids : array
range (k, ).
Returns
-------
X : array
(k, k) with rows and columns randomly shuffled.
Examples
--------
>>> import numpy as np
>>> from giddy.util import shuffle_matrix
>>> X=np.arange(16)
>>> X.shape=(4,4)
>>> np.random.seed(10)
>>> shuffle_matrix(X,list(range(4)))
array([[10, 8, 11, 9],
[ 2, 0, 3, 1],
[14, 12, 15, 13],
[ 6, 4, 7, 5]]) | entailment |
def get_lower(matrix):
"""
Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector.
Parameters
----------
matrix : array
(n, n) numpy array, a distance matrix.
Returns
-------
lowvec : array
numpy array, the lower half of the distance matrix flattened into
a vector of length n*(n-1)/2.
Examples
--------
>>> import numpy as np
>>> from giddy.util import get_lower
>>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
>>> lower = get_lower(test)
>>> lower
array([[1],
[2],
[1],
[4],
[2],
[1]])
"""
n = matrix.shape[0]
lowerlist = []
for i in range(n):
for j in range(n):
if i > j:
lowerlist.append(matrix[i, j])
veclen = n * (n - 1) / 2
lowvec = np.reshape(np.array(lowerlist), (int(veclen), 1))
return lowvec | Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector.
Parameters
----------
matrix : array
(n, n) numpy array, a distance matrix.
Returns
-------
lowvec : array
numpy array, the lower half of the distance matrix flattened into
a vector of length n*(n-1)/2.
Examples
--------
>>> import numpy as np
>>> from giddy.util import get_lower
>>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
>>> lower = get_lower(test)
>>> lower
array([[1],
[2],
[1],
[4],
[2],
[1]]) | entailment |
def blacken(
c, line_length=79, folders=None, check=False, diff=False, find_opts=None
):
r"""
Run black on the current source tree (all ``.py`` files).
.. warning::
``black`` only runs on Python 3.6 or above. (However, it can be
executed against Python 2 compatible code.)
:param int line_length:
Line length argument. Default: ``79``.
:param list folders:
List of folders (or, on the CLI, an argument that can be given N times)
to search within for ``.py`` files. Default: ``["."]``. Honors the
``blacken.folders`` config option.
:param bool check:
Whether to run ``black --check``. Default: ``False``.
:param bool diff:
Whether to run ``black --diff``. Default: ``False``.
:param str find_opts:
Extra option string appended to the end of the internal ``find``
command. For example, skip a vendor directory with ``"-and -not -path
./vendor\*"``, add ``-mtime N``, or etc. Honors the
``blacken.find_opts`` config option.
.. versionadded:: 1.2
.. versionchanged:: 1.4
Added the ``find_opts`` argument.
"""
config = c.config.get("blacken", {})
default_folders = ["."]
configured_folders = config.get("folders", default_folders)
folders = folders or configured_folders
default_find_opts = ""
configured_find_opts = config.get("find_opts", default_find_opts)
find_opts = find_opts or configured_find_opts
black_command_line = "black -l {}".format(line_length)
if check:
black_command_line = "{} --check".format(black_command_line)
if diff:
black_command_line = "{} --diff".format(black_command_line)
if find_opts:
find_opts = " {}".format(find_opts)
else:
find_opts = ""
cmd = "find {} -name '*.py'{} | xargs {}".format(
" ".join(folders), find_opts, black_command_line
)
c.run(cmd, pty=True) | r"""
Run black on the current source tree (all ``.py`` files).
.. warning::
``black`` only runs on Python 3.6 or above. (However, it can be
executed against Python 2 compatible code.)
:param int line_length:
Line length argument. Default: ``79``.
:param list folders:
List of folders (or, on the CLI, an argument that can be given N times)
to search within for ``.py`` files. Default: ``["."]``. Honors the
``blacken.folders`` config option.
:param bool check:
Whether to run ``black --check``. Default: ``False``.
:param bool diff:
Whether to run ``black --diff``. Default: ``False``.
:param str find_opts:
Extra option string appended to the end of the internal ``find``
command. For example, skip a vendor directory with ``"-and -not -path
./vendor\*"``, add ``-mtime N``, or etc. Honors the
``blacken.find_opts`` config option.
.. versionadded:: 1.2
.. versionchanged:: 1.4
Added the ``find_opts`` argument. | entailment |
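As a worked example of the command assembled above (values are illustrative): with ``folders=["src"]``, ``check=True`` and ``find_opts="-and -not -path ./vendor\*"``, the task runs roughly:
# find src -name '*.py' -and -not -path ./vendor\* | xargs black -l 79 --check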
def normalize_response_value(rv):
""" Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None)
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
return rv, status, headers | Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None) | entailment |
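Illustrative doctest-style calls (not in the original module) showing the normalization of the three accepted shapes:
>>> normalize_response_value({'ok': True})
({'ok': True}, None, None)
>>> normalize_response_value(({'ok': True}, 201))
({'ok': True}, 201, None)
>>> normalize_response_value(({'ok': True}, 201, {'X-Id': '1'}))
({'ok': True}, 201, {'X-Id': '1'})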
def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
rv, status, headers = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv
# Data
return JsonResponse(rv, status, headers) | Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse | entailment |
def markov_mobility(p, measure="P", ini=None):
"""
Markov-based mobility index.
Parameters
----------
p : array
(k, k), Markov transition probability matrix.
measure : string
If measure= "P",
:math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`;
if measure = "D",
:math:`M_{D} = 1 - |\det(P)|`,
where :math:`\det(P)` is the determinant of :math:`P`;
if measure = "L2",
:math:`M_{L2} = 1 - |\lambda_2|`,
where :math:`\lambda_2` is the second largest eigenvalue of
:math:`P`;
if measure = "B1",
:math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`,
where :math:`\pi` is the initial income distribution;
if measure == "B2",
:math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{
j=1}^m \pi_i P_{ij} |i-j|`,
where :math:`\pi` is the initial income distribution.
ini : array
(k,), initial distribution. Need to be specified if
measure = "B1" or "B2". If not,
the initial distribution would be treated as a uniform
distribution.
Returns
-------
mobi : float
Mobility value.
Notes
-----
The mobility indices are based on :cite:`Formby:2004fk`.
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> import mapclassify as mc
>>> from giddy.markov import Markov
>>> from giddy.mobility import markov_mobility
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
(1) Estimate Shorrock1 mobility index:
>>> mobi_1 = markov_mobility(m.p, measure="P")
>>> print("{:.5f}".format(mobi_1))
0.19759
(2) Estimate Shorrock2 mobility index:
>>> mobi_2 = markov_mobility(m.p, measure="D")
>>> print("{:.5f}".format(mobi_2))
0.60685
(3) Estimate Sommers and Conlisk mobility index:
>>> mobi_3 = markov_mobility(m.p, measure="L2")
>>> print("{:.5f}".format(mobi_3))
0.03978
(4) Estimate Bartholomew1 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini)
>>> print("{:.5f}".format(mobi_4))
0.22777
(5) Estimate Bartholomew2 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini)
>>> print("{:.5f}".format(mobi_5))
0.04637
"""
p = np.array(p)
k = p.shape[1]
if measure == "P":
t = np.trace(p)
mobi = (k - t) / (k - 1)
elif measure == "D":
mobi = 1 - abs(la.det(p))
elif measure == "L2":
w, v = la.eig(p)
eigen_value_abs = abs(w)
mobi = 1 - np.sort(eigen_value_abs)[-2]
elif measure == "B1":
if ini is None:
ini = 1.0 / k * np.ones(k)
mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1)
elif measure == "B2":
mobi = 0
if ini is None:
ini = 1.0 / k * np.ones(k)
for i in range(k):
for j in range(k):
mobi = mobi + ini[i] * p[i, j] * abs(i - j)
mobi = mobi / (k - 1)
return mobi | Markov-based mobility index.
Parameters
----------
p : array
(k, k), Markov transition probability matrix.
measure : string
If measure= "P",
:math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`;
if measure = "D",
:math:`M_{D} = 1 - |\det(P)|`,
where :math:`\det(P)` is the determinant of :math:`P`;
if measure = "L2",
:math:`M_{L2} = 1 - |\lambda_2|`,
where :math:`\lambda_2` is the second largest eigenvalue of
:math:`P`;
if measure = "B1",
:math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`,
where :math:`\pi` is the initial income distribution;
if measure == "B2",
:math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{
j=1}^m \pi_i P_{ij} |i-j|`,
where :math:`\pi` is the initial income distribution.
ini : array
(k,), initial distribution. Need to be specified if
measure = "B1" or "B2". If not,
the initial distribution would be treated as a uniform
distribution.
Returns
-------
mobi : float
Mobility value.
Notes
-----
The mobility indices are based on :cite:`Formby:2004fk`.
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> import mapclassify as mc
>>> from giddy.markov import Markov
>>> from giddy.mobility import markov_mobility
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
(1) Estimate Shorrock1 mobility index:
>>> mobi_1 = markov_mobility(m.p, measure="P")
>>> print("{:.5f}".format(mobi_1))
0.19759
(2) Estimate Shorrock2 mobility index:
>>> mobi_2 = markov_mobility(m.p, measure="D")
>>> print("{:.5f}".format(mobi_2))
0.60685
(3) Estimate Sommers and Conlisk mobility index:
>>> mobi_3 = markov_mobility(m.p, measure="L2")
>>> print("{:.5f}".format(mobi_3))
0.03978
(4) Estimate Bartholomew1 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini)
>>> print("{:.5f}".format(mobi_4))
0.22777
(5) Estimate Bartholomew2 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini)
>>> print("{:.5f}".format(mobi_5))
0.04637 | entailment |
def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
"""
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * np.matrix(p)
num = T1 - E
num = np.multiply(num, num)
E = E + (E == 0)
chi2 = num / E
chi2 = chi2.sum()
pvalue = 1 - stats.chi2.cdf(chi2, dof)
return chi2, pvalue, dof | chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions. | entailment |
def kullback(F):
"""
Kullback information based test of Markov Homogeneity.
Parameters
----------
F : array
(s, r, r), values are transitions (not probabilities) for
s strata, r initial states, r terminal states.
Returns
-------
Results : dictionary
(key - value)
Conditional homogeneity - (float) test statistic for homogeneity
of transition probabilities across strata.
Conditional homogeneity pvalue - (float) p-value for test
statistic.
Conditional homogeneity dof - (int) degrees of freedom =
r(s-1)(r-1).
Notes
-----
Based on :cite:`Kullback1962`.
Example below is taken from Table 9.2.
Examples
--------
>>> import numpy as np
>>> from giddy.markov import kullback
>>> s1 = np.array([
... [ 22, 11, 24, 2, 2, 7],
... [ 5, 23, 15, 3, 42, 6],
... [ 4, 21, 190, 25, 20, 34],
... [0, 2, 14, 56, 14, 28],
... [32, 15, 20, 10, 56, 14],
... [5, 22, 31, 18, 13, 134]
... ])
>>> s2 = np.array([
... [3, 6, 9, 3, 0, 8],
... [1, 9, 3, 12, 27, 5],
... [2, 9, 208, 32, 5, 18],
... [0, 14, 32, 108, 40, 40],
... [22, 14, 9, 26, 224, 14],
... [1, 5, 13, 53, 13, 116]
... ])
>>>
>>> F = np.array([s1, s2])
>>> res = kullback(F)
>>> "%8.3f"%res['Conditional homogeneity']
' 160.961'
>>> "%d"%res['Conditional homogeneity dof']
'30'
>>> "%3.1f"%res['Conditional homogeneity pvalue']
'0.0'
"""
F1 = F == 0
F1 = F + F1
FLF = F * np.log(F1)
T1 = 2 * FLF.sum()
FdJK = F.sum(axis=0)
FdJK1 = FdJK + (FdJK == 0)
FdJKLFdJK = FdJK * np.log(FdJK1)
T2 = 2 * FdJKLFdJK.sum()
FdJd = F.sum(axis=0).sum(axis=1)
FdJd1 = FdJd + (FdJd == 0)
T3 = 2 * (FdJd * np.log(FdJd1)).sum()
FIJd = F[:, :].sum(axis=1)
FIJd1 = FIJd + (FIJd == 0)
T4 = 2 * (FIJd * np.log(FIJd1)).sum()
T6 = F.sum()
T6 = 2 * T6 * np.log(T6)
s, r, r1 = F.shape
chom = T1 - T4 - T2 + T3
cdof = r * (s - 1) * (r - 1)
results = {}
results['Conditional homogeneity'] = chom
results['Conditional homogeneity dof'] = cdof
results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
return results | Kullback information based test of Markov Homogeneity.
Parameters
----------
F : array
(s, r, r), values are transitions (not probabilities) for
s strata, r initial states, r terminal states.
Returns
-------
Results : dictionary
(key - value)
Conditional homogeneity - (float) test statistic for homogeneity
of transition probabilities across strata.
Conditional homogeneity pvalue - (float) p-value for test
statistic.
Conditional homogeneity dof - (int) degrees of freedom =
r(s-1)(r-1).
Notes
-----
Based on :cite:`Kullback1962`.
Example below is taken from Table 9.2 .
Examples
--------
>>> import numpy as np
>>> from giddy.markov import kullback
>>> s1 = np.array([
... [ 22, 11, 24, 2, 2, 7],
... [ 5, 23, 15, 3, 42, 6],
... [ 4, 21, 190, 25, 20, 34],
... [0, 2, 14, 56, 14, 28],
... [32, 15, 20, 10, 56, 14],
... [5, 22, 31, 18, 13, 134]
... ])
>>> s2 = np.array([
... [3, 6, 9, 3, 0, 8],
... [1, 9, 3, 12, 27, 5],
... [2, 9, 208, 32, 5, 18],
... [0, 14, 32, 108, 40, 40],
... [22, 14, 9, 26, 224, 14],
... [1, 5, 13, 53, 13, 116]
... ])
>>>
>>> F = np.array([s1, s2])
>>> res = kullback(F)
>>> "%8.3f"%res['Conditional homogeneity']
' 160.961'
>>> "%d"%res['Conditional homogeneity dof']
'30'
>>> "%3.1f"%res['Conditional homogeneity pvalue']
'0.0' | entailment |
def prais(pmat):
"""
Prais conditional mobility measure.
Parameters
----------
pmat : matrix
(k, k), Markov probability transition matrix.
Returns
-------
pr : matrix
(1, k), conditional mobility measures for each of the k classes.
Notes
-----
Prais' conditional mobility measure for a class is defined as:
.. math::
pr_i = 1 - p_{i,i}
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> from giddy.markov import Markov,prais
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> prais(m.p)
array([0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074])
"""
pmat = np.array(pmat)
pr = 1 - np.diag(pmat)
return pr | Prais conditional mobility measure.
Parameters
----------
pmat : matrix
(k, k), Markov probability transition matrix.
Returns
-------
pr : matrix
(1, k), conditional mobility measures for each of the k classes.
Notes
-----
Prais' conditional mobility measure for a class is defined as:
.. math::
pr_i = 1 - p_{i,i}
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> from giddy.markov import Markov,prais
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> prais(m.p)
array([0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074]) | entailment |
def homogeneity(transition_matrices, regime_names=[], class_names=[],
title="Markov Homogeneity Test"):
"""
Test for homogeneity of Markov transition probabilities across regimes.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in the
transition matrix and c is the number of columns in
the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
name of test.
Returns
-------
: implicit
an instance of Homogeneity_Results.
"""
return Homogeneity_Results(transition_matrices, regime_names=regime_names,
class_names=class_names, title=title) | Test for homogeneity of Markov transition probabilities across regimes.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in the
transition matrix and c is the number of columns in
the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
name of test.
Returns
-------
: implicit
an instance of Homogeneity_Results. | entailment |
def sojourn_time(p):
"""
Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
        chain spends in each state before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.])
"""
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) | Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
        chain spends in each state before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.]) | entailment |
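A hand-checkable numpy sketch of the same formula on a hypothetical 2-state chain; an absorbing state (p_ii = 1) would make the denominator zero, which is what the warning above refers to:

import numpy as np

p = np.array([[0.9, 0.1],
              [0.2, 0.8]])
print(1 / (1 - p.diagonal()))   # -> [10.  5.]: 10 steps expected in state 0, 5 in state 1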
def _calc(self, y, w):
'''Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques.
'''
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P | Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques. | entailment |
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title) | A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`. | entailment |
def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k | Helper method for classifying continuous data. | entailment |
def spillover(self, quadrant=1, neighbors_on=False):
"""
Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
            two key-value pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
            'spill_over' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
        >>> import numpy as np
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8
"""
n, k = self.q.shape
if self.permutations:
spill_over = np.zeros((n, k - 1))
components = np.zeros((n, k))
i2id = {} # handle string keys
for key in list(self.w.neighbors.keys()):
idx = self.w.id2i[key]
i2id[idx] = key
sig_lisas = (self.q == quadrant) \
* (self.p_values <= self.significance_level)
sig_ids = [np.nonzero(
sig_lisas[:, i])[0].tolist() for i in range(k)]
neighbors = self.w.neighbors
for t in range(k - 1):
s1 = sig_ids[t]
s2 = sig_ids[t + 1]
g1 = Graph(undirected=True)
for i in s1:
for neighbor in neighbors[i2id[i]]:
g1.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g1.add_edge(neighbor, nn, 1.0)
components1 = g1.connected_components(op=gt)
components1 = [list(c.nodes) for c in components1]
g2 = Graph(undirected=True)
for i in s2:
for neighbor in neighbors[i2id[i]]:
g2.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g2.add_edge(neighbor, nn, 1.0)
components2 = g2.connected_components(op=gt)
components2 = [list(c.nodes) for c in components2]
c2 = []
c1 = []
for c in components2:
c2.extend(c)
for c in components1:
c1.extend(c)
new_ids = [j for j in c2 if j not in c1]
spill_ids = []
for j in new_ids:
# find j's component in period 2
cj = [c for c in components2 if j in c][0]
# for members of j's component in period 2, check if they
# belonged to any components in period 1
for i in cj:
if i in c1:
spill_ids.append(j)
break
for spill_id in spill_ids:
id = self.w.id2i[spill_id]
spill_over[id, t] = 1
for c, component in enumerate(components1):
for i in component:
ii = self.w.id2i[i]
components[ii, t] = c + 1
results = {}
results['components'] = components
results['spill_over'] = spill_over
return results
else:
return None | Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
            two key-value pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
            'spill_over' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
        >>> import numpy as np
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8 | entailment |
def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) | Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set | entailment |
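A hypothetical usage sketch; the User/Article models below are invented for illustration and assume a standard SQLAlchemy declarative setup:

from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    articles = relationship('Article', back_populates='author')

class Article(Base):
    __tablename__ = 'articles'
    id = Column(Integer, primary_key=True)
    author_id = Column(Integer, ForeignKey('users.id'))
    author = relationship('User', back_populates='articles')

print(get_entity_propnames(User()))   # -> {'id', 'name', 'articles'} (set order varies)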
def get_entity_loaded_propnames(entity):
""" Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = inspect(entity)
keynames = get_entity_propnames(ins)
# If the entity is not transient -- exclude unloaded keys
# Transient entities won't load these anyway, so it's safe to include all columns and get defaults
if not ins.transient:
keynames -= ins.unloaded
# If the entity is expired -- reload expired attributes as well
# Expired attributes are usually unloaded as well!
if ins.expired:
keynames |= ins.expired_attributes
# Finish
return keynames | Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set | entailment |
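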
def next_minor(self):
"""
Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1.
"""
clone = self.clone()
clone.minor += 1
clone.patch = 0
return clone | Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1. | entailment |
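A toy sketch of the semantics only; the real Version class (with clone() and major/minor/patch attributes) is assumed from the surrounding module:

class ToyVersion:
    def __init__(self, major, minor, patch):
        self.major, self.minor, self.patch = major, minor, patch

    def clone(self):
        return ToyVersion(self.major, self.minor, self.patch)

    def next_minor(self):
        clone = self.clone()
        clone.minor += 1
        clone.patch = 0
        return clone

v = ToyVersion(1, 2, 1).next_minor()
print(v.major, v.minor, v.patch)   # -> 1 3 0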
def _check_encoding(name, encoding_to_check, alternative_encoding, source=None):
"""
Check that ``encoding`` is a valid Python encoding
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError if ``encoding`` is not a valid Python encoding
"""
assert name is not None
if encoding_to_check not in (alternative_encoding, 'chardet', None):
try:
''.encode(encoding_to_check)
except LookupError:
raise pygount.common.OptionError(
'{0} is "{1}" but must be "{2}" or a known Python encoding'.format(
name, encoding_to_check, alternative_encoding),
source) | Check that ``encoding`` is a valid Python encoding
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError if ``encoding`` is not a valid Python encoding | entailment |
def lines(text):
"""
Generator function to yield lines (delimited with ``'\n'``) stored in
``text``. This is useful when a regular expression should only match on a
per line basis in a memory efficient way.
"""
assert text is not None
assert '\r' not in text
previous_newline_index = 0
newline_index = text.find('\n')
while newline_index != -1:
yield text[previous_newline_index:newline_index]
previous_newline_index = newline_index + 1
newline_index = text.find('\n', previous_newline_index)
last_line = text[previous_newline_index:]
if last_line != '':
yield last_line | Generator function to yield lines (delimited with ``'\n'``) stored in
``text``. This is useful when a regular expression should only match on a
per line basis in a memory efficient way. | entailment |
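A quick check of the generator above; note that a trailing newline does not produce an extra empty item:

print(list(lines('alpha\nbeta\n')))   # -> ['alpha', 'beta']
print(list(lines('alpha\nbeta')))     # -> ['alpha', 'beta']
print(list(lines('')))                # -> []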
def matching_number_line_and_regex(source_lines, generated_regexes, max_line_count=15):
"""
The first line and its number (starting with 0) in the source code that
indicated that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``.
"""
initial_numbers_and_lines = enumerate(itertools.islice(source_lines, max_line_count))
matching_number_line_and_regexps = (
(number, line, matching_regex)
for number, line in initial_numbers_and_lines
for matching_regex in generated_regexes
if matching_regex.match(line)
)
possible_first_matching_number_line_and_regexp = list(
itertools.islice(matching_number_line_and_regexps, 1))
result = (possible_first_matching_number_line_and_regexp + [None])[0]
return result | The first line and its number (starting with 0) in the source code that
indicated that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``. | entailment |
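A usage sketch with a hypothetical "generated code" pattern; the regex below is an assumption, not pygount's default pattern list:

import re

generated_regexes = [re.compile(r'.*generated by.*', re.IGNORECASE)]
source_lines = iter([
    '#!/usr/bin/env python',
    '# Generated by protoc, do not edit.',
    'import os',
])
print(matching_number_line_and_regex(source_lines, generated_regexes))
# -> (1, '# Generated by protoc, do not edit.', <compiled regex>)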
def _pythonized_comments(tokens):
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ':':
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text | Similar to tokens but converts strings after a colon (:) to comments. | entailment |
def encoding_for(source_path, encoding='automatic', fallback_encoding=None):
"""
The encoding used by the text file stored in ``source_path``.
The algorithm used is:
    * If ``encoding`` is ``'automatic'``, attempt the following:
      1. Check BOM for UTF-8, UTF-16 and UTF-32.
      2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
      3. Read the file using UTF-8.
      4. If all this fails, assume the ``fallback_encoding``.
    * If ``encoding`` is ``'chardet'`` use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value.
"""
assert encoding is not None
if encoding == 'automatic':
with open(source_path, 'rb') as source_file:
heading = source_file.read(128)
result = None
if len(heading) == 0:
# File is empty, assume a dummy encoding.
result = 'utf-8'
if result is None:
# Check for known BOMs.
for bom, encoding in _BOM_TO_ENCODING_MAP.items():
if heading[:len(bom)] == bom:
result = encoding
break
if result is None:
# Look for common headings that indicate the encoding.
ascii_heading = heading.decode('ascii', errors='replace')
ascii_heading = ascii_heading.replace('\r\n', '\n')
ascii_heading = ascii_heading.replace('\r', '\n')
ascii_heading = '\n'.join(ascii_heading.split('\n')[:2]) + '\n'
coding_magic_match = _CODING_MAGIC_REGEX.match(ascii_heading)
if coding_magic_match is not None:
result = coding_magic_match.group('encoding')
else:
first_line = ascii_heading.split('\n')[0]
xml_prolog_match = _XML_PROLOG_REGEX.match(first_line)
if xml_prolog_match is not None:
result = xml_prolog_match.group('encoding')
elif encoding == 'chardet':
assert _detector is not None, \
'without chardet installed, encoding="chardet" must be rejected before calling encoding_for()'
_detector.reset()
with open(source_path, 'rb') as source_file:
for line in source_file.readlines():
_detector.feed(line)
if _detector.done:
break
result = _detector.result['encoding']
if result is None:
_log.warning(
'%s: chardet cannot determine encoding, assuming fallback encoding %s',
source_path, fallback_encoding)
result = fallback_encoding
else:
# Simply use the specified encoding.
result = encoding
if result is None:
# Encoding 'automatic' or 'chardet' failed to detect anything.
if fallback_encoding is not None:
# If defined, use the fallback encoding.
result = fallback_encoding
else:
try:
# Attempt to read the file as UTF-8.
with open(source_path, 'r', encoding='utf-8') as source_file:
source_file.read()
result = 'utf-8'
except UnicodeDecodeError:
# UTF-8 did not work out, use the default as last resort.
result = DEFAULT_FALLBACK_ENCODING
_log.debug('%s: no fallback encoding specified, using %s', source_path, result)
assert result is not None
return result | The encoding used by the text file stored in ``source_path``.
The algorithm used is:
    * If ``encoding`` is ``'automatic'``, attempt the following:
      1. Check BOM for UTF-8, UTF-16 and UTF-32.
      2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
      3. Read the file using UTF-8.
      4. If all this fails, assume the ``fallback_encoding``.
    * If ``encoding`` is ``'chardet'`` use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value. | entailment |
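The BOM lookup in step 1 is straightforward; the sketch below is only illustrative, since the actual _BOM_TO_ENCODING_MAP is not shown here, and it orders the longer UTF-32 BOMs before the UTF-16 BOMs they start with:

import codecs

_BOMS = [
    (codecs.BOM_UTF32_LE, 'utf-32-le'),
    (codecs.BOM_UTF32_BE, 'utf-32-be'),
    (codecs.BOM_UTF8, 'utf-8-sig'),
    (codecs.BOM_UTF16_LE, 'utf-16-le'),
    (codecs.BOM_UTF16_BE, 'utf-16-be'),
]

def encoding_from_bom(heading, default='utf-8'):
    # heading: the first few bytes of the file
    for bom, name in _BOMS:
        if heading.startswith(bom):
            return name
    return default

print(encoding_from_bom(b'\xef\xbb\xbfhello'))   # -> 'utf-8-sig'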
def has_lexer(source_path):
"""
Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file.
"""
result = bool(pygments.lexers.find_lexer_class_for_filename(source_path))
if not result:
suffix = os.path.splitext(os.path.basename(source_path))[1].lstrip('.')
result = suffix in _SUFFIX_TO_FALLBACK_LEXER_MAP
return result | Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file. | entailment |
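Because has_lexer() only looks at the file name, it is cheap to call before reading a file; for example (assuming a reasonably recent pygments install):

print(has_lexer('analysis.py'))            # -> True, pygments knows *.py
print(has_lexer('notes.unknown_suffix'))   # -> False, unless the suffix is in the fallback map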
def source_analysis(
source_path, group, encoding='automatic', fallback_encoding='cp1252',
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool=None):
"""
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
    :param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info('%s: is empty', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info('%s: is binary', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info('%s: unknown language', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info('%s: is a duplicate of %s', source_path, duplicate_path)
result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ('automatic', 'chardet'):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, 'r', encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
result = pseudo_source_analysis(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(
pygount.common.lines(source_code), generated_regexes
)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = 'line {0} matches {1}'.format(number, regex)
_log.info('%s: is generated code because %s', source_path, message)
result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ('xml' in language.lower()) or (language == 'Genshi'):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = 'e'
for mark_to_check in ('d', 's', 'c'):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map['c'],
documentation=mark_to_count_map['d'],
empty=mark_to_count_map['e'],
string=mark_to_count_map['s'],
state=SourceState.analyzed.name,
state_info=None,
)
assert result is not None
return result | Analysis for line counts in source code stored in ``source_path``.
:param source_path:
    :param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis` | entailment |
def polynomial(img, mask, inplace=False, replace_all=False,
max_dev=1e-5, max_iter=20, order=2):
'''
    Replace all masked values of ``img`` with a 2D polynomial fit of order
    ``order``. The fit is repeated, re-masking high-gradient areas each
    iteration, until the mean change between iterations drops below
    ``max_dev`` or ``max_iter`` is reached. Returns the fitted flat-field
    image clipped to [0, 1].
'''
if inplace:
out = img
else:
out = img.copy()
lastm = 0
for _ in range(max_iter):
out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
replace_all=replace_all)
if replace_all:
out = out2
break
res = (np.abs(out2 - out)).mean()
print('residuum: ', res)
if res < max_dev:
out = out2
break
out = out2
mask = _highGrad(out)
m = mask.sum()
if m == lastm or m == img.size:
break
lastm = m
    out = np.clip(out, 0, 1, out=out)
    return out | Replace all masked values of ``img`` with a 2D polynomial fit of order
    ``order``. The fit is repeated, re-masking high-gradient areas each
    iteration, until the mean change between iterations drops below
    ``max_dev`` or ``max_iter`` is reached. Returns the fitted flat-field
    image clipped to [0, 1].
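A simplified, self-contained sketch of the core idea (fit a low-order 2D polynomial to the unmasked pixels and evaluate it everywhere); the real implementation above delegates to polyfit2dGrid and re-masks high-gradient areas iteratively:

import numpy as np

def fill_masked_poly2(img, mask, order=2):
    # design matrix of monomials x**i * y**j with i + j <= order
    yy, xx = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    x = xx.ravel() / img.shape[1]
    y = yy.ravel() / img.shape[0]
    terms = [x**i * y**j for i in range(order + 1)
             for j in range(order + 1 - i)]
    A = np.stack(terms, axis=1)
    good = ~mask.ravel()
    coeffs, *_ = np.linalg.lstsq(A[good], img.ravel()[good], rcond=None)
    return (A @ coeffs).reshape(img.shape)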
def errorDist(scale, measExpTime, n_events_in_expTime,
event_duration, std,
points_per_time=100, n_repetitions=300):
'''
TODO
'''
ntimes = 10
s1 = measExpTime * scale * 10
# exp. time factor 1/16-->16:
p2 = np.logspace(-4, 4, 18, base=2)
t = np.linspace(0, s1, ntimes * points_per_time * s1)
err = None
for rr in range(n_repetitions):
f = _flux(t, n_events_in_expTime, event_duration, std)
e = np.array([_capture(f, t, measExpTime, pp) for pp in p2])
if err is None:
err = e
else:
err += e
err /= (rr + 1)
# normalize, so that error==1 at 1:
try:
fac = findXAt(err, p2, 1)
except:
fac = 1
err /= fac
return p2, err, t, f | TODO | entailment |
def exampleSignals(std=1, dur1=1, dur2=3, dur3=0.2,
n1=0.5, n2=0.5, n3=2):
'''
std ... standard deviation of every signal
dur1...dur3 --> event duration per second
n1...n3 --> number of events per second
'''
np.random.seed(123)
t = np.linspace(0, 10, 100)
f0 = _flux(t, n1, dur1, std, offs=0)
f1 = _flux(t, n2, dur2, std, offs=0)
f2 = _flux(t, n3, dur3, std, offs=0)
return t,f0,f1,f2 | std ... standard deviation of every signal
dur1...dur3 --> event duration per second
n1...n3 --> number of events per second | entailment |
def _flux(t, n, duration, std, offs=1):
'''
returns Gaussian shaped signal fluctuations [events]
t --> times to calculate event for
n --> numbers of events per sec
duration --> event duration per sec
std --> std of event if averaged over time
offs --> event offset
'''
duration *= len(t) / t[-1]
duration = int(max(duration, 1))
pos = []
n *= t[-1]
pp = np.arange(len(t))
valid = np.ones_like(t, dtype=bool)
for _ in range(int(round(n))):
try:
ppp = np.random.choice(pp[valid], 1)[0]
pos.append(ppp)
valid[max(0, ppp - duration - 1):ppp + duration + 1] = False
except ValueError:
break
sign = np.random.randint(0, 2, len(pos))
sign[sign == 0] = -1
out = np.zeros_like(t)
amps = np.random.normal(loc=0, scale=1, size=len(pos))
if duration > 2:
evt = gaussian(duration, duration)
evt -= evt[0]
else:
evt = np.ones(shape=duration)
for s, p, a in zip(sign, pos, amps):
pp = duration
if p + duration > len(out):
pp = len(out) - p
out[p:p + pp] = s * a * evt[:pp]
out /= out.std() / std
out += offs
return out | returns Gaussian shaped signal fluctuations [events]
t --> times to calculate event for
n --> numbers of events per sec
duration --> event duration per sec
std --> std of event if averaged over time
offs --> event offset | entailment |
def _capture(f, t, t0, factor):
'''
capture signal and return its standard deviation
#TODO: more detail
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) | capture signal and return its standard deviation
#TODO: more detail | entailment |
def genericCameraMatrix(shape, angularField=60):
'''
Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape
'''
# http://nghiaho.com/?page_id=576
# assume that the optical centre is in the middle:
cy = int(shape[0] / 2)
cx = int(shape[1] / 2)
# assume that the FOV is 60 DEG (webcam)
fx = fy = cx / np.tan(angularField / 2 * np.pi /
180) # camera focal length
# see
# http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
return np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]
], dtype=np.float32) | Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape | entailment |
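Worked example: for a 480x640 image and the default 60 degree field of view, cx = 320, cy = 240 and fx = fy = 320 / tan(30 deg), roughly 554.3 px:

K = genericCameraMatrix((480, 640))
print(K)   # approximately [[554.3, 0, 320], [0, 554.3, 240], [0, 0, 1]]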
def standardDeviation2d(img, ksize=5, blurred=None):
'''
    calculate the spatially resolved standard deviation
for a given 2d array
ksize -> kernel size
blurred(optional) -> with same ksize gaussian filtered image
setting this parameter reduces processing time
'''
    if not isinstance(ksize, (list, tuple)):
        ksize = (ksize, ksize)
if blurred is None:
blurred = gaussian_filter(img, ksize)
else:
assert blurred.shape == img.shape
std = np.empty_like(img)
_calc(img, ksize[0], ksize[1], blurred, std)
    return std | calculate the spatially resolved standard deviation
for a given 2d array
ksize -> kernel size
blurred(optional) -> with same ksize gaussian filtered image
setting this parameter reduces processing time | entailment |
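An alternative sketch of a spatially resolved standard deviation using box filters and var = E[x^2] - E[x]^2; this is not the implementation above (which uses a Gaussian-weighted local mean), but it illustrates the idea:

import numpy as np
from scipy.ndimage import uniform_filter

def local_std(img, ksize=5):
    img = img.astype(float)
    mean = uniform_filter(img, ksize)
    mean_sq = uniform_filter(img * img, ksize)
    var = np.clip(mean_sq - mean * mean, 0, None)   # guard against tiny negatives
    return np.sqrt(var)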
def maskedFilter(arr, mask, ksize=30, fill_mask=True,
fn='median'):
'''
fn['mean', 'median']
fill_mask=True:
        replace masked areas with filtered results
fill_mask=False:
masked areas are ignored
'''
if fill_mask:
mask1 = mask
out = arr
else:
mask1 = ~mask
out = np.full_like(arr, fill_value=np.nan)
mask2 = ~mask
if fn == 'mean':
_calcMean(arr, mask1, mask2, out, ksize // 2)
else:
buff = np.empty(shape=(ksize * ksize), dtype=arr.dtype)
_calcMedian(arr, mask1, mask2, out, ksize // 2, buff)
return out | fn['mean', 'median']
fill_mask=True:
        replace masked areas with filtered results
fill_mask=False:
masked areas are ignored | entailment |
def vignettingFromDifferentObjects(imgs, bg):
'''
Extract vignetting from a set of images
containing different devices
The devices spatial inhomogeneities are averaged
    This method is referred to as 'Method C' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
f = FlatFieldFromImgFit(imgs, bg)
return f.result, f.mask | Extract vignetting from a set of images
containing different devices
The devices spatial inhomogeneities are averaged
    This method is referred to as 'Method C' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
--- | entailment |
def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
'''
Calculate the averaged signal-to-noise ratio SNR50
    as defined by IEC NP 60904-13.
    Needs two reference EL images and one background image.
'''
# ensure images are type float64 (double precision):
i1 = np.asfarray(i1)
i2 = np.asfarray(i2)
    if not (np.isscalar(ibg) and ibg == 0):  # a background image/value was supplied
ibg = np.asfarray(ibg)
assert i1.shape == ibg.shape, 'all input images need to have the same resolution'
assert i1.shape == i2.shape, 'all input images need to have the same resolution'
if not allow_color_images:
assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'
# SNR calculation as defined in 'IEC TS 60904-13':
signal = 0.5 * (i1 + i2) - ibg
noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
if signal.ndim == 3: # color
signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
signal = signal.sum()
noise = noise.sum()
return signal / noise | Calculate the averaged signal-to-noise ratio SNR50
    as defined by IEC NP 60904-13.
    Needs two reference EL images and one background image.
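A synthetic sanity check with made-up data: two noisy captures of the same flat pattern plus a dark background frame should give an SNR of roughly signal level / noise level, here about 100:

import numpy as np

np.random.seed(0)
signal = np.full((64, 64), 1000.0)
i1 = signal + np.random.normal(0, 10, signal.shape)
i2 = signal + np.random.normal(0, 10, signal.shape)
print(SNR_IEC(i1, i2, np.zeros_like(signal)))   # roughly 100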
def _rotate(img, angle):
'''
angle [DEG]
'''
s = img.shape
if angle == 0:
return img
else:
M = cv2.getRotationMatrix2D((s[1] // 2,
s[0] // 2), angle, 1)
return cv2.warpAffine(img, M, (s[1], s[0])) | angle [DEG] | entailment |
def _findOverlap(self, img_rgb, overlap, overlapDeviation,
rotation, rotationDeviation):
'''
return offset(x,y) which fit best self._base_img
through template matching
'''
# get gray images
        if len(img_rgb.shape) != len(self.base_img_rgb.shape):
raise Exception(
'number of channels(colors) for both images different')
if overlapDeviation == 0 and rotationDeviation == 0:
return (0, overlap, rotation)
s = self.base_img_rgb.shape
ho = int(round(overlap * 0.5))
overlap = int(round(overlap))
# create two image cuts to compare:
imgcut = self.base_img_rgb[s[0] - overlapDeviation - overlap:, :]
template = img_rgb[:overlap, ho:s[1] - ho]
def fn(angle):
rotTempl = self._rotate(template, angle)
# Apply template Matching
fn.res = cv2.matchTemplate(rotTempl.astype(np.float32),
imgcut.astype(np.float32),
cv2.TM_CCORR_NORMED)
return 1 / fn.res.mean()
if rotationDeviation == 0:
angle = rotation
fn(rotation)
else:
# find best rotation angle:
angle = brent(fn, brack=(rotation - rotationDeviation,
rotation + rotationDeviation))
loc = cv2.minMaxLoc(fn.res)[-1]
offsx = int(round(loc[0] - ho))
offsy = overlapDeviation + overlap - loc[1]
return offsx, offsy, angle | return offset(x,y) which fit best self._base_img
through template matching | entailment |
def estimateFromImages(imgs1, imgs2=None, mn_mx=None, nbins=100):
'''
estimate the noise level function as stDev over image intensity
from a set of 2 image groups
images at the same position have to show
the identical setup, so
imgs1[i] - imgs2[i] = noise
'''
if imgs2 is None:
imgs2 = [None] * len(imgs1)
else:
assert len(imgs1) == len(imgs2)
y_vals = np.empty((len(imgs1), nbins))
w_vals = np.zeros((len(imgs1), nbins))
if mn_mx is None:
print('estimating min and max image value')
mn = 1e6
mx = -1e6
# get min and max image value checking all first images:
for n, i1 in enumerate(imgs1):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
mmn, mmx = _getMinMax(i1)
mn = min(mn, mmn)
            mx = max(mx, mmx)
print('--> min(%s), max(%s)' % (mn, mx))
else:
mn, mx = mn_mx
x = None
print('get noise level function')
for n, (i1, i2) in enumerate(zip(imgs1, imgs2)):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
if i2 is not None:
i2 = imread(i2)
x, y, weights, _ = calcNLF(i1, i2, mn_mx_nbins=(mn, mx, nbins), x=x)
y_vals[n] = y
w_vals[n] = weights
# filter empty places:
filledPos = np.sum(w_vals, axis=0) != 0
w_vals = w_vals[:, filledPos]
y_vals = y_vals[:, filledPos]
x = x[filledPos]
y_avg = np.average(np.nan_to_num(y_vals),
weights=w_vals,
axis=0)
w_vals = np.sum(w_vals, axis=0)
w_vals /= w_vals.sum()
fitParams, fn, i = _evaluate(x, y_avg, w_vals)
return x, fn, y_avg, y_vals, w_vals, fitParams, i | estimate the noise level function as stDev over image intensity
from a set of 2 image groups
images at the same position have to show
the identical setup, so
imgs1[i] - imgs2[i] = noise | entailment |
def _evaluate(x, y, weights):
'''
    get the parameters needed by 'function'
through curve fitting
'''
i = _validI(x, y, weights)
xx = x[i]
y = y[i]
try:
fitParams = _fit(xx, y)
# bound noise fn to min defined y value:
minY = function(xx[0], *fitParams)
fitParams = np.insert(fitParams, 0, minY)
fn = lambda x, minY=minY: boundedFunction(x, *fitParams)
except RuntimeError:
print(
"couldn't fit noise function with filtered indices, use polynomial fit instead")
fitParams = None
fn = smooth(xx, y, weights[i])
    return fitParams, fn, i | get the parameters needed by 'function'
through curve fitting | entailment |
def boundedFunction(x, minY, ax, ay):
'''
limit [function] to a minimum y value
'''
y = function(x, ax, ay)
return np.maximum(np.nan_to_num(y), minY) | limit [function] to a minimum y value | entailment |
def function(x, ax, ay):
'''
general square root function
'''
with np.errstate(invalid='ignore'):
return ay * (x - ax)**0.5 | general square root function | entailment |
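A sketch of recovering the square-root noise model parameters from synthetic data with scipy's curve_fit; the sample values are invented and the bounds keep ax below min(x) so the square root stays real:

import numpy as np
from scipy.optimize import curve_fit

np.random.seed(1)
x = np.linspace(50, 1000, 50)
y = function(x, 5.0, 0.8) + np.random.normal(0, 0.5, x.size)
popt, _ = curve_fit(function, x, y, p0=(1.0, 1.0),
                    bounds=([0.0, 0.0], [40.0, 10.0]))
print(popt)   # should land near [5.0, 0.8]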