repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
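The eight columns above appear to match the CodeSearchNet corpus as published on the Hugging Face Hub. As a hedged sketch only — the dataset id "code_search_net", the "python" configuration and the "train" split are assumptions about where these rows come from, not something stated in this dump — rows with these fields could be iterated like this:

```python
# Sketch: iterating rows with the columns listed above.
# The dataset id and configuration name are assumptions; adjust to the actual source.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")
for row in ds.select(range(3)):
    print(row["repository_name"], row["func_path_in_repository"], row["func_name"])
    print(row["func_documentation_string"][:80])
    print(row["func_code_url"])
```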
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._add_non_batch | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | python | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEndIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['MndtIdNode'])
TX_nodes['MndtRltdInfNode'].append(TX_nodes['DtOfSgntrNode'])
TX_nodes['DrctDbtTxNode'].append(TX_nodes['MndtRltdInfNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DrctDbtTxNode'])
if 'BIC_DbtrAgt_Node' in TX_nodes and TX_nodes['BIC_DbtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_DbtrAgt_Node'].append(
TX_nodes['BIC_DbtrAgt_Node'])
TX_nodes['DbtrAgtNode'].append(TX_nodes['FinInstnId_DbtrAgt_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAgtNode'])
TX_nodes['DbtrNode'].append(TX_nodes['Nm_Dbtr_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrNode'])
TX_nodes['Id_DbtrAcct_Node'].append(TX_nodes['IBAN_DbtrAcct_Node'])
TX_nodes['DbtrAcctNode'].append(TX_nodes['Id_DbtrAcct_Node'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['DbtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['DrctDbtTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['DrctDbtTxInfNode'])
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L236-L309 |
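The `_add_non_batch` internals in this row are normally driven through the library's public `SepaDD` class. The sketch below shows how a direct-debit file might be produced end to end; the config and payment field names are recalled from the project's README rather than taken from this row, so treat them as assumptions to verify against the upstream documentation.

```python
import datetime
from sepaxml import SepaDD  # public entry point of raphaelm/python-sepaxml

# Field names below are assumptions based on the project README, not on this dataset row.
config = {
    "name": "Example Creditor",
    "IBAN": "NL50BANK1234567890",
    "BIC": "BANKNL2A",
    "batch": False,                      # False -> each payment is folded via _add_non_batch
    "creditor_id": "NL08ZZZ123456780000",
    "currency": "EUR",
    "instrument": "CORE",                # assumed key; used as Cd_LclInstrm in the XML
}
sepa = SepaDD(config, schema="pain.008.001.02")

sepa.add_payment({
    "name": "Example Debtor",
    "IBAN": "NL50BANK1234567890",
    "BIC": "BANKNL2A",
    "amount": 1012,                      # amount in cents, see int_to_decimal_str further down
    "type": "RCUR",
    "collection_date": datetime.date.today(),
    "mandate_id": "1234",
    "mandate_date": datetime.date(2024, 1, 1),
    "description": "Invoice 2024-001",
})

xml_bytes = sepa.export(validate=True)   # export() appears later in this dump
```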
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._finalize_batch | def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | python | def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
batch_meta_split = batch_meta.split("::")
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = batch_meta_split[0]
PmtInf_nodes['ReqdColltnDtNode'].text = batch_meta_split[1]
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['LclInstrmNode'].append(
PmtInf_nodes['Cd_LclInstrm_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['LclInstrmNode'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SeqTpNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdColltnDtNode'])
PmtInf_nodes['CdtrNode'].append(PmtInf_nodes['Nm_Cdtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrNode'])
PmtInf_nodes['Id_CdtrAcct_Node'].append(
PmtInf_nodes['IBAN_CdtrAcct_Node'])
PmtInf_nodes['CdtrAcctNode'].append(
PmtInf_nodes['Id_CdtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_CdtrAgt_Node'].append(
PmtInf_nodes['BIC_CdtrAgt_Node'])
PmtInf_nodes['CdtrAgtNode'].append(
PmtInf_nodes['FinInstnId_CdtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
if self.schema == 'pain.008.001.02':
PmtInf_nodes['CdtrSchmeIdNode'].append(PmtInf_nodes['Nm_CdtrSchmeId_Node'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['Id_Othr_Node'])
PmtInf_nodes['SchmeNmNode'].append(PmtInf_nodes['PrtryNode'])
PmtInf_nodes['OthrNode'].append(PmtInf_nodes['SchmeNmNode'])
PmtInf_nodes['PrvtIdNode'].append(PmtInf_nodes['OthrNode'])
PmtInf_nodes['Id_CdtrSchmeId_Node'].append(
PmtInf_nodes['PrvtIdNode'])
PmtInf_nodes['CdtrSchmeIdNode'].append(
PmtInf_nodes['Id_CdtrSchmeId_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CdtrSchmeIdNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
CstmrDrctDbtInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L362-L443 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer.check_config | def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation) | python | def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation) | Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid, error string if invalid parameters were
encountered. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L17-L34 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer.check_payment | def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | python | def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
required = ["name", "IBAN", "BIC", "amount", "description"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if 'execution_date' in payment:
if not isinstance(payment['execution_date'], datetime.date):
validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | Check the payment for required fields and validity.
@param payment: The payment dict
@return: True if valid, error string if invalid parameters were
encountered. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L36-L61 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._create_header | def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node) | python | def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node) | Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L122-L153 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._create_PmtInf_node | def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED | python | def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED | Method to create the blank payment information nodes as a dict. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L155-L181 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_non_batch | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | python | def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L207-L260 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_batch | def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and then call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment) | python | def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and then call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment) | Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and then call the
_add_to_batch_list function to store the batch. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L262-L288 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._add_to_batch_list | def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
nonexistent. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount'] | python | def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
nonexistent. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount'] | Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
nonexistent. This will also add the payment amount to the respective
batch total. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L290-L307 |
raphaelm/python-sepaxml | sepaxml/transfer.py | SepaTransfer._finalize_batch | def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | python | def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode']) | Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L309-L369 |
jaijuneja/PyTLDR | pytldr/summarize/baseclass.py | BaseSummarizer._compute_matrix | def _compute_matrix(cls, sentences, weighting='frequency', norm=None):
"""
Compute the matrix of term frequencies given a list of sentences
"""
if norm not in ('l1', 'l2', None):
raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')
# Initialise vectorizer to convert text documents into matrix of token counts
if weighting.lower() == 'binary':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=True, stop_words=None)
elif weighting.lower() == 'frequency':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=False, stop_words=None)
elif weighting.lower() == 'tfidf':
vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1), stop_words=None)
else:
raise ValueError('Parameter "weighting" must take one of the values "binary", "frequency" or "tfidf".')
# Extract word features from sentences using sparse vectorizer
frequency_matrix = vectorizer.fit_transform(sentences).astype(float)
# Normalize the term vectors (i.e. each row adds to 1)
if norm is not None:
frequency_matrix = normalize(frequency_matrix, norm=norm, axis=1)
return frequency_matrix | python | def _compute_matrix(cls, sentences, weighting='frequency', norm=None):
"""
Compute the matrix of term frequencies given a list of sentences
"""
if norm not in ('l1', 'l2', None):
raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')
# Initialise vectorizer to convert text documents into matrix of token counts
if weighting.lower() == 'binary':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=True, stop_words=None)
elif weighting.lower() == 'frequency':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=False, stop_words=None)
elif weighting.lower() == 'tfidf':
vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1), stop_words=None)
else:
raise ValueError('Parameter "weighting" must take one of the values "binary", "frequency" or "tfidf".')
# Extract word features from sentences using sparse vectorizer
frequency_matrix = vectorizer.fit_transform(sentences).astype(float)
# Normalize the term vectors (i.e. each row adds to 1)
if norm is not None:
frequency_matrix = normalize(frequency_matrix, norm=norm, axis=1)
return frequency_matrix | Compute the matrix of term frequencies given a list of sentences | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/baseclass.py#L19-L46 |
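For context, the row above builds its term matrix with scikit-learn. The following self-contained sketch (made-up sentences, frequency weighting with an l1 norm) reproduces the same construction outside the class:

```python
# Minimal sketch of the matrix construction done by _compute_matrix.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize

sentences = ["the cat sat on the mat", "the dog sat", "cats and dogs and mats"]
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=False, stop_words=None)
frequency_matrix = vectorizer.fit_transform(sentences).astype(float)
frequency_matrix = normalize(frequency_matrix, norm="l1", axis=1)  # each row now sums to 1
print(frequency_matrix.toarray().round(2))
```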
jaijuneja/PyTLDR | pytldr/summarize/relevance.py | RelevanceSummarizer.summarize | def summarize(self, text, length=5, binary_matrix=True):
"""
Implements the method of summarization by relevance score, as described by Gong and Liu in the paper:
Y. Gong and X. Liu (2001). Generic text summarization using relevance measure and latent semantic analysis.
Proceedings of the 24th International Conference on Research in Information Retrieval (SIGIR ’01),
pp. 19–25.
This method computes and ranks the cosine similarity between each sentence vector and the overall
document.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
matrix = self._compute_matrix(sentences, weighting='frequency')
# Sum occurrences of terms over all sentences to obtain document frequency
doc_frequency = matrix.sum(axis=0)
if binary_matrix:
matrix = (matrix != 0).astype(int)
summary_sentences = []
for _ in range(length):
# Take the inner product of each sentence vector with the document vector
sentence_scores = matrix.dot(doc_frequency.transpose())
sentence_scores = np.array(sentence_scores.T)[0]
# Grab the top sentence and add it to the summary
top_sentence = sentence_scores.argsort()[-1]
summary_sentences.append(top_sentence)
# Remove all terms that appear in the top sentence from the document
terms_in_top_sentence = (matrix[top_sentence, :] != 0).toarray()
doc_frequency[terms_in_top_sentence] = 0
# Remove the top sentence from consideration by setting all its elements to zero
# This does the same as matrix[top_sentence, :] = 0, but is much faster for sparse matrices
matrix.data[matrix.indptr[top_sentence]:matrix.indptr[top_sentence+1]] = 0
matrix.eliminate_zeros()
# Return the sentences in the order in which they appear in the document
summary_sentences.sort()
return [unprocessed_sentences[i] for i in summary_sentences] | python | def summarize(self, text, length=5, binary_matrix=True):
"""
Implements the method of summarization by relevance score, as described by Gong and Liu in the paper:
Y. Gong and X. Liu (2001). Generic text summarization using relevance measure and latent semantic analysis.
Proceedings of the 24th International Conference on Research in Information Retrieval (SIGIR ’01),
pp. 19–25.
This method computes and ranks the cosine similarity between each sentence vector and the overall
document.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
matrix = self._compute_matrix(sentences, weighting='frequency')
# Sum occurrences of terms over all sentences to obtain document frequency
doc_frequency = matrix.sum(axis=0)
if binary_matrix:
matrix = (matrix != 0).astype(int)
summary_sentences = []
for _ in range(length):
# Take the inner product of each sentence vector with the document vector
sentence_scores = matrix.dot(doc_frequency.transpose())
sentence_scores = np.array(sentence_scores.T)[0]
# Grab the top sentence and add it to the summary
top_sentence = sentence_scores.argsort()[-1]
summary_sentences.append(top_sentence)
# Remove all terms that appear in the top sentence from the document
terms_in_top_sentence = (matrix[top_sentence, :] != 0).toarray()
doc_frequency[terms_in_top_sentence] = 0
# Remove the top sentence from consideration by setting all its elements to zero
# This does the same as matrix[top_sentence, :] = 0, but is much faster for sparse matrices
matrix.data[matrix.indptr[top_sentence]:matrix.indptr[top_sentence+1]] = 0
matrix.eliminate_zeros()
# Return the sentences in the order in which they appear in the document
summary_sentences.sort()
return [unprocessed_sentences[i] for i in summary_sentences] | Implements the method of summarization by relevance score, as described by Gong and Liu in the paper:
Y. Gong and X. Liu (2001). Generic text summarization using relevance measure and latent semantic analysis.
Proceedings of the 24th International Conference on Research in Information Retrieval (SIGIR ’01),
pp. 19–25.
This method computes and ranks the cosine similarity between each sentence vector and the overall
document.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:return: list of sentences for the summary | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/relevance.py#L8-L64 |
raphaelm/python-sepaxml | sepaxml/shared.py | SepaPaymentInitn._prepare_document | def _prepare_document(self):
"""
Build the main document node and set xml namespaces.
"""
self._xml = ET.Element("Document")
self._xml.set("xmlns",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
self._xml.set("xmlns:xsi",
"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace("",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
ET.register_namespace("xsi",
"http://www.w3.org/2001/XMLSchema-instance")
n = ET.Element(self.root_el)
self._xml.append(n) | python | def _prepare_document(self):
"""
Build the main document node and set xml namespaces.
"""
self._xml = ET.Element("Document")
self._xml.set("xmlns",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
self._xml.set("xmlns:xsi",
"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace("",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
ET.register_namespace("xsi",
"http://www.w3.org/2001/XMLSchema-instance")
n = ET.Element(self.root_el)
self._xml.append(n) | Build the main document node and set xml namespaces. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/shared.py#L36-L50 |
raphaelm/python-sepaxml | sepaxml/shared.py | SepaPaymentInitn.export | def export(self, validate=True):
"""
Method to output the xml as string. It will finalize the batches and
then calculate the checksums (amount sum and transaction count),
fill these into the group header and output the XML.
"""
self._finalize_batch()
ctrl_sum_total = 0
nb_of_txs_total = 0
for ctrl_sum in self._xml.iter('CtrlSum'):
if ctrl_sum.text is None:
continue
ctrl_sum_total += decimal_str_to_int(ctrl_sum.text)
for nb_of_txs in self._xml.iter('NbOfTxs'):
if nb_of_txs.text is None:
continue
nb_of_txs_total += int(nb_of_txs.text)
n = self._xml.find(self.root_el)
GrpHdr_node = n.find('GrpHdr')
CtrlSum_node = GrpHdr_node.find('CtrlSum')
NbOfTxs_node = GrpHdr_node.find('NbOfTxs')
CtrlSum_node.text = int_to_decimal_str(ctrl_sum_total)
NbOfTxs_node.text = str(nb_of_txs_total)
# Prepending the XML version is hacky, but cElementTree only offers this
# automatically if you write to a file, which we don't necessarily want.
out = b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + ET.tostring(
self._xml, "utf-8")
if validate and not is_valid_xml(out, self.schema):
raise ValidationError(
"The output SEPA file contains validation errors. This is likely due to an illegal value in one of "
"your input fields."
)
return out | python | def export(self, validate=True):
"""
Method to output the xml as string. It will finalize the batches and
then calculate the checksums (amount sum and transaction count),
fill these into the group header and output the XML.
"""
self._finalize_batch()
ctrl_sum_total = 0
nb_of_txs_total = 0
for ctrl_sum in self._xml.iter('CtrlSum'):
if ctrl_sum.text is None:
continue
ctrl_sum_total += decimal_str_to_int(ctrl_sum.text)
for nb_of_txs in self._xml.iter('NbOfTxs'):
if nb_of_txs.text is None:
continue
nb_of_txs_total += int(nb_of_txs.text)
n = self._xml.find(self.root_el)
GrpHdr_node = n.find('GrpHdr')
CtrlSum_node = GrpHdr_node.find('CtrlSum')
NbOfTxs_node = GrpHdr_node.find('NbOfTxs')
CtrlSum_node.text = int_to_decimal_str(ctrl_sum_total)
NbOfTxs_node.text = str(nb_of_txs_total)
# Prepending the XML version is hacky, but cElementTree only offers this
# automatically if you write to a file, which we don't necessarily want.
out = b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + ET.tostring(
self._xml, "utf-8")
if validate and not is_valid_xml(out, self.schema):
raise ValidationError(
"The output SEPA file contains validation errors. This is likely due to an illegal value in one of "
"your input fields."
)
return out | Method to output the xml as string. It will finalize the batches and
then calculate the checksums (amount sum and transaction count),
fill these into the group header and output the XML. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/shared.py#L58-L95 |
raphaelm/python-sepaxml | sepaxml/utils.py | get_rand_string | def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)]) | python | def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)]) | Returns a securely generated random string. Taken from the Django project
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L16-L36 |
raphaelm/python-sepaxml | sepaxml/utils.py | make_msg_id | def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id | python | def make_msg_id():
"""
Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id | Create a semi random message id, by using 12 char random hex string and
a timestamp.
@return: string consisting of timestamp, -, random value | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L39-L48 |
raphaelm/python-sepaxml | sepaxml/utils.py | make_id | def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r | python | def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r | Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L51-L61 |
raphaelm/python-sepaxml | sepaxml/utils.py | int_to_decimal_str | def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:] | python | def int_to_decimal_str(integer):
"""
Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator
"""
int_string = str(integer)
if len(int_string) < 2:
return "0." + int_string.zfill(2)
else:
return int_string[:-2] + "." + int_string[-2:] | Helper to convert integers (representing cents) into decimal currency
string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param integer The amount in cents
@return string The amount in currency with full stop decimal separator | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L64-L76 |
raphaelm/python-sepaxml | sepaxml/utils.py | decimal_str_to_int | def decimal_str_to_int(decimal_string):
"""
Helper to convert a decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string) | python | def decimal_str_to_int(decimal_string):
"""
Helper to convert a decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents
"""
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string) | Helper to convert a decimal currency string into integers (cents).
WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION,
FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
@param string The amount in currency with full stop decimal separator
@return integer The amount in cents | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L79-L89 |
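The inverse helper, with a caveat that follows from the implementation: the input must carry exactly two decimal places, because the function only drops the full stop rather than scaling ("1.5" would yield 15, not 150):

from sepaxml.utils import decimal_str_to_int

assert decimal_str_to_int("0.05") == 5
assert decimal_str_to_int("10.50") == 1050
assert decimal_str_to_int("1234.56") == 123456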
NatLibFi/Skosify | skosify/rdftools/io.py | read_rdf | def read_rdf(sources, infmt):
"""Read a list of RDF files and/or RDF graphs. May raise an Exception."""
rdf = Graph()
for source in sources:
if isinstance(source, Graph):
for triple in source:
rdf.add(triple)
continue
if source == '-':
f = sys.stdin
else:
f = open(source, 'r')
if infmt:
fmt = infmt
else:
# determine format based on file extension
fmt = 'xml' # default
if source.endswith('n3'):
fmt = 'n3'
if source.endswith('ttl'):
fmt = 'n3'
if source.endswith('nt'):
fmt = 'nt'
logging.debug("Parsing input file %s (format: %s)", source, fmt)
rdf.parse(f, format=fmt)
return rdf | python | def read_rdf(sources, infmt):
"""Read a list of RDF files and/or RDF graphs. May raise an Exception."""
rdf = Graph()
for source in sources:
if isinstance(source, Graph):
for triple in source:
rdf.add(triple)
continue
if source == '-':
f = sys.stdin
else:
f = open(source, 'r')
if infmt:
fmt = infmt
else:
# determine format based on file extension
fmt = 'xml' # default
if source.endswith('n3'):
fmt = 'n3'
if source.endswith('ttl'):
fmt = 'n3'
if source.endswith('nt'):
fmt = 'nt'
logging.debug("Parsing input file %s (format: %s)", source, fmt)
rdf.parse(f, format=fmt)
return rdf | Read a list of RDF files and/or RDF graphs. May raise an Exception. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/io.py#L10-L40 |
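A usage sketch; the module path is assumed from the file path above and vocabulary.ttl is a hypothetical file name. Sources may mix file names, '-' for standard input, and already-parsed rdflib graphs:

from rdflib import Graph
from skosify.rdftools.io import read_rdf

extra = Graph()
extra.parse(data="<http://example.org/a> <http://example.org/b> <http://example.org/c> .",
            format="turtle")

# File format is guessed from the extension unless infmt is given explicitly.
rdf = read_rdf(["vocabulary.ttl", extra], infmt=None)
print(len(rdf))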
NatLibFi/Skosify | skosify/skosify.py | mapping_get | def mapping_get(uri, mapping):
"""Look up the URI in the given mapping and return the result.
Throws KeyError if no matching mapping was found.
"""
ln = localname(uri)
# 1. try to match URI keys
for k, v in mapping.items():
if k == uri:
return v
# 2. try to match local names
for k, v in mapping.items():
if k == ln:
return v
# 3. try to match local names with * prefix
# try to match longest first, so sort the mapping by key length
l = list(mapping.items())
l.sort(key=lambda i: len(i[0]), reverse=True)
for k, v in l:
if k[0] == '*' and ln.endswith(k[1:]):
return v
raise KeyError(uri) | python | def mapping_get(uri, mapping):
"""Look up the URI in the given mapping and return the result.
Throws KeyError if no matching mapping was found.
"""
ln = localname(uri)
# 1. try to match URI keys
for k, v in mapping.items():
if k == uri:
return v
# 2. try to match local names
for k, v in mapping.items():
if k == ln:
return v
# 3. try to match local names with * prefix
# try to match longest first, so sort the mapping by key length
l = list(mapping.items())
l.sort(key=lambda i: len(i[0]), reverse=True)
for k, v in l:
if k[0] == '*' and ln.endswith(k[1:]):
return v
raise KeyError(uri) | Look up the URI in the given mapping and return the result.
Throws KeyError if no matching mapping was found. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L26-L48 |
NatLibFi/Skosify | skosify/skosify.py | mapping_match | def mapping_match(uri, mapping):
"""Determine whether the given URI matches one of the given mappings.
Returns True if a match was found, False otherwise.
"""
try:
val = mapping_get(uri, mapping)
return True
except KeyError:
return False | python | def mapping_match(uri, mapping):
"""Determine whether the given URI matches one of the given mappings.
Returns True if a match was found, False otherwise.
"""
try:
val = mapping_get(uri, mapping)
return True
except KeyError:
return False | Determine whether the given URI matches one of the given mappings.
Returns True if a match was found, False otherwise. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L51-L61 |
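A small illustration of the lookup order used by the two helpers above, with a hypothetical mapping dict (imports assume the module path shown): full URIs are matched first, then plain local names, then '*'-prefixed suffix patterns, longest key first:

from rdflib import URIRef
from skosify.skosify import mapping_get, mapping_match

mapping = {
    "prefLabel": ("skos:prefLabel",),
    "*Label": ("skos:altLabel",),
}
uri = URIRef("http://example.org/onto#hiddenLabel")
print(mapping_match(uri, mapping))  # True
print(mapping_get(uri, mapping))    # ('skos:altLabel',), via the "*Label" suffix rule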
NatLibFi/Skosify | skosify/skosify.py | in_general_ns | def in_general_ns(uri):
"""Return True iff the URI is in a well-known general RDF namespace.
URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC."""
RDFuri = RDF.uri
RDFSuri = RDFS.uri
for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
if uri.startswith(ns):
return True
return False | python | def in_general_ns(uri):
"""Return True iff the URI is in a well-known general RDF namespace.
URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC."""
RDFuri = RDF.uri
RDFSuri = RDFS.uri
for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
if uri.startswith(ns):
return True
return False | Return True iff the URI is in a well-known general RDF namespace.
URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L64-L74 |
NatLibFi/Skosify | skosify/skosify.py | get_concept_scheme | def get_concept_scheme(rdf):
"""Return a skos:ConceptScheme contained in the model.
Returns None if no skos:ConceptScheme is present.
"""
# add explicit type
for s, o in rdf.subject_objects(SKOS.inScheme):
if not isinstance(o, Literal):
rdf.add((o, RDF.type, SKOS.ConceptScheme))
else:
logging.warning(
"Literal value %s for skos:inScheme detected, ignoring.", o)
css = list(rdf.subjects(RDF.type, SKOS.ConceptScheme))
if len(css) > 1:
css.sort()
cs = css[0]
logging.warning(
"Multiple concept schemes found. "
"Selecting %s as default concept scheme.", cs)
elif len(css) == 1:
cs = css[0]
else:
cs = None
return cs | python | def get_concept_scheme(rdf):
"""Return a skos:ConceptScheme contained in the model.
Returns None if no skos:ConceptScheme is present.
"""
# add explicit type
for s, o in rdf.subject_objects(SKOS.inScheme):
if not isinstance(o, Literal):
rdf.add((o, RDF.type, SKOS.ConceptScheme))
else:
logging.warning(
"Literal value %s for skos:inScheme detected, ignoring.", o)
css = list(rdf.subjects(RDF.type, SKOS.ConceptScheme))
if len(css) > 1:
css.sort()
cs = css[0]
logging.warning(
"Multiple concept schemes found. "
"Selecting %s as default concept scheme.", cs)
elif len(css) == 1:
cs = css[0]
else:
cs = None
return cs | Return a skos:ConceptScheme contained in the model.
Returns None if no skos:ConceptScheme is present. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L77-L101 |
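A minimal sketch (imports assume the module path above): a resource that is only referenced via skos:inScheme gets an explicit skos:ConceptScheme type added as a side effect and is then returned:

from rdflib import Graph, Namespace
from rdflib.namespace import RDF, SKOS
from skosify.skosify import get_concept_scheme

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.concept1, SKOS.inScheme, EX.scheme))

cs = get_concept_scheme(g)
print(cs)                                       # http://example.org/scheme
print((cs, RDF.type, SKOS.ConceptScheme) in g)  # True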
NatLibFi/Skosify | skosify/skosify.py | detect_namespace | def detect_namespace(rdf):
"""Try to automatically detect the URI namespace of the vocabulary.
Return namespace as URIRef.
"""
# pick a concept
conc = rdf.value(None, RDF.type, SKOS.Concept, any=True)
if conc is None:
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
ln = localname(conc)
ns = URIRef(conc.replace(ln, ''))
if ns.strip() == '':
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
logging.info(
"Namespace auto-detected to '%s' "
"- you can override this with the --namespace option.", ns)
return ns | python | def detect_namespace(rdf):
"""Try to automatically detect the URI namespace of the vocabulary.
Return namespace as URIRef.
"""
# pick a concept
conc = rdf.value(None, RDF.type, SKOS.Concept, any=True)
if conc is None:
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
ln = localname(conc)
ns = URIRef(conc.replace(ln, ''))
if ns.strip() == '':
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
logging.info(
"Namespace auto-detected to '%s' "
"- you can override this with the --namespace option.", ns)
return ns | Try to automatically detect the URI namespace of the vocabulary.
Return namespace as URIRef. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L104-L130 |
NatLibFi/Skosify | skosify/skosify.py | create_concept_scheme | def create_concept_scheme(rdf, ns, lname=''):
"""Create a skos:ConceptScheme in the model and return it."""
ont = None
if not ns:
# see if there's an owl:Ontology and use that to determine namespace
onts = list(rdf.subjects(RDF.type, OWL.Ontology))
if len(onts) > 1:
onts.sort()
ont = onts[0]
logging.warning(
"Multiple owl:Ontology instances found. "
"Creating concept scheme from %s.", ont)
elif len(onts) == 1:
ont = onts[0]
else:
ont = None
if not ont:
logging.info(
"No skos:ConceptScheme or owl:Ontology found. "
"Using namespace auto-detection for creating concept scheme.")
ns = detect_namespace(rdf)
elif ont.endswith('/') or ont.endswith('#') or ont.endswith(':'):
ns = ont
else:
ns = ont + '/'
NS = Namespace(ns)
cs = NS[lname]
rdf.add((cs, RDF.type, SKOS.ConceptScheme))
if ont is not None:
rdf.remove((ont, RDF.type, OWL.Ontology))
# remove owl:imports declarations
for o in rdf.objects(ont, OWL.imports):
rdf.remove((ont, OWL.imports, o))
# remove protege specific properties
for p, o in rdf.predicate_objects(ont):
prot = URIRef(
'http://protege.stanford.edu/plugins/owl/protege#')
if p.startswith(prot):
rdf.remove((ont, p, o))
# move remaining properties (dc:title etc.) of the owl:Ontology into
# the skos:ConceptScheme
replace_uri(rdf, ont, cs)
return cs | python | def create_concept_scheme(rdf, ns, lname=''):
"""Create a skos:ConceptScheme in the model and return it."""
ont = None
if not ns:
# see if there's an owl:Ontology and use that to determine namespace
onts = list(rdf.subjects(RDF.type, OWL.Ontology))
if len(onts) > 1:
onts.sort()
ont = onts[0]
logging.warning(
"Multiple owl:Ontology instances found. "
"Creating concept scheme from %s.", ont)
elif len(onts) == 1:
ont = onts[0]
else:
ont = None
if not ont:
logging.info(
"No skos:ConceptScheme or owl:Ontology found. "
"Using namespace auto-detection for creating concept scheme.")
ns = detect_namespace(rdf)
elif ont.endswith('/') or ont.endswith('#') or ont.endswith(':'):
ns = ont
else:
ns = ont + '/'
NS = Namespace(ns)
cs = NS[lname]
rdf.add((cs, RDF.type, SKOS.ConceptScheme))
if ont is not None:
rdf.remove((ont, RDF.type, OWL.Ontology))
# remove owl:imports declarations
for o in rdf.objects(ont, OWL.imports):
rdf.remove((ont, OWL.imports, o))
# remove protege specific properties
for p, o in rdf.predicate_objects(ont):
prot = URIRef(
'http://protege.stanford.edu/plugins/owl/protege#')
if p.startswith(prot):
rdf.remove((ont, p, o))
# move remaining properties (dc:title etc.) of the owl:Ontology into
# the skos:ConceptScheme
replace_uri(rdf, ont, cs)
return cs | Create a skos:ConceptScheme in the model and return it. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L133-L181 |
NatLibFi/Skosify | skosify/skosify.py | initialize_concept_scheme | def initialize_concept_scheme(rdf, cs, label, language, set_modified):
"""Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp."""
# check whether the concept scheme is unlabeled, and label it if possible
labels = list(rdf.objects(cs, RDFS.label)) + \
list(rdf.objects(cs, SKOS.prefLabel))
if len(labels) == 0:
if not label:
logging.warning(
"Concept scheme has no label(s). "
"Use --label option to set the concept scheme label.")
else:
logging.info(
"Unlabeled concept scheme detected. Setting label to '%s'" %
label)
rdf.add((cs, RDFS.label, Literal(label, language)))
if set_modified:
curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
rdf.remove((cs, DCTERMS.modified, None))
rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime))) | python | def initialize_concept_scheme(rdf, cs, label, language, set_modified):
"""Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp."""
# check whether the concept scheme is unlabeled, and label it if possible
labels = list(rdf.objects(cs, RDFS.label)) + \
list(rdf.objects(cs, SKOS.prefLabel))
if len(labels) == 0:
if not label:
logging.warning(
"Concept scheme has no label(s). "
"Use --label option to set the concept scheme label.")
else:
logging.info(
"Unlabeled concept scheme detected. Setting label to '%s'" %
label)
rdf.add((cs, RDFS.label, Literal(label, language)))
if set_modified:
curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
rdf.remove((cs, DCTERMS.modified, None))
rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime))) | Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L184-L206 |
NatLibFi/Skosify | skosify/skosify.py | transform_sparql_update | def transform_sparql_update(rdf, update_query):
"""Perform a SPARQL Update transformation on the RDF data."""
logging.debug("performing SPARQL Update transformation")
if update_query[0] == '@': # actual query should be read from file
update_query = file(update_query[1:]).read()
logging.debug("update query: %s", update_query)
rdf.update(update_query) | python | def transform_sparql_update(rdf, update_query):
"""Perform a SPARQL Update transformation on the RDF data."""
logging.debug("performing SPARQL Update transformation")
if update_query[0] == '@': # actual query should be read from file
update_query = file(update_query[1:]).read()
logging.debug("update query: %s", update_query)
rdf.update(update_query) | Perform a SPARQL Update transformation on the RDF data. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L209-L218 |
NatLibFi/Skosify | skosify/skosify.py | transform_sparql_construct | def transform_sparql_construct(rdf, construct_query):
"""Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph."""
logging.debug("performing SPARQL CONSTRUCT transformation")
if construct_query[0] == '@': # actual query should be read from file
construct_query = file(construct_query[1:]).read()
logging.debug("CONSTRUCT query: %s", construct_query)
newgraph = Graph()
for triple in rdf.query(construct_query):
newgraph.add(triple)
return newgraph | python | def transform_sparql_construct(rdf, construct_query):
"""Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph."""
logging.debug("performing SPARQL CONSTRUCT transformation")
if construct_query[0] == '@': # actual query should be read from file
construct_query = file(construct_query[1:]).read()
logging.debug("CONSTRUCT query: %s", construct_query)
newgraph = Graph()
for triple in rdf.query(construct_query):
newgraph.add(triple)
return newgraph | Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L221-L235 |
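A sketch of passing a CONSTRUCT query as a string (input.ttl is a hypothetical file). Note that the '@filename' form in both SPARQL helpers above relies on the Python 2 file() builtin, so under Python 3 the query text has to be passed directly:

from rdflib import Graph
from skosify.skosify import transform_sparql_construct

g = Graph()
g.parse("input.ttl", format="turtle")

query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
CONSTRUCT { ?c a skos:Concept ; skos:prefLabel ?lbl }
WHERE { ?c a skos:Concept ; skos:prefLabel ?lbl }
"""
newgraph = transform_sparql_construct(g, query)
print(len(newgraph))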
NatLibFi/Skosify | skosify/skosify.py | transform_concepts | def transform_concepts(rdf, typemap):
"""Transform Concepts into new types, as defined by the config file."""
# find out all the types used in the model
types = set()
for s, o in rdf.subject_objects(RDF.type):
if o not in typemap and in_general_ns(o):
continue
types.add(o)
for t in types:
if mapping_match(t, typemap):
newval = mapping_get(t, typemap)
newuris = [v[0] for v in newval]
logging.debug("transform class %s -> %s", t, str(newuris))
if newuris[0] is None: # delete all instances
for inst in rdf.subjects(RDF.type, t):
delete_uri(rdf, inst)
delete_uri(rdf, t)
else:
replace_object(rdf, t, newuris, predicate=RDF.type)
else:
logging.info("Don't know what to do with type %s", t) | python | def transform_concepts(rdf, typemap):
"""Transform Concepts into new types, as defined by the config file."""
# find out all the types used in the model
types = set()
for s, o in rdf.subject_objects(RDF.type):
if o not in typemap and in_general_ns(o):
continue
types.add(o)
for t in types:
if mapping_match(t, typemap):
newval = mapping_get(t, typemap)
newuris = [v[0] for v in newval]
logging.debug("transform class %s -> %s", t, str(newuris))
if newuris[0] is None: # delete all instances
for inst in rdf.subjects(RDF.type, t):
delete_uri(rdf, inst)
delete_uri(rdf, t)
else:
replace_object(rdf, t, newuris, predicate=RDF.type)
else:
logging.info("Don't know what to do with type %s", t) | Transform Concepts into new types, as defined by the config file. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L238-L260 |
NatLibFi/Skosify | skosify/skosify.py | transform_literals | def transform_literals(rdf, literalmap):
"""Transform literal properties of Concepts, as defined by config file."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, Literal) \
and (p in literalmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, literalmap):
newval = mapping_get(p, literalmap)
newuris = [v[0] for v in newval]
logging.debug("transform literal %s -> %s", p, str(newuris))
replace_predicate(
rdf, p, newuris, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with literal %s", p) | python | def transform_literals(rdf, literalmap):
"""Transform literal properties of Concepts, as defined by config file."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, Literal) \
and (p in literalmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, literalmap):
newval = mapping_get(p, literalmap)
newuris = [v[0] for v in newval]
logging.debug("transform literal %s -> %s", p, str(newuris))
replace_predicate(
rdf, p, newuris, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with literal %s", p) | Transform literal properties of Concepts, as defined by config file. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L263-L285 |
NatLibFi/Skosify | skosify/skosify.py | transform_relations | def transform_relations(rdf, relationmap):
"""Transform YSO-style concept relations into SKOS equivalents."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, (URIRef, BNode)) \
and (p in relationmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, relationmap):
newval = mapping_get(p, relationmap)
logging.debug("transform relation %s -> %s", p, str(newval))
replace_predicate(
rdf, p, newval, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with relation %s", p) | python | def transform_relations(rdf, relationmap):
"""Transform YSO-style concept relations into SKOS equivalents."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, (URIRef, BNode)) \
and (p in relationmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, relationmap):
newval = mapping_get(p, relationmap)
logging.debug("transform relation %s -> %s", p, str(newval))
replace_predicate(
rdf, p, newval, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with relation %s", p) | Transform YSO-style concept relations into SKOS equivalents. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L288-L309 |
NatLibFi/Skosify | skosify/skosify.py | transform_aggregate_concepts | def transform_aggregate_concepts(rdf, cs, relationmap, aggregates):
"""Transform YSO-style AggregateConcepts into skos:Concepts within their
own skos:ConceptScheme, linked to the regular concepts with
SKOS.narrowMatch relationships. If aggregates is False, remove
all aggregate concepts instead."""
if not aggregates:
logging.debug("removing aggregate concepts")
aggregate_concepts = []
relation = relationmap.get(
OWL.equivalentClass, [(OWL.equivalentClass, False)])[0][0]
for conc, eq in rdf.subject_objects(relation):
eql = rdf.value(eq, OWL.unionOf, None)
if eql is None:
continue
if aggregates:
aggregate_concepts.append(conc)
for item in rdf.items(eql):
rdf.add((conc, SKOS.narrowMatch, item))
# remove the old equivalentClass-unionOf-rdf:List structure
rdf.remove((conc, relation, eq))
rdf.remove((eq, RDF.type, OWL.Class))
rdf.remove((eq, OWL.unionOf, eql))
# remove the rdf:List structure
delete_uri(rdf, eql)
if not aggregates:
delete_uri(rdf, conc)
if len(aggregate_concepts) > 0:
ns = cs.replace(localname(cs), '')
acs = create_concept_scheme(rdf, ns, 'aggregateconceptscheme')
logging.debug("creating aggregate concept scheme %s", acs)
for conc in aggregate_concepts:
rdf.add((conc, SKOS.inScheme, acs)) | python | def transform_aggregate_concepts(rdf, cs, relationmap, aggregates):
"""Transform YSO-style AggregateConcepts into skos:Concepts within their
own skos:ConceptScheme, linked to the regular concepts with
SKOS.narrowMatch relationships. If aggregates is False, remove
all aggregate concepts instead."""
if not aggregates:
logging.debug("removing aggregate concepts")
aggregate_concepts = []
relation = relationmap.get(
OWL.equivalentClass, [(OWL.equivalentClass, False)])[0][0]
for conc, eq in rdf.subject_objects(relation):
eql = rdf.value(eq, OWL.unionOf, None)
if eql is None:
continue
if aggregates:
aggregate_concepts.append(conc)
for item in rdf.items(eql):
rdf.add((conc, SKOS.narrowMatch, item))
# remove the old equivalentClass-unionOf-rdf:List structure
rdf.remove((conc, relation, eq))
rdf.remove((eq, RDF.type, OWL.Class))
rdf.remove((eq, OWL.unionOf, eql))
# remove the rdf:List structure
delete_uri(rdf, eql)
if not aggregates:
delete_uri(rdf, conc)
if len(aggregate_concepts) > 0:
ns = cs.replace(localname(cs), '')
acs = create_concept_scheme(rdf, ns, 'aggregateconceptscheme')
logging.debug("creating aggregate concept scheme %s", acs)
for conc in aggregate_concepts:
rdf.add((conc, SKOS.inScheme, acs)) | Transform YSO-style AggregateConcepts into skos:Concepts within their
own skos:ConceptScheme, linked to the regular concepts with
SKOS.narrowMatch relationships. If aggregates is False, remove
all aggregate concepts instead. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L413-L448 |
NatLibFi/Skosify | skosify/skosify.py | transform_deprecated_concepts | def transform_deprecated_concepts(rdf, cs):
"""Transform deprecated concepts so they are in their own concept
scheme."""
deprecated_concepts = []
for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept):
rdf.add((conc, RDF.type, SKOS.Concept))
rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean)))
deprecated_concepts.append(conc)
if len(deprecated_concepts) > 0:
ns = cs.replace(localname(cs), '')
dcs = create_concept_scheme(
rdf, ns, 'deprecatedconceptscheme')
logging.debug("creating deprecated concept scheme %s", dcs)
for conc in deprecated_concepts:
rdf.add((conc, SKOS.inScheme, dcs)) | python | def transform_deprecated_concepts(rdf, cs):
"""Transform deprecated concepts so they are in their own concept
scheme."""
deprecated_concepts = []
for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept):
rdf.add((conc, RDF.type, SKOS.Concept))
rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean)))
deprecated_concepts.append(conc)
if len(deprecated_concepts) > 0:
ns = cs.replace(localname(cs), '')
dcs = create_concept_scheme(
rdf, ns, 'deprecatedconceptscheme')
logging.debug("creating deprecated concept scheme %s", dcs)
for conc in deprecated_concepts:
rdf.add((conc, SKOS.inScheme, dcs)) | Transform deprecated concepts so they are in their own concept
scheme. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L451-L468 |
NatLibFi/Skosify | skosify/skosify.py | enrich_relations | def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
"""Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model.
"""
# 1. first enrich mapping relationships (because they affect regular ones)
if enrich_mappings:
infer.skos_symmetric_mappings(rdf)
infer.skos_hierarchical_mappings(rdf, use_narrower)
# 2. then enrich regular relationships
# related <-> related
infer.skos_related(rdf)
# broaderGeneric -> broader + inverse narrowerGeneric
for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
rdf.add((s, SKOS.broader, o))
# broaderPartitive -> broader + inverse narrowerPartitive
for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
rdf.add((s, SKOS.broader, o))
infer.skos_hierarchical(rdf, use_narrower)
# transitive closure: broaderTransitive and narrowerTransitive
if use_transitive:
infer.skos_transitive(rdf, use_narrower)
else:
# transitive relationships are not wanted, so remove them
for s, o in rdf.subject_objects(SKOS.broaderTransitive):
rdf.remove((s, SKOS.broaderTransitive, o))
for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
rdf.remove((s, SKOS.narrowerTransitive, o))
infer.skos_topConcept(rdf) | python | def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
"""Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model.
"""
# 1. first enrich mapping relationships (because they affect regular ones)
if enrich_mappings:
infer.skos_symmetric_mappings(rdf)
infer.skos_hierarchical_mappings(rdf, use_narrower)
# 2. then enrich regular relationships
# related <-> related
infer.skos_related(rdf)
# broaderGeneric -> broader + inverse narrowerGeneric
for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
rdf.add((s, SKOS.broader, o))
# broaderPartitive -> broader + inverse narrowerPartitive
for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
rdf.add((s, SKOS.broader, o))
infer.skos_hierarchical(rdf, use_narrower)
# transitive closure: broaderTransitive and narrowerTransitive
if use_transitive:
infer.skos_transitive(rdf, use_narrower)
else:
# transitive relationships are not wanted, so remove them
for s, o in rdf.subject_objects(SKOS.broaderTransitive):
rdf.remove((s, SKOS.broaderTransitive, o))
for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
rdf.remove((s, SKOS.narrowerTransitive, o))
infer.skos_topConcept(rdf) | Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L499-L543 |
NatLibFi/Skosify | skosify/skosify.py | setup_top_concepts | def setup_top_concepts(rdf, mark_top_concepts):
"""Determine the top concepts of each concept scheme and mark them using
hasTopConcept/topConceptOf."""
for cs in sorted(rdf.subjects(RDF.type, SKOS.ConceptScheme)):
for conc in sorted(rdf.subjects(SKOS.inScheme, cs)):
if (conc, RDF.type, SKOS.Concept) not in rdf:
continue # not a Concept, so can't be a top concept
# check whether it's a top concept
broader = rdf.value(conc, SKOS.broader, None, any=True)
if broader is None: # yes it is a top concept!
if (cs, SKOS.hasTopConcept, conc) not in rdf and \
(conc, SKOS.topConceptOf, cs) not in rdf:
if mark_top_concepts:
logging.info(
"Marking loose concept %s "
"as top concept of scheme %s", conc, cs)
rdf.add((cs, SKOS.hasTopConcept, conc))
rdf.add((conc, SKOS.topConceptOf, cs))
else:
logging.debug(
"Not marking loose concept %s as top concept "
"of scheme %s, as mark_top_concepts is disabled",
conc, cs) | python | def setup_top_concepts(rdf, mark_top_concepts):
"""Determine the top concepts of each concept scheme and mark them using
hasTopConcept/topConceptOf."""
for cs in sorted(rdf.subjects(RDF.type, SKOS.ConceptScheme)):
for conc in sorted(rdf.subjects(SKOS.inScheme, cs)):
if (conc, RDF.type, SKOS.Concept) not in rdf:
continue # not a Concept, so can't be a top concept
# check whether it's a top concept
broader = rdf.value(conc, SKOS.broader, None, any=True)
if broader is None: # yes it is a top concept!
if (cs, SKOS.hasTopConcept, conc) not in rdf and \
(conc, SKOS.topConceptOf, cs) not in rdf:
if mark_top_concepts:
logging.info(
"Marking loose concept %s "
"as top concept of scheme %s", conc, cs)
rdf.add((cs, SKOS.hasTopConcept, conc))
rdf.add((conc, SKOS.topConceptOf, cs))
else:
logging.debug(
"Not marking loose concept %s as top concept "
"of scheme %s, as mark_top_concepts is disabled",
conc, cs) | Determine the top concepts of each concept scheme and mark them using
hasTopConcept/topConceptOf. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L546-L569 |
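A sketch of the marking behaviour: a concept in a scheme with no skos:broader link is treated as a top concept when mark_top_concepts is enabled:

from rdflib import Graph, Namespace
from rdflib.namespace import RDF, SKOS
from skosify.skosify import setup_top_concepts

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.scheme, RDF.type, SKOS.ConceptScheme))
g.add((EX.top, RDF.type, SKOS.Concept))
g.add((EX.top, SKOS.inScheme, EX.scheme))

setup_top_concepts(g, mark_top_concepts=True)
print((EX.scheme, SKOS.hasTopConcept, EX.top) in g)  # True
print((EX.top, SKOS.topConceptOf, EX.scheme) in g)   # True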
NatLibFi/Skosify | skosify/skosify.py | setup_concept_scheme | def setup_concept_scheme(rdf, defaultcs):
"""Make sure all concepts have an inScheme property, using the given
default concept scheme if necessary."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
# check concept scheme
cs = rdf.value(conc, SKOS.inScheme, None, any=True)
if cs is None: # need to set inScheme
rdf.add((conc, SKOS.inScheme, defaultcs)) | python | def setup_concept_scheme(rdf, defaultcs):
"""Make sure all concepts have an inScheme property, using the given
default concept scheme if necessary."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
# check concept scheme
cs = rdf.value(conc, SKOS.inScheme, None, any=True)
if cs is None: # need to set inScheme
rdf.add((conc, SKOS.inScheme, defaultcs)) | Make sure all concepts have an inScheme property, using the given
default concept scheme if necessary. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L572-L579 |
NatLibFi/Skosify | skosify/skosify.py | cleanup_classes | def cleanup_classes(rdf):
"""Remove unnecessary class definitions: definitions of SKOS classes or
unused classes. If a class is also a skos:Concept or skos:Collection,
remove the 'classness' of it but leave the Concept/Collection."""
for t in (OWL.Class, RDFS.Class):
for cl in rdf.subjects(RDF.type, t):
# SKOS classes may be safely removed
if cl.startswith(SKOS):
logging.debug("removing SKOS class definition: %s", cl)
replace_subject(rdf, cl, None)
continue
# if there are instances of the class, keep the class def
if rdf.value(None, RDF.type, cl, any=True) is not None:
continue
# if the class is used in a domain/range/equivalentClass
# definition, keep the class def
if rdf.value(None, RDFS.domain, cl, any=True) is not None:
continue
if rdf.value(None, RDFS.range, cl, any=True) is not None:
continue
if rdf.value(None, OWL.equivalentClass, cl, any=True) is not None:
continue
# if the class is also a skos:Concept or skos:Collection, only
# remove its rdf:type
if (cl, RDF.type, SKOS.Concept) in rdf \
or (cl, RDF.type, SKOS.Collection) in rdf:
logging.debug("removing classiness of %s", cl)
rdf.remove((cl, RDF.type, t))
else: # remove it completely
logging.debug("removing unused class definition: %s", cl)
replace_subject(rdf, cl, None) | python | def cleanup_classes(rdf):
"""Remove unnecessary class definitions: definitions of SKOS classes or
unused classes. If a class is also a skos:Concept or skos:Collection,
remove the 'classness' of it but leave the Concept/Collection."""
for t in (OWL.Class, RDFS.Class):
for cl in rdf.subjects(RDF.type, t):
# SKOS classes may be safely removed
if cl.startswith(SKOS):
logging.debug("removing SKOS class definition: %s", cl)
replace_subject(rdf, cl, None)
continue
# if there are instances of the class, keep the class def
if rdf.value(None, RDF.type, cl, any=True) is not None:
continue
# if the class is used in a domain/range/equivalentClass
# definition, keep the class def
if rdf.value(None, RDFS.domain, cl, any=True) is not None:
continue
if rdf.value(None, RDFS.range, cl, any=True) is not None:
continue
if rdf.value(None, OWL.equivalentClass, cl, any=True) is not None:
continue
# if the class is also a skos:Concept or skos:Collection, only
# remove its rdf:type
if (cl, RDF.type, SKOS.Concept) in rdf \
or (cl, RDF.type, SKOS.Collection) in rdf:
logging.debug("removing classiness of %s", cl)
rdf.remove((cl, RDF.type, t))
else: # remove it completely
logging.debug("removing unused class definition: %s", cl)
replace_subject(rdf, cl, None) | Remove unnecessary class definitions: definitions of SKOS classes or
unused classes. If a class is also a skos:Concept or skos:Collection,
remove the 'classness' of it but leave the Concept/Collection. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L582-L613 |
NatLibFi/Skosify | skosify/skosify.py | cleanup_properties | def cleanup_properties(rdf):
"""Remove unnecessary property definitions.
Removes SKOS and DC property definitions and definitions of unused
properties."""
for t in (RDF.Property, OWL.DatatypeProperty, OWL.ObjectProperty,
OWL.SymmetricProperty, OWL.TransitiveProperty,
OWL.InverseFunctionalProperty, OWL.FunctionalProperty):
for prop in rdf.subjects(RDF.type, t):
if prop.startswith(SKOS):
logging.debug(
"removing SKOS property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
if prop.startswith(DC):
logging.debug("removing DC property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
# if there are triples using the property, keep the property def
if len(list(rdf.subject_objects(prop))) > 0:
continue
logging.debug("removing unused property definition: %s", prop)
replace_subject(rdf, prop, None) | python | def cleanup_properties(rdf):
"""Remove unnecessary property definitions.
Removes SKOS and DC property definitions and definitions of unused
properties."""
for t in (RDF.Property, OWL.DatatypeProperty, OWL.ObjectProperty,
OWL.SymmetricProperty, OWL.TransitiveProperty,
OWL.InverseFunctionalProperty, OWL.FunctionalProperty):
for prop in rdf.subjects(RDF.type, t):
if prop.startswith(SKOS):
logging.debug(
"removing SKOS property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
if prop.startswith(DC):
logging.debug("removing DC property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
# if there are triples using the property, keep the property def
if len(list(rdf.subject_objects(prop))) > 0:
continue
logging.debug("removing unused property definition: %s", prop)
replace_subject(rdf, prop, None) | Remove unnecessary property definitions.
Removes SKOS and DC property definitions and definitions of unused
properties. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L616-L640 |
NatLibFi/Skosify | skosify/skosify.py | find_reachable | def find_reachable(rdf, res):
"""Return the set of reachable resources starting from the given resource,
following triples through their subject, predicate and object positions.
"""
starttime = time.time()
# This is almost a non-recursive breadth-first search algorithm, but a set
# is used as the "open" set instead of a FIFO, and an arbitrary element of
# the set is searched. This is slightly faster than DFS (using a stack)
# and much faster than BFS (using a FIFO).
seen = set() # used as the "closed" set
to_search = set([res]) # used as the "open" set
while len(to_search) > 0:
res = to_search.pop()
if res in seen:
continue
seen.add(res)
# res as subject
for p, o in rdf.predicate_objects(res):
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as predicate
for s, o in rdf.subject_objects(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as object
for s, p in rdf.subject_predicates(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
endtime = time.time()
logging.debug("find_reachable took %f seconds", (endtime - starttime))
return seen | python | def find_reachable(rdf, res):
"""Return the set of reachable resources starting from the given resource,
following triples through their subject, predicate and object positions.
"""
starttime = time.time()
# This is almost a non-recursive breadth-first search algorithm, but a set
# is used as the "open" set instead of a FIFO, and an arbitrary element of
# the set is searched. This is slightly faster than DFS (using a stack)
# and much faster than BFS (using a FIFO).
seen = set() # used as the "closed" set
to_search = set([res]) # used as the "open" set
while len(to_search) > 0:
res = to_search.pop()
if res in seen:
continue
seen.add(res)
# res as subject
for p, o in rdf.predicate_objects(res):
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as predicate
for s, o in rdf.subject_objects(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as object
for s, p in rdf.subject_predicates(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
endtime = time.time()
logging.debug("find_reachable took %f seconds", (endtime - starttime))
return seen | Return the set of reachable resources starting from the given resource,
following triples through their subject, predicate and object positions. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L643-L688 |
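A small sketch of the traversal (imports assume the module path above). Starting from skos:Concept itself, as cleanup_unreachable below does, everything connected to it through any subject, predicate or object position is collected:

from rdflib import Graph, Namespace
from rdflib.namespace import RDF, SKOS
from skosify.skosify import find_reachable

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.c1, RDF.type, SKOS.Concept))
g.add((EX.c1, SKOS.related, EX.c2))   # c2 is reachable through c1
g.add((EX.orphan, EX.p, EX.other))    # not connected to any concept

reachable = find_reachable(g, SKOS.Concept)
print(EX.c2 in reachable)      # True
print(EX.orphan in reachable)  # False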
NatLibFi/Skosify | skosify/skosify.py | cleanup_unreachable | def cleanup_unreachable(rdf):
"""Remove triples which cannot be reached from the concepts by graph
traversal."""
all_subjects = set(rdf.subjects())
logging.debug("total subject resources: %d", len(all_subjects))
reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable
logging.debug("deleting %s non-reachable resources", len(nonreachable))
for subj in nonreachable:
delete_uri(rdf, subj) | python | def cleanup_unreachable(rdf):
"""Remove triples which cannot be reached from the concepts by graph
traversal."""
all_subjects = set(rdf.subjects())
logging.debug("total subject resources: %d", len(all_subjects))
reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable
logging.debug("deleting %s non-reachable resources", len(nonreachable))
for subj in nonreachable:
delete_uri(rdf, subj) | Remove triples which cannot be reached from the concepts by graph
traversal. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L691-L705 |
NatLibFi/Skosify | skosify/skosify.py | check_hierarchy | def check_hierarchy(rdf, break_cycles, keep_related, mark_top_concepts,
eliminate_redundancy):
"""Check for, and optionally fix, problems in the skos:broader hierarchy
using a recursive depth first search algorithm.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool break_cycles: Break detected cycles in the broader hierarchy.
:param bool keep_related: If False, remove skos:related relations that overlap with
skos:broaderTransitive; if True, keep them.
:param bool mark_top_concepts: Mark loose concepts as top concepts of their scheme.
:param bool eliminate_redundancy: Remove skos:broader between two concepts otherwise
connected by skos:broaderTransitive.
"""
starttime = time.time()
if check.hierarchy_cycles(rdf, break_cycles):
logging.info(
"Some concepts not reached in initial cycle detection. "
"Re-checking for loose concepts.")
setup_top_concepts(rdf, mark_top_concepts)
check.disjoint_relations(rdf, not keep_related)
check.hierarchical_redundancy(rdf, eliminate_redundancy)
endtime = time.time()
logging.debug("check_hierarchy took %f seconds", (endtime - starttime)) | python | def check_hierarchy(rdf, break_cycles, keep_related, mark_top_concepts,
eliminate_redundancy):
"""Check for, and optionally fix, problems in the skos:broader hierarchy
using a recursive depth first search algorithm.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool break_cycles: Break detected cycles in the broader hierarchy.
:param bool keep_related: If False, remove skos:related relations that overlap with
skos:broaderTransitive; if True, keep them.
:param bool mark_top_concepts: Mark loose concepts as top concepts of their scheme.
:param bool eliminate_redundancy: Remove skos:broader between two concepts otherwise
connected by skos:broaderTransitive.
"""
starttime = time.time()
if check.hierarchy_cycles(rdf, break_cycles):
logging.info(
"Some concepts not reached in initial cycle detection. "
"Re-checking for loose concepts.")
setup_top_concepts(rdf, mark_top_concepts)
check.disjoint_relations(rdf, not keep_related)
check.hierarchical_redundancy(rdf, eliminate_redundancy)
endtime = time.time()
logging.debug("check_hierarchy took %f seconds", (endtime - starttime)) | Check for, and optionally fix, problems in the skos:broader hierarchy
using a recursive depth first search algorithm.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool break_cycles: Break detected cycles in the broader hierarchy.
:param bool keep_related: If False, remove skos:related relations that overlap with
skos:broaderTransitive; if True, keep them.
:param bool mark_top_concepts: Mark loose concepts as top concepts of their scheme.
:param bool eliminate_redundancy: Remove skos:broader between two concepts otherwise
connected by skos:broaderTransitive. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L767-L791 |
NatLibFi/Skosify | skosify/skosify.py | skosify | def skosify(*sources, **config):
"""Convert, extend, and check SKOS vocabulary."""
cfg = Config()
for key in config:
if hasattr(cfg, key):
setattr(cfg, key, config[key])
config = cfg
namespaces = config.namespaces
typemap = config.types
literalmap = config.literals
relationmap = config.relations
logging.debug("Skosify starting. $Revision$")
starttime = time.time()
logging.debug("Phase 1: Parsing input files")
try:
voc = read_rdf(sources, config.from_format)
except:
logging.critical("Parsing failed. Exception: %s",
str(sys.exc_info()[1]))
sys.exit(1)
inputtime = time.time()
logging.debug("Phase 2: Performing inferences")
if config.update_query is not None:
transform_sparql_update(voc, config.update_query)
if config.construct_query is not None:
voc = transform_sparql_construct(voc, config.construct_query)
if config.infer:
logging.debug("doing RDFS subclass and properties inference")
infer.rdfs_classes(voc)
infer.rdfs_properties(voc)
logging.debug("Phase 3: Setting up namespaces")
for prefix, uri in namespaces.items():
voc.namespace_manager.bind(prefix, uri)
logging.debug("Phase 4: Transforming concepts, literals and relations")
# transform concepts, literals and concept relations
transform_concepts(voc, typemap)
transform_literals(voc, literalmap)
transform_relations(voc, relationmap)
# special transforms for labels: whitespace, prefLabel vs altLabel
transform_labels(voc, config.default_language)
# special transforms for collections + aggregate and deprecated concepts
transform_collections(voc)
# find/create concept scheme
cs = get_concept_scheme(voc)
if not cs:
cs = create_concept_scheme(voc, config.namespace)
initialize_concept_scheme(voc, cs, label=config.label,
language=config.default_language,
set_modified=config.set_modified)
transform_aggregate_concepts(
voc, cs, relationmap, config.aggregates)
transform_deprecated_concepts(voc, cs)
logging.debug("Phase 5: Performing SKOS enrichments")
# enrichments: broader <-> narrower, related <-> related
enrich_relations(voc, config.enrich_mappings,
config.narrower, config.transitive)
logging.debug("Phase 6: Cleaning up")
# clean up unused/unnecessary class/property definitions and unreachable
# triples
if config.cleanup_properties:
cleanup_properties(voc)
if config.cleanup_classes:
cleanup_classes(voc)
if config.cleanup_unreachable:
cleanup_unreachable(voc)
logging.debug("Phase 7: Setting up concept schemes and top concepts")
# setup inScheme and hasTopConcept
setup_concept_scheme(voc, cs)
setup_top_concepts(voc, config.mark_top_concepts)
logging.debug("Phase 8: Checking concept hierarchy")
# check hierarchy for cycles
check_hierarchy(voc, config.break_cycles,
config.keep_related, config.mark_top_concepts,
config.eliminate_redundancy)
logging.debug("Phase 9: Checking labels")
# check for duplicate labels
check_labels(voc, config.preflabel_policy)
processtime = time.time()
logging.debug("reading input file took %d seconds",
(inputtime - starttime))
logging.debug("processing took %d seconds",
(processtime - inputtime))
logging.debug("Phase 10: Writing output")
return voc | python | def skosify(*sources, **config):
"""Convert, extend, and check SKOS vocabulary."""
cfg = Config()
for key in config:
if hasattr(cfg, key):
setattr(cfg, key, config[key])
config = cfg
namespaces = config.namespaces
typemap = config.types
literalmap = config.literals
relationmap = config.relations
logging.debug("Skosify starting. $Revision$")
starttime = time.time()
logging.debug("Phase 1: Parsing input files")
try:
voc = read_rdf(sources, config.from_format)
except:
logging.critical("Parsing failed. Exception: %s",
str(sys.exc_info()[1]))
sys.exit(1)
inputtime = time.time()
logging.debug("Phase 2: Performing inferences")
if config.update_query is not None:
transform_sparql_update(voc, config.update_query)
if config.construct_query is not None:
voc = transform_sparql_construct(voc, config.construct_query)
if config.infer:
logging.debug("doing RDFS subclass and properties inference")
infer.rdfs_classes(voc)
infer.rdfs_properties(voc)
logging.debug("Phase 3: Setting up namespaces")
for prefix, uri in namespaces.items():
voc.namespace_manager.bind(prefix, uri)
logging.debug("Phase 4: Transforming concepts, literals and relations")
# transform concepts, literals and concept relations
transform_concepts(voc, typemap)
transform_literals(voc, literalmap)
transform_relations(voc, relationmap)
# special transforms for labels: whitespace, prefLabel vs altLabel
transform_labels(voc, config.default_language)
# special transforms for collections + aggregate and deprecated concepts
transform_collections(voc)
# find/create concept scheme
cs = get_concept_scheme(voc)
if not cs:
cs = create_concept_scheme(voc, config.namespace)
initialize_concept_scheme(voc, cs, label=config.label,
language=config.default_language,
set_modified=config.set_modified)
transform_aggregate_concepts(
voc, cs, relationmap, config.aggregates)
transform_deprecated_concepts(voc, cs)
logging.debug("Phase 5: Performing SKOS enrichments")
# enrichments: broader <-> narrower, related <-> related
enrich_relations(voc, config.enrich_mappings,
config.narrower, config.transitive)
logging.debug("Phase 6: Cleaning up")
# clean up unused/unnecessary class/property definitions and unreachable
# triples
if config.cleanup_properties:
cleanup_properties(voc)
if config.cleanup_classes:
cleanup_classes(voc)
if config.cleanup_unreachable:
cleanup_unreachable(voc)
logging.debug("Phase 7: Setting up concept schemes and top concepts")
# setup inScheme and hasTopConcept
setup_concept_scheme(voc, cs)
setup_top_concepts(voc, config.mark_top_concepts)
logging.debug("Phase 8: Checking concept hierarchy")
# check hierarchy for cycles
check_hierarchy(voc, config.break_cycles,
config.keep_related, config.mark_top_concepts,
config.eliminate_redundancy)
logging.debug("Phase 9: Checking labels")
# check for duplicate labels
check_labels(voc, config.preflabel_policy)
processtime = time.time()
logging.debug("reading input file took %d seconds",
(inputtime - starttime))
logging.debug("processing took %d seconds",
(processtime - inputtime))
logging.debug("Phase 10: Writing output")
return voc | Convert, extend, and check SKOS vocabulary. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L794-L898 |
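This function is what the skosify package exposes as its main entry point; a typical call looks roughly like the following (file names are hypothetical, and the keyword arguments map onto the Config attributes referenced above):

import skosify

voc = skosify.skosify("input-vocabulary.ttl",
                      label="Example Vocabulary",
                      default_language="en",
                      mark_top_concepts=True)
voc.serialize(destination="output-skos.ttl", format="turtle")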
NatLibFi/Skosify | skosify/config.py | expand_curielike | def expand_curielike(namespaces, curie):
"""Expand a CURIE (or a CURIE-like string with a period instead of colon
as separator) into URIRef. If the provided curie is not a CURIE, return it
unchanged."""
if curie == '':
return None
if sys.version < '3' and not isinstance(curie, type(u'')):
# Python 2 ConfigParser gives raw byte strings
curie = curie.decode('UTF-8') # ...make those into Unicode objects
if curie.startswith('[') and curie.endswith(']'):
# decode SafeCURIE
curie = curie[1:-1]
if ':' in curie:
ns, localpart = curie.split(':', 1)
elif '.' in curie:
ns, localpart = curie.split('.', 1)
else:
return curie
if ns in namespaces:
return URIRef(namespaces[ns].term(localpart))
else:
logging.warning("Unknown namespace prefix %s", ns)
return URIRef(curie) | python | def expand_curielike(namespaces, curie):
"""Expand a CURIE (or a CURIE-like string with a period instead of colon
as separator) into URIRef. If the provided curie is not a CURIE, return it
unchanged."""
if curie == '':
return None
if sys.version < '3' and not isinstance(curie, type(u'')):
# Python 2 ConfigParser gives raw byte strings
curie = curie.decode('UTF-8') # ...make those into Unicode objects
if curie.startswith('[') and curie.endswith(']'):
# decode SafeCURIE
curie = curie[1:-1]
if ':' in curie:
ns, localpart = curie.split(':', 1)
elif '.' in curie:
ns, localpart = curie.split('.', 1)
else:
return curie
if ns in namespaces:
return URIRef(namespaces[ns].term(localpart))
else:
logging.warning("Unknown namespace prefix %s", ns)
return URIRef(curie) | Expand a CURIE (or a CURIE-like string with a period instead of colon
as separator) into URIRef. If the provided curie is not a CURIE, return it
unchanged. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/config.py#L147-L173 |
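A sketch of the expansion, using a hypothetical namespaces dict of rdflib Namespace objects keyed by prefix, as the configuration parser builds it:

from rdflib import Namespace
from skosify.config import expand_curielike

namespaces = {"skos": Namespace("http://www.w3.org/2004/02/skos/core#")}

print(expand_curielike(namespaces, "skos:prefLabel"))
# http://www.w3.org/2004/02/skos/core#prefLabel
print(expand_curielike(namespaces, "skos.prefLabel"))  # period separator works too
print(expand_curielike(namespaces, "plainvalue"))      # not a CURIE, returned unchanged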
NatLibFi/Skosify | skosify/config.py | expand_mapping_target | def expand_mapping_target(namespaces, val):
"""Expand a mapping target, expressed as a comma-separated list of
CURIE-like strings potentially prefixed with ^ to express inverse
properties, into a list of (uri, inverse) tuples, where uri is a URIRef
and inverse is a boolean."""
vals = [v.strip() for v in val.split(',')]
ret = []
for v in vals:
inverse = False
if v.startswith('^'):
inverse = True
v = v[1:]
ret.append((expand_curielike(namespaces, v), inverse))
return ret | python | def expand_mapping_target(namespaces, val):
"""Expand a mapping target, expressed as a comma-separated list of
CURIE-like strings potentially prefixed with ^ to express inverse
properties, into a list of (uri, inverse) tuples, where uri is a URIRef
and inverse is a boolean."""
vals = [v.strip() for v in val.split(',')]
ret = []
for v in vals:
inverse = False
if v.startswith('^'):
inverse = True
v = v[1:]
ret.append((expand_curielike(namespaces, v), inverse))
return ret | Expand a mapping target, expressed as a comma-separated list of
CURIE-like strings potentially prefixed with ^ to express inverse
properties, into a list of (uri, inverse) tuples, where uri is a URIRef
and inverse is a boolean. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/config.py#L176-L190 |
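Building on the previous helper: a comma-separated target such as "skos:narrower, ^skos:broader" expands into (URIRef, inverse) tuples, with the caret marking an inverse property:

from rdflib import Namespace
from skosify.config import expand_mapping_target

namespaces = {"skos": Namespace("http://www.w3.org/2004/02/skos/core#")}

targets = expand_mapping_target(namespaces, "skos:narrower, ^skos:broader")
for uri, inverse in targets:
    print(uri, inverse)
# http://www.w3.org/2004/02/skos/core#narrower False
# http://www.w3.org/2004/02/skos/core#broader True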
NatLibFi/Skosify | skosify/config.py | Config.read_file | def read_file(self, cfgparser, file):
"""Read configuration from file."""
if hasattr(file, 'readline'):
# we have a file object
if sys.version_info >= (3, 2):
cfgparser.read_file(file) # Added in Python 3.2
else:
cfgparser.readfp(file) # Deprecated since Python 3.2
else:
# we have a file name
cfgparser.read(file) | python | def read_file(self, cfgparser, file):
"""Read configuration from file."""
if hasattr(file, 'readline'):
# we have a file object
if sys.version_info >= (3, 2):
cfgparser.read_file(file) # Added in Python 3.2
else:
cfgparser.readfp(file) # Deprecated since Python 3.2
else:
# we have a file name
cfgparser.read(file) | Read configuration from file. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/config.py#L101-L113 |
NatLibFi/Skosify | skosify/rdftools/modify.py | replace_subject | def replace_subject(rdf, fromuri, touri):
"""Replace occurrences of fromuri as subject with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRefs, all values will be inserted.
"""
if fromuri == touri:
return
for p, o in rdf.predicate_objects(fromuri):
rdf.remove((fromuri, p, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((uri, p, o)) | python | def replace_subject(rdf, fromuri, touri):
"""Replace occurrences of fromuri as subject with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRefs, all values will be inserted.
"""
if fromuri == touri:
return
for p, o in rdf.predicate_objects(fromuri):
rdf.remove((fromuri, p, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((uri, p, o)) | Replace occurrences of fromuri as subject with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRefs, all values will be inserted. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/modify.py#L6-L21 |
NatLibFi/Skosify | skosify/rdftools/modify.py | replace_predicate | def replace_predicate(rdf, fromuri, touri, subjecttypes=None, inverse=False):
"""Replace occurrences of fromuri as predicate with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted. If
touri is a list of (URIRef, boolean) tuples, the boolean value will be
used to determine whether an inverse property is created (if True) or
not (if False). If a subjecttypes sequence is given, modify only those
triples where the subject is one of the provided types.
"""
if fromuri == touri:
return
for s, o in rdf.subject_objects(fromuri):
if subjecttypes is not None:
typeok = False
for t in subjecttypes:
if (s, RDF.type, t) in rdf:
typeok = True
if not typeok:
continue
rdf.remove((s, fromuri, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for val in touri:
if not isinstance(val, tuple):
val = (val, False)
uri, inverse = val
if uri is None:
continue
if inverse:
rdf.add((o, uri, s))
else:
rdf.add((s, uri, o)) | python | def replace_predicate(rdf, fromuri, touri, subjecttypes=None, inverse=False):
"""Replace occurrences of fromuri as predicate with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted. If
touri is a list of (URIRef, boolean) tuples, the boolean value will be
used to determine whether an inverse property is created (if True) or
not (if False). If a subjecttypes sequence is given, modify only those
triples where the subject is one of the provided types.
"""
if fromuri == touri:
return
for s, o in rdf.subject_objects(fromuri):
if subjecttypes is not None:
typeok = False
for t in subjecttypes:
if (s, RDF.type, t) in rdf:
typeok = True
if not typeok:
continue
rdf.remove((s, fromuri, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for val in touri:
if not isinstance(val, tuple):
val = (val, False)
uri, inverse = val
if uri is None:
continue
if inverse:
rdf.add((o, uri, s))
else:
rdf.add((s, uri, o)) | Replace occurrences of fromuri as predicate with touri in the given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted. If
touri is a list of (URIRef, boolean) tuples, the boolean value will be
used to determine whether an inverse property is created (if True) or
not (if False). If a subjecttypes sequence is given, modify only those
triples where the subject is one of the provided types. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/modify.py#L24-L59 |
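A minimal usage sketch for replace_predicate above, assuming an rdflib Graph; the EX namespace, the triple being rewritten, and the skosify.rdftools.modify import path (derived from the file path shown) are illustrative assumptions, not part of the record.

from rdflib import Graph, Namespace
from rdflib.namespace import SKOS
from skosify.rdftools.modify import replace_predicate  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.a, EX.oldProp, EX.b))  # hypothetical triple to rewrite

# Rewrite ex:oldProp as skos:broader, and additionally add the inverse skos:narrower.
replace_predicate(g, EX.oldProp, [(SKOS.broader, False), (SKOS.narrower, True)])

assert (EX.a, SKOS.broader, EX.b) in g
assert (EX.b, SKOS.narrower, EX.a) in g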
NatLibFi/Skosify | skosify/rdftools/modify.py | replace_object | def replace_object(rdf, fromuri, touri, predicate=None):
"""Replace all occurrences of fromuri as object with touri in the given
model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted.
If predicate is given, modify only triples with the given predicate.
"""
if fromuri == touri:
return
for s, p in rdf.subject_predicates(fromuri):
if predicate is not None and p != predicate:
continue
rdf.remove((s, p, fromuri))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((s, p, uri)) | python | def replace_object(rdf, fromuri, touri, predicate=None):
"""Replace all occurrences of fromuri as object with touri in the given
model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted.
If predicate is given, modify only triples with the given predicate.
"""
if fromuri == touri:
return
for s, p in rdf.subject_predicates(fromuri):
if predicate is not None and p != predicate:
continue
rdf.remove((s, p, fromuri))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((s, p, uri)) | Replace all occurrences of fromuri as object with touri in the given
model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted.
If predicate is given, modify only triples with the given predicate. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/modify.py#L62-L81 |
NatLibFi/Skosify | skosify/rdftools/modify.py | replace_uri | def replace_uri(rdf, fromuri, touri):
"""Replace all occurrences of fromuri with touri in the given model.
If touri is a list or tuple of URIRef, all values will be inserted.
If touri=None, will delete all occurrences of fromuri instead.
"""
replace_subject(rdf, fromuri, touri)
replace_predicate(rdf, fromuri, touri)
replace_object(rdf, fromuri, touri) | python | def replace_uri(rdf, fromuri, touri):
"""Replace all occurrences of fromuri with touri in the given model.
If touri is a list or tuple of URIRef, all values will be inserted.
If touri=None, will delete all occurrences of fromuri instead.
"""
replace_subject(rdf, fromuri, touri)
replace_predicate(rdf, fromuri, touri)
replace_object(rdf, fromuri, touri) | Replace all occurrences of fromuri with touri in the given model.
If touri is a list or tuple of URIRef, all values will be inserted.
If touri=None, will delete all occurrences of fromuri instead. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/modify.py#L84-L93 |
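A short sketch of replace_uri renaming one URI across subject, predicate and object positions; the namespace and triples are illustrative assumptions.

from rdflib import Graph, Namespace
from skosify.rdftools.modify import replace_uri  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.old, EX.prop, EX.other))
g.add((EX.other, EX.prop, EX.old))

replace_uri(g, EX.old, EX.new)  # every occurrence of ex:old becomes ex:new
assert (EX.new, EX.prop, EX.other) in g
assert (EX.other, EX.prop, EX.new) in g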
NatLibFi/Skosify | skosify/check.py | hierarchy_cycles | def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts | python | def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts | Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/check.py#L37-L58 |
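A hedged sketch of running the cycle check on a tiny graph; the Turtle data is an assumption, and which skos:broader link fix=True removes depends on traversal order.

from rdflib import Graph
from skosify.check import hierarchy_cycles  # assumed import path

data = '''
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix ex: <http://example.org/> .
ex:a a skos:Concept ; skos:broader ex:b .
ex:b a skos:Concept ; skos:broader ex:a .
'''
g = Graph().parse(data=data, format='turtle')
hierarchy_cycles(g, fix=True)  # intended to drop one skos:broader link and break the cycle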
NatLibFi/Skosify | skosify/check.py | disjoint_relations | def disjoint_relations(rdf, fix=False):
"""Check if the graph contains concepts connected by both of the semantically
disjoint properties skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
"""
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
if fix:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"removing skos:related",
conc1, conc2)
rdf.remove((conc1, SKOS.related, conc2))
rdf.remove((conc2, SKOS.related, conc1))
else:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"but keeping it because keep_related is enabled",
conc1, conc2) | python | def disjoint_relations(rdf, fix=False):
"""Check if the graph contains concepts connected by both of the semantically
disjoint properties skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
"""
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
if fix:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"removing skos:related",
conc1, conc2)
rdf.remove((conc1, SKOS.related, conc2))
rdf.remove((conc2, SKOS.related, conc1))
else:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"but keeping it because keep_related is enabled",
conc1, conc2) | Check if the graph contains concepts connected by both of the semantically
disjoint properties skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/check.py#L61-L85 |
NatLibFi/Skosify | skosify/check.py | hierarchical_redundancy | def hierarchical_redundancy(rdf, fix=False):
"""Check for and optionally remove extraneous skos:broader relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:broader relations between
concepts that are otherwise connected by skos:broaderTransitive.
"""
for conc, parent1 in rdf.subject_objects(SKOS.broader):
for parent2 in rdf.objects(conc, SKOS.broader):
if parent1 == parent2:
continue # must be different
if parent2 in rdf.transitive_objects(parent1, SKOS.broader):
if fix:
logging.warning(
"Eliminating redundant hierarchical relationship: "
"%s skos:broader %s",
conc, parent2)
rdf.remove((conc, SKOS.broader, parent2))
rdf.remove((conc, SKOS.broaderTransitive, parent2))
rdf.remove((parent2, SKOS.narrower, conc))
rdf.remove((parent2, SKOS.narrowerTransitive, conc))
else:
logging.warning(
"Redundant hierarchical relationship "
"%s skos:broader %s found, but not eliminated "
"because eliminate_redundancy is not set",
conc, parent2) | python | def hierarchical_redundancy(rdf, fix=False):
"""Check for and optionally remove extraneous skos:broader relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:broader relations between
concepts that are otherwise connected by skos:broaderTransitive.
"""
for conc, parent1 in rdf.subject_objects(SKOS.broader):
for parent2 in rdf.objects(conc, SKOS.broader):
if parent1 == parent2:
continue # must be different
if parent2 in rdf.transitive_objects(parent1, SKOS.broader):
if fix:
logging.warning(
"Eliminating redundant hierarchical relationship: "
"%s skos:broader %s",
conc, parent2)
rdf.remove((conc, SKOS.broader, parent2))
rdf.remove((conc, SKOS.broaderTransitive, parent2))
rdf.remove((parent2, SKOS.narrower, conc))
rdf.remove((parent2, SKOS.narrowerTransitive, conc))
else:
logging.warning(
"Redundant hierarchical relationship "
"%s skos:broader %s found, but not eliminated "
"because eliminate_redundancy is not set",
conc, parent2) | Check for and optionally remove extraneous skos:broader relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:broader relations between
concepts that are otherwise connected by skos:broaderTransitive. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/check.py#L88-L114 |
NatLibFi/Skosify | skosify/rdftools/access.py | find_prop_overlap | def find_prop_overlap(rdf, prop1, prop2):
"""Generate (subject,object) pairs connected by two properties."""
for s, o in sorted(rdf.subject_objects(prop1)):
if (s, prop2, o) in rdf:
yield (s, o) | python | def find_prop_overlap(rdf, prop1, prop2):
"""Generate (subject,object) pairs connected by two properties."""
for s, o in sorted(rdf.subject_objects(prop1)):
if (s, prop2, o) in rdf:
yield (s, o) | Generate (subject,object) pairs connected by two properties. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/rdftools/access.py#L10-L14 |
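A small sketch listing pairs stated with both properties via find_prop_overlap; the graph contents are assumptions.

from rdflib import Graph, Namespace
from rdflib.namespace import SKOS
from skosify.rdftools.access import find_prop_overlap  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.a, SKOS.related, EX.b))
g.add((EX.a, SKOS.broader, EX.b))

for s, o in find_prop_overlap(g, SKOS.related, SKOS.broader):
    print(s, o)  # prints the single overlapping pair ex:a ex:b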
NatLibFi/Skosify | skosify/infer.py | skos_related | def skos_related(rdf):
"""Make sure that skos:related is stated in both directions (S23)."""
for s, o in rdf.subject_objects(SKOS.related):
rdf.add((o, SKOS.related, s)) | python | def skos_related(rdf):
"""Make sure that skos:related is stated in both directions (S23)."""
for s, o in rdf.subject_objects(SKOS.related):
rdf.add((o, SKOS.related, s)) | Make sure that skos:related is stated in both directions (S23). | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L10-L13 |
NatLibFi/Skosify | skosify/infer.py | skos_topConcept | def skos_topConcept(rdf):
"""Infer skos:topConceptOf/skos:hasTopConcept (S8) and skos:inScheme (S7)."""
for s, o in rdf.subject_objects(SKOS.hasTopConcept):
rdf.add((o, SKOS.topConceptOf, s))
for s, o in rdf.subject_objects(SKOS.topConceptOf):
rdf.add((o, SKOS.hasTopConcept, s))
for s, o in rdf.subject_objects(SKOS.topConceptOf):
rdf.add((s, SKOS.inScheme, o)) | python | def skos_topConcept(rdf):
"""Infer skos:topConceptOf/skos:hasTopConcept (S8) and skos:inScheme (S7)."""
for s, o in rdf.subject_objects(SKOS.hasTopConcept):
rdf.add((o, SKOS.topConceptOf, s))
for s, o in rdf.subject_objects(SKOS.topConceptOf):
rdf.add((o, SKOS.hasTopConcept, s))
for s, o in rdf.subject_objects(SKOS.topConceptOf):
rdf.add((s, SKOS.inScheme, o)) | Infer skos:topConceptOf/skos:hasTopConcept (S8) and skos:inScheme (S7). | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L16-L23 |
NatLibFi/Skosify | skosify/infer.py | skos_hierarchical | def skos_hierarchical(rdf, narrower=True):
"""Infer skos:broader/skos:narrower (S25) but only keep skos:narrower on request.
:param bool narrower: If set to False, skos:narrower will not be added,
but rather removed.
"""
if narrower:
for s, o in rdf.subject_objects(SKOS.broader):
rdf.add((o, SKOS.narrower, s))
for s, o in rdf.subject_objects(SKOS.narrower):
rdf.add((o, SKOS.broader, s))
if not narrower:
rdf.remove((s, SKOS.narrower, o)) | python | def skos_hierarchical(rdf, narrower=True):
"""Infer skos:broader/skos:narrower (S25) but only keep skos:narrower on request.
:param bool narrower: If set to False, skos:narrower will not be added,
but rather removed.
"""
if narrower:
for s, o in rdf.subject_objects(SKOS.broader):
rdf.add((o, SKOS.narrower, s))
for s, o in rdf.subject_objects(SKOS.narrower):
rdf.add((o, SKOS.broader, s))
if not narrower:
rdf.remove((s, SKOS.narrower, o)) | Infer skos:broader/skos:narrower (S25) but only keep skos:narrower on request.
:param bool narrower: If set to False, skos:narrower will not be added,
but rather removed. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L26-L38 |
NatLibFi/Skosify | skosify/infer.py | skos_transitive | def skos_transitive(rdf, narrower=True):
"""Perform transitive closure inference (S22, S24)."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
for bt in rdf.transitive_objects(conc, SKOS.broader):
if bt == conc:
continue
rdf.add((conc, SKOS.broaderTransitive, bt))
if narrower:
rdf.add((bt, SKOS.narrowerTransitive, conc)) | python | def skos_transitive(rdf, narrower=True):
"""Perform transitive closure inference (S22, S24)."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
for bt in rdf.transitive_objects(conc, SKOS.broader):
if bt == conc:
continue
rdf.add((conc, SKOS.broaderTransitive, bt))
if narrower:
rdf.add((bt, SKOS.narrowerTransitive, conc)) | Perform transitive closure inference (S22, S24). | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L41-L49 |
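A minimal sketch of skos_transitive on a three-concept chain; the data is an illustrative assumption.

from rdflib import Graph, Namespace
from rdflib.namespace import RDF, SKOS
from skosify.infer import skos_transitive  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
for c in (EX.a, EX.b, EX.c):
    g.add((c, RDF.type, SKOS.Concept))
g.add((EX.a, SKOS.broader, EX.b))
g.add((EX.b, SKOS.broader, EX.c))

skos_transitive(g)
assert (EX.a, SKOS.broaderTransitive, EX.c) in g   # closure over the chain
assert (EX.c, SKOS.narrowerTransitive, EX.a) in g  # inverse added because narrower=True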
NatLibFi/Skosify | skosify/infer.py | skos_symmetric_mappings | def skos_symmetric_mappings(rdf, related=True):
"""Ensure that the symmetric mapping properties (skos:relatedMatch,
skos:closeMatch and skos:exactMatch) are stated in both directions (S44).
:param bool related: Add the skos:related super-property for all
skos:relatedMatch relations (S41).
"""
for s, o in rdf.subject_objects(SKOS.relatedMatch):
rdf.add((o, SKOS.relatedMatch, s))
if related:
rdf.add((s, SKOS.related, o))
rdf.add((o, SKOS.related, s))
for s, o in rdf.subject_objects(SKOS.closeMatch):
rdf.add((o, SKOS.closeMatch, s))
for s, o in rdf.subject_objects(SKOS.exactMatch):
rdf.add((o, SKOS.exactMatch, s)) | python | def skos_symmetric_mappings(rdf, related=True):
"""Ensure that the symmetric mapping properties (skos:relatedMatch,
skos:closeMatch and skos:exactMatch) are stated in both directions (S44).
:param bool related: Add the skos:related super-property for all
skos:relatedMatch relations (S41).
"""
for s, o in rdf.subject_objects(SKOS.relatedMatch):
rdf.add((o, SKOS.relatedMatch, s))
if related:
rdf.add((s, SKOS.related, o))
rdf.add((o, SKOS.related, s))
for s, o in rdf.subject_objects(SKOS.closeMatch):
rdf.add((o, SKOS.closeMatch, s))
for s, o in rdf.subject_objects(SKOS.exactMatch):
rdf.add((o, SKOS.exactMatch, s)) | Ensure that the symmetric mapping properties (skos:relatedMatch,
skos:closeMatch and skos:exactMatch) are stated in both directions (S44).
:param bool related: Add the skos:related super-property for all
skos:relatedMatch relations (S41). | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L52-L69 |
NatLibFi/Skosify | skosify/infer.py | skos_hierarchical_mappings | def skos_hierarchical_mappings(rdf, narrower=True):
"""Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties
skos:broader/skos:narrower (S41).
:param bool narrower: If set to False, skos:narrowMatch will not be added,
but rather removed.
"""
for s, o in rdf.subject_objects(SKOS.broadMatch):
rdf.add((s, SKOS.broader, o))
if narrower:
rdf.add((o, SKOS.narrowMatch, s))
rdf.add((o, SKOS.narrower, s))
for s, o in rdf.subject_objects(SKOS.narrowMatch):
rdf.add((o, SKOS.broadMatch, s))
rdf.add((o, SKOS.broader, s))
if narrower:
rdf.add((s, SKOS.narrower, o))
else:
rdf.remove((s, SKOS.narrowMatch, o)) | python | def skos_hierarchical_mappings(rdf, narrower=True):
"""Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties
skos:broader/skos:narrower (S41).
:param bool narrower: If set to False, skos:narrowMatch will not be added,
but rather removed.
"""
for s, o in rdf.subject_objects(SKOS.broadMatch):
rdf.add((s, SKOS.broader, o))
if narrower:
rdf.add((o, SKOS.narrowMatch, s))
rdf.add((o, SKOS.narrower, s))
for s, o in rdf.subject_objects(SKOS.narrowMatch):
rdf.add((o, SKOS.broadMatch, s))
rdf.add((o, SKOS.broader, s))
if narrower:
rdf.add((s, SKOS.narrower, o))
else:
rdf.remove((s, SKOS.narrowMatch, o)) | Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties
skos:broader/skos:narrower (S41).
:param bool narrower: If set to False, skos:narrowMatch will not be added,
but rather removed. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L72-L91 |
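A small sketch of the mapping enrichment performed by skos_hierarchical_mappings; the two concepts are assumptions.

from rdflib import Graph, Namespace
from rdflib.namespace import SKOS
from skosify.infer import skos_hierarchical_mappings  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.local, SKOS.broadMatch, EX.remote))

skos_hierarchical_mappings(g)
assert (EX.local, SKOS.broader, EX.remote) in g      # super-property added
assert (EX.remote, SKOS.narrowMatch, EX.local) in g  # inverse mapping added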
NatLibFi/Skosify | skosify/infer.py | rdfs_classes | def rdfs_classes(rdf):
"""Perform RDFS subclass inference.
Mark all resources typed with a subclass also with the upper class."""
# find out the subclass mappings
upperclasses = {} # key: class val: set([superclass1, superclass2..])
for s, o in rdf.subject_objects(RDFS.subClassOf):
upperclasses.setdefault(s, set())
for uc in rdf.transitive_objects(s, RDFS.subClassOf):
if uc != s:
upperclasses[s].add(uc)
# set the superclass type information for subclass instances
for s, ucs in upperclasses.items():
logging.debug("setting superclass types: %s -> %s", s, str(ucs))
for res in rdf.subjects(RDF.type, s):
for uc in ucs:
rdf.add((res, RDF.type, uc)) | python | def rdfs_classes(rdf):
"""Perform RDFS subclass inference.
Mark all resources typed with a subclass also with the upper class."""
# find out the subclass mappings
upperclasses = {} # key: class val: set([superclass1, superclass2..])
for s, o in rdf.subject_objects(RDFS.subClassOf):
upperclasses.setdefault(s, set())
for uc in rdf.transitive_objects(s, RDFS.subClassOf):
if uc != s:
upperclasses[s].add(uc)
# set the superclass type information for subclass instances
for s, ucs in upperclasses.items():
logging.debug("setting superclass types: %s -> %s", s, str(ucs))
for res in rdf.subjects(RDF.type, s):
for uc in ucs:
rdf.add((res, RDF.type, uc)) | Perform RDFS subclass inference.
Mark all resources typed with a subclass also with the upper class. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L94-L112
NatLibFi/Skosify | skosify/infer.py | rdfs_properties | def rdfs_properties(rdf):
"""Perform RDFS subproperty inference.
Add superproperties where subproperties have been used."""
# find out the subproperty mappings
superprops = {} # key: property val: set([superprop1, superprop2..])
for s, o in rdf.subject_objects(RDFS.subPropertyOf):
superprops.setdefault(s, set())
for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
if sp != s:
superprops[s].add(sp)
# add the superproperty relationships
for p, sps in superprops.items():
logging.debug("setting superproperties: %s -> %s", p, str(sps))
for s, o in rdf.subject_objects(p):
for sp in sps:
rdf.add((s, sp, o)) | python | def rdfs_properties(rdf):
"""Perform RDFS subproperty inference.
Add superproperties where subproperties have been used."""
# find out the subproperty mappings
superprops = {} # key: property val: set([superprop1, superprop2..])
for s, o in rdf.subject_objects(RDFS.subPropertyOf):
superprops.setdefault(s, set())
for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
if sp != s:
superprops[s].add(sp)
# add the superproperty relationships
for p, sps in superprops.items():
logging.debug("setting superproperties: %s -> %s", p, str(sps))
for s, o in rdf.subject_objects(p):
for sp in sps:
rdf.add((s, sp, o)) | Perform RDFS subproperty inference.
Add superproperties where subproperties have been used. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L115-L133 |
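A sketch of the subproperty inference in rdfs_properties, using a hypothetical property hierarchy.

from rdflib import Graph, Namespace
from rdflib.namespace import RDFS
from skosify.infer import rdfs_properties  # assumed import path

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.hasPart, RDFS.subPropertyOf, EX.related))
g.add((EX.car, EX.hasPart, EX.wheel))

rdfs_properties(g)
assert (EX.car, EX.related, EX.wheel) in g  # superproperty statement added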
stephenmcd/django-overextends | overextends/templatetags/overextends_tags.py | overextends | def overextends(parser, token):
"""
Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, e.g. a template can both be overridden and
extended at once.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once "
"in the same template" % bits[0])
return OverExtendsNode(nodelist, parent_name, None) | python | def overextends(parser, token):
"""
Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, e.g. a template can both be overridden and
extended at once.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once "
"in the same template" % bits[0])
return OverExtendsNode(nodelist, parent_name, None) | Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, e.g. a template can both be overridden and
extended at once. | https://github.com/stephenmcd/django-overextends/blob/5907bc94debd58acbddcbbc542472b9451475431/overextends/templatetags/overextends_tags.py#L136-L150 |
stephenmcd/django-overextends | overextends/templatetags/overextends_tags.py | OverExtendsNode.find_template | def find_template(self, name, context, peeking=False):
"""
Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur.
"""
# These imports want settings, which aren't available when this
# module is imported to ``add_to_builtins``, so do them here.
from django.conf import settings
# Find the app_template_dirs (moved in Django 1.8)
import django.template.loaders.app_directories as app_directories
try:
# Django >= 1.8
get_app_template_dirs = app_directories.get_app_template_dirs
app_template_dirs = get_app_template_dirs('templates')
except AttributeError:
# Django <= 1.7
app_template_dirs = app_directories.app_template_dirs
# Find the find_template_loader function, and appropriate template
# settings (changed in Django 1.8)
try:
# Django >= 1.8
find_template_loader = context.template.engine.find_template_loader
template_dirs = context.template.engine.dirs
template_loaders = context.template.engine.loaders
except AttributeError:
# Django <= 1.7
from django.template.loader import find_template_loader
template_dirs = list(settings.TEMPLATE_DIRS)
template_loaders = settings.TEMPLATE_LOADERS
# Store a dictionary in the template context mapping template
# names to the lists of template directories available to
# search for that template. Each time a template is loaded, its
# origin directory is removed from its directories list.
context_name = "OVEREXTENDS_DIRS"
if context_name not in context:
context[context_name] = {}
if name not in context[context_name]:
all_dirs = template_dirs + list(app_template_dirs)
# os.path.abspath is needed under uWSGI, and also ensures we
# have consistent path separators across different OSes.
context[context_name][name] = list(map(os.path.abspath, all_dirs))
# Build a list of template loaders to use. For loaders that wrap
# other loaders like the ``cached`` template loader, unwind its
# internal loaders and add those instead.
loaders = []
for loader_name in template_loaders:
loader = find_template_loader(loader_name)
loaders.extend(getattr(loader, "loaders", [loader]))
# Go through the loaders and try to find the template. When
# found, remove its absolute path from the context dict so
# that it won't be used again when the same relative name/path
# is requested.
for loader in loaders:
dirs = context[context_name][name]
if not dirs:
break
try:
source, path = loader.load_template_source(name, dirs)
except TemplateDoesNotExist:
pass
else:
# Only remove the absolute path for the initial call in
# get_parent, and not when we're peeking during the
# second call.
if not peeking:
remove_path = os.path.abspath(path[:-len(name) - 1])
context[context_name][name].remove(remove_path)
return Template(source)
raise TemplateDoesNotExist(name) | python | def find_template(self, name, context, peeking=False):
"""
Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur.
"""
# These imports want settings, which aren't available when this
# module is imported to ``add_to_builtins``, so do them here.
from django.conf import settings
# Find the app_template_dirs (moved in Django 1.8)
import django.template.loaders.app_directories as app_directories
try:
# Django >= 1.8
get_app_template_dirs = app_directories.get_app_template_dirs
app_template_dirs = get_app_template_dirs('templates')
except AttributeError:
# Django <= 1.7
app_template_dirs = app_directories.app_template_dirs
# Find the find_template_loader function, and appropriate template
# settings (changed in Django 1.8)
try:
# Django >= 1.8
find_template_loader = context.template.engine.find_template_loader
template_dirs = context.template.engine.dirs
template_loaders = context.template.engine.loaders
except AttributeError:
# Django <= 1.7
from django.template.loader import find_template_loader
template_dirs = list(settings.TEMPLATE_DIRS)
template_loaders = settings.TEMPLATE_LOADERS
# Store a dictionary in the template context mapping template
# names to the lists of template directories available to
# search for that template. Each time a template is loaded, its
# origin directory is removed from its directories list.
context_name = "OVEREXTENDS_DIRS"
if context_name not in context:
context[context_name] = {}
if name not in context[context_name]:
all_dirs = template_dirs + list(app_template_dirs)
# os.path.abspath is needed under uWSGI, and also ensures we
# have consistent path separators across different OSes.
context[context_name][name] = list(map(os.path.abspath, all_dirs))
# Build a list of template loaders to use. For loaders that wrap
# other loaders like the ``cached`` template loader, unwind its
# internal loaders and add those instead.
loaders = []
for loader_name in template_loaders:
loader = find_template_loader(loader_name)
loaders.extend(getattr(loader, "loaders", [loader]))
# Go through the loaders and try to find the template. When
# found, remove its absolute path from the context dict so
# that it won't be used again when the same relative name/path
# is requested.
for loader in loaders:
dirs = context[context_name][name]
if not dirs:
break
try:
source, path = loader.load_template_source(name, dirs)
except TemplateDoesNotExist:
pass
else:
# Only remove the absolute path for the initial call in
# get_parent, and not when we're peeking during the
# second call.
if not peeking:
remove_path = os.path.abspath(path[:-len(name) - 1])
context[context_name][name].remove(remove_path)
return Template(source)
raise TemplateDoesNotExist(name) | Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur. | https://github.com/stephenmcd/django-overextends/blob/5907bc94debd58acbddcbbc542472b9451475431/overextends/templatetags/overextends_tags.py#L35-L112 |
stephenmcd/django-overextends | overextends/templatetags/overextends_tags.py | OverExtendsNode.get_parent | def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
for node in template.nodelist:
if (isinstance(node, ExtendsNode) and
node.parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template | python | def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
for node in template.nodelist:
if (isinstance(node, ExtendsNode) and
node.parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template | Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list. | https://github.com/stephenmcd/django-overextends/blob/5907bc94debd58acbddcbbc542472b9451475431/overextends/templatetags/overextends_tags.py#L114-L132 |
NatLibFi/Skosify | skosify/cli.py | get_option_parser | def get_option_parser(defaults):
"""Create and return an OptionParser with the given defaults."""
# based on recipe from:
# http://stackoverflow.com/questions/1880404/using-a-file-to-store-optparse-arguments
# process command line parameters
# e.g. skosify yso.owl -o yso-skos.rdf
usage = "Usage: %prog [options] voc1 [voc2 ...]"
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(**defaults)
parser.add_option('-c', '--config', type='string',
help='Read default options '
'and transformation definitions '
'from the given configuration file.')
parser.add_option('-o', '--output', type='string',
help='Output file name. Default is "-" (stdout).')
parser.add_option('-D', '--debug', action="store_true",
help='Show debug output.')
parser.add_option('-d', '--no-debug', dest="debug",
action="store_false", help='Hide debug output.')
parser.add_option('-O', '--log', type='string',
help='Log file name. Default is to use standard error.')
group = optparse.OptionGroup(parser, "Input and Output Options")
group.add_option('-f', '--from-format', type='string',
help='Input format. '
'Default is to detect format '
'based on file extension. '
'Possible values: xml, n3, turtle, nt...')
group.add_option('-F', '--to-format', type='string',
help='Output format. '
'Default is to detect format '
'based on file extension. '
'Possible values: xml, n3, turtle, nt...')
group.add_option('--update-query', type='string',
help='SPARQL update query. '
'This query is executed against the input '
'data before processing it. '
'The value can be either the actual query, '
'or "@filename".')
group.add_option('--construct-query', type='string',
help='SPARQL CONSTRUCT query. '
'This query is executed against the input '
'data and the result graph is used as the '
'actual input. '
'The value can be either the actual query, '
'or "@filename".')
group.add_option('-I', '--infer', action="store_true",
help='Perform RDFS subclass/subproperty inference '
'before transforming input.')
group.add_option('-i', '--no-infer', dest="infer", action="store_false",
help="Don't perform RDFS subclass/subproperty inference "
"before transforming input.")
parser.add_option_group(group)
group = optparse.OptionGroup(
parser, "Concept Scheme and Labelling Options")
group.add_option('-s', '--namespace', type='string',
help='Namespace of vocabulary '
'(usually optional; used to create a ConceptScheme)')
group.add_option('-L', '--label', type='string',
help='Label/title for the vocabulary '
'(usually optional; used to label a ConceptScheme)')
group.add_option('-l', '--default-language', type='string',
help='Language tag to set for labels '
'with no defined language.')
group.add_option('-p', '--preflabel-policy', type='string',
help='Policy for handling multiple prefLabels '
'with the same language tag. '
'Possible values: shortest, longest, all.')
group.add_option('--set-modified', dest="set_modified",
action="store_true",
help='Set modification date on the ConceptScheme')
group.add_option('--no-set-modified', dest="set_modified",
action="store_false",
help="Don't set modification date on the ConceptScheme")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Vocabulary Structure Options")
group.add_option('-E', '--mark-top-concepts', action="store_true",
help='Mark top-level concepts in the hierarchy '
'as top concepts (entry points).')
group.add_option('-e', '--no-mark-top-concepts',
dest="mark_top_concepts", action="store_false",
help="Don't mark top-level concepts in the hierarchy "
"as top concepts.")
group.add_option('-N', '--narrower', action="store_true",
help='Include narrower/narrowerGeneric/narrowerPartitive '
'relationships in the output vocabulary.')
group.add_option('-n', '--no-narrower',
dest="narrower", action="store_false",
help="Don't include "
"narrower/narrowerGeneric/narrowerPartitive "
"relationships in the output vocabulary.")
group.add_option('-T', '--transitive', action="store_true",
help='Include transitive hierarchy relationships '
'in the output vocabulary.')
group.add_option('-t', '--no-transitive',
dest="transitive", action="store_false",
help="Don't include transitive hierarchy relationships "
"in the output vocabulary.")
group.add_option('-M', '--enrich-mappings', action="store_true",
help='Perform SKOS enrichments on mapping relationships.')
group.add_option('-m', '--no-enrich-mappings', dest="enrich_mappings",
action="store_false",
help="Don't perform SKOS enrichments "
"on mapping relationships.")
group.add_option('-A', '--aggregates', action="store_true",
help='Keep AggregateConcepts completely '
'in the output vocabulary.')
group.add_option('-a', '--no-aggregates',
dest="aggregates", action="store_false",
help='Remove AggregateConcepts completely '
'from the output vocabulary.')
group.add_option('-R', '--keep-related', action="store_true",
help="Keep skos:related relationships "
"within the same hierarchy.")
group.add_option('-r', '--no-keep-related',
dest="keep_related", action="store_false",
help="Remove skos:related relationships "
"within the same hierarchy.")
group.add_option('-B', '--break-cycles', action="store_true",
help="Break any cycles in the skos:broader hierarchy.")
group.add_option('-b', '--no-break-cycles', dest="break_cycles",
action="store_false",
help="Don't break cycles in the skos:broader hierarchy.")
group.add_option('--eliminate-redundancy', action="store_true",
help="Eliminate hierarchical redundancy in the "
"skos:broader hierarchy.")
group.add_option('--no-eliminate-redundancy', dest="eliminate_redundancy",
action="store_false",
help="Don't eliminate hierarchical redundancy in the "
"skos:broader hierarchy.")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Cleanup Options")
group.add_option('--cleanup-classes', action="store_true",
help="Remove definitions of classes with no instances.")
group.add_option('--no-cleanup-classes', dest='cleanup_classes',
action="store_false",
help="Don't remove definitions "
"of classes with no instances.")
group.add_option('--cleanup-properties', action="store_true",
help="Remove definitions of properties "
"which have not been used.")
group.add_option('--no-cleanup-properties', action="store_false",
dest='cleanup_properties',
help="Don't remove definitions of properties "
"which have not been used.")
group.add_option('--cleanup-unreachable', action="store_true",
help="Remove triples which can not be reached "
"by a traversal from the main vocabulary graph.")
group.add_option('--no-cleanup-unreachable', action="store_false",
dest='cleanup_unreachable',
help="Don't remove triples which can not be reached "
"by a traversal from the main vocabulary graph.")
parser.add_option_group(group)
return parser | python | def get_option_parser(defaults):
"""Create and return an OptionParser with the given defaults."""
# based on recipe from:
# http://stackoverflow.com/questions/1880404/using-a-file-to-store-optparse-arguments
# process command line parameters
# e.g. skosify yso.owl -o yso-skos.rdf
usage = "Usage: %prog [options] voc1 [voc2 ...]"
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(**defaults)
parser.add_option('-c', '--config', type='string',
help='Read default options '
'and transformation definitions '
'from the given configuration file.')
parser.add_option('-o', '--output', type='string',
help='Output file name. Default is "-" (stdout).')
parser.add_option('-D', '--debug', action="store_true",
help='Show debug output.')
parser.add_option('-d', '--no-debug', dest="debug",
action="store_false", help='Hide debug output.')
parser.add_option('-O', '--log', type='string',
help='Log file name. Default is to use standard error.')
group = optparse.OptionGroup(parser, "Input and Output Options")
group.add_option('-f', '--from-format', type='string',
help='Input format. '
'Default is to detect format '
'based on file extension. '
'Possible values: xml, n3, turtle, nt...')
group.add_option('-F', '--to-format', type='string',
help='Output format. '
'Default is to detect format '
'based on file extension. '
'Possible values: xml, n3, turtle, nt...')
group.add_option('--update-query', type='string',
help='SPARQL update query. '
'This query is executed against the input '
'data before processing it. '
'The value can be either the actual query, '
'or "@filename".')
group.add_option('--construct-query', type='string',
help='SPARQL CONSTRUCT query. '
'This query is executed against the input '
'data and the result graph is used as the '
'actual input. '
'The value can be either the actual query, '
'or "@filename".')
group.add_option('-I', '--infer', action="store_true",
help='Perform RDFS subclass/subproperty inference '
'before transforming input.')
group.add_option('-i', '--no-infer', dest="infer", action="store_false",
help="Don't perform RDFS subclass/subproperty inference "
"before transforming input.")
parser.add_option_group(group)
group = optparse.OptionGroup(
parser, "Concept Scheme and Labelling Options")
group.add_option('-s', '--namespace', type='string',
help='Namespace of vocabulary '
'(usually optional; used to create a ConceptScheme)')
group.add_option('-L', '--label', type='string',
help='Label/title for the vocabulary '
'(usually optional; used to label a ConceptScheme)')
group.add_option('-l', '--default-language', type='string',
help='Language tag to set for labels '
'with no defined language.')
group.add_option('-p', '--preflabel-policy', type='string',
help='Policy for handling multiple prefLabels '
'with the same language tag. '
'Possible values: shortest, longest, all.')
group.add_option('--set-modified', dest="set_modified",
action="store_true",
help='Set modification date on the ConceptScheme')
group.add_option('--no-set-modified', dest="set_modified",
action="store_false",
help="Don't set modification date on the ConceptScheme")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Vocabulary Structure Options")
group.add_option('-E', '--mark-top-concepts', action="store_true",
help='Mark top-level concepts in the hierarchy '
'as top concepts (entry points).')
group.add_option('-e', '--no-mark-top-concepts',
dest="mark_top_concepts", action="store_false",
help="Don't mark top-level concepts in the hierarchy "
"as top concepts.")
group.add_option('-N', '--narrower', action="store_true",
help='Include narrower/narrowerGeneric/narrowerPartitive '
'relationships in the output vocabulary.')
group.add_option('-n', '--no-narrower',
dest="narrower", action="store_false",
help="Don't include "
"narrower/narrowerGeneric/narrowerPartitive "
"relationships in the output vocabulary.")
group.add_option('-T', '--transitive', action="store_true",
help='Include transitive hierarchy relationships '
'in the output vocabulary.')
group.add_option('-t', '--no-transitive',
dest="transitive", action="store_false",
help="Don't include transitive hierarchy relationships "
"in the output vocabulary.")
group.add_option('-M', '--enrich-mappings', action="store_true",
help='Perform SKOS enrichments on mapping relationships.')
group.add_option('-m', '--no-enrich-mappings', dest="enrich_mappings",
action="store_false",
help="Don't perform SKOS enrichments "
"on mapping relationships.")
group.add_option('-A', '--aggregates', action="store_true",
help='Keep AggregateConcepts completely '
'in the output vocabulary.')
group.add_option('-a', '--no-aggregates',
dest="aggregates", action="store_false",
help='Remove AggregateConcepts completely '
'from the output vocabulary.')
group.add_option('-R', '--keep-related', action="store_true",
help="Keep skos:related relationships "
"within the same hierarchy.")
group.add_option('-r', '--no-keep-related',
dest="keep_related", action="store_false",
help="Remove skos:related relationships "
"within the same hierarchy.")
group.add_option('-B', '--break-cycles', action="store_true",
help="Break any cycles in the skos:broader hierarchy.")
group.add_option('-b', '--no-break-cycles', dest="break_cycles",
action="store_false",
help="Don't break cycles in the skos:broader hierarchy.")
group.add_option('--eliminate-redundancy', action="store_true",
help="Eliminate hierarchical redundancy in the "
"skos:broader hierarchy.")
group.add_option('--no-eliminate-redundancy', dest="eliminate_redundancy",
action="store_false",
help="Don't eliminate hierarchical redundancy in the "
"skos:broader hierarchy.")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Cleanup Options")
group.add_option('--cleanup-classes', action="store_true",
help="Remove definitions of classes with no instances.")
group.add_option('--no-cleanup-classes', dest='cleanup_classes',
action="store_false",
help="Don't remove definitions "
"of classes with no instances.")
group.add_option('--cleanup-properties', action="store_true",
help="Remove definitions of properties "
"which have not been used.")
group.add_option('--no-cleanup-properties', action="store_false",
dest='cleanup_properties',
help="Don't remove definitions of properties "
"which have not been used.")
group.add_option('--cleanup-unreachable', action="store_true",
help="Remove triples which can not be reached "
"by a traversal from the main vocabulary graph.")
group.add_option('--no-cleanup-unreachable', action="store_false",
dest='cleanup_unreachable',
help="Don't remove triples which can not be reached "
"by a traversal from the main vocabulary graph.")
parser.add_option_group(group)
return parser | Create and return an OptionParser with the given defaults. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/cli.py#L15-L173 |
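A hedged sketch of driving get_option_parser programmatically; the defaults dict is a made-up subset of the real Config defaults, and the file names are illustrative only.

from skosify.cli import get_option_parser  # assumed import path

defaults = {'output': '-', 'debug': False}  # hypothetical subset of Config defaults
parser = get_option_parser(defaults)
options, args = parser.parse_args(['-o', 'yso-skos.ttl', '--no-narrower', 'yso.owl'])
print(options.output)    # 'yso-skos.ttl'
print(options.narrower)  # False, because --no-narrower stores False
print(args)              # ['yso.owl']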
NatLibFi/Skosify | skosify/cli.py | main | def main():
"""Read command line parameters and make a transform based on them."""
config = Config()
# additional options for command line client only
defaults = vars(config)
defaults['to_format'] = None
defaults['output'] = '-'
defaults['log'] = None
defaults['debug'] = False
options, remainingArgs = get_option_parser(defaults).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
# configure logging, messages to stderr by default
logformat = '%(levelname)s: %(message)s'
loglevel = logging.INFO
if options.debug:
loglevel = logging.DEBUG
if options.log:
logging.basicConfig(filename=options.log,
format=logformat, level=loglevel)
else:
logging.basicConfig(format=logformat, level=loglevel)
output = options.output
to_format = options.to_format
# read config file as defaults and override from command line arguments
if options.config is not None:
config.read_and_parse_config_file(options.config)
options, remainingArgs = get_option_parser(vars(config)).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
if remainingArgs:
inputfiles = remainingArgs
else:
inputfiles = ['-']
voc = skosify(*inputfiles, **vars(config))
write_rdf(voc, output, to_format) | python | def main():
"""Read command line parameters and make a transform based on them."""
config = Config()
# additional options for command line client only
defaults = vars(config)
defaults['to_format'] = None
defaults['output'] = '-'
defaults['log'] = None
defaults['debug'] = False
options, remainingArgs = get_option_parser(defaults).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
# configure logging, messages to stderr by default
logformat = '%(levelname)s: %(message)s'
loglevel = logging.INFO
if options.debug:
loglevel = logging.DEBUG
if options.log:
logging.basicConfig(filename=options.log,
format=logformat, level=loglevel)
else:
logging.basicConfig(format=logformat, level=loglevel)
output = options.output
to_format = options.to_format
# read config file as defaults and override from command line arguments
if options.config is not None:
config.read_and_parse_config_file(options.config)
options, remainingArgs = get_option_parser(vars(config)).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
if remainingArgs:
inputfiles = remainingArgs
else:
inputfiles = ['-']
voc = skosify(*inputfiles, **vars(config))
write_rdf(voc, output, to_format) | Read command line parameters and make a transform based on them. | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/cli.py#L176-L221 |
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.set_handler | def set_handler(self, handler):
"""
Connect with a coroutine, which is scheduled when the connection is made.
This function will create a task, and when the connection is closed,
the task will be canceled.
:param handler:
:return: None
"""
if self._handler:
raise Exception('Handler was already set')
if handler:
self._handler = async_task(handler, loop=self._loop) | python | def set_handler(self, handler):
"""
Connect with a coroutine, which is scheduled when the connection is made.
This function will create a task, and when the connection is closed,
the task will be canceled.
:param handler:
:return: None
"""
if self._handler:
raise Exception('Handler was already set')
if handler:
self._handler = async_task(handler, loop=self._loop) | Connect with a coroutine, which is scheduled when the connection is made.
This function will create a task, and when the connection is closed,
the task will be canceled.
:param handler:
:return: None | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L371-L383 |
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.start_request | def start_request(self, headers, *, end_stream=False):
"""
Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
The completion of the call to this method does not mean the request is
successfully delivered - data is only correctly stored in a buffer to
be sent. There's no guarantee it is truly delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication.
"""
yield from _wait_for_events(self._resumed, self._stream_creatable)
stream_id = self._conn.get_next_available_stream_id()
self._priority.insert_stream(stream_id)
self._priority.block(stream_id)
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
return stream_id | python | def start_request(self, headers, *, end_stream=False):
"""
Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
The completion of the call to this method does not mean the request is
successfully delivered - data is only correctly stored in a buffer to
be sent. There's no guarantee it is truly delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication.
"""
yield from _wait_for_events(self._resumed, self._stream_creatable)
stream_id = self._conn.get_next_available_stream_id()
self._priority.insert_stream(stream_id)
self._priority.block(stream_id)
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
return stream_id | Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
The completion of the call to this method does not mean the request is
successfully delivered - data is only correctly stored in a buffer to
be sent. There's no guarantee it is truly delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L389-L413
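A hedged client-side sketch of the request flow around start_request; aioh2.open_connection, recv_response and read_stream are assumed helper names from the library's typical client usage, and the host, port and headers are illustrative.

import asyncio
import aioh2

async def fetch():
    # open_connection is assumed to return a connected client H2Protocol.
    client = await aioh2.open_connection('example.com', 443)
    stream_id = await client.start_request([
        (':method', 'GET'), (':scheme', 'https'),
        (':authority', 'example.com'), (':path', '/'),
    ])
    await client.send_data(stream_id, b'', end_stream=True)  # empty body, close our side
    headers = await client.recv_response(stream_id)          # assumed receive helper
    body = await client.read_stream(stream_id, -1)           # assumed: -1 reads to end of stream
    return headers, body

# asyncio.run(fetch())  # would perform the request against a real HTTP/2 server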
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.start_response | def start_response(self, stream_id, headers, *, end_stream=False):
"""
Start a response by sending given headers on the given stream.
This may block until the underlying transport becomes writable.
:param stream_id: Which stream to send response on.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a response without body, set `end_stream` to
`True` (default `False`).
"""
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush() | python | def start_response(self, stream_id, headers, *, end_stream=False):
"""
Start a response by sending given headers on the given stream.
This may block until the underlying transport becomes writable.
:param stream_id: Which stream to send response on.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a response without body, set `end_stream` to
`True` (default `False`).
"""
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush() | Start a response by sending given headers on the given stream.
This may block until the underlying transport becomes writable.
:param stream_id: Which stream to send response on.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a response without body, set `end_stream` to
`True` (default `False`). | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L416-L429 |
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.send_data | def send_data(self, stream_id, data, *, end_stream=False):
"""
Send request or response body on the given stream.
This will block until either whole data is sent, or the stream gets
closed. Meanwhile, a paused underlying transport or a closed flow
control window will also cause this call to wait. If the peer increases the
flow control window, this method will start sending automatically.
This can be called multiple times, but it must be called after a
`start_request` or `start_response` with the returning stream ID, and
before any `end_stream` instructions; otherwise it will fail.
The given data may be automatically split into smaller frames in order
to fit in the configured frame size or flow control window.
Each stream can only have one `send_data` running; other callers
will be blocked on a per-stream lock (wlock), so that coroutines
sending data concurrently won't interfere with each other.
Similarly, the completion of the call to this method does not mean the
data is delivered.
:param stream_id: Which stream to send data on
:param data: Bytes to send
:param end_stream: To finish sending a request or response, set this to
`True` to close the given stream locally after data
is sent (default `False`).
:raise: `SendException` if there is an error sending data. Data left
unsent can be found in `data` of the exception.
"""
try:
with (yield from self._get_stream(stream_id).wlock):
while True:
yield from _wait_for_events(
self._resumed, self._get_stream(stream_id).window_open)
self._priority.unblock(stream_id)
waiter = asyncio.Future()
if not self._priority_events:
self._loop.call_soon(self._priority_step)
self._priority_events[stream_id] = waiter
try:
yield from waiter
data_size = len(data)
size = min(
data_size,
self._conn.local_flow_control_window(stream_id),
self._conn.max_outbound_frame_size)
if data_size == 0 or size == data_size:
self._conn.send_data(stream_id, data,
end_stream=end_stream)
self._flush()
break
elif size > 0:
self._conn.send_data(stream_id, data[:size])
data = data[size:]
self._flush()
finally:
self._priority_events.pop(stream_id, None)
self._priority.block(stream_id)
if self._priority_events:
self._loop.call_soon(self._priority_step)
except ProtocolError:
raise exceptions.SendException(data) | python | def send_data(self, stream_id, data, *, end_stream=False):
"""
Send request or response body on the given stream.
This will block until either whole data is sent, or the stream gets
closed. Meanwhile, a paused underlying transport or a closed flow
control window will also cause this call to wait. If the peer increases the
flow control window, this method will start sending automatically.
This can be called multiple times, but it must be called after a
`start_request` or `start_response` with the returning stream ID, and
before any `end_stream` instructions; otherwise it will fail.
The given data may be automatically split into smaller frames in order
to fit in the configured frame size or flow control window.
Each stream can only have one `send_data` running; other callers
will be blocked on a per-stream lock (wlock), so that coroutines
sending data concurrently won't interfere with each other.
Similarly, the completion of the call to this method does not mean the
data is delivered.
:param stream_id: Which stream to send data on
:param data: Bytes to send
:param end_stream: To finish sending a request or response, set this to
`True` to close the given stream locally after data
is sent (default `False`).
:raise: `SendException` if there is an error sending data. Data left
unsent can be found in `data` of the exception.
"""
try:
with (yield from self._get_stream(stream_id).wlock):
while True:
yield from _wait_for_events(
self._resumed, self._get_stream(stream_id).window_open)
self._priority.unblock(stream_id)
waiter = asyncio.Future()
if not self._priority_events:
self._loop.call_soon(self._priority_step)
self._priority_events[stream_id] = waiter
try:
yield from waiter
data_size = len(data)
size = min(
data_size,
self._conn.local_flow_control_window(stream_id),
self._conn.max_outbound_frame_size)
if data_size == 0 or size == data_size:
self._conn.send_data(stream_id, data,
end_stream=end_stream)
self._flush()
break
elif size > 0:
self._conn.send_data(stream_id, data[:size])
data = data[size:]
self._flush()
finally:
self._priority_events.pop(stream_id, None)
self._priority.block(stream_id)
if self._priority_events:
self._loop.call_soon(self._priority_step)
except ProtocolError:
raise exceptions.SendException(data) | Send request or response body on the given stream.
    This will block until either all the data is sent or the stream gets
    closed. Sending also waits while the underlying transport is paused or
    the flow control window is closed. If the peer increases the flow
    control window, this method will resume sending automatically.
This can be called multiple times, but it must be called after a
`start_request` or `start_response` with the returning stream ID, and
    before any `end_stream` instructions; otherwise it will fail.
The given data may be automatically split into smaller frames in order
to fit in the configured frame size or flow control window.
    Each stream can only have one `send_data` call running at a time; other
    callers are blocked on a per-stream lock (wlock), so that coroutines
    sending data concurrently won't interfere with each other.
    Note that the completion of this call does not mean the data has been
    delivered.
:param stream_id: Which stream to send data on
:param data: Bytes to send
:param end_stream: To finish sending a request or response, set this to
`True` to close the given stream locally after data
is sent (default `False`).
:raise: `SendException` if there is an error sending data. Data left
unsent can be found in `data` of the exception. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L432-L495 |
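A brief usage sketch of the call documented above, from the client side; the host, port, path and payload are placeholders, and the helper names (open_connection, start_request, recv_response, read_stream) are aioh2's public API as I understand it, not taken from this record:

import aioh2

async def post_payload(host, port, payload):
    # Open an HTTP/2 client connection to a hypothetical endpoint.
    client = await aioh2.open_connection(host, port)
    stream_id = await client.start_request([(':method', 'POST'), (':path', '/upload')])
    # send_data may split the payload into several DATA frames and waits on
    # flow control; end_stream=True closes our side once everything is sent.
    await client.send_data(stream_id, payload, end_stream=True)
    headers = await client.recv_response(stream_id)
    body = await client.read_stream(stream_id, -1)
    return headers, body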
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.send_trailers | def send_trailers(self, stream_id, headers):
"""
Send trailers on the given stream, closing the stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to send trailers on.
:param headers: A list of key-value tuples as trailers.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=True)
self._flush() | python | def send_trailers(self, stream_id, headers):
"""
Send trailers on the given stream, closing the stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to send trailers on.
:param headers: A list of key-value tuples as trailers.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=True)
self._flush() | Send trailers on the given stream, closing the stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to send trailers on.
:param headers: A list of key-value tuples as trailers. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L498-L511 |
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.end_stream | def end_stream(self, stream_id):
"""
Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.end_stream(stream_id)
self._flush() | python | def end_stream(self, stream_id):
"""
Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.end_stream(stream_id)
self._flush() | Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L514-L526 |
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.read_stream | def read_stream(self, stream_id, size=None):
"""
Read data from the given stream.
By default (`size=None`), this returns all data left in current HTTP/2
frame. In other words, default behavior is to receive frame by frame.
        If size is a number above zero, this method will try to return as many
        bytes as possible up to the given size, blocking until enough bytes are
        ready or the stream is remotely closed.
If below zero, it will read until the stream is remotely closed and
return everything at hand.
`size=0` is a special case that does nothing but returns `b''`. The
same result `b''` is also returned under other conditions if there is
no more data on the stream to receive, even under `size=None` and peer
sends an empty frame - you can use `b''` to safely identify the end of
the given stream.
Flow control frames will be automatically sent while reading clears the
buffer, allowing more data to come in.
:param stream_id: Stream to read
:param size: Expected size to read, `-1` for all, default frame.
:return: Bytes read or empty if there is no more to expect.
"""
rv = []
try:
with (yield from self._get_stream(stream_id).rlock):
if size is None:
rv.append((
yield from self._get_stream(stream_id).read_frame()))
self._flow_control(stream_id)
elif size < 0:
while True:
rv.extend((
yield from self._get_stream(stream_id).read_all()))
self._flow_control(stream_id)
else:
while size > 0:
bufs, count = yield from self._get_stream(
stream_id).read(size)
rv.extend(bufs)
size -= count
self._flow_control(stream_id)
except StreamClosedError:
pass
except _StreamEndedException as e:
try:
self._flow_control(stream_id)
except StreamClosedError:
pass
rv.extend(e.bufs)
return b''.join(rv) | python | def read_stream(self, stream_id, size=None):
"""
Read data from the given stream.
By default (`size=None`), this returns all data left in current HTTP/2
frame. In other words, default behavior is to receive frame by frame.
        If size is a number above zero, this method will try to return as many
        bytes as possible up to the given size, blocking until enough bytes are
        ready or the stream is remotely closed.
If below zero, it will read until the stream is remotely closed and
return everything at hand.
`size=0` is a special case that does nothing but returns `b''`. The
same result `b''` is also returned under other conditions if there is
no more data on the stream to receive, even under `size=None` and peer
sends an empty frame - you can use `b''` to safely identify the end of
the given stream.
Flow control frames will be automatically sent while reading clears the
buffer, allowing more data to come in.
:param stream_id: Stream to read
:param size: Expected size to read, `-1` for all, default frame.
:return: Bytes read or empty if there is no more to expect.
"""
rv = []
try:
with (yield from self._get_stream(stream_id).rlock):
if size is None:
rv.append((
yield from self._get_stream(stream_id).read_frame()))
self._flow_control(stream_id)
elif size < 0:
while True:
rv.extend((
yield from self._get_stream(stream_id).read_all()))
self._flow_control(stream_id)
else:
while size > 0:
bufs, count = yield from self._get_stream(
stream_id).read(size)
rv.extend(bufs)
size -= count
self._flow_control(stream_id)
except StreamClosedError:
pass
except _StreamEndedException as e:
try:
self._flow_control(stream_id)
except StreamClosedError:
pass
rv.extend(e.bufs)
return b''.join(rv) | Read data from the given stream.
By default (`size=None`), this returns all data left in current HTTP/2
frame. In other words, default behavior is to receive frame by frame.
        If size is a number above zero, this method will try to return as many
        bytes as possible up to the given size, blocking until enough bytes are
        ready or the stream is remotely closed.
If below zero, it will read until the stream is remotely closed and
return everything at hand.
`size=0` is a special case that does nothing but returns `b''`. The
same result `b''` is also returned under other conditions if there is
no more data on the stream to receive, even under `size=None` and peer
sends an empty frame - you can use `b''` to safely identify the end of
the given stream.
Flow control frames will be automatically sent while reading clears the
buffer, allowing more data to come in.
:param stream_id: Stream to read
:param size: Expected size to read, `-1` for all, default frame.
:return: Bytes read or empty if there is no more to expect. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L561-L615 |
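To contrast the frame-by-frame mode (size=None) with the drain-all mode (size=-1), a hedged server-side sketch; proto is assumed to be an H2Protocol handling a received request whose response has already been started with start_response:

async def echo_request_body(proto, stream_id):
    # size=None: one HTTP/2 DATA frame per call; b'' marks the end of the stream.
    while True:
        chunk = await proto.read_stream(stream_id)
        if chunk == b'':
            break
        await proto.send_data(stream_id, chunk)
    await proto.end_stream(stream_id)

async def slurp_request_body(proto, stream_id):
    # size=-1: wait until the peer closes the stream, then return everything.
    return await proto.read_stream(stream_id, -1)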
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.wait_functional | def wait_functional(self):
"""
Wait until the connection becomes functional.
        The connection is considered functional if it was active within the last few
seconds (defined by `functional_timeout`), where a newly-made
connection and received data indicate activeness.
:return: Most recently calculated round-trip time if any.
"""
while not self._is_functional():
self._rtt = None
self._ping_index += 1
self._ping_time = self._loop.time()
self._conn.ping(struct.pack('Q', self._ping_index))
self._flush()
try:
yield from asyncio.wait_for(self._functional.wait(),
self._functional_timeout)
except asyncio.TimeoutError:
pass
return self._rtt | python | def wait_functional(self):
"""
Wait until the connection becomes functional.
        The connection is considered functional if it was active within the last few
seconds (defined by `functional_timeout`), where a newly-made
connection and received data indicate activeness.
:return: Most recently calculated round-trip time if any.
"""
while not self._is_functional():
self._rtt = None
self._ping_index += 1
self._ping_time = self._loop.time()
self._conn.ping(struct.pack('Q', self._ping_index))
self._flush()
try:
yield from asyncio.wait_for(self._functional.wait(),
self._functional_timeout)
except asyncio.TimeoutError:
pass
return self._rtt | Wait until the connection becomes functional.
        The connection is considered functional if it was active within the last few
seconds (defined by `functional_timeout`), where a newly-made
connection and received data indicate activeness.
:return: Most recently calculated round-trip time if any. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L622-L643 |
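For instance, a caller can gate each request on connection health (a sketch; the 0.5 second functional_timeout and the endpoint are arbitrary choices, and open_connection/start_request are assumed from aioh2's public API):

import aioh2

async def healthy_request(host, port, headers):
    client = await aioh2.open_connection(host, port, functional_timeout=0.5)
    rtt = await client.wait_functional()  # keeps pinging until the peer answers
    if rtt is not None:
        print('connection alive, last round-trip time: %.3fs' % rtt)
    return await client.start_request(headers)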
decentfox/aioh2 | aioh2/protocol.py | H2Protocol.reprioritize | def reprioritize(self, stream_id,
depends_on=None, weight=16, exclusive=False):
"""
Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent.
"""
self._priority.reprioritize(stream_id, depends_on, weight, exclusive) | python | def reprioritize(self, stream_id,
depends_on=None, weight=16, exclusive=False):
"""
Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent.
"""
self._priority.reprioritize(stream_id, depends_on, weight, exclusive) | Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent. | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L645-L658 |
muccg/django-iprestrict | iprestrict/decorators.py | superuser_required | def superuser_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME,
login_url='admin:login'):
"""
    Decorator for views that checks that the user is logged in and is a
    superuser, redirecting to the login page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_superuser,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if view_func:
return actual_decorator(view_func)
return actual_decorator | python | def superuser_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME,
login_url='admin:login'):
"""
    Decorator for views that checks that the user is logged in and is a
    superuser, redirecting to the login page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_superuser,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if view_func:
return actual_decorator(view_func)
    return actual_decorator | Decorator for views that checks that the user is logged in and is a
    superuser, redirecting to the login page if necessary. | https://github.com/muccg/django-iprestrict/blob/f5ea013b7b856866c7164df146f1e205772677db/iprestrict/decorators.py#L7-L20
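Typical usage mirrors Django's own view decorators; a small sketch (the view body is made up):

from django.http import HttpResponse
from iprestrict.decorators import superuser_required

@superuser_required
def reload_rules(request):
    # Only active superusers reach this view; everyone else is redirected
    # to the 'admin:login' page.
    return HttpResponse('rules reloaded')

# Because view_func is optional, the decorator also accepts arguments:
# @superuser_required(login_url='admin:login')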
henriquebastos/django-aggregate-if | aggregate_if.py | SqlAggregate._condition_as_sql | def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
def escape(value):
if isinstance(value, bool):
value = str(int(value))
if isinstance(value, six.string_types):
# Escape params used with LIKE
if '%' in value:
value = value.replace('%', '%%')
# Escape single quotes
if "'" in value:
value = value.replace("'", "''")
# Add single quote to text values
value = "'" + value + "'"
return value
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param) | python | def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
def escape(value):
if isinstance(value, bool):
value = str(int(value))
if isinstance(value, six.string_types):
# Escape params used with LIKE
if '%' in value:
value = value.replace('%', '%%')
# Escape single quotes
if "'" in value:
value = value.replace("'", "''")
# Add single quote to text values
value = "'" + value + "'"
return value
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param) | Return sql for condition. | https://github.com/henriquebastos/django-aggregate-if/blob/588c1487bc88a8996d4ee9c2c9d50fa4a4484872/aggregate_if.py#L54-L75 |
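This method backs django-aggregate-if's conditional aggregates; usage looks roughly like the following sketch, where Author and Book are hypothetical models and Count/Sum are assumed to be the package's conditional aggregate classes:

from django.db.models import Q
from aggregate_if import Count, Sum

# The Q condition is what ultimately gets rendered to SQL by
# _condition_as_sql and spliced into the aggregate's SQL.
Author.objects.annotate(
    good_books=Count('book', only=Q(book__rating__gte=4)),
    good_sales=Sum('book__sales', only=Q(book__rating__gte=4)),
)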
karimbahgat/Pytess | pytess/tesselator.py | computeVoronoiDiagram | def computeVoronoiDiagram(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a 3-tuple of:
(1) a list of 2-tuples, which are the x,y coordinates of the
Voronoi diagram vertices
(2) a list of 3-tuples (a,b,c) which are the equations of the
lines in the Voronoi diagram: a*x + b*y = c
(3) a list of 3-tuples, (l, v1, v2) representing edges of the
Voronoi diagram. l is the index of the line, v1 and v2 are
            the indices of the vertices at the end of the edge. If
v1 or v2 is -1, the line extends to infinity.
"""
siteList = SiteList(points)
context = Context()
voronoi(siteList,context)
return (context.vertices, context.edges, context.polygons) | python | def computeVoronoiDiagram(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a 3-tuple of:
(1) a list of 2-tuples, which are the x,y coordinates of the
Voronoi diagram vertices
(2) a list of 3-tuples (a,b,c) which are the equations of the
lines in the Voronoi diagram: a*x + b*y = c
(3) a list of 3-tuples, (l, v1, v2) representing edges of the
Voronoi diagram. l is the index of the line, v1 and v2 are
            the indices of the vertices at the end of the edge. If
v1 or v2 is -1, the line extends to infinity.
"""
siteList = SiteList(points)
context = Context()
voronoi(siteList,context)
return (context.vertices, context.edges, context.polygons) | Takes a list of point objects (which must have x and y fields).
Returns a 3-tuple of:
(1) a list of 2-tuples, which are the x,y coordinates of the
Voronoi diagram vertices
(2) a list of 3-tuples (a,b,c) which are the equations of the
lines in the Voronoi diagram: a*x + b*y = c
(3) a list of 3-tuples, (l, v1, v2) representing edges of the
Voronoi diagram. l is the index of the line, v1 and v2 are
            the indices of the vertices at the end of the edge. If
v1 or v2 is -1, the line extends to infinity. | https://github.com/karimbahgat/Pytess/blob/026d0c6bfc281361d850d9a44af1da6fed45b170/pytess/tesselator.py#L772-L788 |
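A small driver for the function above; any object with x and y attributes works as a point, and the third return value is the per-site edge mapping that the higher-level voronoi() wrapper in pytess/main.py consumes:

import collections

from pytess.tesselator import computeVoronoiDiagram

Point = collections.namedtuple('Point', 'x y')

sites = [Point(0, 0), Point(4, 0), Point(2, 3), Point(1, 5)]
vertices, edges, poly_dict = computeVoronoiDiagram(sites)
# vertices: (x, y) tuples; edges: (line, v1, v2) index triples where -1
# means the edge runs off to infinity; poly_dict: edges grouped per input site.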
karimbahgat/Pytess | pytess/tesselator.py | computeDelaunayTriangulation | def computeDelaunayTriangulation(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a list of 3-tuples: the indices of the points that form a
Delaunay triangle.
"""
siteList = SiteList(points)
context = Context()
context.triangulate = True
voronoi(siteList,context)
return context.triangles | python | def computeDelaunayTriangulation(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a list of 3-tuples: the indices of the points that form a
Delaunay triangle.
"""
siteList = SiteList(points)
context = Context()
context.triangulate = True
voronoi(siteList,context)
return context.triangles | Takes a list of point objects (which must have x and y fields).
Returns a list of 3-tuples: the indices of the points that form a
Delaunay triangle. | https://github.com/karimbahgat/Pytess/blob/026d0c6bfc281361d850d9a44af1da6fed45b170/pytess/tesselator.py#L791-L800 |
Exirel/pylint-json2html | pylint_json2html/__init__.py | build_messages_metrics | def build_messages_metrics(messages):
"""Build reports's metrics"""
count_types = collections.Counter(
line.get('type') or None
for line in messages)
count_modules = collections.Counter(
line.get('module') or None
for line in messages)
count_symbols = collections.Counter(
line.get('symbol') or None
for line in messages)
count_paths = collections.Counter(
line.get('path') or None
for line in messages)
return {
'types': count_types,
'modules': count_modules,
'symbols': count_symbols,
'paths': count_paths,
} | python | def build_messages_metrics(messages):
"""Build reports's metrics"""
count_types = collections.Counter(
line.get('type') or None
for line in messages)
count_modules = collections.Counter(
line.get('module') or None
for line in messages)
count_symbols = collections.Counter(
line.get('symbol') or None
for line in messages)
count_paths = collections.Counter(
line.get('path') or None
for line in messages)
return {
'types': count_types,
'modules': count_modules,
'symbols': count_symbols,
'paths': count_paths,
    } | Build the report's metrics | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L26-L46
Exirel/pylint-json2html | pylint_json2html/__init__.py | build_messages_modules | def build_messages_modules(messages):
"""Build and yield sorted list of messages per module.
:param list messages: List of dict of messages
:return: Tuple of 2 values: first is the module info, second is the list
of messages sorted by line number
"""
data = collections.defaultdict(list)
for line in messages:
module_name = line.get('module')
module_path = line.get('path')
module_info = ModuleInfo(
module_name,
module_path,
)
data[module_info].append(line)
for module, module_messages in data.items():
yield (
module,
sorted(module_messages, key=lambda x: x.get('line'))) | python | def build_messages_modules(messages):
"""Build and yield sorted list of messages per module.
:param list messages: List of dict of messages
:return: Tuple of 2 values: first is the module info, second is the list
of messages sorted by line number
"""
data = collections.defaultdict(list)
for line in messages:
module_name = line.get('module')
module_path = line.get('path')
module_info = ModuleInfo(
module_name,
module_path,
)
data[module_info].append(line)
for module, module_messages in data.items():
yield (
module,
sorted(module_messages, key=lambda x: x.get('line'))) | Build and yield sorted list of messages per module.
:param list messages: List of dict of messages
:return: Tuple of 2 values: first is the module info, second is the list
of messages sorted by line number | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L49-L69 |
Exirel/pylint-json2html | pylint_json2html/__init__.py | stats_evaluation | def stats_evaluation(stats):
"""Generate an evaluation for the given pylint ``stats``."""
statement = stats.get('statement')
error = stats.get('error', 0)
warning = stats.get('warning', 0)
refactor = stats.get('refactor', 0)
convention = stats.get('convention', 0)
if not statement or statement <= 0:
return None
malus = float(5 * error + warning + refactor + convention)
malus_ratio = malus / statement
return 10.0 - (malus_ratio * 10) | python | def stats_evaluation(stats):
"""Generate an evaluation for the given pylint ``stats``."""
statement = stats.get('statement')
error = stats.get('error', 0)
warning = stats.get('warning', 0)
refactor = stats.get('refactor', 0)
convention = stats.get('convention', 0)
if not statement or statement <= 0:
return None
malus = float(5 * error + warning + refactor + convention)
malus_ratio = malus / statement
return 10.0 - (malus_ratio * 10) | Generate an evaluation for the given pylint ``stats``. | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L72-L85 |
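A worked example of that formula with invented numbers:

from pylint_json2html import stats_evaluation

stats = {'statement': 200, 'error': 4, 'warning': 10, 'refactor': 20, 'convention': 50}
# malus = 5*4 + 10 + 20 + 50 = 100; 100 / 200 = 0.5; 10.0 - 0.5 * 10 = 5.0
assert stats_evaluation(stats) == 5.0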
Exirel/pylint-json2html | pylint_json2html/__init__.py | build_command_parser | def build_command_parser():
"""Build command parser using ``argparse`` module."""
parser = argparse.ArgumentParser(
description='Transform Pylint JSON report to HTML')
parser.add_argument(
'filename',
metavar='FILENAME',
type=argparse.FileType('r'),
nargs='?',
default=sys.stdin,
help='Pylint JSON report input file (or stdin)')
parser.add_argument(
'-o', '--output',
metavar='FILENAME',
type=argparse.FileType('w'),
default=sys.stdout,
help='Pylint HTML report output file (or stdout)')
parser.add_argument(
'-f', '--input-format',
metavar='FORMAT',
choices=[SIMPLE_JSON, EXTENDED_JSON],
action='store',
dest='input_format',
default='json',
help='Pylint JSON Report input type (json or jsonextended)')
return parser | python | def build_command_parser():
"""Build command parser using ``argparse`` module."""
parser = argparse.ArgumentParser(
description='Transform Pylint JSON report to HTML')
parser.add_argument(
'filename',
metavar='FILENAME',
type=argparse.FileType('r'),
nargs='?',
default=sys.stdin,
help='Pylint JSON report input file (or stdin)')
parser.add_argument(
'-o', '--output',
metavar='FILENAME',
type=argparse.FileType('w'),
default=sys.stdout,
help='Pylint HTML report output file (or stdout)')
parser.add_argument(
'-f', '--input-format',
metavar='FORMAT',
choices=[SIMPLE_JSON, EXTENDED_JSON],
action='store',
dest='input_format',
default='json',
help='Pylint JSON Report input type (json or jsonextended)')
return parser | Build command parser using ``argparse`` module. | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L207-L233 |
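Programmatically the parser can be exercised as below (file names are placeholders); on the command line the equivalent is roughly `pylint --output-format=json pkg | pylint-json2html -o report.html`, assuming the package's console-script name:

from pylint_json2html import build_command_parser

parser = build_command_parser()
options = parser.parse_args(['-f', 'jsonextended', '-o', 'report.html', 'pylint.json'])
# argparse.FileType has already opened options.filename and options.output;
# options.input_format is 'jsonextended'.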
Exirel/pylint-json2html | pylint_json2html/__init__.py | main | def main():
"""Pylint JSON to HTML Main Entry Point"""
parser = build_command_parser()
options = parser.parse_args()
file_pointer = options.filename
input_format = options.input_format
with file_pointer:
json_data = json.load(file_pointer)
if input_format == SIMPLE_JSON:
report = Report(json_data)
elif input_format == EXTENDED_JSON:
report = Report(
json_data.get('messages'),
json_data.get('stats'),
json_data.get('previous'))
print(report.render(), file=options.output) | python | def main():
"""Pylint JSON to HTML Main Entry Point"""
parser = build_command_parser()
options = parser.parse_args()
file_pointer = options.filename
input_format = options.input_format
with file_pointer:
json_data = json.load(file_pointer)
if input_format == SIMPLE_JSON:
report = Report(json_data)
elif input_format == EXTENDED_JSON:
report = Report(
json_data.get('messages'),
json_data.get('stats'),
json_data.get('previous'))
print(report.render(), file=options.output) | Pylint JSON to HTML Main Entry Point | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L236-L254 |
Exirel/pylint-json2html | pylint_json2html/__init__.py | Report.render | def render(self):
"""Render report to HTML"""
template = self.get_template()
return template.render(
messages=self._messages,
metrics=self.metrics,
report=self) | python | def render(self):
"""Render report to HTML"""
template = self.get_template()
return template.render(
messages=self._messages,
metrics=self.metrics,
report=self) | Render report to HTML | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L118-L124 |
Exirel/pylint-json2html | pylint_json2html/__init__.py | JsonExtendedReporter.handle_message | def handle_message(self, msg):
"""Store new message for later use.
.. seealso:: :meth:`~JsonExtendedReporter.on_close`
"""
self._messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
'message': str(msg.msg) or '',
'message-id': msg.msg_id,
}) | python | def handle_message(self, msg):
"""Store new message for later use.
.. seealso:: :meth:`~JsonExtendedReporter.on_close`
"""
self._messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
'message': str(msg.msg) or '',
'message-id': msg.msg_id,
}) | Store new message for later use.
.. seealso:: :meth:`~JsonExtendedReporter.on_close` | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L157-L172 |
Exirel/pylint-json2html | pylint_json2html/__init__.py | JsonExtendedReporter.on_close | def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out) | python | def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out) | Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run | https://github.com/Exirel/pylint-json2html/blob/7acdb4b7ea2f82a39a67d8ed3a43839c91cc423b/pylint_json2html/__init__.py#L188-L199 |
karimbahgat/Pytess | pytess/main.py | triangulate | def triangulate(points):
"""
Connects an input list of xy tuples with lines forming a set of
    smallest possible Delaunay triangles between them.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
Returns:
- A list of triangle polygons. If the input coordinate points contained
a third z value then the output triangles will also have these z values.
"""
    # Remove duplicate xy points because that would make the Delaunay step fail, and must remember z (if any) for retrieving originals from index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
    # Compute Delaunay triangulation
triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)
# Get vertices from result indexes
triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]
return triangles | python | def triangulate(points):
"""
Connects an input list of xy tuples with lines forming a set of
    smallest possible Delaunay triangles between them.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
Returns:
- A list of triangle polygons. If the input coordinate points contained
a third z value then the output triangles will also have these z values.
"""
    # Remove duplicate xy points because that would make the Delaunay step fail, and must remember z (if any) for retrieving originals from index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
    # Compute Delaunay triangulation
triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)
# Get vertices from result indexes
triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]
return triangles | Connects an input list of xy tuples with lines forming a set of
    smallest possible Delaunay triangles between them.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
Returns:
- A list of triangle polygons. If the input coordinate points contained
a third z value then the output triangles will also have these z values. | https://github.com/karimbahgat/Pytess/blob/026d0c6bfc281361d850d9a44af1da6fed45b170/pytess/main.py#L20-L46 |
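Example use of the wrapper (points chosen arbitrarily; assuming the function is exposed at package level as pytess.triangulate):

import pytess

points = [(1, 1), (5, 1), (3, 4), (2, 6), (6, 5)]
triangles = pytess.triangulate(points)
# Each triangle is a list of three of the original (x, y) tuples.
for tri in triangles:
    print(tri)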
karimbahgat/Pytess | pytess/main.py | voronoi | def voronoi(points, buffer_percent=100):
"""
Surrounds each point in an input list of xy tuples with a
unique Voronoi polygon.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
- **buffer_percent** (optional): Controls how much bigger than
the original bbox of the input points to set the bbox of fake points,
used to account for lacking values around the edges (default is 100 percent).
Returns:
- Returns a list of 2-tuples, with the first item in each tuple being the
original input point (or None for each corner of the bounding box buffer),
    and the second item being the point's corresponding Voronoi polygon.
"""
    # Remove duplicate xy points because that would make the Delaunay step fail, and must remember z (if any) for retrieving originals from index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Create fake sitepoints around the point extent to correct for infinite polygons
# For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity
xs,ys = list(zip(*uniqpoints))[:2]
pointswidth = max(xs) - min(xs)
pointsheight = max(ys) - min(ys)
xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )
midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )
#bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer
bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer
classpoints.extend([_Point(*corner) for corner in bufferbox])
# Compute Voronoi
vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)
# Turn unordered result edges into ordered polygons
polygons = list()
for sitepoint,polyedges in list(poly_dict.items()):
polyedges = [edge[1:] for edge in polyedges]
poly = list()
firststart,firstend = polyedges.pop(0)
poly.append(firstend)
while polyedges:
curend = poly[-1]
for i,other in enumerate(polyedges):
otherstart,otherend = other
if otherstart == curend:
poly.append(otherend)
##print otherstart,otherend
polyedges.pop(i)
break
elif otherend == curend:
##print otherend,otherstart
poly.append(otherstart)
polyedges.pop(i)
break
# Get vertices from indexes
try: sitepoint = uniqpoints[sitepoint]
except IndexError:
sitepoint = None # fake bbox sitepoints shouldnt be in the results
poly = [vertices[vi] for vi in poly if vi != -1]
polygons.append((sitepoint, poly))
# Maybe clip parts of polygons that stick outside screen?
# ...
return polygons | python | def voronoi(points, buffer_percent=100):
"""
Surrounds each point in an input list of xy tuples with a
unique Voronoi polygon.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
- **buffer_percent** (optional): Controls how much bigger than
the original bbox of the input points to set the bbox of fake points,
used to account for lacking values around the edges (default is 100 percent).
Returns:
- Returns a list of 2-tuples, with the first item in each tuple being the
original input point (or None for each corner of the bounding box buffer),
    and the second item being the point's corresponding Voronoi polygon.
"""
    # Remove duplicate xy points because that would make the Delaunay step fail, and must remember z (if any) for retrieving originals from index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Create fake sitepoints around the point extent to correct for infinite polygons
# For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity
xs,ys = list(zip(*uniqpoints))[:2]
pointswidth = max(xs) - min(xs)
pointsheight = max(ys) - min(ys)
xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )
midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )
#bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer
bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer
classpoints.extend([_Point(*corner) for corner in bufferbox])
# Compute Voronoi
vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)
# Turn unordered result edges into ordered polygons
polygons = list()
for sitepoint,polyedges in list(poly_dict.items()):
polyedges = [edge[1:] for edge in polyedges]
poly = list()
firststart,firstend = polyedges.pop(0)
poly.append(firstend)
while polyedges:
curend = poly[-1]
for i,other in enumerate(polyedges):
otherstart,otherend = other
if otherstart == curend:
poly.append(otherend)
##print otherstart,otherend
polyedges.pop(i)
break
elif otherend == curend:
##print otherend,otherstart
poly.append(otherstart)
polyedges.pop(i)
break
# Get vertices from indexes
try: sitepoint = uniqpoints[sitepoint]
except IndexError:
sitepoint = None # fake bbox sitepoints shouldnt be in the results
poly = [vertices[vi] for vi in poly if vi != -1]
polygons.append((sitepoint, poly))
# Maybe clip parts of polygons that stick outside screen?
# ...
return polygons | Surrounds each point in an input list of xy tuples with a
unique Voronoi polygon.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
- **buffer_percent** (optional): Controls how much bigger than
the original bbox of the input points to set the bbox of fake points,
used to account for lacking values around the edges (default is 100 percent).
Returns:
- Returns a list of 2-tuples, with the first item in each tuple being the
original input point (or None for each corner of the bounding box buffer),
    and the second item being the point's corresponding Voronoi polygon. | https://github.com/karimbahgat/Pytess/blob/026d0c6bfc281361d850d9a44af1da6fed45b170/pytess/main.py#L48-L117
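And the matching Voronoi call with the same arbitrary points (again assuming package-level exposure as pytess.voronoi); polygons whose site is None belong to the artificial buffer points added around the extent:

import pytess

points = [(1, 1), (5, 1), (3, 4), (2, 6), (6, 5)]
for site, polygon in pytess.voronoi(points, buffer_percent=100):
    if site is not None:
        print(site, polygon)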
ecordell/pymacaroons | pymacaroons/utils.py | equals | def equals(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0 | python | def equals(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0 | Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/utils.py#L80-L95 |
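For example, comparing two hex digests (constant time when the lengths match, an immediate False otherwise):

from pymacaroons.utils import equals

assert equals('deadbeef', 'deadbeef') is True
assert equals('deadbeef', 'deadbea1') is False
# A length mismatch short-circuits before any character comparison:
assert equals('deadbeef', 'dead') is False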
ecordell/pymacaroons | pymacaroons/serializers/binary_serializer.py | _encode_uvarint | def _encode_uvarint(data, n):
''' Encodes integer into variable-length format into data.'''
if n < 0:
raise ValueError('only support positive integer')
while True:
this_byte = n & 0x7f
n >>= 7
if n == 0:
data.append(this_byte)
break
data.append(this_byte | 0x80) | python | def _encode_uvarint(data, n):
''' Encodes integer into variable-length format into data.'''
if n < 0:
raise ValueError('only support positive integer')
while True:
this_byte = n & 0x7f
n >>= 7
if n == 0:
data.append(this_byte)
break
data.append(this_byte | 0x80) | Encodes integer into variable-length format into data. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/binary_serializer.py#L301-L311 |
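A worked example of the 7-bits-per-byte encoding (values picked by hand):

from pymacaroons.serializers.binary_serializer import _encode_uvarint

buf = bytearray()
_encode_uvarint(buf, 300)
# 300 -> low 7 bits 0x2c with the continuation bit set (0xac), then 0x02.
assert list(buf) == [0xac, 0x02]

buf = bytearray()
_encode_uvarint(buf, 5)   # small values fit in a single byte
assert list(buf) == [0x05]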
ecordell/pymacaroons | pymacaroons/serializers/binary_serializer.py | BinarySerializer._parse_section_v2 | def _parse_section_v2(self, data):
''' Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
:param data bytes to be deserialized.
:return: the rest of data and an array of packet V2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
prev_field_type = -1
packets = []
while True:
if len(data) == 0:
raise MacaroonDeserializationException(
'section extends past end of buffer')
rest, packet = self._parse_packet_v2(data)
if packet.field_type == self._EOS:
return rest, packets
if packet.field_type <= prev_field_type:
raise MacaroonDeserializationException('fields out of order')
packets.append(packet)
prev_field_type = packet.field_type
data = rest | python | def _parse_section_v2(self, data):
''' Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
:param data bytes to be deserialized.
:return: the rest of data and an array of packet V2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
prev_field_type = -1
packets = []
while True:
if len(data) == 0:
raise MacaroonDeserializationException(
'section extends past end of buffer')
rest, packet = self._parse_packet_v2(data)
if packet.field_type == self._EOS:
return rest, packets
if packet.field_type <= prev_field_type:
raise MacaroonDeserializationException('fields out of order')
packets.append(packet)
prev_field_type = packet.field_type
data = rest | Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
:param data bytes to be deserialized.
:return: the rest of data and an array of packet V2 | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/binary_serializer.py#L249-L272 |
ecordell/pymacaroons | pymacaroons/serializers/binary_serializer.py | BinarySerializer._parse_packet_v2 | def _parse_packet_v2(self, data):
''' Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
        apart from EOS, which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
ft, n = _decode_uvarint(data)
data = data[n:]
if ft == self._EOS:
return data, PacketV2(ft, None)
payload_len, n = _decode_uvarint(data)
data = data[n:]
if payload_len > len(data):
raise MacaroonDeserializationException(
'field data extends past end of buffer')
return data[payload_len:], PacketV2(ft, data[0:payload_len]) | python | def _parse_packet_v2(self, data):
''' Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
        apart from EOS, which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
ft, n = _decode_uvarint(data)
data = data[n:]
if ft == self._EOS:
return data, PacketV2(ft, None)
payload_len, n = _decode_uvarint(data)
data = data[n:]
if payload_len > len(data):
raise MacaroonDeserializationException(
'field data extends past end of buffer')
return data[payload_len:], PacketV2(ft, data[0:payload_len]) | Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
        apart from EOS, which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2 | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/binary_serializer.py#L274-L298 |
ecordell/pymacaroons | pymacaroons/macaroon.py | Macaroon.prepare_for_request | def prepare_for_request(self, discharge_macaroon):
''' Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon
'''
protected = discharge_macaroon.copy()
return HashSignaturesBinder(self).bind(protected) | python | def prepare_for_request(self, discharge_macaroon):
''' Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon
'''
protected = discharge_macaroon.copy()
return HashSignaturesBinder(self).bind(protected) | Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/macaroon.py#L129-L139 |
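A hedged end-to-end sketch with pymacaroons; the keys, locations and identifiers are placeholders, and the discharge is minted locally instead of by a real third party:

from pymacaroons import Macaroon, Verifier

root = Macaroon(location='my-service', identifier='token-123', key='root-secret')
root.add_third_party_caveat('auth-service', 'caveat-secret', 'caveat-id-1')

# The third party would normally mint this discharge for 'caveat-id-1'...
discharge = Macaroon(location='auth-service', identifier='caveat-id-1', key='caveat-secret')

# ...and it must be bound to the root macaroon before being sent along.
bound = root.prepare_for_request(discharge)
Verifier().verify(root, 'root-secret', discharge_macaroons=[bound])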
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | _caveat_v1_to_dict | def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized | python | def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized | Return a caveat as a dictionary for export as the JSON
macaroon v1 format. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L128-L140 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | _caveat_v2_to_dict | def _caveat_v2_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
'''
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i')
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v')
if c.location:
serialized['l'] = c.location
return serialized | python | def _caveat_v2_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
'''
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i')
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v')
if c.location:
serialized['l'] = c.location
return serialized | Return a caveat as a dictionary for export as the JSON
macaroon v2 format. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L143-L154 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | _add_json_binary_field | def _add_json_binary_field(b, serialized, field):
''' Set the given field to the given val (a bytearray) in the serialized
dictionary.
If the value isn't valid utf-8, we base64 encode it and use field+"64"
as the field name.
'''
try:
val = b.decode("utf-8")
serialized[field] = val
except UnicodeDecodeError:
val = utils.raw_urlsafe_b64encode(b).decode('utf-8')
serialized[field + '64'] = val | python | def _add_json_binary_field(b, serialized, field):
''' Set the given field to the given val (a bytearray) in the serialized
dictionary.
If the value isn't valid utf-8, we base64 encode it and use field+"64"
as the field name.
'''
try:
val = b.decode("utf-8")
serialized[field] = val
except UnicodeDecodeError:
val = utils.raw_urlsafe_b64encode(b).decode('utf-8')
serialized[field + '64'] = val | Set the given field to the given val (a bytearray) in the serialized
dictionary.
If the value isn't valid utf-8, we base64 encode it and use field+"64"
as the field name. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L157-L169 |
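Its behaviour on utf-8 versus arbitrary bytes, as a small sketch (the expected base64 value was computed by hand):

from pymacaroons.serializers.json_serializer import _add_json_binary_field

out = {}
_add_json_binary_field(b'plain text id', out, 'i')   # valid utf-8 -> stored under 'i'
_add_json_binary_field(b'\xff\xfe\x00', out, 'v')    # not utf-8 -> base64 under 'v64'
assert out == {'i': 'plain text id', 'v64': '__4A'}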
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | _read_json_binary_field | def _read_json_binary_field(deserialized, field):
''' Read the value of a JSON field that may be string or base64-encoded.
'''
val = deserialized.get(field)
if val is not None:
return utils.convert_to_bytes(val)
val = deserialized.get(field + '64')
if val is None:
return None
return utils.raw_urlsafe_b64decode(val) | python | def _read_json_binary_field(deserialized, field):
''' Read the value of a JSON field that may be string or base64-encoded.
'''
val = deserialized.get(field)
if val is not None:
return utils.convert_to_bytes(val)
val = deserialized.get(field + '64')
if val is None:
return None
return utils.raw_urlsafe_b64decode(val) | Read the value of a JSON field that may be string or base64-encoded. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L172-L181 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer.serialize | def serialize(self, m):
'''Serialize the macaroon in JSON format indicated by the version field.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
from pymacaroons import macaroon
if m.version == macaroon.MACAROON_V1:
return self._serialize_v1(m)
return self._serialize_v2(m) | python | def serialize(self, m):
'''Serialize the macaroon in JSON format indicated by the version field.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
from pymacaroons import macaroon
if m.version == macaroon.MACAROON_V1:
return self._serialize_v1(m)
return self._serialize_v2(m) | Serialize the macaroon in JSON format indicated by the version field.
@param macaroon the macaroon to serialize.
@return JSON macaroon. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L9-L18 |