Dataset schema (8 columns):
  body                    string, 26 to 98.2k chars
  body_hash               int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               string, 1 to 16.8k chars
  path                    string, 5 to 230 chars
  name                    string, 1 to 96 chars
  repository_name         string, 7 to 89 chars
  lang                    stringclasses, 1 value
  body_without_docstring  string, 20 to 98.2k chars
def copy(self): '\n Return a copy of the dictionary.\n\n Lazy keys are not evaluated in the original or copied dictionary.\n ' dic = self.__class__(self._dic.copy()) for (key, value_callable) in self._lazyload.items(): dic.set_lazy(key, value_callable) return dic
732,852,584,380,812,800
Return a copy of the dictionary. Lazy keys are not evaluated in the original or copied dictionary.
pycwr/configure/pyart_lazydict.py
copy
1271756664/study
python
def copy(self): '\n Return a copy of the dictionary.\n\n Lazy keys are not evaluated in the original or copied dictionary.\n ' dic = self.__class__(self._dic.copy()) for (key, value_callable) in self._lazyload.items(): dic.set_lazy(key, value_callable) return dic
def set_lazy(self, key, value_callable): ' Set a lazy key to load from a callable object. ' if (key in self._dic): del self._dic[key] self._lazyload[key] = value_callable
-5,685,701,642,325,754,000
Set a lazy key to load from a callable object.
pycwr/configure/pyart_lazydict.py
set_lazy
1271756664/study
python
def set_lazy(self, key, value_callable): ' ' if (key in self._dic): del self._dic[key] self._lazyload[key] = value_callable
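The two records above document the lazy-dictionary pattern from pycwr/configure/pyart_lazydict.py: set_lazy registers a callable in place of a value, and copy propagates those callables without evaluating them. A minimal self-contained sketch of the same semantics (a stand-in class, not the real LazyLoadDict):

# Stand-in for the pycwr/pyart lazy dict; only the two documented methods.
class LazyDict:
    def __init__(self, dic=None):
        self._dic = dict(dic or {})
        self._lazyload = {}

    def set_lazy(self, key, value_callable):
        # A lazy key shadows any concrete key of the same name.
        if key in self._dic:
            del self._dic[key]
        self._lazyload[key] = value_callable

    def copy(self):
        # Lazy keys are copied as callables, still unevaluated.
        dic = self.__class__(self._dic.copy())
        for key, value_callable in self._lazyload.items():
            dic.set_lazy(key, value_callable)
        return dic

d = LazyDict({'a': 1})
d.set_lazy('b', lambda: 2)      # not evaluated here
c = d.copy()
assert 'b' in c._lazyload       # nor here: the callable itself was copied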
def runCase(case_name): '\n\t\n\t:param case_name:\n\t:return: note the format: xxx(case_name)()\n\t' return api_route.get(case_name)()
4,925,550,948,916,173,000
:param case_name: :return: note the format: xxx(case_name)()
apis/router.py
runCase
DerWalundDieKatze/Yumekui
python
def runCase(case_name): '\n\t\n\t:param case_name:\n\t:return: note the format: xxx(case_name)()\n\t' return api_route.get(case_name)()
def setTestStepResult(self, step_number, result: ResultType, comment=None): '\n Set the result of a test step\n\n :param step_number: Step number\n :param result: The result of the test step\n :param comment: An optional comment\n ' if (self.testStepResults is None): service = self._polarion.getService('TestManagement') test_steps = service.getTestSteps(self.testCaseURI) number_of_steps = 0 if (test_steps.steps is not None): number_of_steps = len(test_steps.steps.TestStep) self.testStepResults = self._polarion.ArrayOfTestStepResultType() for _i in range(number_of_steps): self.testStepResults.TestStepResult.append(self._polarion.TestStepResultType()) if (step_number < len(self.testStepResults.TestStepResult)): self.testStepResults.TestStepResult[step_number].result = self._polarion.EnumOptionIdType(id=result.value) if (comment is not None): self.testStepResults.TestStepResult[step_number].comment = self._polarion.TextType(content=comment, type='text/html', contentLossy=False) self.save()
5,744,355,366,811,355,000
" Set the result of a test step :param step_number: Step number :param result: The result fo the test step :param comment: An optional comment
polarion/record.py
setTestStepResult
jesper-raemaekers/python-polarion
python
def setTestStepResult(self, step_number, result: ResultType, comment=None): '\n Set the result of a test step\n\n :param step_number: Step number\n :param result: The result of the test step\n :param comment: An optional comment\n ' if (self.testStepResults is None): service = self._polarion.getService('TestManagement') test_steps = service.getTestSteps(self.testCaseURI) number_of_steps = 0 if (test_steps.steps is not None): number_of_steps = len(test_steps.steps.TestStep) self.testStepResults = self._polarion.ArrayOfTestStepResultType() for _i in range(number_of_steps): self.testStepResults.TestStepResult.append(self._polarion.TestStepResultType()) if (step_number < len(self.testStepResults.TestStepResult)): self.testStepResults.TestStepResult[step_number].result = self._polarion.EnumOptionIdType(id=result.value) if (comment is not None): self.testStepResults.TestStepResult[step_number].comment = self._polarion.TextType(content=comment, type='text/html', contentLossy=False) self.save()
def getResult(self): '\n Get the test result of this record\n\n :return: The test case result\n :rtype: ResultType\n ' if (self.result is not None): return self.ResultType(self.result.id) return self.ResultType.No
-5,206,828,231,075,465,000
Get the test result of this record :return: The test case result :rtype: ResultType
polarion/record.py
getResult
jesper-raemaekers/python-polarion
python
def getResult(self): '\n Get the test result of this record\n\n :return: The test case result\n :rtype: ResultType\n ' if (self.result is not None): return self.ResultType(self.result.id) return self.ResultType.No
def getComment(self): '\n Get a comment if available. The comment may contain HTML if edited in Polarion!\n\n :return: Get the comment, may contain HTML\n :rtype: string\n ' if (self.comment is not None): return self.comment.content return None
7,513,077,603,057,175,000
Get a comment if available. The comment may contain HTML if edited in Polarion! :return: Get the comment, may contain HTML :rtype: string
polarion/record.py
getComment
jesper-raemaekers/python-polarion
python
def getComment(self): '\n Get a comment if available. The comment may contain HTML if edited in Polarion!\n\n :return: Get the comment, may contain HTML\n :rtype: string\n ' if (self.comment is not None): return self.comment.content return None
@property def testcase_id(self): '\n The test case name including prefix\n ' return self._testcase_name
7,761,441,519,266,238,000
The test case name including prefix
polarion/record.py
testcase_id
jesper-raemaekers/python-polarion
python
@property def testcase_id(self): '\n \n ' return self._testcase_name
def getTestCaseName(self): '\n Get the test case name including prefix\n\n :return: The name\n :rtype: string\n ' return self._testcase_name
3,886,200,417,914,788,000
Get the test case name including prefix :return: The name :rtype: string
polarion/record.py
getTestCaseName
jesper-raemaekers/python-polarion
python
def getTestCaseName(self): '\n Get the test case name including prefix\n\n :return: The name\n :rtype: string\n ' return self._testcase_name
def setComment(self, comment): '\n Set the comment of this record.\n\n :param comment: Comment string, may contain HTML\n ' self.comment = self._polarion.TextType(content=comment, type='text/html', contentLossy=False)
-6,355,790,804,026,165,000
Set the comment of this record. :param comment: Comment string, may contain HTML
polarion/record.py
setComment
jesper-raemaekers/python-polarion
python
def setComment(self, comment): '\n Set the comment of this record.\n\n :param comment: Comment string, may contain HTML\n ' self.comment = self._polarion.TextType(content=comment, type='text/html', contentLossy=False)
def setResult(self, result: ResultType=ResultType.FAILED, comment=None): '\n Set the result of this record and save it.\n\n :param result: The result of this record\n :param comment: Comment string, may contain HTML\n ' if (comment is not None): self.setComment(comment) if (self.result is not None): self.result.id = result.value else: self.result = self._polarion.EnumOptionIdType(id=result.value) self.save()
-2,210,465,079,516,604,400
Set the result of this record and save it. :param result: The result of this record :param comment: Comment string, may contain HTML
polarion/record.py
setResult
jesper-raemaekers/python-polarion
python
def setResult(self, result: ResultType=ResultType.FAILED, comment=None): '\n Set the result of this record and save it.\n\n :param result: The result of this record\n :param comment: Comment string, may contain HTML\n ' if (comment is not None): self.setComment(comment) if (self.result is not None): self.result.id = result.value else: self.result = self._polarion.EnumOptionIdType(id=result.value) self.save()
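Taken together, the record API above supports a compact failure-marking helper. A hedged sketch: the enum is reached through record.ResultType as getResult() does above, FAILED is the documented default of setResult(), and the record itself is assumed to come from a python-polarion test run.

def mark_failed(record, step_number, reason):
    # ResultType is reachable on the record itself (see getResult above).
    failed = record.ResultType.FAILED
    record.setTestStepResult(step_number, failed, comment=reason)
    record.setResult(failed, comment=reason)   # setResult() saves the record
    assert record.getResult() == failed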
def getExecutingUser(self): '\n Gets the executing user if the test was executed\n\n :return: The user\n :rtype: User/None\n ' if (self.executedByURI is not None): return createFromUri(self._polarion, None, self.executedByURI) return None
-5,531,351,703,660,311,000
Gets the executing user if the test was executed :return: The user :rtype: User/None
polarion/record.py
getExecutingUser
jesper-raemaekers/python-polarion
python
def getExecutingUser(self): '\n Gets the executing user if the test was executed\n\n :return: The user\n :rtype: User/None\n ' if (self.executedByURI is not None): return createFromUri(self._polarion, None, self.executedByURI) return None
def hasAttachment(self): '\n Checks if the Record has attachments\n\n :return: True/False\n :rtype: boolean\n ' if (self.attachments is not None): return True return False
7,877,481,670,489,581,000
Checks if the Record has attachments :return: True/False :rtype: boolean
polarion/record.py
hasAttachment
jesper-raemaekers/python-polarion
python
def hasAttachment(self): '\n Checks if the Record has attachments\n\n :return: True/False\n :rtype: boolean\n ' if (self.attachments is not None): return True return False
def getAttachment(self, file_name): '\n Get the attachment data\n\n :param file_name: The attachment file name\n :return: list of bytes\n :rtype: bytes[]\n ' url = None for attachment in self.attachments.TestRunAttachment: if (attachment.fileName == file_name): url = attachment.url if (url is not None): resp = requests.get(url, auth=(self._polarion.user, self._polarion.password)) if resp.ok: return resp.content else: raise Exception(f'Could not download attachment {file_name}') else: raise Exception(f'Could not find attachment with name {file_name}')
8,589,317,731,755,186,000
Get the attachment data :param file_name: The attachment file name :return: list of bytes :rtype: bytes[]
polarion/record.py
getAttachment
jesper-raemaekers/python-polarion
python
def getAttachment(self, file_name): '\n Get the attachment data\n\n :param file_name: The attachment file name\n :return: list of bytes\n :rtype: bytes[]\n ' url = None for attachment in self.attachments.TestRunAttachment: if (attachment.fileName == file_name): url = attachment.url if (url is not None): resp = requests.get(url, auth=(self._polarion.user, self._polarion.password)) if resp.ok: return resp.content else: raise Exception(f'Could not download attachment {file_name}') else: raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentAsFile(self, file_name, file_path): '\n Save an attachment to file.\n\n :param file_name: The attachment file name\n :param file_path: File where to save the attachment\n ' bin = self.getAttachment(file_name) with open(file_path, 'wb') as file: file.write(bin)
6,366,685,034,168,646,000
Save an attachment to file. :param file_name: The attachment file name :param file_path: File where to save the attachment
polarion/record.py
saveAttachmentAsFile
jesper-raemaekers/python-polarion
python
def saveAttachmentAsFile(self, file_name, file_path): '\n Save an attachment to file.\n\n :param file_name: The attachment file name\n :param file_path: File where to save the attachment\n ' bin = self.getAttachment(file_name) with open(file_path, 'wb') as file: file.write(bin)
def deleteAttachment(self, file_name): '\n Delete an attachment.\n\n :param file_name: The attachment file name\n ' service = self._polarion.getService('TestManagement') service.deleteAttachmentFromTestRecord(self._test_run.uri, self._index, file_name) self._reloadFromPolarion()
-7,794,299,231,687,753,000
Delete an attachment. :param file_name: The attachment file name
polarion/record.py
deleteAttachment
jesper-raemaekers/python-polarion
python
def deleteAttachment(self, file_name): '\n Delete an attachment.\n\n :param file_name: The attachment file name\n ' service = self._polarion.getService('TestManagement') service.deleteAttachmentFromTestRecord(self._test_run.uri, self._index, file_name) self._reloadFromPolarion()
def addAttachment(self, file_path, title): '\n Upload an attachment\n\n :param file_path: Source file to upload\n :param title: The title of the attachment\n ' service = self._polarion.getService('TestManagement') file_name = os.path.split(file_path)[1] with open(file_path, 'rb') as file_content: service.addAttachmentToTestRecord(self._test_run.uri, self._index, file_name, title, file_content.read()) self._reloadFromPolarion()
-3,232,457,304,182,577,000
Upload an attachment :param file_path: Source file to upload :param title: The title of the attachment
polarion/record.py
addAttachment
jesper-raemaekers/python-polarion
python
def addAttachment(self, file_path, title): '\n Upload an attachment\n\n :param file_path: Source file to upload\n :param title: The title of the attachment\n ' service = self._polarion.getService('TestManagement') file_name = os.path.split(file_path)[1] with open(file_path, 'rb') as file_content: service.addAttachmentToTestRecord(self._test_run.uri, self._index, file_name, title, file_content.read()) self._reloadFromPolarion()
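The five attachment records above compose into a full round trip. A hedged usage sketch; `record` is assumed to be a live python-polarion test record, and the file names are placeholders:

def attachment_round_trip(record):
    record.addAttachment('logs/run.log', title='Console log')  # stored under its basename, 'run.log'
    if record.hasAttachment():
        data = record.getAttachment('run.log')                  # raw bytes of the download
        record.saveAttachmentAsFile('run.log', '/tmp/run.log')  # same bytes, written to disk
        record.deleteAttachment('run.log')                      # removes it server-side and reloads
        return data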
def testStepHasAttachment(self, step_index): '\n Checks if a test step has attachments\n\n :param step_index: The test step index\n :return: True/False\n :rtype: boolean\n ' if (self.testStepResults is None): return False if (self.testStepResults.TestStepResult[step_index].attachments is not None): return True return False
1,176,041,346,641,023,000
Checks if a test step has attachments :param step_index: The test step index :return: True/False :rtype: boolean
polarion/record.py
testStepHasAttachment
jesper-raemaekers/python-polarion
python
def testStepHasAttachment(self, step_index): '\n Checks if a test step has attachments\n\n :param step_index: The test step index\n :return: True/False\n :rtype: boolean\n ' if (self.testStepResults is None): return False if (self.testStepResults.TestStepResult[step_index].attachments is not None): return True return False
def getAttachmentFromTestStep(self, step_index, file_name): '\n Get the attachment data from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n :return: list of bytes\n :rtype: bytes[]\n ' url = None for attachment in self.testStepResults.TestStepResult[step_index].attachments.TestRunAttachment: if (attachment.fileName == file_name): url = attachment.url if (url is not None): resp = requests.get(url, auth=(self._polarion.user, self._polarion.password)) if resp.ok: return resp.content else: raise Exception(f'Could not download attachment {file_name}') else: raise Exception(f'Could not find attachment with name {file_name}')
4,546,584,508,704,895,500
Get the attachment data from a test step :param step_index: The test step index :param file_name: The attachment file name :return: list of bytes :rtype: bytes[]
polarion/record.py
getAttachmentFromTestStep
jesper-raemaekers/python-polarion
python
def getAttachmentFromTestStep(self, step_index, file_name): '\n Get the attachment data from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n :return: list of bytes\n :rtype: bytes[]\n ' url = None for attachment in self.testStepResults.TestStepResult[step_index].attachments.TestRunAttachment: if (attachment.fileName == file_name): url = attachment.url if (url is not None): resp = requests.get(url, auth=(self._polarion.user, self._polarion.password)) if resp.ok: return resp.content else: raise Exception(f'Could not download attachment {file_name}') else: raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentFromTestStepAsFile(self, step_index, file_name, file_path): '\n Save an attachment to file from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n :param file_path: File where to save the attachment\n ' bin = self.getAttachmentFromTestStep(step_index, file_name) with open(file_path, 'wb') as file: file.write(bin)
-6,703,495,425,838,276,000
Save an attachment to file from a test step :param step_index: The test step index :param file_name: The attachment file name :param file_path: File where to save the attachment
polarion/record.py
saveAttachmentFromTestStepAsFile
jesper-raemaekers/python-polarion
python
def saveAttachmentFromTestStepAsFile(self, step_index, file_name, file_path): '\n Save an attachment to file from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n :param file_path: File where to save the attachment\n ' bin = self.getAttachmentFromTestStep(step_index, file_name) with open(file_path, 'wb') as file: file.write(bin)
def deleteAttachmentFromTestStep(self, step_index, file_name): '\n Delete an attachment from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n ' service = self._polarion.getService('TestManagement') service.deleteAttachmentFromTestStep(self._test_run.uri, self._index, step_index, file_name) self._reloadFromPolarion()
-7,120,527,479,930,367,000
Delete an attachment from a test step :param step_index: The test step index :param file_name: The attachment file name
polarion/record.py
deleteAttachmentFromTestStep
jesper-raemaekers/python-polarion
python
def deleteAttachmentFromTestStep(self, step_index, file_name): '\n Delete an attachment from a test step\n\n :param step_index: The test step index\n :param file_name: The attachment file name\n ' service = self._polarion.getService('TestManagement') service.deleteAttachmentFromTestStep(self._test_run.uri, self._index, step_index, file_name) self._reloadFromPolarion()
def addAttachmentToTestStep(self, step_index, file_path, title): '\n Upload an attachment to a test step\n\n :param step_index: The test step index\n :param file_path: Source file to upload\n :param title: The title of the attachment\n ' service = self._polarion.getService('TestManagement') file_name = os.path.split(file_path)[1] with open(file_path, 'rb') as file_content: service.addAttachmentToTestStep(self._test_run.uri, self._index, step_index, file_name, title, file_content.read()) self._reloadFromPolarion()
-6,414,064,469,549,954,000
Upload an attachment to a test step :param step_index: The test step index :param file_path: Source file to upload :param title: The title of the attachment
polarion/record.py
addAttachmentToTestStep
jesper-raemaekers/python-polarion
python
def addAttachmentToTestStep(self, step_index, file_path, title): '\n Upload an attachment to a test step\n\n :param step_index: The test step index\n :param file_path: Source file to upload\n :param title: The title of the attachment\n ' service = self._polarion.getService('TestManagement') file_name = os.path.split(file_path)[1] with open(file_path, 'rb') as file_content: service.addAttachmentToTestStep(self._test_run.uri, self._index, step_index, file_name, title, file_content.read()) self._reloadFromPolarion()
def save(self): '\n Saves the current test record\n ' new_item = {} for (attr, value) in self.__dict__.items(): if (not attr.startswith('_')): new_item[attr] = value service = self._polarion.getService('TestManagement') service.executeTest(self._test_run.uri, new_item) self._reloadFromPolarion()
1,585,981,437,865,151,000
Saves the current test record
polarion/record.py
save
jesper-raemaekers/python-polarion
python
def save(self): '\n \n ' new_item = {} for (attr, value) in self.__dict__.items(): if (not attr.startswith('_')): new_item[attr] = value service = self._polarion.getService('TestManagement') service.executeTest(self._test_run.uri, new_item) self._reloadFromPolarion()
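save() serializes the record by collecting every public attribute; underscore-prefixed fields (client plumbing such as _polarion and _test_run) are skipped. The filter in isolation, as a runnable illustration with hypothetical field names:

class Demo:
    def __init__(self):
        self._polarion = object()    # private plumbing: excluded
        self.result = 'passed'       # public data: included
        self.comment = None

new_item = {attr: value for attr, value in Demo().__dict__.items()
            if not attr.startswith('_')}
assert new_item == {'result': 'passed', 'comment': None}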
def _removeHandlersFromLogger(logger, handlerTypes=None): "\n Remove all handlers or handlers of a specified type from a logger.\n\n @param logger: The logger whose handlers should be processed.\n @type logger: A logging.Logger object\n @param handlerTypes: A type of handler or list/tuple of types of handlers\n that should be removed from the logger. If I{None}, all handlers are\n removed.\n @type handlerTypes: L{None}, a logging.Handler subclass or\n I{list}/I{tuple} of logging.Handler subclasses.\n " for handler in logger.handlers: if ((handlerTypes is None) or isinstance(handler, handlerTypes)): logger.removeHandler(handler)
-2,376,610,905,695,259,000
Remove all handlers or handlers of a specified type from a logger. @param logger: The logger whose handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses.
src/shotgunEventDaemon.py
_removeHandlersFromLogger
darkvertex/shotgunEvents
python
def _removeHandlersFromLogger(logger, handlerTypes=None): "\n Remove all handlers or handlers of a specified type from a logger.\n\n @param logger: The logger whose handlers should be processed.\n @type logger: A logging.Logger object\n @param handlerTypes: A type of handler or list/tuple of types of handlers\n that should be removed from the logger. If I{None}, all handlers are\n removed.\n @type handlerTypes: L{None}, a logging.Handler subclass or\n I{list}/I{tuple} of logging.Handler subclasses.\n " for handler in logger.handlers: if ((handlerTypes is None) or isinstance(handler, handlerTypes)): logger.removeHandler(handler)
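One caveat worth noting: the loop iterates logger.handlers while removeHandler() mutates that same list, so consecutive matching handlers can be skipped; iterating over a copy (list(logger.handlers)) avoids this. A hedged usage sketch, assuming it runs in the same module as the function above:

import logging
import logging.handlers

log = logging.getLogger('plugin.demo')
log.addHandler(logging.StreamHandler())

# Remove only SMTP handlers, leaving the stream handler in place...
_removeHandlersFromLogger(log, logging.handlers.SMTPHandler)
# ...or remove everything by omitting handlerTypes.
_removeHandlersFromLogger(log)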
def _addMailHandlerToLogger(logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None): '\n Configure a logger with a handler that sends emails to specified\n addresses.\n\n The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}.\n\n @note: Any SMTPHandler already connected to the logger will be removed.\n\n @param logger: The logger to configure\n @type logger: A logging.Logger instance\n @param toAddrs: The addresses to send the email to.\n @type toAddrs: A list of email addresses that will be passed on to the\n SMTPHandler.\n ' if (smtpServer and fromAddr and toAddrs and emailSubject): mailHandler = CustomSMTPHandler(smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler)
-307,098,511,064,583,040
Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler.
src/shotgunEventDaemon.py
_addMailHandlerToLogger
darkvertex/shotgunEvents
python
def _addMailHandlerToLogger(logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None): '\n Configure a logger with a handler that sends emails to specified\n addresses.\n\n The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}.\n\n @note: Any SMTPHandler already connected to the logger will be removed.\n\n @param logger: The logger to configure\n @type logger: A logging.Logger instance\n @param toAddrs: The addresses to send the email to.\n @type toAddrs: A list of email addresses that will be passed on to the\n SMTPHandler.\n ' if (smtpServer and fromAddr and toAddrs and emailSubject): mailHandler = CustomSMTPHandler(smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler)
def _getConfigPath(): '\n Get the path of the shotgunEventDaemon configuration file.\n ' paths = ['/etc', os.path.dirname(__file__)] scriptPath = sys.argv[0] if ((scriptPath != '') and (scriptPath != '-c')): scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) paths[:0] = [os.path.dirname(scriptPath)] for path in paths: path = os.path.join(path, 'shotgunEventDaemon.conf') if os.path.exists(path): return path raise EventDaemonError(('Config path not found, searched %s' % ', '.join(paths)))
8,421,777,688,873,840,000
Get the path of the shotgunEventDaemon configuration file.
src/shotgunEventDaemon.py
_getConfigPath
darkvertex/shotgunEvents
python
def _getConfigPath(): '\n \n ' paths = ['/etc', os.path.dirname(__file__)] scriptPath = sys.argv[0] if ((scriptPath != '') and (scriptPath != '-c')): scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) paths[:0] = [os.path.dirname(scriptPath)] for path in paths: path = os.path.join(path, 'shotgunEventDaemon.conf') if os.path.exists(path): return path raise EventDaemonError(('Config path not found, searched %s' % ', '.join(paths)))
def start(self): '\n Start the processing of events.\n\n The last processed id is loaded up from persistent storage on disk and\n the main loop is started.\n ' socket.setdefaulttimeout(60) self.log.info(('Using SG Python API version %s' % sg.__version__)) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning('Keyboard interrupt. Cleaning up...') except Exception as err: msg = 'Crash!!!!! Unexpected error (%s) in main loop.\n\n%s' self.log.critical(msg, type(err), traceback.format_exc(err))
-1,260,733,432,213,487,900
Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started.
src/shotgunEventDaemon.py
start
darkvertex/shotgunEvents
python
def start(self): '\n Start the processing of events.\n\n The last processed id is loaded up from persistent storage on disk and\n the main loop is started.\n ' socket.setdefaulttimeout(60) self.log.info(('Using SG Python API version %s' % sg.__version__)) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning('Keyboard interrupt. Cleaning up...') except Exception as err: msg = 'Crash!!!!! Unexpected error (%s) in main loop.\n\n%s' self.log.critical(msg, type(err), traceback.format_exc(err))
def _loadEventIdData(self): "\n Load the last processed event id from the disk\n\n If no event has ever been processed or if the eventIdFile has been\n deleted from disk, no id will be recoverable. In this case, we will try\n contacting Shotgun to get the latest event's id and we'll start\n processing from there.\n " eventIdFile = self.config.getEventIdFile() if (eventIdFile and os.path.exists(eventIdFile)): try: fh = open(eventIdFile, 'rb') try: self._eventIdData = pickle.load(fh) noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for (pluginName, pluginState) in collection.items(): if (pluginName in maxPluginStates.keys()): if (pluginState[0] > maxPluginStates[pluginName][0]): maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if (pluginName in maxPluginStates.keys()): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() fh = open(eventIdFile, 'rb') line = fh.readline().strip() if line.isdigit(): lastEventId = int(line) self.log.debug('Read last event id (%d) from file.', lastEventId) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError(('Could not load event id from file.\n\n%s' % traceback.format_exc(err))) else: lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData()
-4,428,605,945,949,538,300
Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there.
src/shotgunEventDaemon.py
_loadEventIdData
darkvertex/shotgunEvents
python
def _loadEventIdData(self): "\n Load the last processed event id from the disk\n\n If no event has ever been processed or if the eventIdFile has been\n deleted from disk, no id will be recoverable. In this case, we will try\n contacting Shotgun to get the latest event's id and we'll start\n processing from there.\n " eventIdFile = self.config.getEventIdFile() if (eventIdFile and os.path.exists(eventIdFile)): try: fh = open(eventIdFile, 'rb') try: self._eventIdData = pickle.load(fh) noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for (pluginName, pluginState) in collection.items(): if (pluginName in maxPluginStates.keys()): if (pluginState[0] > maxPluginStates[pluginName][0]): maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if (pluginName in maxPluginStates.keys()): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() fh = open(eventIdFile, 'rb') line = fh.readline().strip() if line.isdigit(): lastEventId = int(line) self.log.debug('Read last event id (%d) from file.', lastEventId) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError(('Could not load event id from file.\n\n%s' % traceback.format_exc(err))) else: lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData()
def _mainLoop(self): '\n Run the event processing loop.\n\n General behavior:\n - Load plugins from disk - see L{load} method.\n - Get new events from Shotgun\n - Loop through events\n - Loop through each plugin\n - Loop through each callback\n - Send the callback an event\n - Once all callbacks are done in all plugins, save the eventId\n - Go to the next event\n - Once all events are processed, wait for the defined fetch interval time and start over.\n\n Caveats:\n - If a plugin is deemed "inactive" (an error occurred during\n registration), skip it.\n - If a callback is deemed "inactive" (an error occurred during callback\n execution), skip it.\n - Each time through the loop, if the pidFile is gone, stop.\n ' self.log.debug('Starting the event processing loop.') while self._continue: events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() if (len(events) < self.config.getMaxEventBatchSize()): time.sleep(self._fetch_interval) for collection in self._pluginCollections: collection.load() self._loadEventIdData() self.log.debug('Shutting down event processing loop.')
-6,763,117,773,073,454,000
Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occurred during registration), skip it. - If a callback is deemed "inactive" (an error occurred during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop.
src/shotgunEventDaemon.py
_mainLoop
darkvertex/shotgunEvents
python
def _mainLoop(self): '\n Run the event processing loop.\n\n General behavior:\n - Load plugins from disk - see L{load} method.\n - Get new events from Shotgun\n - Loop through events\n - Loop through each plugin\n - Loop through each callback\n - Send the callback an event\n - Once all callbacks are done in all plugins, save the eventId\n - Go to the next event\n - Once all events are processed, wait for the defined fetch interval time and start over.\n\n Caveats:\n - If a plugin is deemed "inactive" (an error occurred during\n registration), skip it.\n - If a callback is deemed "inactive" (an error occurred during callback\n execution), skip it.\n - Each time through the loop, if the pidFile is gone, stop.\n ' self.log.debug('Starting the event processing loop.') while self._continue: events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() if (len(events) < self.config.getMaxEventBatchSize()): time.sleep(self._fetch_interval) for collection in self._pluginCollections: collection.load() self._loadEventIdData() self.log.debug('Shutting down event processing loop.')
def _getNewEvents(self): '\n Fetch new events from Shotgun.\n\n @return: Recent events that need to be processed by the engine.\n @rtype: I{list} of Shotgun event dictionaries.\n ' nextEventId = None for newId in [coll.getNextUnprocessedEventId() for coll in self._pluginCollections]: if ((newId is not None) and ((nextEventId is None) or (newId < nextEventId))): nextEventId = newId if (nextEventId is not None): filters = [['id', 'greater_than', (nextEventId - 1)]] fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity', 'user', 'project', 'session_uuid', 'created_at'] order = [{'column': 'id', 'direction': 'asc'}] conn_attempts = 0 while True: try: events = self._sg.find('EventLogEntry', filters, fields, order, limit=self.config.getMaxEventBatchSize()) if events: self.log.debug('Got %d events: %d to %d.', len(events), events[0]['id'], events[(- 1)]['id']) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = ('Unknown error: %s' % str(err)) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return []
-7,859,600,377,278,774,000
Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries.
src/shotgunEventDaemon.py
_getNewEvents
darkvertex/shotgunEvents
python
def _getNewEvents(self): '\n Fetch new events from Shotgun.\n\n @return: Recent events that need to be processed by the engine.\n @rtype: I{list} of Shotgun event dictionaries.\n ' nextEventId = None for newId in [coll.getNextUnprocessedEventId() for coll in self._pluginCollections]: if ((newId is not None) and ((nextEventId is None) or (newId < nextEventId))): nextEventId = newId if (nextEventId is not None): filters = [['id', 'greater_than', (nextEventId - 1)]] fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity', 'user', 'project', 'session_uuid', 'created_at'] order = [{'column': 'id', 'direction': 'asc'}] conn_attempts = 0 while True: try: events = self._sg.find('EventLogEntry', filters, fields, order, limit=self.config.getMaxEventBatchSize()) if events: self.log.debug('Got %d events: %d to %d.', len(events), events[0]['id'], events[(- 1)]['id']) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = ('Unknown error: %s' % str(err)) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return []
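The Shotgun query that _getNewEvents builds can be read in isolation. A hedged standalone sketch; sg_conn is assumed to be a live shotgun_api3 connection, and next_event_id/batch_size are placeholders for the values the engine computes:

def fetch_events(sg_conn, next_event_id, batch_size):
    # Everything at or after next_event_id, oldest first, capped at one batch.
    filters = [['id', 'greater_than', next_event_id - 1]]
    fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity',
              'user', 'project', 'session_uuid', 'created_at']
    order = [{'column': 'id', 'direction': 'asc'}]
    return sg_conn.find('EventLogEntry', filters, fields, order, limit=batch_size)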
def _saveEventIdData(self): '\n Save an event Id to persistent storage.\n\n Next time the engine is started it will try to read the event id from\n this location to know at which event it should start processing.\n ' eventIdFile = self.config.getEventIdFile() if (eventIdFile is not None): for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for (colPath, state) in self._eventIdData.items(): if state: try: with open(eventIdFile, 'wb') as fh: pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error('Can not write event id data to %s.\n\n%s', eventIdFile, traceback.format_exc(err)) break else: self.log.warning('No state was found. Not saving to disk.')
-5,762,895,681,315,035,000
Save an event Id to persistent storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing.
src/shotgunEventDaemon.py
_saveEventIdData
darkvertex/shotgunEvents
python
def _saveEventIdData(self): '\n Save an event Id to persistent storage.\n\n Next time the engine is started it will try to read the event id from\n this location to know at which event it should start processing.\n ' eventIdFile = self.config.getEventIdFile() if (eventIdFile is not None): for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for (colPath, state) in self._eventIdData.items(): if state: try: with open(eventIdFile, 'wb') as fh: pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error('Can not write event id data to %s.\n\n%s', eventIdFile, traceback.format_exc(err)) break else: self.log.warning('No state was found. Not saving to disk.')
def load(self): '\n Load plugins from disk.\n\n General behavior:\n - Loop on all paths.\n - Find all valid .py plugin files.\n - Loop on all plugin files.\n - For any new plugins, load them, otherwise, refresh them.\n ' newPlugins = {} for basename in os.listdir(self.path): if ((not basename.endswith('.py')) or basename.startswith('.')): continue if (basename in self._plugins): newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin(self._engine, os.path.join(self.path, basename)) newPlugins[basename].load() self._plugins = newPlugins
1,639,488,018,755,254,000
Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them.
src/shotgunEventDaemon.py
load
darkvertex/shotgunEvents
python
def load(self): '\n Load plugins from disk.\n\n General behavior:\n - Loop on all paths.\n - Find all valid .py plugin files.\n - Loop on all plugin files.\n - For any new plugins, load them, otherwise, refresh them.\n ' newPlugins = {} for basename in os.listdir(self.path): if ((not basename.endswith('.py')) or basename.startswith('.')): continue if (basename in self._plugins): newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin(self._engine, os.path.join(self.path, basename)) newPlugins[basename].load() self._plugins = newPlugins
def __init__(self, engine, path): '\n @param engine: The engine that instantiated this plugin.\n @type engine: L{Engine}\n @param path: The path of the plugin file to load.\n @type path: I{str}\n\n @raise ValueError: If the path to the plugin is not a valid file.\n ' self._engine = engine self._path = path if (not os.path.isfile(path)): raise ValueError(('The path to the plugin is not a valid file - %s.' % path)) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} self.logger = logging.getLogger(('plugin.' + self.getName())) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if (self._engine.config.getLogMode() == 1): _setFilePathOnLogger(self.logger, self._engine.config.getLogFile(('plugin.' + self.getName())))
-8,360,313,440,858,122,000
@param engine: The engine that instantiated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file.
src/shotgunEventDaemon.py
__init__
darkvertex/shotgunEvents
python
def __init__(self, engine, path): '\n @param engine: The engine that instantiated this plugin.\n @type engine: L{Engine}\n @param path: The path of the plugin file to load.\n @type path: I{str}\n\n @raise ValueError: If the path to the plugin is not a valid file.\n ' self._engine = engine self._path = path if (not os.path.isfile(path)): raise ValueError(('The path to the plugin is not a valid file - %s.' % path)) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} self.logger = logging.getLogger(('plugin.' + self.getName())) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if (self._engine.config.getLogMode() == 1): _setFilePathOnLogger(self.logger, self._engine.config.getLogFile(('plugin.' + self.getName())))
def isActive(self): "\n Is the current plugin active. Should it's callbacks be run?\n\n @return: True if this plugin's callbacks should be run, False otherwise.\n @rtype: I{bool}\n " return self._active
7,688,247,206,678,596,000
Is the current plugin active. Should its callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. @rtype: I{bool}
src/shotgunEventDaemon.py
isActive
darkvertex/shotgunEvents
python
def isActive(self): "\n Is the current plugin active. Should it's callbacks be run?\n\n @return: True if this plugin's callbacks should be run, False otherwise.\n @rtype: I{bool}\n " return self._active
def setEmails(self, *emails): "\n Set the email addresses to whom this plugin should send errors.\n\n @param emails: See L{LogFactory.getLogger}'s emails argument for info.\n @type emails: A I{list}/I{tuple} of email addresses or I{bool}.\n " self._engine.setEmailsOnLogger(self.logger, emails)
-3,236,883,789,366,445,600
Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}.
src/shotgunEventDaemon.py
setEmails
darkvertex/shotgunEvents
python
def setEmails(self, *emails): "\n Set the email addresses to whom this plugin should send errors.\n\n @param emails: See L{LogFactory.getLogger}'s emails argument for info.\n @type emails: A I{list}/I{tuple} of email addresses or I{bool}.\n " self._engine.setEmailsOnLogger(self.logger, emails)
def load(self): '\n Load/Reload the plugin and all its callbacks.\n\n If a plugin has never been loaded it will be loaded normally. If the\n plugin has been loaded before it will be reloaded only if the file has\n been modified on disk. In this event callbacks will all be cleared and\n reloaded.\n\n General behavior:\n - Try to load the source of the plugin.\n - Try to find a function called registerCallbacks in the file.\n - Try to run the registration function.\n\n At every step along the way, if any error occurs the whole plugin will\n be deactivated and the function will return.\n ' mtime = os.path.getmtime(self._path) if (self._mtime is None): self._engine.log.info(('Loading plugin at %s' % self._path)) elif (self._mtime < mtime): self._engine.log.info(('Reloading plugin at %s' % self._path)) else: return self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error('Could not load the plugin at %s.\n\n%s', self._path, traceback.format_exc()) return regFunc = getattr(plugin, 'registerCallbacks', None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical('Error running register callback function from plugin at %s.\n\n%s', self._path, traceback.format_exc()) self._active = False else: self._engine.log.critical('Did not find a registerCallbacks function in plugin at %s.', self._path) self._active = False
-8,549,978,434,882,615,000
Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return.
src/shotgunEventDaemon.py
load
darkvertex/shotgunEvents
python
def load(self): '\n Load/Reload the plugin and all its callbacks.\n\n If a plugin has never been loaded it will be loaded normally. If the\n plugin has been loaded before it will be reloaded only if the file has\n been modified on disk. In this event callbacks will all be cleared and\n reloaded.\n\n General behavior:\n - Try to load the source of the plugin.\n - Try to find a function called registerCallbacks in the file.\n - Try to run the registration function.\n\n At every step along the way, if any error occurs the whole plugin will\n be deactivated and the function will return.\n ' mtime = os.path.getmtime(self._path) if (self._mtime is None): self._engine.log.info(('Loading plugin at %s' % self._path)) elif (self._mtime < mtime): self._engine.log.info(('Reloading plugin at %s' % self._path)) else: return self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error('Could not load the plugin at %s.\n\n%s', self._path, traceback.format_exc()) return regFunc = getattr(plugin, 'registerCallbacks', None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical('Error running register callback function from plugin at %s.\n\n%s', self._path, traceback.format_exc()) self._active = False else: self._engine.log.critical('Did not find a registerCallbacks function in plugin at %s.', self._path) self._active = False
def registerCallback(self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True): '\n Register a callback in the plugin.\n ' global sg sgConnection = sg.Shotgun(self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer()) self._callbacks.append(Callback(callback, self, self._engine, sgConnection, matchEvents, args, stopOnError))
6,433,666,557,434,882,000
Register a callback in the plugin.
src/shotgunEventDaemon.py
registerCallback
darkvertex/shotgunEvents
python
def registerCallback(self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True): '\n \n ' global sg sgConnection = sg.Shotgun(self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer()) self._callbacks.append(Callback(callback, self, self._engine, sgConnection, matchEvents, args, stopOnError))
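Combining Plugin.load() and registerCallback() above gives the shape of a plugin file: the daemon imports it, calls its registerCallbacks(reg) with a Registrar, and later invokes each callback as callback(sg, logger, event, args) (see Callback.process below). A hedged minimal plugin; the script credentials are placeholders, and matchEvents is left at its documented default:

# minimal_plugin.py -- sketch of a shotgunEvents plugin file.
def registerCallbacks(reg):
    reg.registerCallback(
        'plugin_script_name',        # placeholder Shotgun script name
        'plugin_script_key',         # placeholder Shotgun script key
        logEvent,
        matchEvents=None,            # None is the default; filter dict shape is not shown above
        args=None,
    )

def logEvent(sg, logger, event, args):
    # Invoked by Callback.process() with the plugin's own Shotgun connection.
    logger.info('Got event %d of type %s', event['id'], event['event_type'])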
def __iter__(self): '\n A plugin is iterable and will iterate over all its L{Callback} objects.\n ' return self._callbacks.__iter__()
-6,127,632,201,364,838,000
A plugin is iterable and will iterate over all its L{Callback} objects.
src/shotgunEventDaemon.py
__iter__
darkvertex/shotgunEvents
python
def __iter__(self): '\n \n ' return self._callbacks.__iter__()
def __str__(self): '\n Provide the name of the plugin when it is cast as string.\n\n @return: The name of the plugin.\n @rtype: I{str}\n ' return self.getName()
-5,831,211,034,429,896,000
Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str}
src/shotgunEventDaemon.py
__str__
darkvertex/shotgunEvents
python
def __str__(self): '\n Provide the name of the plugin when it is cast as string.\n\n @return: The name of the plugin.\n @rtype: I{str}\n ' return self.getName()
def __init__(self, plugin): '\n Wrap a plugin so it can be passed to a user.\n ' self._plugin = plugin self._allowed = ['logger', 'setEmails', 'registerCallback']
8,840,827,954,657,200,000
Wrap a plugin so it can be passed to a user.
src/shotgunEventDaemon.py
__init__
darkvertex/shotgunEvents
python
def __init__(self, plugin): '\n \n ' self._plugin = plugin self._allowed = ['logger', 'setEmails', 'registerCallback']
def getLogger(self): '\n Get the logger for this plugin.\n\n @return: The logger configured for this plugin.\n @rtype: L{logging.Logger}\n ' return self.logger
1,521,799,279,905,892,600
Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger}
src/shotgunEventDaemon.py
getLogger
darkvertex/shotgunEvents
python
def getLogger(self): '\n Get the logger for this plugin.\n\n @return: The logger configured for this plugin.\n @rtype: L{logging.Logger}\n ' return self.logger
def __init__(self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True): '\n @param callback: The function to run when a Shotgun event occurs.\n @type callback: A function object.\n @param engine: The engine that will dispatch to this callback.\n @type engine: L{Engine}.\n @param shotgun: The Shotgun instance that will be used to communicate\n with your Shotgun server.\n @type shotgun: L{sg.Shotgun}\n @param matchEvents: The event filter to match events against before invoking callback.\n @type matchEvents: dict\n @param args: Any datastructure you would like to be passed to your\n callback function. Defaults to None.\n @type args: Any object.\n\n @raise TypeError: If the callback is not a callable object.\n ' if (not callable(callback)): raise TypeError('The callback must be a callable object (function, method or callable class instance).') self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True if hasattr(callback, '__name__'): self._name = callback.__name__ elif (hasattr(callback, '__class__') and hasattr(callback, '__call__')): self._name = ('%s_%s' % (callback.__class__.__name__, hex(id(callback)))) else: raise ValueError('registerCallback should be called with a function or a callable object instance as callback argument.') self._logger = logging.getLogger(((plugin.logger.name + '.') + self._name)) self._logger.config = self._engine.config
-8,151,614,094,003,039,000
@param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object.
src/shotgunEventDaemon.py
__init__
darkvertex/shotgunEvents
python
def __init__(self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True): '\n @param callback: The function to run when a Shotgun event occurs.\n @type callback: A function object.\n @param engine: The engine that will dispatch to this callback.\n @type engine: L{Engine}.\n @param shotgun: The Shotgun instance that will be used to communicate\n with your Shotgun server.\n @type shotgun: L{sg.Shotgun}\n @param matchEvents: The event filter to match events against before invoking callback.\n @type matchEvents: dict\n @param args: Any datastructure you would like to be passed to your\n callback function. Defaults to None.\n @type args: Any object.\n\n @raise TypeError: If the callback is not a callable object.\n ' if (not callable(callback)): raise TypeError('The callback must be a callable object (function, method or callable class instance).') self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True if hasattr(callback, '__name__'): self._name = callback.__name__ elif (hasattr(callback, '__class__') and hasattr(callback, '__call__')): self._name = ('%s_%s' % (callback.__class__.__name__, hex(id(callback)))) else: raise ValueError('registerCallback should be called with a function or a callable object instance as callback argument.') self._logger = logging.getLogger(((plugin.logger.name + '.') + self._name)) self._logger.config = self._engine.config
def process(self, event): '\n Process an event with the callback object supplied on initialization.\n\n If an error occurs, it will be logged appropriately and the callback\n will be deactivated.\n\n @param event: The Shotgun event to process.\n @type event: I{dict}\n ' if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event['session_uuid']) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = 'An error occurred processing an event.\n\n%s\n\nLocal variables at outermost frame in plugin:\n\n%s' self._logger.critical(msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals)) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace('plugin.', '') end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat((end_time - start_time)) delay = self._prettyTimeDeltaFormat((start_time - event['created_at'])) msg_format = 'event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s' data = [event['id'], event['created_at'].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay] self._engine.timing_logger.info(msg_format, *data) return self._active
4,690,364,775,588,296,000
Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict}
src/shotgunEventDaemon.py
process
darkvertex/shotgunEvents
python
def process(self, event): '\n Process an event with the callback object supplied on initialization.\n\n If an error occurs, it will be logged appropriately and the callback\n will be deactivated.\n\n @param event: The Shotgun event to process.\n @type event: I{dict}\n ' if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event['session_uuid']) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = 'An error occurred processing an event.\n\n%s\n\nLocal variables at outermost frame in plugin:\n\n%s' self._logger.critical(msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals)) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace('plugin.', '') end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat((end_time - start_time)) delay = self._prettyTimeDeltaFormat((start_time - event['created_at'])) msg_format = 'event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s' data = [event['id'], event['created_at'].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay] self._engine.timing_logger.info(msg_format, *data) return self._active
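The error branch in process() uses a frame walk worth isolating: it follows tb_next to collect every traceback frame, then dumps the locals of stack[1], the frame one level inside the try (the outermost plugin frame). A runnable reduction of that trick:

import pprint
import sys
import traceback

def boom():
    secret = 42      # shows up in the dumped locals
    raise RuntimeError('demo')

try:
    boom()
except Exception:
    tb = sys.exc_info()[2]
    stack = []
    while tb:
        stack.append(tb.tb_frame)   # frame 0: this try block; frame 1: boom()
        tb = tb.tb_next
    print(traceback.format_exc())
    pprint.pprint(stack[1].f_locals)   # {'secret': 42}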
def isActive(self): '\n Check if this callback is active, i.e. if events should be passed to it\n for processing.\n\n @return: True if this callback should process events, False otherwise.\n @rtype: I{bool}\n ' return self._active
-6,180,805,449,513,379,000
Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool}
src/shotgunEventDaemon.py
isActive
darkvertex/shotgunEvents
python
def isActive(self): '\n Check if this callback is active, i.e. if events should be passed to it\n for processing.\n\n @return: True if this callback should process events, False otherwise.\n @rtype: I{bool}\n ' return self._active
def __str__(self): '\n The name of the callback.\n\n @return: The name of the callback\n @rtype: I{str}\n ' return self._name
5,708,799,058,980,545,000
The name of the callback. @return: The name of the callback @rtype: I{str}
src/shotgunEventDaemon.py
__str__
darkvertex/shotgunEvents
python
def __str__(self): '\n The name of the callback.\n\n @return: The name of the callback\n @rtype: I{str}\n ' return self._name
def emit(self, record): '\n Emit a record.\n\n Format the record and send it to the specified addressees.\n ' try: import smtplib from email.utils import formatdate port = self.mailport if (not port): port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = ('From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s' % (self.fromaddr, ','.join(self.toaddrs), self.getSubject(record), formatdate(), msg)) if self.username: if (self.secure is not None): smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
6,078,376,758,493,052,000
Emit a record. Format the record and send it to the specified addressees.
src/shotgunEventDaemon.py
emit
darkvertex/shotgunEvents
python
def emit(self, record): '\n Emit a record.\n\n Format the record and send it to the specified addressees.\n ' try: import smtplib from email.utils import formatdate port = self.mailport if (not port): port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = ('From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s' % (self.fromaddr, ','.join(self.toaddrs), self.getSubject(record), formatdate(), msg)) if self.username: if (self.secure is not None): smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
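emit builds the RFC 2822 headers by hand before handing the payload to smtplib. That header-assembly step can be tried on its own without a mail server (the addresses and strings below are placeholders):

from email.utils import formatdate

fromaddr = 'daemon@example.com'   # placeholder
toaddrs = ['ops@example.com']     # placeholder
subject = 'shotgunEventDaemon error'
body = 'Traceback goes here.'

# Same format string the handler uses: headers, a blank line, then the body.
msg = ('From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s'
       % (fromaddr, ','.join(toaddrs), subject, formatdate(), body))
print(msg)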
def _run(self): "\n Start the engine's main loop\n " self._engine.start()
-5,240,224,163,209,701,000
Start the engine's main loop
src/shotgunEventDaemon.py
_run
darkvertex/shotgunEvents
python
def _run(self): "\n \n " self._engine.start()
def SvcStop(self): '\n Stop the Windows service.\n ' self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop()
-4,262,989,655,578,718,700
Stop the Windows service.
src/shotgunEventDaemon.py
SvcStop
darkvertex/shotgunEvents
python
def SvcStop(self): '\n \n ' self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop()
def SvcDoRun(self): '\n Start the Windows service.\n ' servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, '')) self.main()
1,857,521,332,038,923,800
Start the Windows service.
src/shotgunEventDaemon.py
SvcDoRun
darkvertex/shotgunEvents
python
def SvcDoRun(self): '\n        \n        ' servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, '')) self.main()
def main(self): '\n Primary Windows entry point\n ' self._engine.start()
-1,633,724,170,099,434,200
Primary Windows entry point
src/shotgunEventDaemon.py
main
darkvertex/shotgunEvents
python
def main(self): '\n \n ' self._engine.start()
def prime_check(number: int) -> bool: '\n Determines whether a given number is prime or not\n\n >>> prime_check(2)\n True\n >>> prime_check(15)\n False\n >>> prime_check(29)\n True\n ' if (((number % 2) == 0) and (number > 2)): return False return all(((number % i) for i in range(3, (int(math.sqrt(number)) + 1), 2)))
-31,438,179,935,647,390
Determines whether a given number is prime or not >>> prime_check(2) True >>> prime_check(15) False >>> prime_check(29) True
project_euler/problem_007/sol3.py
prime_check
04n0/TheAlgorithms-Python
python
def prime_check(number: int) -> bool: '\n Determines whether a given number is prime or not\n\n >>> prime_check(2)\n True\n >>> prime_check(15)\n False\n >>> prime_check(29)\n True\n ' if (((number % 2) == 0) and (number > 2)): return False return all(((number % i) for i in range(3, (int(math.sqrt(number)) + 1), 2)))
def prime_generator(): '\n Generate a sequence of prime numbers\n ' num = 2 while True: if prime_check(num): (yield num) num += 1
7,299,812,059,533,551,000
Generate a sequence of prime numbers
project_euler/problem_007/sol3.py
prime_generator
04n0/TheAlgorithms-Python
python
def prime_generator(): '\n \n ' num = 2 while True: if prime_check(num): (yield num) num += 1
def solution(nth: int=10001) -> int: '\n Returns the n-th prime number.\n\n >>> solution(6)\n 13\n >>> solution(1)\n 2\n >>> solution(3)\n 5\n >>> solution(20)\n 71\n >>> solution(50)\n 229\n >>> solution(100)\n 541\n ' return next(itertools.islice(prime_generator(), (nth - 1), nth))
-1,264,646,381,383,279,900
Returns the n-th prime number. >>> solution(6) 13 >>> solution(1) 2 >>> solution(3) 5 >>> solution(20) 71 >>> solution(50) 229 >>> solution(100) 541
project_euler/problem_007/sol3.py
solution
04n0/TheAlgorithms-Python
python
def solution(nth: int=10001) -> int: '\n Returns the n-th prime number.\n\n >>> solution(6)\n 13\n >>> solution(1)\n 2\n >>> solution(3)\n 5\n >>> solution(20)\n 71\n >>> solution(50)\n 229\n >>> solution(100)\n 541\n ' return next(itertools.islice(prime_generator(), (nth - 1), nth))
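solution composes prime_generator with itertools.islice to skip straight to the n-th prime. A self-contained rerun of the same composition, reusing the two functions defined above:

import itertools
import math

def prime_check(number: int) -> bool:
    # Same trial-division test as above: even numbers > 2 fail fast,
    # odd divisors are checked only up to sqrt(number).
    if number % 2 == 0 and number > 2:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

def prime_generator():
    num = 2
    while True:
        if prime_check(num):
            yield num
        num += 1

# The 6th prime is 13, matching the doctest solution(6) == 13.
print(next(itertools.islice(prime_generator(), 5, 6)))  # 13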
async def extract_object_urls(self, soup) -> List[str]: '\n Extract apartment object urls\n ' items = soup.find_all('a') urls: List[str] = [] for item in items: if ('woning/rotterdam-' in item['href']): urls.append(item['href']) return list(set(urls))
2,149,174,860,420,259,000
Extract apartment object urls
server/app/scrapers/maarten.py
extract_object_urls
damienallen/makelaardij-notify
python
async def extract_object_urls(self, soup) -> List[str]: '\n \n ' items = soup.find_all('a') urls: List[str] = [] for item in items: if ('woning/rotterdam-' in item['href']): urls.append(item['href']) return list(set(urls))
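extract_object_urls simply collects every anchor whose href contains 'woning/rotterdam-' and deduplicates with set(). The same filtering logic can be exercised on a tiny inline document (the HTML below is made up for illustration):

from bs4 import BeautifulSoup

html = """
<a href="/woning/rotterdam-example-street-1">listing</a>
<a href="/woning/rotterdam-example-street-1">duplicate</a>
<a href="/contact">other</a>
"""
soup = BeautifulSoup(html, 'html.parser')
urls = [a['href'] for a in soup.find_all('a') if 'woning/rotterdam-' in a['href']]
print(list(set(urls)))  # ['/woning/rotterdam-example-street-1']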
async def get_page_url(self, page_num: int) -> str: '\n Format page url\n ' return f'{self.BASE_URL}/aanbod/rotterdam/'
-6,004,065,819,992,848,000
Format page url
server/app/scrapers/maarten.py
get_page_url
damienallen/makelaardij-notify
python
async def get_page_url(self, page_num: int) -> str: '\n \n ' return f'{self.BASE_URL}/aanbod/rotterdam/'
async def get_apartment_urls(self) -> List[str]: '\n Fetch list of apartment urls from inventory\n ' urls = (await self.scrape_page(0)) return urls
-6,254,682,917,009,416,000
Fetch list of apartment urls from inventory
server/app/scrapers/maarten.py
get_apartment_urls
damienallen/makelaardij-notify
python
async def get_apartment_urls(self) -> List[str]: '\n \n ' urls = (await self.scrape_page(0)) return urls
def extract_features(self, soup): '\n Extract feature metadata from listing\n ' meta_data = {'makelaardij': self.MAKELAARDIJ, 'building': {}, 'unit': {'energy': {}, 'tags': []}} dt = soup.find_all('dt') dd = soup.find_all('dd') for (ind, key) in enumerate(dt): if ('Bouwjaar' in key.string): meta_data['building']['year_constructed'] = self.find_int(dd[ind].string) elif ('Woonoppervlakte' in key.string): meta_data['unit']['area'] = self.find_float(dd[ind].text.split(' ')[0]) elif ('Aantal kamers' in key.string): meta_data['unit']['num_rooms'] = self.find_int(dd[ind].text) elif ('verdiepingen' in key.string): meta_data['unit']['num_floors'] = self.find_int(dd[ind].text) elif ('Status' in key.string): meta_data['available'] = ('Beschikbaar' in dd[ind].text) elif (('Buitenruimte' in key.string) and ('TUIN' in dd[ind].text)): meta_data['unit']['tags'].append('garden') meta_data['address'] = soup.find('span', {'class': 'adres'}).string meta_data['asking_price'] = self.find_int(soup.find('span', {'class': 'price'}).string.replace('.', '')) description = soup.find('div', {'id': 'read-more-content'}).children for p in description: p_text = str(p.text) if ('Eigen grond' in p_text): meta_data['unit']['own_land'] = True elif ('erfpacht' in p_text): meta_data['unit']['own_land'] = False if ('Energielabel' in p_text): label = p_text.split('Energielabel: ')[1][0] meta_data['unit']['energy']['label'] = label break if (not meta_data['unit'].get('area')): raise SkipListing('Unable to find area') return meta_data
-4,377,614,919,145,519,600
Extract feature metadata from listing
server/app/scrapers/maarten.py
extract_features
damienallen/makelaardij-notify
python
def extract_features(self, soup): '\n        \n        ' meta_data = {'makelaardij': self.MAKELAARDIJ, 'building': {}, 'unit': {'energy': {}, 'tags': []}} dt = soup.find_all('dt') dd = soup.find_all('dd') for (ind, key) in enumerate(dt): if ('Bouwjaar' in key.string): meta_data['building']['year_constructed'] = self.find_int(dd[ind].string) elif ('Woonoppervlakte' in key.string): meta_data['unit']['area'] = self.find_float(dd[ind].text.split(' ')[0]) elif ('Aantal kamers' in key.string): meta_data['unit']['num_rooms'] = self.find_int(dd[ind].text) elif ('verdiepingen' in key.string): meta_data['unit']['num_floors'] = self.find_int(dd[ind].text) elif ('Status' in key.string): meta_data['available'] = ('Beschikbaar' in dd[ind].text) elif (('Buitenruimte' in key.string) and ('TUIN' in dd[ind].text)): meta_data['unit']['tags'].append('garden') meta_data['address'] = soup.find('span', {'class': 'adres'}).string meta_data['asking_price'] = self.find_int(soup.find('span', {'class': 'price'}).string.replace('.', '')) description = soup.find('div', {'id': 'read-more-content'}).children for p in description: p_text = str(p.text) if ('Eigen grond' in p_text): meta_data['unit']['own_land'] = True elif ('erfpacht' in p_text): meta_data['unit']['own_land'] = False if ('Energielabel' in p_text): label = p_text.split('Energielabel: ')[1][0] meta_data['unit']['energy']['label'] = label break if (not meta_data['unit'].get('area')): raise SkipListing('Unable to find area') return meta_data
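extract_features calls self.find_int and self.find_float, helpers defined elsewhere in the scraper and not shown in this excerpt. A plausible minimal sketch of such helpers, assuming they pull the first integer or (possibly comma-separated) decimal out of a messy string; these implementations are guesses, not the project's code:

import re
from typing import Optional

def find_int(text: str) -> Optional[int]:
    # Return the first run of digits in the string, if any.
    match = re.search(r'\d+', text)
    return int(match.group()) if match else None

def find_float(text: str) -> Optional[float]:
    # Accept both '85.5' and the Dutch-style '85,5' seen on listing pages.
    match = re.search(r'\d+(?:[.,]\d+)?', text)
    return float(match.group().replace(',', '.')) if match else None

print(find_int('Bouwjaar 1928'))       # 1928
print(find_float('85,5 m2 woonopp.'))  # 85.5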
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueQuantity'], extension_depth: int=0, max_extension_depth: Optional[int]=2) -> Union[(StructType, DataType)]: '\n A record of an event made for purposes of maintaining a security log. Typical\n uses include detection of intrusion attempts and monitoring for inappropriate\n usage.\n\n\n id: unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. In order to make the use of extensions safe and\n manageable, there is a strict set of governance applied to the definition and\n use of extensions. Though any implementer is allowed to define an extension,\n there is a set of requirements that SHALL be met as part of the definition of\n the extension.\n\n identifier: Identifies a specific instance of the entity. The reference should always be\n version specific.\n\n reference: Identifies a specific instance of the entity. The reference should be version\n specific.\n\n type: The type of the object that was involved in this audit event.\n\n role: Code representing the role the entity played in the event being audited.\n\n lifecycle: Identifier for the data life-cycle stage for the entity.\n\n securityLabel: Security labels for the identified entity.\n\n name: A name of the entity in the audit event.\n\n description: Text that describes the entity in more detail.\n\n query: The query parameters for a query-type entities.\n\n detail: Tagged value pairs for conveying additional information about the entity.\n\n ' from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema from spark_fhir_schemas.stu3.complex_types.auditevent_detail import AuditEvent_DetailSchema if ((max_recursion_limit and (nesting_list.count('AuditEvent_Entity') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['AuditEvent_Entity']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('identifier', IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('reference', 
ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('type', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('role', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('lifecycle', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('securityLabel', ArrayType(CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('name', StringType(), True), StructField('description', StringType(), True), StructField('query', StringType(), True), StructField('detail', ArrayType(AuditEvent_DetailSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] return schema
-8,171,934,014,997,929,000
A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. id: unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. identifier: Identifies a specific instance of the entity. The reference should always be version specific. reference: Identifies a specific instance of the entity. The reference should be version specific. type: The type of the object that was involved in this audit event. role: Code representing the role the entity played in the event being audited. lifecycle: Identifier for the data life-cycle stage for the entity. securityLabel: Security labels for the identified entity. name: A name of the entity in the audit event. description: Text that describes the entity in more detail. query: The query parameters for query-type entities. detail: Tagged value pairs for conveying additional information about the entity.
spark_fhir_schemas/stu3/complex_types/auditevent_entity.py
get_schema
icanbwell/SparkFhirSchemas
python
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueQuantity'], extension_depth: int=0, max_extension_depth: Optional[int]=2) -> Union[(StructType, DataType)]: '\n A record of an event made for purposes of maintaining a security log. Typical\n uses include detection of intrusion attempts and monitoring for inappropriate\n usage.\n\n\n id: unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. In order to make the use of extensions safe and\n manageable, there is a strict set of governance applied to the definition and\n use of extensions. Though any implementer is allowed to define an extension,\n there is a set of requirements that SHALL be met as part of the definition of\n the extension.\n\n identifier: Identifies a specific instance of the entity. The reference should always be\n version specific.\n\n reference: Identifies a specific instance of the entity. The reference should be version\n specific.\n\n type: The type of the object that was involved in this audit event.\n\n role: Code representing the role the entity played in the event being audited.\n\n lifecycle: Identifier for the data life-cycle stage for the entity.\n\n securityLabel: Security labels for the identified entity.\n\n name: A name of the entity in the audit event.\n\n description: Text that describes the entity in more detail.\n\n query: The query parameters for a query-type entities.\n\n detail: Tagged value pairs for conveying additional information about the entity.\n\n ' from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema from spark_fhir_schemas.stu3.complex_types.auditevent_detail import AuditEvent_DetailSchema if ((max_recursion_limit and (nesting_list.count('AuditEvent_Entity') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['AuditEvent_Entity']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('identifier', IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('reference', 
ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('type', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('role', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('lifecycle', CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('securityLabel', ArrayType(CodingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('name', StringType(), True), StructField('description', StringType(), True), StructField('query', StringType(), True), StructField('detail', ArrayType(AuditEvent_DetailSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] return schema
def test_get_deed_or_license_path_by4(self): '\n        4.0 formula:\n        /licenses/VERSION/LICENSE_deed_LANGUAGE.html\n        /licenses/VERSION/LICENSE_legalcode_LANGUAGE.html\n\n        4.0 examples:\n        /licenses/4.0/by-nc-nd_deed_en.html\n        /licenses/4.0/by-nc-nd_legalcode_en.html\n        /licenses/4.0/by_deed_en.html\n        /licenses/4.0/by_legalcode_en.html\n        /licenses/4.0/by_deed_zh-Hans.html\n        /licenses/4.0/by_legalcode_zh-Hans.html\n        ' self._test_get_deed_or_license_path([('4.0', 'by-nc-nd', '', 'en', 'licenses/by-nc-nd/4.0/deed.en.html', ['deed.html', 'index.html'], 'licenses/by-nc-nd/4.0/legalcode.en.html', ['legalcode.html']), ('4.0', 'by', '', 'en', 'licenses/by/4.0/deed.en.html', ['deed.html', 'index.html'], 'licenses/by/4.0/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('4.0', 'by', '', 'zh-Hans', 'licenses/by/4.0/deed.zh-Hans.html', [], 'licenses/by/4.0/legalcode.zh-Hans.html', [])])
6,298,925,950,783,103,000
4.0 formula: /licenses/VERSION/LICENSE_deed_LANGUAGE.html /licenses/VERSION/LICENSE_legalcode_LANGUAGE.html 4.0 examples: /licenses/4.0/by-nc-nd_deed_en.html /licenses/4.0/by-nc-nd_legalcode_en.html /licenses/4.0/by_deed_en.html /licenses/4.0/by_legalcode_en.html /licenses/4.0/by_deed_zh-Hans.html /licenses/4.0/by_legalcode_zh-Hans.html
licenses/tests/test_models.py
test_get_deed_or_license_path_by4
kerahui/cc-licenses
python
def test_get_deed_or_license_path_by4(self): '\n        4.0 formula:\n        /licenses/VERSION/LICENSE_deed_LANGUAGE.html\n        /licenses/VERSION/LICENSE_legalcode_LANGUAGE.html\n\n        4.0 examples:\n        /licenses/4.0/by-nc-nd_deed_en.html\n        /licenses/4.0/by-nc-nd_legalcode_en.html\n        /licenses/4.0/by_deed_en.html\n        /licenses/4.0/by_legalcode_en.html\n        /licenses/4.0/by_deed_zh-Hans.html\n        /licenses/4.0/by_legalcode_zh-Hans.html\n        ' self._test_get_deed_or_license_path([('4.0', 'by-nc-nd', '', 'en', 'licenses/by-nc-nd/4.0/deed.en.html', ['deed.html', 'index.html'], 'licenses/by-nc-nd/4.0/legalcode.en.html', ['legalcode.html']), ('4.0', 'by', '', 'en', 'licenses/by/4.0/deed.en.html', ['deed.html', 'index.html'], 'licenses/by/4.0/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('4.0', 'by', '', 'zh-Hans', 'licenses/by/4.0/deed.zh-Hans.html', [], 'licenses/by/4.0/legalcode.zh-Hans.html', [])])
def test_get_deed_or_license_path_by3(self): '\n        3.0 formula:\n        /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGUAGE.html\n        /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGUAGE.html\n\n        3.0 examples:\n        /licenses/3.0/xu/by_deed_en.html\n        /licenses/3.0/xu/by_legalcode_en.html\n        /licenses/3.0/am/by_deed_hy.html\n        /licenses/3.0/am/by_legalcode_hy.html\n        /licenses/3.0/rs/by_deed_rs-Cyrl.html\n        /licenses/3.0/rs/by_legalcode_rs-Cyrl.html\n        For jurisdiction, I used "xu" to mean "unported".\n        See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements.  # noqa: E501\n        ' self._test_get_deed_or_license_path([('3.0', 'by', '', 'en', 'licenses/by/3.0/xu/deed.en.html', ['../licenses/by/3.0/xu/deed.en.html', '../deed.html', '../index.html'], 'licenses/by/3.0/xu/legalcode.en.html', ['../licenses/by/3.0/xu/legalcode.en.html', '../legalcode.html'])]) self._test_get_deed_or_license_path([('3.0', 'by', 'ca', 'en', 'licenses/by/3.0/ca/deed.en.html', ['deed.html', 'index.html'], 'licenses/by/3.0/ca/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('3.0', 'by-sa', 'ca', 'fr', 'licenses/by-sa/3.0/ca/deed.fr.html', [], 'licenses/by-sa/3.0/ca/legalcode.fr.html', [])]) self._test_get_deed_or_license_path([('3.0', 'by-nc-nd', 'am', 'hy', 'licenses/by-nc-nd/3.0/am/deed.hy.html', ['deed.html', 'index.html'], 'licenses/by-nc-nd/3.0/am/legalcode.hy.html', ['legalcode.html'])])
731,381,742,001,778,600
3.0 formula: /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGUAGE.html /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGUAGE.html 3.0 examples: /licenses/3.0/xu/by_deed_en.html /licenses/3.0/xu/by_legalcode_en.html /licenses/3.0/am/by_deed_hy.html /licenses/3.0/am/by_legalcode_hy.html /licenses/3.0/rs/by_deed_rs-Cyrl.html /licenses/3.0/rs/by_legalcode_rs-Cyrl.html For jurisdiction, I used "xu" to mean "unported". See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements. # noqa: E501
licenses/tests/test_models.py
test_get_deed_or_license_path_by3
kerahui/cc-licenses
python
def test_get_deed_or_license_path_by3(self): '\n        3.0 formula:\n        /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGUAGE.html\n        /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGUAGE.html\n\n        3.0 examples:\n        /licenses/3.0/xu/by_deed_en.html\n        /licenses/3.0/xu/by_legalcode_en.html\n        /licenses/3.0/am/by_deed_hy.html\n        /licenses/3.0/am/by_legalcode_hy.html\n        /licenses/3.0/rs/by_deed_rs-Cyrl.html\n        /licenses/3.0/rs/by_legalcode_rs-Cyrl.html\n        For jurisdiction, I used "xu" to mean "unported".\n        See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements.  # noqa: E501\n        ' self._test_get_deed_or_license_path([('3.0', 'by', '', 'en', 'licenses/by/3.0/xu/deed.en.html', ['../licenses/by/3.0/xu/deed.en.html', '../deed.html', '../index.html'], 'licenses/by/3.0/xu/legalcode.en.html', ['../licenses/by/3.0/xu/legalcode.en.html', '../legalcode.html'])]) self._test_get_deed_or_license_path([('3.0', 'by', 'ca', 'en', 'licenses/by/3.0/ca/deed.en.html', ['deed.html', 'index.html'], 'licenses/by/3.0/ca/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('3.0', 'by-sa', 'ca', 'fr', 'licenses/by-sa/3.0/ca/deed.fr.html', [], 'licenses/by-sa/3.0/ca/legalcode.fr.html', [])]) self._test_get_deed_or_license_path([('3.0', 'by-nc-nd', 'am', 'hy', 'licenses/by-nc-nd/3.0/am/deed.hy.html', ['deed.html', 'index.html'], 'licenses/by-nc-nd/3.0/am/legalcode.hy.html', ['legalcode.html'])])
def test_get_deed_or_license_path_cc0(self): '\n        cc0 formula:\n        /publicdomain/VERSION/LICENSE_deed_LANGUAGE.html\n        /publicdomain/VERSION/LICENSE_legalcode_LANGUAGE.html\n\n        cc0 examples:\n        /publicdomain/1.0/zero_deed_en.html\n        /publicdomain/1.0/zero_legalcode_en.html\n        /publicdomain/1.0/zero_deed_ja.html\n        /publicdomain/1.0/zero_legalcode_ja.html\n        ' self._test_get_deed_or_license_path([('1.0', 'CC0', '', 'en', 'publicdomain/zero/1.0/deed.en.html', ['deed.html', 'index.html'], 'publicdomain/zero/1.0/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('1.0', 'CC0', '', 'ja', 'publicdomain/zero/1.0/deed.ja.html', [], 'publicdomain/zero/1.0/legalcode.ja.html', [])])
8,075,320,904,855,932,000
cc0 formula: /publicdomain/VERSION/LICENSE_deed_LANGUAGE.html /publicdomain/VERSION/LICENSE_legalcode_LANGUAGE.html cc0 examples: /publicdomain/1.0/zero_deed_en.html /publicdomain/1.0/zero_legalcode_en.html /publicdomain/1.0/zero_deed_ja.html /publicdomain/1.0/zero_legalcode_ja.html
licenses/tests/test_models.py
test_get_deed_or_license_path_cc0
kerahui/cc-licenses
python
def test_get_deed_or_license_path_cc0(self): '\n        cc0 formula:\n        /publicdomain/VERSION/LICENSE_deed_LANGUAGE.html\n        /publicdomain/VERSION/LICENSE_legalcode_LANGUAGE.html\n\n        cc0 examples:\n        /publicdomain/1.0/zero_deed_en.html\n        /publicdomain/1.0/zero_legalcode_en.html\n        /publicdomain/1.0/zero_deed_ja.html\n        /publicdomain/1.0/zero_legalcode_ja.html\n        ' self._test_get_deed_or_license_path([('1.0', 'CC0', '', 'en', 'publicdomain/zero/1.0/deed.en.html', ['deed.html', 'index.html'], 'publicdomain/zero/1.0/legalcode.en.html', ['legalcode.html'])]) self._test_get_deed_or_license_path([('1.0', 'CC0', '', 'ja', 'publicdomain/zero/1.0/deed.ja.html', [], 'publicdomain/zero/1.0/legalcode.ja.html', [])])
def default_hparams(): 'Generates the hparams used to train note rnn used in paper.' return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-05, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True)
-6,123,440,437,575,037,000
Generates the hparams used to train the note RNN used in the paper.
magenta/models/rl_tuner/rl_tuner_ops.py
default_hparams
Aaravmaheshwari/magenta
python
def default_hparams(): return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-05, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True)
def basic_rnn_hparams(): 'Generates the hparams used to train a basic_rnn.\n\n These are the hparams used in the .mag file found at\n https://github.com/tensorflow/magenta/tree/master/magenta/models/\n melody_rnn#pre-trained\n\n Returns:\n Hyperparameters of the downloadable basic_rnn pre-trained model.\n ' return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES)
2,471,212,309,086,837,000
Generates the hparams used to train a basic_rnn. These are the hparams used in the .mag file found at https://github.com/tensorflow/magenta/tree/master/magenta/models/ melody_rnn#pre-trained Returns: Hyperparameters of the downloadable basic_rnn pre-trained model.
magenta/models/rl_tuner/rl_tuner_ops.py
basic_rnn_hparams
Aaravmaheshwari/magenta
python
def basic_rnn_hparams(): 'Generates the hparams used to train a basic_rnn.\n\n These are the hparams used in the .mag file found at\n https://github.com/tensorflow/magenta/tree/master/magenta/models/\n melody_rnn#pre-trained\n\n Returns:\n Hyperparameters of the downloadable basic_rnn pre-trained model.\n ' return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES)
def default_dqn_hparams(): 'Generates the default hparams for RLTuner DQN model.' return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01)
-408,690,727,625,031,550
Generates the default hparams for RLTuner DQN model.
magenta/models/rl_tuner/rl_tuner_ops.py
default_dqn_hparams
Aaravmaheshwari/magenta
python
def default_dqn_hparams(): return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01)
def autocorrelate(signal, lag=1): "Gives the correlation coefficient for the signal's correlation with itself.\n\n Args:\n signal: The signal on which to compute the autocorrelation. Can be a list.\n lag: The offset at which to correlate the signal with itself. E.g. if lag\n is 1, will compute the correlation between the signal and itself 1 beat\n later.\n Returns:\n Correlation coefficient.\n " n = len(signal) x = (np.asarray(signal) - np.mean(signal)) c0 = np.var(signal) return (((x[lag:] * x[:(n - lag)]).sum() / float(n)) / c0)
-3,527,570,645,793,660,000
Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. if lag is 1, will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient.
magenta/models/rl_tuner/rl_tuner_ops.py
autocorrelate
Aaravmaheshwari/magenta
python
def autocorrelate(signal, lag=1): "Gives the correlation coefficient for the signal's correlation with itself.\n\n Args:\n signal: The signal on which to compute the autocorrelation. Can be a list.\n lag: The offset at which to correlate the signal with itself. E.g. if lag\n is 1, will compute the correlation between the signal and itself 1 beat\n later.\n Returns:\n Correlation coefficient.\n " n = len(signal) x = (np.asarray(signal) - np.mean(signal)) c0 = np.var(signal) return (((x[lag:] * x[:(n - lag)]).sum() / float(n)) / c0)
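A quick worked example of the lag-1 case: an alternating signal anti-correlates with itself one step later, while a smooth ramp correlates positively.

import numpy as np

def autocorrelate(signal, lag=1):
    n = len(signal)
    x = np.asarray(signal) - np.mean(signal)
    c0 = np.var(signal)
    return (x[lag:] * x[:n - lag]).sum() / float(n) / c0

print(autocorrelate([1, -1, 1, -1, 1, -1]))  # -0.833...: strong alternation
print(autocorrelate([1, 2, 3, 4, 5, 6]))     # 0.5: smooth upward trend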
def linear_annealing(n, total, p_initial, p_final): 'Linearly interpolates a probability between p_initial and p_final.\n\n Current probability is based on the current step, n. Used to linearly anneal\n the exploration probability of the RLTuner.\n\n Args:\n n: The current step.\n total: The total number of steps that will be taken (usually the length of\n the exploration period).\n p_initial: The initial probability.\n p_final: The final probability.\n\n Returns:\n The current probability (between p_initial and p_final).\n ' if (n >= total): return p_final else: return (p_initial - ((n * (p_initial - p_final)) / total))
7,249,117,379,146,749,000
Linearly interpolates a probability between p_initial and p_final. Current probability is based on the current step, n. Used to linearly anneal the exploration probability of the RLTuner. Args: n: The current step. total: The total number of steps that will be taken (usually the length of the exploration period). p_initial: The initial probability. p_final: The final probability. Returns: The current probability (between p_initial and p_final).
magenta/models/rl_tuner/rl_tuner_ops.py
linear_annealing
Aaravmaheshwari/magenta
python
def linear_annealing(n, total, p_initial, p_final): 'Linearly interpolates a probability between p_initial and p_final.\n\n Current probability is based on the current step, n. Used to linearly anneal\n the exploration probability of the RLTuner.\n\n Args:\n n: The current step.\n total: The total number of steps that will be taken (usually the length of\n the exploration period).\n p_initial: The initial probability.\n p_final: The final probability.\n\n Returns:\n The current probability (between p_initial and p_final).\n ' if (n >= total): return p_final else: return (p_initial - ((n * (p_initial - p_final)) / total))
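For example, annealing an exploration probability from 1.0 down to 0.1 over 1000 steps:

def linear_annealing(n, total, p_initial, p_final):
    if n >= total:
        return p_final
    return p_initial - n * (p_initial - p_final) / total

print(linear_annealing(0, 1000, 1.0, 0.1))     # 1.0 at the start
print(linear_annealing(500, 1000, 1.0, 0.1))   # 0.55 at the midpoint
print(linear_annealing(2000, 1000, 1.0, 0.1))  # 0.1, clamped after the period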
def softmax(x): 'Compute softmax values for each set of scores in x.' e_x = np.exp((x - np.max(x))) return (e_x / e_x.sum(axis=0))
-8,078,771,299,122,488,000
Compute softmax values for each set of scores in x.
magenta/models/rl_tuner/rl_tuner_ops.py
softmax
Aaravmaheshwari/magenta
python
def softmax(x): e_x = np.exp((x - np.max(x))) return (e_x / e_x.sum(axis=0))
def sample_softmax(softmax_vect): 'Samples a note from an array of softmax probabilities.\n\n Tries to do this with numpy, which requires that the probabilities add to 1.0\n with extreme precision. If this fails, uses a manual implementation.\n\n Args:\n softmax_vect: An array of probabilities.\n Returns:\n The index of the note that was chosen/sampled.\n ' try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if ((upto + softmax_vect[i]) >= r): return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return (len(softmax_vect) - 1)
-7,324,834,732,328,166,000
Samples a note from an array of softmax probabilities. Tries to do this with numpy, which requires that the probabilities add to 1.0 with extreme precision. If this fails, uses a manual implementation. Args: softmax_vect: An array of probabilities. Returns: The index of the note that was chosen/sampled.
magenta/models/rl_tuner/rl_tuner_ops.py
sample_softmax
Aaravmaheshwari/magenta
python
def sample_softmax(softmax_vect): 'Samples a note from an array of softmax probabilities.\n\n Tries to do this with numpy, which requires that the probabilities add to 1.0\n with extreme precision. If this fails, uses a manual implementation.\n\n Args:\n softmax_vect: An array of probabilities.\n Returns:\n The index of the note that was chosen/sampled.\n ' try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if ((upto + softmax_vect[i]) >= r): return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return (len(softmax_vect) - 1)
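Putting softmax and sampling together: softmax turns raw scores into a distribution, and drawing many multinomial samples (the same numpy fast path sample_softmax uses) recovers roughly those proportions. A seeded check:

import numpy as np

def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

np.random.seed(0)
probs = softmax(np.array([2.0, 1.0, 0.1]))
print(probs.round(3))                        # [0.659 0.242 0.099]
# 1000 draws land near the expected 659/242/99 split.
print(np.random.multinomial(1000, pvals=probs))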
def decoder(event_list, transpose_amount): 'Translates a sequence generated by RLTuner to MonophonicMelody form.\n\n Args:\n event_list: Integer list of encoded notes.\n transpose_amount: Key to transpose to.\n Returns:\n Integer list of MIDI values.\n ' return [((e - NUM_SPECIAL_EVENTS) if (e < NUM_SPECIAL_EVENTS) else ((e + INITIAL_MIDI_VALUE) - transpose_amount)) for e in event_list]
-5,413,410,686,043,745,000
Translates a sequence generated by RLTuner to MonophonicMelody form. Args: event_list: Integer list of encoded notes. transpose_amount: Key to transpose to. Returns: Integer list of MIDI values.
magenta/models/rl_tuner/rl_tuner_ops.py
decoder
Aaravmaheshwari/magenta
python
def decoder(event_list, transpose_amount): 'Translates a sequence generated by RLTuner to MonophonicMelody form.\n\n Args:\n event_list: Integer list of encoded notes.\n transpose_amount: Key to transpose to.\n Returns:\n Integer list of MIDI values.\n ' return [((e - NUM_SPECIAL_EVENTS) if (e < NUM_SPECIAL_EVENTS) else ((e + INITIAL_MIDI_VALUE) - transpose_amount)) for e in event_list]
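A small decoding example. NUM_SPECIAL_EVENTS and INITIAL_MIDI_VALUE are module-level constants defined outside this excerpt; the values below are assumptions chosen only to make the sketch run:

NUM_SPECIAL_EVENTS = 2    # assumed value, for illustration only
INITIAL_MIDI_VALUE = 48   # assumed value, for illustration only

def decoder(event_list, transpose_amount):
    return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS
            else e + INITIAL_MIDI_VALUE - transpose_amount
            for e in event_list]

# Events 0 and 1 become the special values -2 and -1; the rest shift into
# MIDI range and are transposed back to the original key.
print(decoder([0, 1, 2, 14], transpose_amount=2))  # [-2, -1, 48, 60]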
def make_onehot(int_list, one_hot_length): 'Convert each int to a one-hot vector.\n\n A one-hot vector is 0 everywhere except at the index equal to the\n encoded value.\n\n For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...]\n\n Args:\n int_list: A list of ints, each of which will get a one-hot encoding.\n one_hot_length: The length of the one-hot vector to be created.\n Returns:\n A list of one-hot encodings of the ints.\n ' return [[(1.0 if (j == i) else 0.0) for j in range(one_hot_length)] for i in int_list]
7,837,502,958,555,476,000
Convert each int to a one-hot vector. A one-hot vector is 0 everywhere except at the index equal to the encoded value. For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...] Args: int_list: A list of ints, each of which will get a one-hot encoding. one_hot_length: The length of the one-hot vector to be created. Returns: A list of one-hot encodings of the ints.
magenta/models/rl_tuner/rl_tuner_ops.py
make_onehot
Aaravmaheshwari/magenta
python
def make_onehot(int_list, one_hot_length): 'Convert each int to a one-hot vector.\n\n A one-hot vector is 0 everywhere except at the index equal to the\n encoded value.\n\n For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...]\n\n Args:\n int_list: A list of ints, each of which will get a one-hot encoding.\n one_hot_length: The length of the one-hot vector to be created.\n Returns:\n A list of one-hot encodings of the ints.\n ' return [[(1.0 if (j == i) else 0.0) for j in range(one_hot_length)] for i in int_list]
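A quick check of make_onehot's output shape: each int becomes a row with a single 1.0 at its own index.

def make_onehot(int_list, one_hot_length):
    return [[1.0 if j == i else 0.0 for j in range(one_hot_length)]
            for i in int_list]

print(make_onehot([0, 2], 4))
# [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]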
def get_inner_scope(scope_str): 'Takes a tensorflow scope string and finds the inner scope.\n\n Inner scope is one layer more internal.\n\n Args:\n scope_str: Tensorflow variable scope string.\n Returns:\n Scope string with outer scope stripped off.\n ' idx = scope_str.find('/') return scope_str[(idx + 1):]
-7,477,195,212,486,831,000
Takes a tensorflow scope string and finds the inner scope. Inner scope is one layer more internal. Args: scope_str: Tensorflow variable scope string. Returns: Scope string with outer scope stripped off.
magenta/models/rl_tuner/rl_tuner_ops.py
get_inner_scope
Aaravmaheshwari/magenta
python
def get_inner_scope(scope_str): 'Takes a tensorflow scope string and finds the inner scope.\n\n Inner scope is one layer more internal.\n\n Args:\n scope_str: Tensorflow variable scope string.\n Returns:\n Scope string with outer scope stripped off.\n ' idx = scope_str.find('/') return scope_str[(idx + 1):]
def trim_variable_postfixes(scope_str): 'Trims any extra numbers added to a tensorflow scope string.\n\n Necessary to align variables in graph and checkpoint\n\n Args:\n scope_str: Tensorflow variable scope string.\n Returns:\n Scope string with extra numbers trimmed off.\n ' idx = scope_str.find(':') return scope_str[:idx]
7,660,496,240,275,590,000
Trims any extra numbers added to a tensorflow scope string. Necessary to align variables in graph and checkpoint Args: scope_str: Tensorflow variable scope string. Returns: Scope string with extra numbers trimmed off.
magenta/models/rl_tuner/rl_tuner_ops.py
trim_variable_postfixes
Aaravmaheshwari/magenta
python
def trim_variable_postfixes(scope_str): 'Trims any extra numbers added to a tensorflow scope string.\n\n Necessary to align variables in graph and checkpoint\n\n Args:\n scope_str: Tensorflow variable scope string.\n Returns:\n Scope string with extra numbers trimmed off.\n ' idx = scope_str.find(':') return scope_str[:idx]
def get_variable_names(graph, scope): 'Finds all the variable names in a graph that begin with a given scope.\n\n Args:\n graph: A tensorflow graph.\n scope: A string scope.\n Returns:\n List of variables.\n ' with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)]
4,936,847,139,153,747,000
Finds all the variable names in a graph that begin with a given scope. Args: graph: A tensorflow graph. scope: A string scope. Returns: List of variables.
magenta/models/rl_tuner/rl_tuner_ops.py
get_variable_names
Aaravmaheshwari/magenta
python
def get_variable_names(graph, scope): 'Finds all the variable names in a graph that begin with a given scope.\n\n Args:\n graph: A tensorflow graph.\n scope: A string scope.\n Returns:\n List of variables.\n ' with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)]
def get_next_file_name(directory, prefix, extension): "Finds next available filename in directory by appending numbers to prefix.\n\n  E.g. if prefix is 'myfile', extension is '.png', and 'directory' already\n  contains 'myfile.png' and 'myfile1.png', this function will return\n  'myfile2.png'.\n\n  Args:\n    directory: Path to the relevant directory.\n    prefix: The filename prefix to use.\n    extension: String extension of the file, e.g. '.mid'.\n  Returns:\n    String name of the file.\n  " name = ((((directory + '/') + prefix) + '.') + extension) i = 0 while os.path.isfile(name): i += 1 name = (((((directory + '/') + prefix) + str(i)) + '.') + extension) return name
-6,008,818,441,906,766,000
Finds next available filename in directory by appending numbers to prefix. E.g. if prefix is 'myfile', extension is '.png', and 'directory' already contains 'myfile.png' and 'myfile1.png', this function will return 'myfile2.png'. Args: directory: Path to the relevant directory. prefix: The filename prefix to use. extension: String extension of the file, e.g. '.mid'. Returns: String name of the file.
magenta/models/rl_tuner/rl_tuner_ops.py
get_next_file_name
Aaravmaheshwari/magenta
python
def get_next_file_name(directory, prefix, extension): "Finds next available filename in directory by appending numbers to prefix.\n\n  E.g. if prefix is 'myfile', extension is '.png', and 'directory' already\n  contains 'myfile.png' and 'myfile1.png', this function will return\n  'myfile2.png'.\n\n  Args:\n    directory: Path to the relevant directory.\n    prefix: The filename prefix to use.\n    extension: String extension of the file, e.g. '.mid'.\n  Returns:\n    String name of the file.\n  " name = ((((directory + '/') + prefix) + '.') + extension) i = 0 while os.path.isfile(name): i += 1 name = (((((directory + '/') + prefix) + str(i)) + '.') + extension) return name
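Exercised in a throwaway directory, get_next_file_name starts appending 1, 2, ... once the base name is taken:

import os
import tempfile

def get_next_file_name(directory, prefix, extension):
    name = directory + '/' + prefix + '.' + extension
    i = 0
    while os.path.isfile(name):
        i += 1
        name = directory + '/' + prefix + str(i) + '.' + extension
    return name

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'myfile.png'), 'w').close()  # occupy the base name
    print(os.path.basename(get_next_file_name(d, 'myfile', 'png')))  # myfile1.png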
def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): 'Makes a default LSTM cell for use in the NoteRNNLoader graph.\n\n This model is only to be used for loading the checkpoint from the research\n paper. In general, events_rnn_graph.make_rnn_cell should be used instead.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n state_is_tuple: A boolean specifying whether to use tuple of hidden matrix\n and cell matrix as a state instead of a concatenated matrix.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n ' cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell
4,142,728,209,454,180,400
Makes a default LSTM cell for use in the NoteRNNLoader graph. This model is only to be used for loading the checkpoint from the research paper. In general, events_rnn_graph.make_rnn_cell should be used instead. Args: rnn_layer_sizes: A list of integer sizes (in units) for each layer of the RNN. state_is_tuple: A boolean specifying whether to use tuple of hidden matrix and cell matrix as a state instead of a concatenated matrix. Returns: A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
magenta/models/rl_tuner/rl_tuner_ops.py
make_rnn_cell
Aaravmaheshwari/magenta
python
def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): 'Makes a default LSTM cell for use in the NoteRNNLoader graph.\n\n This model is only to be used for loading the checkpoint from the research\n paper. In general, events_rnn_graph.make_rnn_cell should be used instead.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n state_is_tuple: A boolean specifying whether to use tuple of hidden matrix\n and cell matrix as a state instead of a concatenated matrix.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n ' cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell
def log_sum_exp(xs): 'Computes the log sum exp value of a tensor.' maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return (tf.squeeze(maxes, [(- 1)]) + tf.log(tf.reduce_sum(tf.exp(xs), (- 1))))
4,708,533,451,265,391,000
Computes the log sum exp value of a tensor.
magenta/models/rl_tuner/rl_tuner_ops.py
log_sum_exp
Aaravmaheshwari/magenta
python
def log_sum_exp(xs): maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return (tf.squeeze(maxes, [(- 1)]) + tf.log(tf.reduce_sum(tf.exp(xs), (- 1))))
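log_sum_exp applies the usual max-subtraction trick for numerical stability. The TF1 calls (keep_dims, tf.log) date the snippet, but the identity is easy to verify in plain numpy, where the naive form overflows and the shifted form does not:

import numpy as np

def log_sum_exp(xs):
    m = np.max(xs)
    return m + np.log(np.sum(np.exp(xs - m)))

xs = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(xs))))  # inf: exp(1000) overflows float64
print(log_sum_exp(xs))             # 1000.693... = 1000 + log(2)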
def get_error_code(self): 'Get exception error code' return self.error_code
2,325,030,184,817,204,700
Get exception error code
srfax/srfax.py
get_error_code
sunbeamer/srfax-api-python
python
def get_error_code(self): return self.error_code
def get_cause(self): 'Get exception cause' return self.cause
-1,379,205,058,938,806,800
Get exception cause
srfax/srfax.py
get_cause
sunbeamer/srfax-api-python
python
def get_cause(self): return self.cause
def get_retry(self): 'Get retry option (should we retry the request?)' return self.retry
-3,069,908,002,933,401,600
Get retry option (should we retry the request?)
srfax/srfax.py
get_retry
sunbeamer/srfax-api-python
python
def get_retry(self): return self.retry
def queue_fax(self, to_fax_number, filepath, caller_id=None, sender_email=None, account_code=None): 'Queue fax for sending' to_fax_number = SRFax.verify_fax_numbers(to_fax_number) fax_type = ('BROADCAST' if (len(to_fax_number) > 1) else 'SINGLE') to_fax_number = '|'.join(to_fax_number) try: if isinstance(filepath, basestring): filepath = [filepath] except NameError: if isinstance(filepath, str): filepath = [filepath] if (not isinstance(filepath, list)): raise TypeError('filepath not properly defined') if (len(filepath) > 5): raise Exception('More than 5 files defined in filepath') params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sCallerID': (caller_id or self.caller_id), 'sSenderEmail': (sender_email or self.sender_email), 'sFaxType': fax_type, 'sToFaxNumber': to_fax_number, 'sAccountCode': (account_code or self.account_code or '')} SRFax.verify_parameters(params) for i in range(len(filepath)): path = filepath[i] basename = os.path.basename(path) if (not isinstance(basename, str)): basename = basename.decode('utf-8') params[('sFileName_%d' % (i + 1))] = basename content = SRFax.get_file_content(path) if (not isinstance(content, str)): content = content.decode() params[('sFileContent_%d' % (i + 1))] = content return self.process_request('Queue_Fax', params)
3,631,779,985,675,308,500
Queue fax for sending
srfax/srfax.py
queue_fax
sunbeamer/srfax-api-python
python
def queue_fax(self, to_fax_number, filepath, caller_id=None, sender_email=None, account_code=None): to_fax_number = SRFax.verify_fax_numbers(to_fax_number) fax_type = ('BROADCAST' if (len(to_fax_number) > 1) else 'SINGLE') to_fax_number = '|'.join(to_fax_number) try: if isinstance(filepath, basestring): filepath = [filepath] except NameError: if isinstance(filepath, str): filepath = [filepath] if (not isinstance(filepath, list)): raise TypeError('filepath not properly defined') if (len(filepath) > 5): raise Exception('More than 5 files defined in filepath') params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sCallerID': (caller_id or self.caller_id), 'sSenderEmail': (sender_email or self.sender_email), 'sFaxType': fax_type, 'sToFaxNumber': to_fax_number, 'sAccountCode': (account_code or self.account_code or '')} SRFax.verify_parameters(params) for i in range(len(filepath)): path = filepath[i] basename = os.path.basename(path) if (not isinstance(basename, str)): basename = basename.decode('utf-8') params[('sFileName_%d' % (i + 1))] = basename content = SRFax.get_file_content(path) if (not isinstance(content, str)): content = content.decode() params[('sFileContent_%d' % (i + 1))] = content return self.process_request('Queue_Fax', params)
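queue_fax fans each file out into numbered sFileName_N / sFileContent_N parameters and caps a request at five files. The naming fan-out in isolation (the real method also attaches sFileContent_N via get_file_content, omitted here so the sketch runs without real files):

import os

def build_file_params(filepaths):
    # Mirror of the loop in queue_fax: one 1-indexed sFileName_N entry per file.
    if len(filepaths) > 5:
        raise Exception('More than 5 files defined in filepath')
    params = {}
    for i, path in enumerate(filepaths, start=1):
        params['sFileName_%d' % i] = os.path.basename(path)
    return params

print(build_file_params(['/tmp/a.pdf', '/tmp/b.pdf']))
# {'sFileName_1': 'a.pdf', 'sFileName_2': 'b.pdf'}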
def get_fax_status(self, fax_id): 'Get fax status' params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sFaxDetailsID': fax_id} SRFax.verify_parameters(params) response = self.process_request('Get_FaxStatus', params) if (len(response) == 1): response = response[0] return response
-3,783,029,833,016,914,000
Get fax status
srfax/srfax.py
get_fax_status
sunbeamer/srfax-api-python
python
def get_fax_status(self, fax_id): params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sFaxDetailsID': fax_id} SRFax.verify_parameters(params) response = self.process_request('Get_FaxStatus', params) if (len(response) == 1): response = response[0] return response
def get_fax_inbox(self, period='ALL'): 'Get fax inbox' params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sPeriod': period} SRFax.verify_parameters(params) return self.process_request('Get_Fax_Inbox', params)
1,799,947,527,271,448,300
Get fax inbox
srfax/srfax.py
get_fax_inbox
sunbeamer/srfax-api-python
python
def get_fax_inbox(self, period='ALL'): params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sPeriod': period} SRFax.verify_parameters(params) return self.process_request('Get_Fax_Inbox', params)
def get_fax_outbox(self, period='ALL'): 'Get fax outbox' params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sPeriod': period} SRFax.verify_parameters(params) return self.process_request('Get_Fax_Outbox', params)
-3,963,924,699,439,529,500
Get fax outbox
srfax/srfax.py
get_fax_outbox
sunbeamer/srfax-api-python
python
def get_fax_outbox(self, period='ALL'): params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sPeriod': period} SRFax.verify_parameters(params) return self.process_request('Get_Fax_Outbox', params)
def retrieve_fax(self, fax_filename, folder, fax_id): 'Retrieve fax content in Base64 format' assert (folder in ['IN', 'OUT']) params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sFaxFileName': fax_filename, 'sDirection': folder, 'sFaxDetailsID': fax_id} SRFax.verify_parameters(params) response = self.process_request('Retrieve_Fax', params) if (len(response) == 1): response = response[0] return response
-308,339,134,267,783,300
Retrieve fax content in Base64 format
srfax/srfax.py
retrieve_fax
sunbeamer/srfax-api-python
python
def retrieve_fax(self, fax_filename, folder, fax_id): assert (folder in ['IN', 'OUT']) params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sFaxFileName': fax_filename, 'sDirection': folder, 'sFaxDetailsID': fax_id} SRFax.verify_parameters(params) response = self.process_request('Retrieve_Fax', params) if (len(response) == 1): response = response[0] return response
def delete_fax(self, fax_filename, folder): 'Delete fax files from server' assert (folder in ['IN', 'OUT']) if isinstance(fax_filename, str): fax_filename = [fax_filename] if (not isinstance(fax_filename, list)): raise TypeError('fax_filename not properly defined') if (len(fax_filename) > 5): raise Exception('More than 5 files defined in fax_filename') params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sDirection': folder} SRFax.verify_parameters(params) for i in range(len(fax_filename)): params[('sFileName_%d' % (i + 1))] = fax_filename[i] return self.process_request('Delete_Fax', params)
-7,622,009,533,996,599,000
Delete fax files from server
srfax/srfax.py
delete_fax
sunbeamer/srfax-api-python
python
def delete_fax(self, fax_filename, folder): assert (folder in ['IN', 'OUT']) if isinstance(fax_filename, str): fax_filename = [fax_filename] if (not isinstance(fax_filename, list)): raise TypeError('fax_filename not properly defined') if (len(fax_filename) > 5): raise Exception('More than 5 files defined in fax_filename') params = {'access_id': self.access_id, 'access_pwd': self.access_pwd, 'sDirection': folder} SRFax.verify_parameters(params) for i in range(len(fax_filename)): params[('sFileName_%d' % (i + 1))] = fax_filename[i] return self.process_request('Delete_Fax', params)
def process_request(self, method, params):
    'Process SRFax REST request'
    params['action'] = method
    try:
        response = requests.post(self.url, json=params)
    except Exception as exc:
        raise SRFaxError('REQUESTFAILED', 'REST request failed',
                         cause=exc, retry=True)
    return SRFax.process_response(response)
-6,875,549,667,463,366,000
Process SRFax REST request
srfax/srfax.py
process_request
sunbeamer/srfax-api-python
python
def process_request(self, method, params):
    params['action'] = method
    try:
        response = requests.post(self.url, json=params)
    except Exception as exc:
        raise SRFaxError('REQUESTFAILED', 'REST request failed',
                         cause=exc, retry=True)
    return SRFax.process_response(response)
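Transport failures are raised with retry=True, so a caller can separate transient from permanent errors. A minimal retry wrapper, assuming SRFaxError exposes that flag as a 'retry' attribute and is importable from srfax.srfax (neither is shown in these records):

import time
from srfax.srfax import SRFaxError  # import path assumed

def send_with_retry(client, method, params, attempts=3):
    for attempt in range(attempts):
        try:
            return client.process_request(method, params)
        except SRFaxError as exc:
            # 'retry' attribute assumed; default to no retry if absent
            if (not getattr(exc, 'retry', False)) or (attempt == (attempts - 1)):
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff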
@staticmethod
def process_response(response):
    'Process SRFax REST response'
    if (not response):
        raise SRFaxError('INVALIDRESPONSE', 'Empty response', retry=True)
    if response.ok:
        response = response.json()
    if (('Status' not in response) or ('Result' not in response)):
        raise SRFaxError('INVALIDRESPONSE',
                         ('Status and/or Result not in response: %s' % response),
                         retry=True)
    result = response['Result']
    LOGGER.debug(('Result: %s' % result))
    if (response['Status'] != 'Success'):
        errmsg = result
        if (isinstance(errmsg, list) and (len(errmsg) == 1)
                and ('ErrorCode' in errmsg[0])):
            errmsg = errmsg[0]['ErrorCode']
        raise SRFaxError('REQUESTFAILED', errmsg)
    if (result is None):
        result = True
    return result
8,468,178,039,069,426,000
Process SRFax REST response
srfax/srfax.py
process_response
sunbeamer/srfax-api-python
python
@staticmethod
def process_response(response):
    if (not response):
        raise SRFaxError('INVALIDRESPONSE', 'Empty response', retry=True)
    if response.ok:
        response = response.json()
    if (('Status' not in response) or ('Result' not in response)):
        raise SRFaxError('INVALIDRESPONSE',
                         ('Status and/or Result not in response: %s' % response),
                         retry=True)
    result = response['Result']
    LOGGER.debug(('Result: %s' % result))
    if (response['Status'] != 'Success'):
        errmsg = result
        if (isinstance(errmsg, list) and (len(errmsg) == 1)
                and ('ErrorCode' in errmsg[0])):
            errmsg = errmsg[0]['ErrorCode']
        raise SRFaxError('REQUESTFAILED', errmsg)
    if (result is None):
        result = True
    return result
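For reference, the two JSON envelopes this parser expects. The field names ('Status', 'Result', 'ErrorCode') come from the code above; the concrete values are illustrative only.

ok_payload = {'Status': 'Success', 'Result': [{'FileName': 'fax1'}]}
# -> process_response returns the Result list as-is

err_payload = {'Status': 'Failed',
               'Result': [{'ErrorCode': 'Invalid Access Code / Password'}]}
# -> a one-element Result list carrying 'ErrorCode' is unwrapped into the
#    SRFaxError message; any other shape is passed through verbatim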
@staticmethod
def verify_parameters(params):
    'Verify that dict values are set'
    for key in params.keys():
        if (params[key] is None):
            raise TypeError(('%s not set' % key))
8,079,586,257,489,199,000
Verify that dict values are set
srfax/srfax.py
verify_parameters
sunbeamer/srfax-api-python
python
@staticmethod
def verify_parameters(params):
    for key in params.keys():
        if (params[key] is None):
            raise TypeError(('%s not set' % key))
@staticmethod
def is_e164_number(number):
    'Simple check if number is in E.164 format'
    if (isinstance(number, str) and RE_E164.match(number)):
        return True
    return False
8,022,049,498,608,379,000
Simple check if number is in E.164 format
srfax/srfax.py
is_e164_number
sunbeamer/srfax-api-python
python
@staticmethod
def is_e164_number(number):
    if (isinstance(number, str) and RE_E164.match(number)):
        return True
    return False
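RE_E164 (and RE_NANP in the next record) are module-level patterns that do not appear in these records. A plausible E.164 definition, given purely as an assumption so the checks above have something concrete to read against:

import re

RE_E164 = re.compile(r'^\+[1-9]\d{6,14}$')  # assumed pattern: '+' then 7-15 digits

SRFax.is_e164_number('+15551234567')  # -> True under this pattern
SRFax.is_e164_number('5551234567')    # -> False (no leading '+')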
@staticmethod
def is_nanp_number(number):
    'Simple check if number is inside North American Numbering Plan'
    if (isinstance(number, str) and RE_NANP.match(number)):
        return True
    return False
-4,080,957,327,917,256,000
Simple check if number is inside North American Numbering Plan
srfax/srfax.py
is_nanp_number
sunbeamer/srfax-api-python
python
@staticmethod
def is_nanp_number(number):
    if (isinstance(number, str) and RE_NANP.match(number)):
        return True
    return False
@staticmethod
def verify_fax_numbers(to_fax_number):
    'Verify and prepare fax numbers for use at SRFax'
    try:
        if isinstance(to_fax_number, basestring):
            to_fax_number = [to_fax_number]
    except NameError:
        if isinstance(to_fax_number, str):
            to_fax_number = [to_fax_number]
    if (not isinstance(to_fax_number, list)):
        raise TypeError('to_fax_number not properly defined')
    for i in range(len(to_fax_number)):
        number = str(to_fax_number[i])
        if (not SRFax.is_e164_number(number)):
            raise TypeError(('Number not in E.164 format: %s' % number))
        if SRFax.is_nanp_number(number):
            to_fax_number[i] = number[1:]
        else:
            to_fax_number[i] = ('011' + number[1:])
    return to_fax_number
-4,816,944,131,299,759,000
Verify and prepare fax numbers for use at SRFax
srfax/srfax.py
verify_fax_numbers
sunbeamer/srfax-api-python
python
@staticmethod
def verify_fax_numbers(to_fax_number):
    try:
        if isinstance(to_fax_number, basestring):
            to_fax_number = [to_fax_number]
    except NameError:
        if isinstance(to_fax_number, str):
            to_fax_number = [to_fax_number]
    if (not isinstance(to_fax_number, list)):
        raise TypeError('to_fax_number not properly defined')
    for i in range(len(to_fax_number)):
        number = str(to_fax_number[i])
        if (not SRFax.is_e164_number(number)):
            raise TypeError(('Number not in E.164 format: %s' % number))
        if SRFax.is_nanp_number(number):
            to_fax_number[i] = number[1:]
        else:
            to_fax_number[i] = ('011' + number[1:])
    return to_fax_number
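A worked example of the normalization above, assuming RE_NANP matches '+1' followed by ten digits: NANP numbers simply lose the '+', everything else gains the '011' international dialing prefix.

SRFax.verify_fax_numbers(['+15551234567', '+447911123456'])
# -> ['15551234567', '011447911123456']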
@staticmethod
def get_file_content(filepath):
    'Read and return file content Base64 encoded'
    if (not os.path.exists(filepath)):
        raise Exception(('File does not exist: %s' % filepath))
    if (not os.path.isfile(filepath)):
        raise Exception(('Not a file: %s' % filepath))
    content = None
    try:
        fdp = open(filepath, 'rb')
    except IOError:
        raise
    else:
        content = fdp.read()
        fdp.close()
    if (not content):
        raise Exception(('Error reading file or file empty: %s' % filepath))
    return base64.b64encode(content)
-1,608,719,954,331,864,600
Read and return file content Base64 encoded
srfax/srfax.py
get_file_content
sunbeamer/srfax-api-python
python
@staticmethod
def get_file_content(filepath):
    if (not os.path.exists(filepath)):
        raise Exception(('File does not exist: %s' % filepath))
    if (not os.path.isfile(filepath)):
        raise Exception(('Not a file: %s' % filepath))
    content = None
    try:
        fdp = open(filepath, 'rb')
    except IOError:
        raise
    else:
        content = fdp.read()
        fdp.close()
    if (not content):
        raise Exception(('Error reading file or file empty: %s' % filepath))
    return base64.b64encode(content)
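Usage sketch; the path is a placeholder. Note the return value is the bytes output of base64.b64encode, not a str.

payload = SRFax.get_file_content('/tmp/outgoing.pdf')  # placeholder path
print(isinstance(payload, bytes))  # True: base64.b64encode returns bytes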
def has_transformation(self, other):
    """Checks to see if there exists a transformer for other

    Parameters
    ----------
    other : ModelType subclass
        The object being checked for transformer

    Returns
    -------
    bool
        Does the specified transformer exist for other?
    """
    transformer = self._get_transformer_to(other)
    return (transformer is not None)
-3,339,349,924,968,295,000
Checks to see if there exists a transformer for other

Parameters
----------
other : ModelType subclass
    The object being checked for transformer

Returns
-------
bool
    Does the specified transformer exist for other?
qiime2/core/transform.py
has_transformation
ebolyen/qiime2
python
def has_transformation(self, other):
    transformer = self._get_transformer_to(other)
    return (transformer is not None)
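A minimal, hedged usage sketch. How ModelType instances are obtained in qiime2 is not shown in these records, so the names below are placeholders.

# source_model and target_model stand in for ModelType instances; their
# construction is not shown in these records.
if source_model.has_transformation(target_model):
    print('a registered transformer exists for this conversion')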
def concrete_sample(logits, temperature, shape=torch.Size([])):
    """Sampling for Concrete distribution.

    See Eq. 10 of Maddison et al., 2017.
    """
    uniform_shape = (torch.Size(shape) + logits.shape)
    u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
                               device=logits.device))
    gumbels = (- torch.log((- torch.log(u))))
    scores = ((logits + gumbels) / temperature)
    return scores.softmax(dim=(- 1))
-3,301,323,707,461,947,400
Sampling for Concrete distribution. See Eq. 10 of Maddison et al., 2017.
selection/layers/utils.py
concrete_sample
iancovert/dl-selection
python
def concrete_sample(logits, temperature, shape=torch.Size([])):
    uniform_shape = (torch.Size(shape) + logits.shape)
    u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
                               device=logits.device))
    gumbels = (- torch.log((- torch.log(u))))
    scores = ((logits + gumbels) / temperature)
    return scores.softmax(dim=(- 1))
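A quick runnable check of the sampler above (assumes the module-level torch import and clamp_probs from torch.distributions.utils used by the record): every sample lies on the probability simplex, and at low temperature the mass concentrates on the largest logit.

import torch

logits = torch.tensor([1.0, 2.0, 3.0])
samples = concrete_sample(logits, temperature=0.1, shape=(1000,))
print(samples.sum(dim=(- 1))[:5])  # each row sums to 1 (simplex)
print(samples.mean(dim=0))         # mass concentrates on index 2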
def bernoulli_concrete_sample(logits, temperature, shape=torch.Size([])):
    """Sampling for BinConcrete distribution.

    See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.
    """
    uniform_shape = (torch.Size(shape) + logits.shape)
    u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
                               device=logits.device))
    return torch.sigmoid(((((F.logsigmoid(logits) - F.logsigmoid((- logits)))
                            + torch.log(u)) - torch.log((1 - u))) / temperature))
7,209,884,946,331,013,000
Sampling for BinConcrete distribution. See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.
selection/layers/utils.py
bernoulli_concrete_sample
iancovert/dl-selection
python
def bernoulli_concrete_sample(logits, temperature, shape=torch.Size([])):
    uniform_shape = (torch.Size(shape) + logits.shape)
    u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
                               device=logits.device))
    return torch.sigmoid(((((F.logsigmoid(logits) - F.logsigmoid((- logits)))
                            + torch.log(u)) - torch.log((1 - u))) / temperature))
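And the analogous check for the binary sampler (same module-level import assumptions): a logit of 0 corresponds to p = 0.5, so the sample mean hovers around 0.5 while every value stays strictly inside (0, 1).

logits = torch.tensor([0.0])  # logit 0 corresponds to p = 0.5
s = bernoulli_concrete_sample(logits, temperature=0.5, shape=(10000,))
print(s.mean())                         # ~0.5
print(s.min().item(), s.max().item())   # strictly inside (0, 1)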
@pytest.mark.parametrize('ambient_dim', [2, 3])
@pytest.mark.parametrize('dformat', ['xml', 'hdf', 'binary'])
def test_unstructured_vertex_grid(ambient_dim, dformat, npoints=64):
    """Test constructing a vertex grid with different ways to define the
    points and connectivity.
    """
    from pyvisfile.xdmf import NumpyDataArray, DataArray, _data_item_from_numpy
    connectivity = np.arange(npoints, dtype=np.uint32)
    points = np.random.rand(ambient_dim, npoints)
    if (dformat == 'xml'):
        connectivity = NumpyDataArray(connectivity, name='connectivity')
        points = NumpyDataArray(points.T, name='points')
    elif (dformat in ['hdf', 'binary']):
        if (dformat == 'hdf'):
            cdata = 'geometry.h5:/Grid/Connectivity'
            pdata = 'geometry.h5:/Grid/Points'
        else:
            cdata = 'connectivity.out'
            pdata = 'points.out'
        connectivity = DataArray((_data_item_from_numpy(
            connectivity, name='connectivity', data=cdata),))
        points = DataArray((_data_item_from_numpy(
            points.T, name='points', data=pdata),))
    else:
        raise ValueError(f"unknown format: '{dformat}'")
    from pyvisfile.xdmf import TopologyType
    from pyvisfile.xdmf import XdmfUnstructuredGrid
    grid = XdmfUnstructuredGrid(points, connectivity,
                                topology_type=TopologyType.Polyvertex,
                                name='polyvertex')
    from pyvisfile.xdmf import XdmfWriter
    writer = XdmfWriter((grid,))
    filename = f'test_unstructured_vertex_{dformat}_{ambient_dim}d.xmf'
    writer.write_pretty(filename)
3,856,762,145,171,941,000
Test constructing a vertex grid with different ways to define the points and connectivity.
test/test_xdmf.py
test_unstructured_vertex_grid
alexfikl/pyvisfile
python
@pytest.mark.parametrize('ambient_dim', [2, 3])
@pytest.mark.parametrize('dformat', ['xml', 'hdf', 'binary'])
def test_unstructured_vertex_grid(ambient_dim, dformat, npoints=64):
    from pyvisfile.xdmf import NumpyDataArray, DataArray, _data_item_from_numpy
    connectivity = np.arange(npoints, dtype=np.uint32)
    points = np.random.rand(ambient_dim, npoints)
    if (dformat == 'xml'):
        connectivity = NumpyDataArray(connectivity, name='connectivity')
        points = NumpyDataArray(points.T, name='points')
    elif (dformat in ['hdf', 'binary']):
        if (dformat == 'hdf'):
            cdata = 'geometry.h5:/Grid/Connectivity'
            pdata = 'geometry.h5:/Grid/Points'
        else:
            cdata = 'connectivity.out'
            pdata = 'points.out'
        connectivity = DataArray((_data_item_from_numpy(
            connectivity, name='connectivity', data=cdata),))
        points = DataArray((_data_item_from_numpy(
            points.T, name='points', data=pdata),))
    else:
        raise ValueError(f"unknown format: '{dformat}'")
    from pyvisfile.xdmf import TopologyType
    from pyvisfile.xdmf import XdmfUnstructuredGrid
    grid = XdmfUnstructuredGrid(points, connectivity,
                                topology_type=TopologyType.Polyvertex,
                                name='polyvertex')
    from pyvisfile.xdmf import XdmfWriter
    writer = XdmfWriter((grid,))
    filename = f'test_unstructured_vertex_{dformat}_{ambient_dim}d.xmf'
    writer.write_pretty(filename)
@pytest.mark.parametrize('ambient_dim', [2, 3])
def test_unstructured_simplex_grid(ambient_dim, nelements=16):
    """Test constructing a grid with a more complicated topology."""
    from pyvisfile.xdmf import TopologyType
    if (ambient_dim == 1):
        topology_type = TopologyType.Polyline
        simplices_per_quad = 1
    elif (ambient_dim == 2):
        topology_type = TopologyType.Triangle
        simplices_per_quad = 2
    elif (ambient_dim == 3):
        topology_type = TopologyType.Tetrahedron
        simplices_per_quad = 6
    else:
        raise ValueError('unsupported dimension')
    x = np.linspace((- 1.0), 1.0, (nelements + 1))
    npoints = len(x)
    points = np.empty(((ambient_dim,) + ((npoints,) * ambient_dim)))
    for idim in range(ambient_dim):
        points[idim] = x.reshape(((npoints,) + ((1,) * ((ambient_dim - 1) - idim))))
    from pyvisfile.xdmf import NumpyDataArray
    points = NumpyDataArray(points.reshape(ambient_dim, (- 1)).T, name='points')
    from pyvisfile.xdmf import _XDMF_ELEMENT_NODE_COUNT
    connectivity = _simplex_box_connectivity(
        npoints=((npoints,) * ambient_dim),
        nelements=(simplices_per_quad * (nelements ** ambient_dim)),
        nvertices=_XDMF_ELEMENT_NODE_COUNT[topology_type])
    temperature = (np.sin(((2.0 * np.pi) * points.ary[:, 0]))
                   + np.cos(((2.0 * np.pi) * points.ary[:, 1])))
    temperature = NumpyDataArray(temperature, name='temperature')
    velocity = (points.ary + np.array([0, 1, 2][:ambient_dim]).reshape(1, (- 1)))
    velocity = NumpyDataArray(velocity, name='velocity')
    vorticity = NumpyDataArray(make_obj_array(velocity.ary), name='vorticity')
    from pyvisfile.xdmf import XdmfUnstructuredGrid
    grid = XdmfUnstructuredGrid(points, connectivity,
                                topology_type=topology_type, name='simplex')
    grid.add_attribute(temperature)
    grid.add_attribute(velocity)
    grid.add_attribute(vorticity)
    from pyvisfile.xdmf import XdmfWriter
    writer = XdmfWriter((grid,))
    filename = f'test_unstructured_simplex_{ambient_dim}d.xmf'
    writer.write_pretty(filename)
-2,399,577,849,240,787,500
Test constructing a grid with a more complicated topology.
test/test_xdmf.py
test_unstructured_simplex_grid
alexfikl/pyvisfile
python
@pytest.mark.parametrize('ambient_dim', [2, 3])
def test_unstructured_simplex_grid(ambient_dim, nelements=16):
    from pyvisfile.xdmf import TopologyType
    if (ambient_dim == 1):
        topology_type = TopologyType.Polyline
        simplices_per_quad = 1
    elif (ambient_dim == 2):
        topology_type = TopologyType.Triangle
        simplices_per_quad = 2
    elif (ambient_dim == 3):
        topology_type = TopologyType.Tetrahedron
        simplices_per_quad = 6
    else:
        raise ValueError('unsupported dimension')
    x = np.linspace((- 1.0), 1.0, (nelements + 1))
    npoints = len(x)
    points = np.empty(((ambient_dim,) + ((npoints,) * ambient_dim)))
    for idim in range(ambient_dim):
        points[idim] = x.reshape(((npoints,) + ((1,) * ((ambient_dim - 1) - idim))))
    from pyvisfile.xdmf import NumpyDataArray
    points = NumpyDataArray(points.reshape(ambient_dim, (- 1)).T, name='points')
    from pyvisfile.xdmf import _XDMF_ELEMENT_NODE_COUNT
    connectivity = _simplex_box_connectivity(
        npoints=((npoints,) * ambient_dim),
        nelements=(simplices_per_quad * (nelements ** ambient_dim)),
        nvertices=_XDMF_ELEMENT_NODE_COUNT[topology_type])
    temperature = (np.sin(((2.0 * np.pi) * points.ary[:, 0]))
                   + np.cos(((2.0 * np.pi) * points.ary[:, 1])))
    temperature = NumpyDataArray(temperature, name='temperature')
    velocity = (points.ary + np.array([0, 1, 2][:ambient_dim]).reshape(1, (- 1)))
    velocity = NumpyDataArray(velocity, name='velocity')
    vorticity = NumpyDataArray(make_obj_array(velocity.ary), name='vorticity')
    from pyvisfile.xdmf import XdmfUnstructuredGrid
    grid = XdmfUnstructuredGrid(points, connectivity,
                                topology_type=topology_type, name='simplex')
    grid.add_attribute(temperature)
    grid.add_attribute(velocity)
    grid.add_attribute(vorticity)
    from pyvisfile.xdmf import XdmfWriter
    writer = XdmfWriter((grid,))
    filename = f'test_unstructured_simplex_{ambient_dim}d.xmf'
    writer.write_pretty(filename)
def _sanity_check(self, result: HttpResponse) -> None:
    """
    Use this for tests that are geared toward specific edge cases, but
    which still want the home page to load properly.
    """
    html = result.content.decode('utf-8')
    if ('Compose your message' not in html):
        raise AssertionError('Home page probably did not load.')
-7,585,729,800,057,153,000
Use this for tests that are geared toward specific edge cases, but which still want the home page to load properly.
zerver/tests/test_home.py
_sanity_check
rhencke/zulip
python
def _sanity_check(self, result: HttpResponse) -> None:
    html = result.content.decode('utf-8')
    if ('Compose your message' not in html):
        raise AssertionError('Home page probably did not load.')
def test_eisenstein_hu():
    """Test Eisenstein & Hu Linear matter power spectrum with
    and without wiggles using astropy default cosmology"""
    cosmology = default_cosmology.get()
    A_s = 2.1982e-09
    n_s = 0.969453
    kwmap = 0.02

    scalar_input = 1
    scalar_output_w = power_spectrum(scalar_input, A_s, n_s, cosmology,
                                     kwmap, wiggle=True)
    scalar_output_nw = power_spectrum(scalar_input, A_s, n_s, cosmology,
                                      kwmap, wiggle=False)
    assert np.isscalar(scalar_output_w)
    assert np.isscalar(scalar_output_nw)

    array_shape = (10,)
    array_input = np.random.uniform(size=array_shape)
    array_output_w = power_spectrum(array_input, A_s, n_s, cosmology,
                                    kwmap, wiggle=True)
    array_output_nw = power_spectrum(array_input, A_s, n_s, cosmology,
                                     kwmap, wiggle=False)
    assert (array_output_w.shape == array_shape)
    assert (array_output_nw.shape == array_shape)

    wavenumber = np.logspace((- 3), 1, num=5, base=10.0)
    pk_eisensteinhu_w = power_spectrum(wavenumber, A_s, n_s, cosmology,
                                       kwmap, wiggle=True)
    pk_eisensteinhu_nw = power_spectrum(wavenumber, A_s, n_s, cosmology,
                                        kwmap, wiggle=False)
    pk_cosmosis_w = np.array([6474.60158, 37161.0099, 9657.02614,
                              114.604456, 0.391399918])
    pk_cosmosis_nw = np.array([6472.186, 37733.0704, 10006.2077,
                               113.08298, 0.383094714])
    assert np.allclose(pk_eisensteinhu_w, pk_cosmosis_w)
    assert np.allclose(pk_eisensteinhu_nw, pk_cosmosis_nw)

    negative_wavenumber_scalar = 0
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology,
                       kwmap, wiggle=True)
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology,
                       kwmap, wiggle=False)
    negative_wavenumber_array = [0, 1, (- 2), 3]
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology,
                       kwmap, wiggle=True)
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology,
                       kwmap, wiggle=False)
6,900,489,699,790,302,000
Test Eisenstein & Hu Linear matter power spectrum with and without wiggles using astropy default cosmology
skypy/linear/tests/test_eisenstein_hu.py
test_eisenstein_hu
Lucia-Fonseca/skypy
python
def test_eisenstein_hu():
    cosmology = default_cosmology.get()
    A_s = 2.1982e-09
    n_s = 0.969453
    kwmap = 0.02

    scalar_input = 1
    scalar_output_w = power_spectrum(scalar_input, A_s, n_s, cosmology,
                                     kwmap, wiggle=True)
    scalar_output_nw = power_spectrum(scalar_input, A_s, n_s, cosmology,
                                      kwmap, wiggle=False)
    assert np.isscalar(scalar_output_w)
    assert np.isscalar(scalar_output_nw)

    array_shape = (10,)
    array_input = np.random.uniform(size=array_shape)
    array_output_w = power_spectrum(array_input, A_s, n_s, cosmology,
                                    kwmap, wiggle=True)
    array_output_nw = power_spectrum(array_input, A_s, n_s, cosmology,
                                     kwmap, wiggle=False)
    assert (array_output_w.shape == array_shape)
    assert (array_output_nw.shape == array_shape)

    wavenumber = np.logspace((- 3), 1, num=5, base=10.0)
    pk_eisensteinhu_w = power_spectrum(wavenumber, A_s, n_s, cosmology,
                                       kwmap, wiggle=True)
    pk_eisensteinhu_nw = power_spectrum(wavenumber, A_s, n_s, cosmology,
                                        kwmap, wiggle=False)
    pk_cosmosis_w = np.array([6474.60158, 37161.0099, 9657.02614,
                              114.604456, 0.391399918])
    pk_cosmosis_nw = np.array([6472.186, 37733.0704, 10006.2077,
                               113.08298, 0.383094714])
    assert np.allclose(pk_eisensteinhu_w, pk_cosmosis_w)
    assert np.allclose(pk_eisensteinhu_nw, pk_cosmosis_nw)

    negative_wavenumber_scalar = 0
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology,
                       kwmap, wiggle=True)
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology,
                       kwmap, wiggle=False)
    negative_wavenumber_array = [0, 1, (- 2), 3]
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology,
                       kwmap, wiggle=True)
    with pytest.raises(ValueError):
        power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology,
                       kwmap, wiggle=False)
def set_logging_level(verbosity, logger=None, stderr_output=False):
    """Set up logging for the CLI.

    We either set up global logging based on the verbosity or, if `logger`
    is specified, we only limit to a single sqlfluff logger. Verbosity is
    applied in the same way.

    Implementation: If `logger` is not specified, the handler is attached
    to the `sqlfluff` logger. If it is specified then it attaches to the
    logger in question. In addition, if `logger` is specified, then that
    logger will also not propagate.
    """
    fluff_logger = logging.getLogger('sqlfluff')
    fluff_logger.propagate = False
    colorama.init()
    handler = logging.StreamHandler(
        stream=(sys.stderr if stderr_output else sys.stdout))
    handler.setFormatter(logging.Formatter('\x1b[0m%(levelname)-10s %(message)s'))
    handler.addFilter(RedWarningsFilter())
    if logger:
        focus_logger = logging.getLogger('sqlfluff.{0}'.format(logger))
        focus_logger.addHandler(handler)
    else:
        fluff_logger.addHandler(handler)
    parser_logger = logging.getLogger('sqlfluff.parser')
    if (verbosity < 3):
        fluff_logger.setLevel(logging.WARNING)
        parser_logger.setLevel(logging.NOTSET)
    elif (verbosity == 3):
        fluff_logger.setLevel(logging.INFO)
        parser_logger.setLevel(logging.WARNING)
    elif (verbosity == 4):
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.INFO)
    elif (verbosity > 4):
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.DEBUG)
-2,421,650,648,819,382,000
Set up logging for the CLI.

We either set up global logging based on the verbosity or, if `logger` is
specified, we only limit to a single sqlfluff logger. Verbosity is applied
in the same way.

Implementation: If `logger` is not specified, the handler is attached to
the `sqlfluff` logger. If it is specified then it attaches to the logger
in question. In addition, if `logger` is specified, then that logger will
also not propagate.
src/sqlfluff/cli/commands.py
set_logging_level
tmastny/sqlfluff
python
def set_logging_level(verbosity, logger=None, stderr_output=False):
    fluff_logger = logging.getLogger('sqlfluff')
    fluff_logger.propagate = False
    colorama.init()
    handler = logging.StreamHandler(
        stream=(sys.stderr if stderr_output else sys.stdout))
    handler.setFormatter(logging.Formatter('\x1b[0m%(levelname)-10s %(message)s'))
    handler.addFilter(RedWarningsFilter())
    if logger:
        focus_logger = logging.getLogger('sqlfluff.{0}'.format(logger))
        focus_logger.addHandler(handler)
    else:
        fluff_logger.addHandler(handler)
    parser_logger = logging.getLogger('sqlfluff.parser')
    if (verbosity < 3):
        fluff_logger.setLevel(logging.WARNING)
        parser_logger.setLevel(logging.NOTSET)
    elif (verbosity == 3):
        fluff_logger.setLevel(logging.INFO)
        parser_logger.setLevel(logging.WARNING)
    elif (verbosity == 4):
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.INFO)
    elif (verbosity > 4):
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.DEBUG)
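The verbosity-to-level mapping above, summarized, plus two illustrative calls; 'rules' is shown only as an example sub-logger name.

# verbosity < 3  -> sqlfluff: WARNING, sqlfluff.parser: NOTSET
# verbosity == 3 -> sqlfluff: INFO,    sqlfluff.parser: WARNING
# verbosity == 4 -> sqlfluff: DEBUG,   sqlfluff.parser: INFO
# verbosity > 4  -> sqlfluff: DEBUG,   sqlfluff.parser: DEBUG
set_logging_level(verbosity=3)                  # global sqlfluff logging at INFO
set_logging_level(verbosity=5, logger='rules')  # only sqlfluff.rules, no propagation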
def common_options(f):
    """Add common options to commands via a decorator.

    These are applied to all of the cli commands.
    """
    f = click.version_option()(f)
    f = click.option('-v', '--verbose', count=True,
                     help='Verbosity, how detailed should the output be. '
                          'This is *stackable*, so `-vv` is more verbose than '
                          '`-v`. For the most verbose option try `-vvvv` or '
                          '`-vvvvv`.')(f)
    f = click.option('-n', '--nocolor', is_flag=True,
                     help='No color - if this is set then the output will be '
                          'without ANSI color codes.')(f)
    return f
5,106,378,517,505,907,000
Add common options to commands via a decorator. These are applied to all of the cli commands.
src/sqlfluff/cli/commands.py
common_options
tmastny/sqlfluff
python
def common_options(f):
    f = click.version_option()(f)
    f = click.option('-v', '--verbose', count=True,
                     help='Verbosity, how detailed should the output be. '
                          'This is *stackable*, so `-vv` is more verbose than '
                          '`-v`. For the most verbose option try `-vvvv` or '
                          '`-vvvvv`.')(f)
    f = click.option('-n', '--nocolor', is_flag=True,
                     help='No color - if this is set then the output will be '
                          'without ANSI color codes.')(f)
    return f
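A sketch of how a command would consume these shared options. The command name and body are hypothetical; the option names (-v/--verbose as a count, -n/--nocolor as a flag) are taken from the decorator above.

import click

@click.command()
@common_options
def lint(verbose, nocolor):
    # hypothetical command body; click passes the declared options as arguments
    set_logging_level(verbosity=verbose)
    click.echo('nocolor is {0}'.format(nocolor))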